* [PATCH 01/24] drm/i915: Hide unshrinkable context objects from the shrinker
From: Chris Wilson @ 2019-07-15  8:09 UTC
  To: intel-gfx

The shrinker cannot touch objects used by the contexts (logical state
and ring). Currently we mark those as "pin_global" to let the shrinker
skip over them; however, if we remove them from the shrinker lists
entirely, we don't even have to include them in our shrink accounting.

By keeping the unshrinkable objects in our shrinker tracking, we report
a large number of objects available to be shrunk, and leave the shrinker
deeply unsatisfied when we fail to reclaim those. The shrinker will
persist in trying to reclaim the unavailable objects, forcing the system
into a livelock (not even hitting the dreaded oomkiller).
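
As an aside, the bookkeeping boils down to: an object counts towards
shrink_count/shrink_memory only while it sits on a shrink list, and
emptying its mm.link hides it from the shrinker altogether. A minimal
userspace sketch of that idea (names are illustrative, not the driver's):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-ins for the i915->mm bookkeeping; names are illustrative only. */
struct obj {
	struct obj *prev, *next; /* shrink list link; self-linked when off it */
	size_t size;
};

static struct obj shrink_list = { &shrink_list, &shrink_list, 0 };
static unsigned long shrink_count;
static unsigned long long shrink_memory;

static bool on_list(const struct obj *o)
{
	return o->next != o;
}

static void make_shrinkable(struct obj *o)
{
	if (on_list(o))
		return;
	o->next = &shrink_list;
	o->prev = shrink_list.prev;
	shrink_list.prev->next = o;
	shrink_list.prev = o;
	shrink_count++;
	shrink_memory += o->size;
}

static void make_unshrinkable(struct obj *o)
{
	if (!on_list(o))
		return;			/* already hidden from the shrinker */
	o->prev->next = o->next;
	o->next->prev = o->prev;
	o->next = o->prev = o;		/* self-linked == not tracked */
	shrink_count--;
	shrink_memory -= o->size;
}

int main(void)
{
	struct obj ring = { &ring, &ring, 4096 };

	make_shrinkable(&ring);		/* object acquires backing pages */
	make_unshrinkable(&ring);	/* pinned for a context: stop reporting it */
	printf("%lu objects, %llu bytes reported to the shrinker\n",
	       shrink_count, shrink_memory);
	return 0;
}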

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gem/i915_gem_object.c   | 11 ++---
 drivers/gpu/drm/i915/gem/i915_gem_object.h   |  4 ++
 drivers/gpu/drm/i915/gem/i915_gem_pages.c    | 13 +-----
 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 42 ++++++++++++++++++++
 drivers/gpu/drm/i915/gt/intel_context.c      |  4 +-
 drivers/gpu/drm/i915/gt/intel_ringbuffer.c   | 17 ++++----
 drivers/gpu/drm/i915/i915_debugfs.c          |  3 +-
 drivers/gpu/drm/i915/i915_vma.c              | 15 +++++++
 drivers/gpu/drm/i915/i915_vma.h              |  4 ++
 9 files changed, 82 insertions(+), 31 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index d5197a2a106f..4ea97fca9c35 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -63,6 +63,8 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 	spin_lock_init(&obj->vma.lock);
 	INIT_LIST_HEAD(&obj->vma.list);
 
+	INIT_LIST_HEAD(&obj->mm.link);
+
 	INIT_LIST_HEAD(&obj->lut_list);
 	INIT_LIST_HEAD(&obj->batch_pool_link);
 
@@ -273,14 +275,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 	 * or else we may oom whilst there are plenty of deferred
 	 * freed objects.
 	 */
-	if (i915_gem_object_has_pages(obj) &&
-	    i915_gem_object_is_shrinkable(obj)) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&i915->mm.obj_lock, flags);
-		list_del_init(&obj->mm.link);
-		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
-	}
+	i915_gem_object_make_unshrinkable(obj);
 
 	/*
 	 * Since we require blocking on struct_mutex to unbind the freed
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 67aea07ea019..3714cf234d64 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -394,6 +394,10 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 				     unsigned int flags);
 void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
 
+void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
+void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
+void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);
+
 static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
 {
 	if (obj->cache_dirty)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index b36ad269f4ea..92ad3cc220e3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -153,24 +153,13 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
 struct sg_table *
 __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct sg_table *pages;
 
 	pages = fetch_and_zero(&obj->mm.pages);
 	if (IS_ERR_OR_NULL(pages))
 		return pages;
 
-	if (i915_gem_object_is_shrinkable(obj)) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&i915->mm.obj_lock, flags);
-
-		list_del(&obj->mm.link);
-		i915->mm.shrink_count--;
-		i915->mm.shrink_memory -= obj->base.size;
-
-		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
-	}
+	i915_gem_object_make_unshrinkable(obj);
 
 	if (obj->mm.mapping) {
 		void *ptr;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 3f4c6bdcc3c3..eb9f56f8e9b5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -530,3 +530,45 @@ void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
 	if (unlock)
 		mutex_release(&i915->drm.struct_mutex.dep_map, 0, _RET_IP_);
 }
+
+void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj)
+{
+	if (!list_empty(&obj->mm.link)) {
+		struct drm_i915_private *i915 = to_i915(obj->base.dev);
+		unsigned long flags;
+
+		spin_lock_irqsave(&i915->mm.obj_lock, flags);
+		list_del_init(&obj->mm.link);
+		i915->mm.shrink_count--;
+		i915->mm.shrink_memory -= obj->base.size;
+		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+	}
+}
+
+void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj)
+{
+	if (i915_gem_object_is_shrinkable(obj)) {
+		struct drm_i915_private *i915 = to_i915(obj->base.dev);
+		unsigned long flags;
+
+		spin_lock_irqsave(&i915->mm.obj_lock, flags);
+		list_add_tail(&obj->mm.link, &i915->mm.shrink_list);
+		i915->mm.shrink_count++;
+		i915->mm.shrink_memory += obj->base.size;
+		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+	}
+}
+
+void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj)
+{
+	if (i915_gem_object_is_shrinkable(obj)) {
+		struct drm_i915_private *i915 = to_i915(obj->base.dev);
+		unsigned long flags;
+
+		spin_lock_irqsave(&i915->mm.obj_lock, flags);
+		list_add_tail(&obj->mm.link, &i915->mm.purge_list);
+		i915->mm.shrink_count++;
+		i915->mm.shrink_memory += obj->base.size;
+		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+	}
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 1110fc8f657a..f012ec898aba 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -118,7 +118,7 @@ static int __context_pin_state(struct i915_vma *vma)
 	 * And mark it as a globally pinned object to let the shrinker know
 	 * it cannot reclaim the object until we release it.
 	 */
-	vma->obj->pin_global++;
+	i915_vma_make_unshrinkable(vma);
 	vma->obj->mm.dirty = true;
 
 	return 0;
@@ -126,8 +126,8 @@ static int __context_pin_state(struct i915_vma *vma)
 
 static void __context_unpin_state(struct i915_vma *vma)
 {
-	vma->obj->pin_global--;
 	__i915_vma_unpin(vma);
+	i915_vma_make_shrinkable(vma);
 }
 
 static void __intel_context_retire(struct i915_active *active)
diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
index f1e571fa2e6d..f39c74740d8d 100644
--- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
@@ -1200,7 +1200,7 @@ int intel_ring_pin(struct intel_ring *ring)
 		goto err_ring;
 	}
 
-	vma->obj->pin_global++;
+	i915_vma_make_unshrinkable(vma);
 
 	GEM_BUG_ON(ring->vaddr);
 	ring->vaddr = addr;
@@ -1229,6 +1229,8 @@ void intel_ring_reset(struct intel_ring *ring, u32 tail)
 
 void intel_ring_unpin(struct intel_ring *ring)
 {
+	struct i915_vma *vma = ring->vma;
+
 	if (!atomic_dec_and_test(&ring->pin_count))
 		return;
 
@@ -1237,18 +1239,17 @@ void intel_ring_unpin(struct intel_ring *ring)
 	/* Discard any unused bytes beyond that submitted to hw. */
 	intel_ring_reset(ring, ring->tail);
 
-	GEM_BUG_ON(!ring->vma);
-	i915_vma_unset_ggtt_write(ring->vma);
-	if (i915_vma_is_map_and_fenceable(ring->vma))
-		i915_vma_unpin_iomap(ring->vma);
+	i915_vma_unset_ggtt_write(vma);
+	if (i915_vma_is_map_and_fenceable(vma))
+		i915_vma_unpin_iomap(vma);
 	else
-		i915_gem_object_unpin_map(ring->vma->obj);
+		i915_gem_object_unpin_map(vma->obj);
 
 	GEM_BUG_ON(!ring->vaddr);
 	ring->vaddr = NULL;
 
-	ring->vma->obj->pin_global--;
-	i915_vma_unpin(ring->vma);
+	i915_vma_unpin(vma);
+	i915_vma_make_purgeable(vma);
 
 	intel_timeline_unpin(ring->timeline);
 }
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 0ac9b8d5e8b9..8bdf1d100627 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -363,8 +363,9 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
 	struct drm_i915_private *i915 = node_to_i915(m->private);
 	int ret;
 
-	seq_printf(m, "%u shrinkable objects, %llu bytes\n",
+	seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n",
 		   i915->mm.shrink_count,
+		   atomic_read(&i915->mm.free_count),
 		   i915->mm.shrink_memory);
 
 	seq_putc(m, '\n');
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index ee73baf29415..1d003ec9da19 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -1030,6 +1030,21 @@ int i915_vma_unbind(struct i915_vma *vma)
 	return 0;
 }
 
+void i915_vma_make_unshrinkable(struct i915_vma *vma)
+{
+	i915_gem_object_make_unshrinkable(vma->obj);
+}
+
+void i915_vma_make_shrinkable(struct i915_vma *vma)
+{
+	i915_gem_object_make_shrinkable(vma->obj);
+}
+
+void i915_vma_make_purgeable(struct i915_vma *vma)
+{
+	i915_gem_object_make_purgeable(vma->obj);
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/i915_vma.c"
 #endif
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 4b769db649bf..a24bd6787ef7 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -459,4 +459,8 @@ void i915_vma_parked(struct drm_i915_private *i915);
 struct i915_vma *i915_vma_alloc(void);
 void i915_vma_free(struct i915_vma *vma);
 
+void i915_vma_make_unshrinkable(struct i915_vma *vma);
+void i915_vma_make_shrinkable(struct i915_vma *vma);
+void i915_vma_make_purgeable(struct i915_vma *vma);
+
 #endif
-- 
2.22.0

* [PATCH 02/24] drm/i915/gt: Move the [class][inst] lookup for engines onto the GT
From: Chris Wilson @ 2019-07-15  8:09 UTC
  To: intel-gfx

To maintain a fast lookup from a GT-centric irq handler, we want the
engine lookup tables on the intel_gt. To avoid having multiple copies of
the same multi-dimensional lookup table, move the generic user engine
lookup into an rbtree (for fast and flexible indexing).
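
The tree is keyed lexicographically on the (uabi_class, instance) pair. A
small userspace sketch of that ordering, using a sorted array and bsearch()
purely to illustrate the comparison the rbtree walk performs (the types here
are stand-ins, not the driver's):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for intel_engine_cs: only the uabi key matters here. */
struct engine {
	uint8_t uabi_class;
	uint8_t instance;
	const char *name;
};

/* Lexicographic (class, instance) ordering - what the rbtree walk implements. */
static int cmp_engine(const void *a, const void *b)
{
	const struct engine *x = a, *y = b;

	if (x->uabi_class != y->uabi_class)
		return x->uabi_class < y->uabi_class ? -1 : 1;
	if (x->instance != y->instance)
		return x->instance < y->instance ? -1 : 1;
	return 0;
}

int main(void)
{
	struct engine engines[] = {
		{ 0, 0, "rcs0" }, { 1, 0, "bcs0" },
		{ 2, 0, "vcs0" }, { 2, 1, "vcs1" },
	};
	struct engine key = { 2, 1, NULL }, *found;

	qsort(engines, 4, sizeof(*engines), cmp_engine);
	found = bsearch(&key, engines, 4, sizeof(*engines), cmp_engine);
	printf("class:instance 2:1 -> %s\n", found ? found->name : "not found");
	return 0;
}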

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
---
 drivers/gpu/drm/i915/Makefile                |  1 +
 drivers/gpu/drm/i915/gem/i915_gem_context.c  |  1 +
 drivers/gpu/drm/i915/gt/intel_engine.h       |  3 -
 drivers/gpu/drm/i915/gt/intel_engine_cs.c    | 52 +++++-----------
 drivers/gpu/drm/i915/gt/intel_engine_types.h |  3 +
 drivers/gpu/drm/i915/gt/intel_engine_user.c  | 65 ++++++++++++++++++++
 drivers/gpu/drm/i915/gt/intel_engine_user.h  | 20 ++++++
 drivers/gpu/drm/i915/gt/intel_gt_types.h     |  4 ++
 drivers/gpu/drm/i915/gt/selftest_lrc.c       | 15 +++--
 drivers/gpu/drm/i915/i915_drv.h              |  7 ++-
 drivers/gpu/drm/i915/i915_irq.c              |  2 +-
 drivers/gpu/drm/i915/i915_pmu.c              |  1 +
 12 files changed, 124 insertions(+), 50 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/gt/intel_engine_user.c
 create mode 100644 drivers/gpu/drm/i915/gt/intel_engine_user.h

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 524516251a40..fafc3763dc2d 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -74,6 +74,7 @@ gt-y += \
 	gt/intel_context.o \
 	gt/intel_engine_cs.o \
 	gt/intel_engine_pm.o \
+	gt/intel_engine_user.o \
 	gt/intel_gt.o \
 	gt/intel_gt_pm.o \
 	gt/intel_hangcheck.o \
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index c5f8bfa3f7b0..6b5644d5a99b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -70,6 +70,7 @@
 #include <drm/i915_drm.h>
 
 #include "gt/intel_lrc_reg.h"
+#include "gt/intel_engine_user.h"
 
 #include "i915_gem_context.h"
 #include "i915_globals.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index db5c73ce86ee..30856383e4c5 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -432,9 +432,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 		       struct drm_printer *m,
 		       const char *header, ...);
 
-struct intel_engine_cs *
-intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance);
-
 static inline void intel_engine_context_in(struct intel_engine_cs *engine)
 {
 	unsigned long flags;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 022e00eb79ad..75099500d1ed 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -32,6 +32,7 @@
 
 #include "intel_engine.h"
 #include "intel_engine_pm.h"
+#include "intel_engine_user.h"
 #include "intel_context.h"
 #include "intel_lrc.h"
 #include "intel_reset.h"
@@ -285,9 +286,7 @@ static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
 	intel_engine_set_hwsp_writemask(engine, ~0u);
 }
 
-static int
-intel_engine_setup(struct drm_i915_private *dev_priv,
-		   enum intel_engine_id id)
+static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
 {
 	const struct engine_info *info = &intel_engines[id];
 	struct intel_engine_cs *engine;
@@ -303,10 +302,9 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
 	if (GEM_DEBUG_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
 		return -EINVAL;
 
-	if (GEM_DEBUG_WARN_ON(dev_priv->engine_class[info->class][info->instance]))
+	if (GEM_DEBUG_WARN_ON(gt->engine_class[info->class][info->instance]))
 		return -EINVAL;
 
-	GEM_BUG_ON(dev_priv->engine[id]);
 	engine = kzalloc(sizeof(*engine), GFP_KERNEL);
 	if (!engine)
 		return -ENOMEM;
@@ -315,12 +313,12 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
 
 	engine->id = id;
 	engine->mask = BIT(id);
-	engine->i915 = dev_priv;
-	engine->gt = &dev_priv->gt;
-	engine->uncore = &dev_priv->uncore;
+	engine->i915 = gt->i915;
+	engine->gt = gt;
+	engine->uncore = gt->uncore;
 	__sprint_engine_name(engine->name, info);
 	engine->hw_id = engine->guc_id = info->hw_id;
-	engine->mmio_base = __engine_mmio_base(dev_priv, info->mmio_bases);
+	engine->mmio_base = __engine_mmio_base(gt->i915, info->mmio_bases);
 	engine->class = info->class;
 	engine->instance = info->instance;
 
@@ -332,12 +330,12 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
 
 	engine->uabi_class = intel_engine_classes[info->class].uabi_class;
 
-	engine->context_size = intel_engine_context_size(dev_priv,
+	engine->context_size = intel_engine_context_size(gt->i915,
 							 engine->class);
 	if (WARN_ON(engine->context_size > BIT(20)))
 		engine->context_size = 0;
 	if (engine->context_size)
-		DRIVER_CAPS(dev_priv)->has_logical_contexts = true;
+		DRIVER_CAPS(gt->i915)->has_logical_contexts = true;
 
 	/* Nothing to do here, execute in order of dependencies */
 	engine->schedule = NULL;
@@ -349,8 +347,11 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
 	/* Scrub mmio state on takeover */
 	intel_engine_sanitize_mmio(engine);
 
-	dev_priv->engine_class[info->class][info->instance] = engine;
-	dev_priv->engine[id] = engine;
+	engine->gt->engine_class[info->class][info->instance] = engine;
+
+	intel_engine_add_user(engine);
+	gt->i915->engine[id] = engine;
+
 	return 0;
 }
 
@@ -433,7 +434,7 @@ int intel_engines_init_mmio(struct drm_i915_private *i915)
 		if (!HAS_ENGINE(i915, i))
 			continue;
 
-		err = intel_engine_setup(i915, i);
+		err = intel_engine_setup(&i915->gt, i);
 		if (err)
 			goto cleanup;
 
@@ -1534,29 +1535,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 	intel_engine_print_breadcrumbs(engine, m);
 }
 
-static u8 user_class_map[] = {
-	[I915_ENGINE_CLASS_RENDER] = RENDER_CLASS,
-	[I915_ENGINE_CLASS_COPY] = COPY_ENGINE_CLASS,
-	[I915_ENGINE_CLASS_VIDEO] = VIDEO_DECODE_CLASS,
-	[I915_ENGINE_CLASS_VIDEO_ENHANCE] = VIDEO_ENHANCEMENT_CLASS,
-};
-
-struct intel_engine_cs *
-intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
-{
-	if (class >= ARRAY_SIZE(user_class_map))
-		return NULL;
-
-	class = user_class_map[class];
-
-	GEM_BUG_ON(class > MAX_ENGINE_CLASS);
-
-	if (instance > MAX_ENGINE_INSTANCE)
-		return NULL;
-
-	return i915->engine_class[class][instance];
-}
-
 /**
  * intel_enable_engine_stats() - Enable engine busy tracking on engine
  * @engine: engine to enable stats collection
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 8be63019d707..6822fbb44d2d 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -12,6 +12,7 @@
 #include <linux/kref.h>
 #include <linux/list.h>
 #include <linux/llist.h>
+#include <linux/rbtree.h>
 #include <linux/timer.h>
 #include <linux/types.h>
 
@@ -276,6 +277,8 @@ struct intel_engine_cs {
 
 	u32 uabi_capabilities;
 
+	struct rb_node uabi_node;
+
 	struct intel_sseu sseu;
 
 	struct intel_ring *buffer;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.c b/drivers/gpu/drm/i915/gt/intel_engine_user.c
new file mode 100644
index 000000000000..1b6796938c6f
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_user.c
@@ -0,0 +1,65 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_engine.h"
+#include "intel_engine_user.h"
+
+struct intel_engine_cs *
+intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
+{
+	struct rb_node *p = i915->uabi_engines.rb_node;
+
+	while (p) {
+		struct intel_engine_cs *it =
+			rb_entry(p, typeof(*it), uabi_node);
+
+		if (class < it->uabi_class)
+			p = p->rb_left;
+		else if (class > it->uabi_class || instance > it->instance)
+			p = p->rb_right;
+		else if (instance < it->instance)
+			p = p->rb_left;
+		else
+			return it;
+	}
+
+	return NULL;
+}
+
+void intel_engine_add_user(struct intel_engine_cs *engine)
+{
+	struct rb_root *root = &engine->i915->uabi_engines;
+	struct rb_node **p, *parent;
+
+	parent = NULL;
+	p = &root->rb_node;
+	while (*p) {
+		struct intel_engine_cs *it;
+
+		parent = *p;
+		it = rb_entry(parent, typeof(*it), uabi_node);
+
+		/* All user class:instance identifiers must be unique */
+		GEM_BUG_ON(it->uabi_class == engine->uabi_class &&
+			   it->instance == engine->instance);
+
+		if (engine->uabi_class < it->uabi_class)
+			p = &parent->rb_left;
+		else if (engine->uabi_class > it->uabi_class ||
+			 engine->instance > it->instance)
+			p = &parent->rb_right;
+		else
+			p = &parent->rb_left;
+	}
+
+	rb_link_node(&engine->uabi_node, parent, p);
+	rb_insert_color(&engine->uabi_node, root);
+
+	GEM_BUG_ON(intel_engine_lookup_user(engine->i915,
+					    engine->uabi_class,
+					    engine->instance) != engine);
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.h b/drivers/gpu/drm/i915/gt/intel_engine_user.h
new file mode 100644
index 000000000000..091dc8a4a39f
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_user.h
@@ -0,0 +1,20 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_ENGINE_USER_H
+#define INTEL_ENGINE_USER_H
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct intel_engine_cs;
+
+struct intel_engine_cs *
+intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance);
+
+void intel_engine_add_user(struct intel_engine_cs *engine);
+
+#endif /* INTEL_ENGINE_USER_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index 34d4a868e4f1..5fd11e361d03 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -21,6 +21,7 @@
 
 struct drm_i915_private;
 struct i915_ggtt;
+struct intel_engine_cs;
 struct intel_uncore;
 
 struct intel_hangcheck {
@@ -76,6 +77,9 @@ struct intel_gt {
 	u32 pm_ier;
 
 	u32 pm_guc_events;
+
+	struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
+					    [MAX_ENGINE_INSTANCE + 1];
 };
 
 enum intel_gt_scratch_field {
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 678e9b2edf8d..bde0164551b1 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -1764,6 +1764,7 @@ static int live_virtual_engine(void *arg)
 	struct drm_i915_private *i915 = arg;
 	struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
 	struct intel_engine_cs *engine;
+	struct intel_gt *gt = &i915->gt;
 	enum intel_engine_id id;
 	unsigned int class, inst;
 	int err = -ENODEV;
@@ -1787,10 +1788,10 @@ static int live_virtual_engine(void *arg)
 
 		nsibling = 0;
 		for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
-			if (!i915->engine_class[class][inst])
+			if (!gt->engine_class[class][inst])
 				continue;
 
-			siblings[nsibling++] = i915->engine_class[class][inst];
+			siblings[nsibling++] = gt->engine_class[class][inst];
 		}
 		if (nsibling < 2)
 			continue;
@@ -1911,6 +1912,7 @@ static int live_virtual_mask(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
 	struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+	struct intel_gt *gt = &i915->gt;
 	unsigned int class, inst;
 	int err = 0;
 
@@ -1924,10 +1926,10 @@ static int live_virtual_mask(void *arg)
 
 		nsibling = 0;
 		for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
-			if (!i915->engine_class[class][inst])
+			if (!gt->engine_class[class][inst])
 				break;
 
-			siblings[nsibling++] = i915->engine_class[class][inst];
+			siblings[nsibling++] = gt->engine_class[class][inst];
 		}
 		if (nsibling < 2)
 			continue;
@@ -2088,6 +2090,7 @@ static int live_virtual_bond(void *arg)
 	};
 	struct drm_i915_private *i915 = arg;
 	struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+	struct intel_gt *gt = &i915->gt;
 	unsigned int class, inst;
 	int err = 0;
 
@@ -2102,11 +2105,11 @@ static int live_virtual_bond(void *arg)
 
 		nsibling = 0;
 		for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
-			if (!i915->engine_class[class][inst])
+			if (!gt->engine_class[class][inst])
 				break;
 
 			GEM_BUG_ON(nsibling == ARRAY_SIZE(siblings));
-			siblings[nsibling++] = i915->engine_class[class][inst];
+			siblings[nsibling++] = gt->engine_class[class][inst];
 		}
 		if (nsibling < 2)
 			continue;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 203cdba98550..86b0a16632bc 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1373,11 +1373,12 @@ struct drm_i915_private {
 	wait_queue_head_t gmbus_wait_queue;
 
 	struct pci_dev *bridge_dev;
-	struct intel_engine_cs *engine[I915_NUM_ENGINES];
+
 	/* Context used internally to idle the GPU and setup initial state */
 	struct i915_gem_context *kernel_context;
-	struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
-					    [MAX_ENGINE_INSTANCE + 1];
+
+	struct intel_engine_cs *engine[I915_NUM_ENGINES];
+	struct rb_root uabi_engines;
 
 	struct resource mch_res;
 
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 91f8c81028c3..af1bb653579a 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -3106,7 +3106,7 @@ gen11_engine_irq_handler(struct intel_gt *gt, const u8 class,
 	struct intel_engine_cs *engine;
 
 	if (instance <= MAX_ENGINE_INSTANCE)
-		engine = gt->i915->engine_class[class][instance];
+		engine = gt->engine_class[class][instance];
 	else
 		engine = NULL;
 
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index eff86483bec0..81fbb262807d 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -8,6 +8,7 @@
 #include <linux/pm_runtime.h>
 
 #include "gt/intel_engine.h"
+#include "gt/intel_engine_user.h"
 
 #include "i915_drv.h"
 #include "i915_pmu.h"
-- 
2.22.0

* [PATCH 03/24] drm/i915/gtt: Recursive ppgtt alloc for gen8
From: Chris Wilson @ 2019-07-15  8:09 UTC
  To: intel-gfx

Refactor the separate allocation routines into a single recursive
function.
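
Roughly, the recursion walks one page-table level per call, allocating any
missing directory on the way down and only touching leaf entries at level 0.
A simplified standalone sketch of that shape, assuming 9 bits of index per
level as on gen8 (it omits the locking, scratch filling and unwind of the
real code):

#include <stdio.h>
#include <stdlib.h>

#define ENTRIES 512	/* 9 bits of index per level, as on gen8 */

struct node {
	struct node *entry[ENTRIES];
};

static unsigned int pd_index(unsigned long long addr, int lvl)
{
	return (addr >> (9 * lvl)) & (ENTRIES - 1);	/* addr in pte units */
}

/* Allocate everything needed to cover [*start, end) at level lvl. */
static int alloc_range(struct node *pd, unsigned long long *start,
		       unsigned long long end, int lvl)
{
	while (*start < end) {
		unsigned int idx = pd_index(*start, lvl);

		if (lvl) {
			/* first pte index beyond this directory entry */
			unsigned long long next =
				((*start >> (9 * lvl)) + 1) << (9 * lvl);

			if (!pd->entry[idx]) {
				pd->entry[idx] = calloc(1, sizeof(struct node));
				if (!pd->entry[idx])
					return -1;
			}
			if (alloc_range(pd->entry[idx], start,
					end < next ? end : next, lvl - 1))
				return -1;
		} else {
			pd->entry[idx] = (void *)1;	/* stand-in for a PTE write */
			(*start)++;
		}
	}
	return 0;
}

static struct node top;

int main(void)
{
	unsigned long long start = 0;

	/* three levels: top (lvl 2) -> directory (lvl 1) -> table (lvl 0) */
	return alloc_range(&top, &start, 1024, 2) ? 1 : 0;	/* leaks on exit */
}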

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 272 ++++++++++------------------
 1 file changed, 97 insertions(+), 175 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 1e3ccdf7fe1b..49de6d39488f 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1007,199 +1007,119 @@ static void gen8_ppgtt_clear(struct i915_address_space *vm,
 			   start, start + length, vm->top);
 }
 
-static void gen8_ppgtt_clear_pd(struct i915_address_space *vm,
-				struct i915_page_directory *pd,
-				u64 start, u64 length)
-{
-	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
-	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
-
-	start >>= GEN8_PTE_SHIFT;
-	length >>= GEN8_PTE_SHIFT;
-
-	__gen8_ppgtt_clear(vm, pd, start, start + length, 1);
-}
-
-static void gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
-				 struct i915_page_directory * const pdp,
-				 u64 start, u64 length)
-{
-	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
-	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
-
-	start >>= GEN8_PTE_SHIFT;
-	length >>= GEN8_PTE_SHIFT;
-
-	__gen8_ppgtt_clear(vm, pdp, start, start + length, 2);
-}
-
-static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
-			       struct i915_page_directory *pd,
-			       u64 start, u64 length)
+static int __gen8_ppgtt_alloc(struct i915_address_space * const vm,
+			      struct i915_page_directory * const pd,
+			      u64 * const start, u64 end, int lvl)
 {
-	struct i915_page_table *pt, *alloc = NULL;
-	u64 from = start;
-	unsigned int pde;
+	const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
+	struct i915_page_table *alloc = NULL;
+	unsigned int idx, len;
 	int ret = 0;
 
+	len = gen8_pd_range(*start, end, lvl--, &idx);
+	DBG("%s(%p):{lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d}\n",
+	    __func__, vm, lvl + 1, *start, end,
+	    idx, len, atomic_read(px_used(pd)));
+	GEM_BUG_ON(!len || (idx + len - 1) >> gen8_pd_shift(1));
+
 	spin_lock(&pd->lock);
-	gen8_for_each_pde(pt, pd, start, length, pde) {
-		const int count = gen8_pte_count(start, length);
+	GEM_BUG_ON(!atomic_read(px_used(pd))); /* Must be pinned! */
+	do {
+		struct i915_page_table *pt = pd->entry[idx];
 
 		if (!pt) {
 			spin_unlock(&pd->lock);
 
-			pt = fetch_and_zero(&alloc);
-			if (!pt)
-				pt = alloc_pt(vm);
-			if (IS_ERR(pt)) {
-				ret = PTR_ERR(pt);
-				goto unwind;
-			}
+			DBG("%s(%p):{ lvl:%d, idx:%d } allocating new tree\n",
+			    __func__, vm, lvl + 1, idx);
 
-			if (count < GEN8_PTES || intel_vgpu_active(vm->i915))
-				fill_px(pt, vm->scratch[0].encode);
+			pt = fetch_and_zero(&alloc);
+			if (lvl) {
+				if (!pt) {
+					pt = &alloc_pd(vm)->pt;
+					if (IS_ERR(pt)) {
+						ret = PTR_ERR(pt);
+						goto out;
+					}
+				}
 
-			spin_lock(&pd->lock);
-			if (!pd->entry[pde]) {
-				set_pd_entry(pd, pde, pt);
+				fill_px(pt, vm->scratch[lvl].encode);
 			} else {
-				alloc = pt;
-				pt = pd->entry[pde];
-			}
-		}
-
-		atomic_add(count, &pt->used);
-	}
-	spin_unlock(&pd->lock);
-	goto out;
-
-unwind:
-	gen8_ppgtt_clear_pd(vm, pd, from, start - from);
-out:
-	if (alloc)
-		free_px(vm, alloc);
-	return ret;
-}
-
-static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
-				struct i915_page_directory *pdp,
-				u64 start, u64 length)
-{
-	struct i915_page_directory *pd, *alloc = NULL;
-	u64 from = start;
-	unsigned int pdpe;
-	int ret = 0;
+				if (!pt) {
+					pt = alloc_pt(vm);
+					if (IS_ERR(pt)) {
+						ret = PTR_ERR(pt);
+						goto out;
+					}
+				}
 
-	spin_lock(&pdp->lock);
-	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
-		if (!pd) {
-			spin_unlock(&pdp->lock);
-
-			pd = fetch_and_zero(&alloc);
-			if (!pd)
-				pd = alloc_pd(vm);
-			if (IS_ERR(pd)) {
-				ret = PTR_ERR(pd);
-				goto unwind;
+				if (intel_vgpu_active(vm->i915) ||
+				    gen8_pt_count(*start, end) < I915_PDES)
+					fill_px(pt, vm->scratch[lvl].encode);
 			}
 
-			fill_px(pd, vm->scratch[1].encode);
+			spin_lock(&pd->lock);
+			if (likely(!pd->entry[idx]))
+				set_pd_entry(pd, idx, pt);
+			else
+				alloc = pt, pt = pd->entry[idx];
+		}
 
-			spin_lock(&pdp->lock);
-			if (!pdp->entry[pdpe]) {
-				set_pd_entry(pdp, pdpe, pd);
-			} else {
-				alloc = pd;
-				pd = pdp->entry[pdpe];
+		if (lvl) {
+			atomic_inc(&pt->used);
+			spin_unlock(&pd->lock);
+
+			ret = __gen8_ppgtt_alloc(vm, as_pd(pt),
+						 start, end, lvl);
+			if (unlikely(ret)) {
+				if (release_pd_entry(pd, idx, pt, scratch))
+					free_px(vm, pt);
+				goto out;
 			}
-		}
-		atomic_inc(px_used(pd));
-		spin_unlock(&pdp->lock);
 
-		ret = gen8_ppgtt_alloc_pd(vm, pd, start, length);
-		if (unlikely(ret))
-			goto unwind_pd;
+			spin_lock(&pd->lock);
+			atomic_dec(&pt->used);
+			GEM_BUG_ON(!atomic_read(&pt->used));
+		} else {
+			unsigned int count = gen8_pt_count(*start, end);
 
-		spin_lock(&pdp->lock);
-		atomic_dec(px_used(pd));
-	}
-	spin_unlock(&pdp->lock);
-	goto out;
+			DBG("%s(%p):{lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d} inserting pte\n",
+			    __func__, vm, lvl, *start, end,
+			    gen8_pd_index(*start, 0), count,
+			    atomic_read(&pt->used));
 
-unwind_pd:
-	if (release_pd_entry(pdp, pdpe, &pd->pt, &vm->scratch[2]))
-		free_px(vm, pd);
-unwind:
-	gen8_ppgtt_clear_pdp(vm, pdp, from, start - from);
+			atomic_add(count, &pt->used);
+			GEM_BUG_ON(atomic_read(&pt->used) > I915_PDES);
+			*start += count;
+		}
+	} while (idx++, --len);
+	spin_unlock(&pd->lock);
 out:
 	if (alloc)
 		free_px(vm, alloc);
 	return ret;
 }
 
-static int gen8_ppgtt_alloc_3lvl(struct i915_address_space *vm,
-				 u64 start, u64 length)
-{
-	return gen8_ppgtt_alloc_pdp(vm,
-				    i915_vm_to_ppgtt(vm)->pd, start, length);
-}
-
-static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
-				 u64 start, u64 length)
+static int gen8_ppgtt_alloc(struct i915_address_space *vm,
+			    u64 start, u64 length)
 {
-	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
-	struct i915_page_directory * const pml4 = ppgtt->pd;
-	struct i915_page_directory *pdp, *alloc = NULL;
 	u64 from = start;
-	int ret = 0;
-	u32 pml4e;
-
-	spin_lock(&pml4->lock);
-	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
-		if (!pdp) {
-			spin_unlock(&pml4->lock);
-
-			pdp = fetch_and_zero(&alloc);
-			if (!pdp)
-				pdp = alloc_pd(vm);
-			if (IS_ERR(pdp)) {
-				ret = PTR_ERR(pdp);
-				goto unwind;
-			}
-
-			fill_px(pdp, vm->scratch[2].encode);
+	int err;
 
-			spin_lock(&pml4->lock);
-			if (!pml4->entry[pml4e]) {
-				set_pd_entry(pml4, pml4e, pdp);
-			} else {
-				alloc = pdp;
-				pdp = pml4->entry[pml4e];
-			}
-		}
-		atomic_inc(px_used(pdp));
-		spin_unlock(&pml4->lock);
+	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
+	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
 
-		ret = gen8_ppgtt_alloc_pdp(vm, pdp, start, length);
-		if (unlikely(ret))
-			goto unwind_pdp;
+	start >>= GEN8_PTE_SHIFT;
+	length >>= GEN8_PTE_SHIFT;
+	GEM_BUG_ON(length == 0);
 
-		spin_lock(&pml4->lock);
-		atomic_dec(px_used(pdp));
-	}
-	spin_unlock(&pml4->lock);
-	goto out;
+	err = __gen8_ppgtt_alloc(vm, i915_vm_to_ppgtt(vm)->pd,
+				 &start, start + length, vm->top);
+	if (unlikely(err))
+		__gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
+				   from, start, vm->top);
 
-unwind_pdp:
-	if (release_pd_entry(pml4, pml4e, &pdp->pt, &vm->scratch[3]))
-		free_px(vm, pdp);
-unwind:
-	gen8_ppgtt_clear(vm, from, start - from);
-out:
-	if (alloc)
-		free_px(vm, alloc);
-	return ret;
+	return err;
 }
 
 static inline struct sgt_dma {
@@ -1496,19 +1416,22 @@ static int gen8_init_scratch(struct i915_address_space *vm)
 static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
 {
 	struct i915_address_space *vm = &ppgtt->vm;
-	struct i915_page_directory *pdp = ppgtt->pd;
-	struct i915_page_directory *pd;
-	u64 start = 0, length = ppgtt->vm.total;
-	unsigned int pdpe;
+	struct i915_page_directory *pd = ppgtt->pd;
+	unsigned int idx;
+
+	GEM_BUG_ON(vm->top != 2);
+	GEM_BUG_ON((vm->total >> __gen8_pte_shift(2)) != GEN8_3LVL_PDPES);
+
+	for (idx = 0; idx < GEN8_3LVL_PDPES; idx++) {
+		struct i915_page_directory *pde;
 
-	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
-		pd = alloc_pd(vm);
-		if (IS_ERR(pd))
-			return PTR_ERR(pd);
+		pde = alloc_pd(vm);
+		if (IS_ERR(pde))
+			return PTR_ERR(pde);
 
-		fill_px(pd, vm->scratch[1].encode);
-		set_pd_entry(pdp, pdpe, pd);
-		atomic_inc(px_used(pd)); /* keep pinned */
+		fill_px(pde, vm->scratch[1].encode);
+		set_pd_entry(pd, idx, pde);
+		atomic_inc(px_used(pde)); /* keep pinned */
 	}
 
 	return 0;
@@ -1597,7 +1520,6 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
 	}
 
 	if (i915_vm_is_4lvl(&ppgtt->vm)) {
-		ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_4lvl;
 		ppgtt->vm.insert_entries = gen8_ppgtt_insert_4lvl;
 	} else {
 		if (intel_vgpu_active(i915)) {
@@ -1606,10 +1528,10 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
 				goto err_free_pd;
 		}
 
-		ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_3lvl;
 		ppgtt->vm.insert_entries = gen8_ppgtt_insert_3lvl;
 	}
 
+	ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
 	ppgtt->vm.clear_range = gen8_ppgtt_clear;
 
 	if (intel_vgpu_active(i915))
-- 
2.22.0

* [PATCH 04/24] drm/i915/gtt: Tidy up ppgtt insertion for gen8
From: Chris Wilson @ 2019-07-15  8:09 UTC
  To: intel-gfx

Apply the new radix shift helpers to extract the multi-level indices
cleanly when inserting PTEs into the GTT tree.
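
The index extraction itself is a plain shift-and-mask: each level selects
9 bits of the address above the 12-bit page offset (PML4E 47:39, PDPE 38:30,
PDE 29:21, PTE 20:12). An illustrative standalone sketch, operating on full
byte addresses whereas the driver's helpers take addresses with the page
offset already shifted out:

#include <stdio.h>

/*
 * Each level selects 9 bits of the 48b address above the 12-bit page offset:
 * 47:39 PML4E | 38:30 PDPE | 29:21 PDE | 20:12 PTE | 11:0 offset.
 */
static unsigned int pte_shift(int lvl)
{
	return 12 + 9 * lvl;
}

static unsigned int pte_index(unsigned long long addr, int lvl)
{
	return (addr >> pte_shift(lvl)) & 0x1ff;
}

int main(void)
{
	unsigned long long addr = 0x0000004321fff000ULL;
	int lvl;

	/* lvl 0 = PTE, 1 = PDE, 2 = PDPE, 3 = PML4E */
	for (lvl = 0; lvl <= 3; lvl++)
		printf("lvl %d index = %u\n", lvl, pte_index(addr, lvl));
	return 0;
}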

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 115 +++++++++++-----------------
 drivers/gpu/drm/i915/i915_gem_gtt.h |  90 ++--------------------
 2 files changed, 48 insertions(+), 157 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 49de6d39488f..220aba5a94d2 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1131,47 +1131,28 @@ static inline struct sgt_dma {
 	return (struct sgt_dma) { sg, addr, addr + sg->length };
 }
 
-struct gen8_insert_pte {
-	u16 pml4e;
-	u16 pdpe;
-	u16 pde;
-	u16 pte;
-};
-
-static __always_inline struct gen8_insert_pte gen8_insert_pte(u64 start)
-{
-	return (struct gen8_insert_pte) {
-		 gen8_pml4e_index(start),
-		 gen8_pdpe_index(start),
-		 gen8_pde_index(start),
-		 gen8_pte_index(start),
-	};
-}
-
-static __always_inline bool
+static __always_inline u64
 gen8_ppgtt_insert_pte_entries(struct i915_ppgtt *ppgtt,
 			      struct i915_page_directory *pdp,
 			      struct sgt_dma *iter,
-			      struct gen8_insert_pte *idx,
+			      u64 idx,
 			      enum i915_cache_level cache_level,
 			      u32 flags)
 {
 	struct i915_page_directory *pd;
 	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
 	gen8_pte_t *vaddr;
-	bool ret;
 
-	GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
-	pd = i915_pd_entry(pdp, idx->pdpe);
-	vaddr = kmap_atomic_px(i915_pt_entry(pd, idx->pde));
+	pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
+	vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
 	do {
-		vaddr[idx->pte] = pte_encode | iter->dma;
+		vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;
 
 		iter->dma += I915_GTT_PAGE_SIZE;
 		if (iter->dma >= iter->max) {
 			iter->sg = __sg_next(iter->sg);
 			if (!iter->sg) {
-				ret = false;
+				idx = 0;
 				break;
 			}
 
@@ -1179,30 +1160,22 @@ gen8_ppgtt_insert_pte_entries(struct i915_ppgtt *ppgtt,
 			iter->max = iter->dma + iter->sg->length;
 		}
 
-		if (++idx->pte == GEN8_PTES) {
-			idx->pte = 0;
-
-			if (++idx->pde == I915_PDES) {
-				idx->pde = 0;
-
+		if (gen8_pd_index(++idx, 0) == 0) {
+			if (gen8_pd_index(idx, 1) == 0) {
 				/* Limited by sg length for 3lvl */
-				if (++idx->pdpe == GEN8_PML4ES_PER_PML4) {
-					idx->pdpe = 0;
-					ret = true;
+				if (gen8_pd_index(idx, 2) == 0)
 					break;
-				}
 
-				GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
-				pd = pdp->entry[idx->pdpe];
+				pd = pdp->entry[gen8_pd_index(idx, 2)];
 			}
 
 			kunmap_atomic(vaddr);
-			vaddr = kmap_atomic_px(i915_pt_entry(pd, idx->pde));
+			vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
 		}
 	} while (1);
 	kunmap_atomic(vaddr);
 
-	return ret;
+	return idx;
 }
 
 static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
@@ -1212,9 +1185,9 @@ static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
 {
 	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
 	struct sgt_dma iter = sgt_dma(vma);
-	struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
 
-	gen8_ppgtt_insert_pte_entries(ppgtt, ppgtt->pd, &iter, &idx,
+	gen8_ppgtt_insert_pte_entries(ppgtt, ppgtt->pd, &iter,
+				      vma->node.start >> GEN8_PTE_SHIFT,
 				      cache_level, flags);
 
 	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
@@ -1231,39 +1204,38 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
 	dma_addr_t rem = iter->sg->length;
 
 	do {
-		struct gen8_insert_pte idx = gen8_insert_pte(start);
 		struct i915_page_directory *pdp =
-			i915_pdp_entry(pml4, idx.pml4e);
-		struct i915_page_directory *pd = i915_pd_entry(pdp, idx.pdpe);
-		unsigned int page_size;
-		bool maybe_64K = false;
+			i915_pd_entry(pml4, __gen8_pte_index(start, 3));
+		struct i915_page_directory *pd =
+			i915_pd_entry(pdp, __gen8_pte_index(start, 2));
 		gen8_pte_t encode = pte_encode;
+		unsigned int maybe_64K = -1;
+		unsigned int page_size;
 		gen8_pte_t *vaddr;
-		u16 index, max;
+		u16 index;
 
 		if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
 		    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
-		    rem >= I915_GTT_PAGE_SIZE_2M && !idx.pte) {
-			index = idx.pde;
-			max = I915_PDES;
-			page_size = I915_GTT_PAGE_SIZE_2M;
-
+		    rem >= I915_GTT_PAGE_SIZE_2M &&
+		    !__gen8_pte_index(start, 0)) {
+			index = __gen8_pte_index(start, 1);
 			encode |= GEN8_PDE_PS_2M;
+			page_size = I915_GTT_PAGE_SIZE_2M;
 
 			vaddr = kmap_atomic_px(pd);
 		} else {
-			struct i915_page_table *pt = i915_pt_entry(pd, idx.pde);
+			struct i915_page_table *pt =
+				i915_pt_entry(pd, __gen8_pte_index(start, 1));
 
-			index = idx.pte;
-			max = GEN8_PTES;
+			index = __gen8_pte_index(start, 0);
 			page_size = I915_GTT_PAGE_SIZE;
 
 			if (!index &&
 			    vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
 			    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
 			    (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
-			     rem >= (max - index) * I915_GTT_PAGE_SIZE))
-				maybe_64K = true;
+			     rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))
+				maybe_64K = __gen8_pte_index(start, 1);
 
 			vaddr = kmap_atomic_px(pt);
 		}
@@ -1284,16 +1256,16 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
 				iter->dma = sg_dma_address(iter->sg);
 				iter->max = iter->dma + rem;
 
-				if (maybe_64K && index < max &&
+				if (maybe_64K != -1 && index < I915_PDES &&
 				    !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
 				      (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
-				       rem >= (max - index) * I915_GTT_PAGE_SIZE)))
-					maybe_64K = false;
+				       rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE)))
+					maybe_64K = -1;
 
 				if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
 					break;
 			}
-		} while (rem >= page_size && index < max);
+		} while (rem >= page_size && index < I915_PDES);
 
 		kunmap_atomic(vaddr);
 
@@ -1303,14 +1275,14 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
 		 * it and have reached the end of the sg table and we have
 		 * enough padding.
 		 */
-		if (maybe_64K &&
-		    (index == max ||
+		if (maybe_64K != -1 &&
+		    (index == I915_PDES ||
 		     (i915_vm_has_scratch_64K(vma->vm) &&
 		      !iter->sg && IS_ALIGNED(vma->node.start +
 					      vma->node.size,
 					      I915_GTT_PAGE_SIZE_2M)))) {
 			vaddr = kmap_atomic_px(pd);
-			vaddr[idx.pde] |= GEN8_PDE_IPS_64K;
+			vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
 			kunmap_atomic(vaddr);
 			page_size = I915_GTT_PAGE_SIZE_64K;
 
@@ -1327,8 +1299,7 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
 				u16 i;
 
 				encode = vma->vm->scratch[0].encode;
-				vaddr = kmap_atomic_px(i915_pt_entry(pd,
-								     idx.pde));
+				vaddr = kmap_atomic_px(i915_pt_entry(pd, maybe_64K));
 
 				for (i = 1; i < index; i += 16)
 					memset64(vaddr + i, encode, 15);
@@ -1354,13 +1325,13 @@ static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
 		gen8_ppgtt_insert_huge_entries(vma, pml4, &iter, cache_level,
 					       flags);
 	} else {
-		struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
+		u64 idx = vma->node.start >> GEN8_PTE_SHIFT;
 
-		while (gen8_ppgtt_insert_pte_entries(ppgtt,
-						     i915_pdp_entry(pml4, idx.pml4e++),
-						     &iter, &idx, cache_level,
-						     flags))
-			GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
+		while ((idx = gen8_ppgtt_insert_pte_entries(ppgtt,
+							    i915_pd_entry(pml4, gen8_pd_index(idx, 3)),
+							    &iter, idx, cache_level,
+							    flags)))
+			;
 
 		vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index de156634a889..cea59ef1a365 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -115,29 +115,18 @@ typedef u64 gen8_pte_t;
 #define HSW_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0x7f0))
 #define HSW_PTE_ADDR_ENCODE(addr)	HSW_GTT_ADDR_ENCODE(addr)
 
-/* GEN8 32b style address is defined as a 3 level page table:
+/*
+ * GEN8 32b style address is defined as a 3 level page table:
  * 31:30 | 29:21 | 20:12 |  11:0
  * PDPE  |  PDE  |  PTE  | offset
  * The difference as compared to normal x86 3 level page table is the PDPEs are
  * programmed via register.
- */
-#define GEN8_3LVL_PDPES			4
-#define GEN8_PDE_SHIFT			21
-#define GEN8_PDE_MASK			0x1ff
-#define GEN8_PTE_MASK			0x1ff
-#define GEN8_PTES			I915_PTES(sizeof(gen8_pte_t))
-
-/* GEN8 48b style address is defined as a 4 level page table:
+ *
+ * GEN8 48b style address is defined as a 4 level page table:
  * 47:39 | 38:30 | 29:21 | 20:12 |  11:0
  * PML4E | PDPE  |  PDE  |  PTE  | offset
  */
-#define GEN8_PML4ES_PER_PML4		512
-#define GEN8_PML4E_SHIFT		39
-#define GEN8_PML4E_MASK			(GEN8_PML4ES_PER_PML4 - 1)
-#define GEN8_PDPE_SHIFT			30
-/* NB: GEN8_PDPE_MASK is untrue for 32b platforms, but it has no impact on 32b page
- * tables */
-#define GEN8_PDPE_MASK			0x1ff
+#define GEN8_3LVL_PDPES			4
 
 #define PPAT_UNCACHED			(_PAGE_PWT | _PAGE_PCD)
 #define PPAT_CACHED_PDE			0 /* WB LLC */
@@ -521,15 +510,6 @@ static inline u32 gen6_pde_index(u32 addr)
 	return i915_pde_index(addr, GEN6_PDE_SHIFT);
 }
 
-static inline unsigned int
-i915_pdpes_per_pdp(const struct i915_address_space *vm)
-{
-	if (i915_vm_is_4lvl(vm))
-		return GEN8_PML4ES_PER_PML4;
-
-	return GEN8_3LVL_PDPES;
-}
-
 static inline struct i915_page_table *
 i915_pt_entry(const struct i915_page_directory * const pd,
 	      const unsigned short n)
@@ -544,66 +524,6 @@ i915_pd_entry(const struct i915_page_directory * const pdp,
 	return pdp->entry[n];
 }
 
-static inline struct i915_page_directory *
-i915_pdp_entry(const struct i915_page_directory * const pml4,
-	       const unsigned short n)
-{
-	return pml4->entry[n];
-}
-
-/* Equivalent to the gen6 version, For each pde iterates over every pde
- * between from start until start + length. On gen8+ it simply iterates
- * over every page directory entry in a page directory.
- */
-#define gen8_for_each_pde(pt, pd, start, length, iter)			\
-	for (iter = gen8_pde_index(start);				\
-	     length > 0 && iter < I915_PDES &&				\
-		     (pt = i915_pt_entry(pd, iter), true);		\
-	     ({ u64 temp = ALIGN(start+1, 1 << GEN8_PDE_SHIFT);		\
-		    temp = min(temp - start, length);			\
-		    start += temp, length -= temp; }), ++iter)
-
-#define gen8_for_each_pdpe(pd, pdp, start, length, iter)		\
-	for (iter = gen8_pdpe_index(start);				\
-	     length > 0 && iter < i915_pdpes_per_pdp(vm) &&		\
-		     (pd = i915_pd_entry(pdp, iter), true);		\
-	     ({ u64 temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT);	\
-		    temp = min(temp - start, length);			\
-		    start += temp, length -= temp; }), ++iter)
-
-#define gen8_for_each_pml4e(pdp, pml4, start, length, iter)		\
-	for (iter = gen8_pml4e_index(start);				\
-	     length > 0 && iter < GEN8_PML4ES_PER_PML4 &&		\
-		     (pdp = i915_pdp_entry(pml4, iter), true);		\
-	     ({ u64 temp = ALIGN(start+1, 1ULL << GEN8_PML4E_SHIFT);	\
-		    temp = min(temp - start, length);			\
-		    start += temp, length -= temp; }), ++iter)
-
-static inline u32 gen8_pte_index(u64 address)
-{
-	return i915_pte_index(address, GEN8_PDE_SHIFT);
-}
-
-static inline u32 gen8_pde_index(u64 address)
-{
-	return i915_pde_index(address, GEN8_PDE_SHIFT);
-}
-
-static inline u32 gen8_pdpe_index(u64 address)
-{
-	return (address >> GEN8_PDPE_SHIFT) & GEN8_PDPE_MASK;
-}
-
-static inline u32 gen8_pml4e_index(u64 address)
-{
-	return (address >> GEN8_PML4E_SHIFT) & GEN8_PML4E_MASK;
-}
-
-static inline u64 gen8_pte_count(u64 address, u64 length)
-{
-	return i915_pte_count(address, length, GEN8_PDE_SHIFT);
-}
-
 static inline dma_addr_t
 i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
 {
-- 
2.22.0

* [PATCH 05/24] drm/i915/selftests: Ignore self-preemption suppression under gvt
From: Chris Wilson @ 2019-07-15  8:09 UTC
  To: intel-gfx

GVT forces single-port submission of individual requests. We do not
enjoy the context amalgamation that the test depends upon for its setup
(where port 0 has a large number of requests with a priority change
somewhere in the middle). Under GVT's single-request submission it is
quite possible for the preemption event to occur while another context
is active, and so there is a real need to act upon that preemption.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Zhenyu Wang <zhenyuw@linux.intel.com>
---
 drivers/gpu/drm/i915/gt/selftest_lrc.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index bde0164551b1..071e21c0ac15 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -854,6 +854,9 @@ static int live_suppress_self_preempt(void *arg)
 	if (USES_GUC_SUBMISSION(i915))
 		return 0; /* presume black blox */
 
+	if (intel_vgpu_active(i915))
+		return 0; /* GVT forces single port & request submission */
+
 	mutex_lock(&i915->drm.struct_mutex);
 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 
-- 
2.22.0

* [PATCH 06/24] drm/i915: Lock the engine while dumping the active request
From: Chris Wilson @ 2019-07-15  8:09 UTC
  To: intel-gfx; +Cc: Alex Shumsky

We cannot let the request be retired and freed while we are trying to
dump it during error capture. It is not sufficient just to grab a
reference to the request, as during retirement we may free the ring
which we are also dumping. So hold the engine->active.lock across the
dump to prevent the request from being retired and freed.
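
In other words, the lock has to cover both the lookup and every dereference
of the request, not just the lookup itself. A userspace sketch of that
locking scope, with a pthread mutex standing in for engine->active.lock:

#include <pthread.h>
#include <stdio.h>

/*
 * Userspace stand-in: the point is that the lock covers both the lookup and
 * every dereference of the request. If we dropped the lock right after
 * find_active(), a concurrent retire could free what we are about to print.
 */
static pthread_mutex_t active_lock = PTHREAD_MUTEX_INITIALIZER;

struct request {
	int seqno;
	const char *ring;	/* freed on retirement in the real driver */
};

static struct request active = { 42, "ring contents" };

static struct request *find_active(void)
{
	/* caller must hold active_lock, cf. lockdep_assert_held() */
	return active.seqno ? &active : NULL;
}

static void dump_engine(void)
{
	struct request *rq;

	pthread_mutex_lock(&active_lock);
	rq = find_active();
	if (rq)
		printf("active request %d: %s\n", rq->seqno, rq->ring);
	pthread_mutex_unlock(&active_lock);	/* only now may retirement run */
}

int main(void)
{
	dump_engine();
	return 0;
}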

Reported-by: Alex Shumsky <alexthreed@gmail.com>
Fixes: 83c317832eb1 ("drm/i915: Dump the ringbuffer of the active request for debugging")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Alex Shumsky <alexthreed@gmail.com>
---
 drivers/gpu/drm/i915/gt/intel_engine_cs.c | 11 ++++-------
 drivers/gpu/drm/i915/i915_gpu_error.c     |  6 ++++--
 2 files changed, 8 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 75099500d1ed..5f8909546d36 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -1472,6 +1472,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 	struct i915_gpu_error * const error = &engine->i915->gpu_error;
 	struct i915_request *rq;
 	intel_wakeref_t wakeref;
+	unsigned long flags;
 
 	if (header) {
 		va_list ap;
@@ -1491,10 +1492,9 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 		   i915_reset_engine_count(error, engine),
 		   i915_reset_count(error));
 
-	rcu_read_lock();
-
 	drm_printf(m, "\tRequests:\n");
 
+	spin_lock_irqsave(&engine->active.lock, flags);
 	rq = intel_engine_find_active_request(engine);
 	if (rq) {
 		print_request(m, rq, "\t\tactive ");
@@ -1514,8 +1514,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 
 		print_request_ring(m, rq);
 	}
-
-	rcu_read_unlock();
+	spin_unlock_irqrestore(&engine->active.lock, flags);
 
 	wakeref = intel_runtime_pm_get_if_in_use(&engine->i915->runtime_pm);
 	if (wakeref) {
@@ -1654,7 +1653,6 @@ struct i915_request *
 intel_engine_find_active_request(struct intel_engine_cs *engine)
 {
 	struct i915_request *request, *active = NULL;
-	unsigned long flags;
 
 	/*
 	 * We are called by the error capture, reset and to dump engine
@@ -1667,7 +1665,7 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
 	 * At all other times, we must assume the GPU is still running, but
 	 * we only care about the snapshot of this moment.
 	 */
-	spin_lock_irqsave(&engine->active.lock, flags);
+	lockdep_assert_held(&engine->active.lock);
 	list_for_each_entry(request, &engine->active.requests, sched.link) {
 		if (i915_request_completed(request))
 			continue;
@@ -1682,7 +1680,6 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
 		active = request;
 		break;
 	}
-	spin_unlock_irqrestore(&engine->active.lock, flags);
 
 	return active;
 }
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 78e388fa059c..c5b89bf4d616 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1411,6 +1411,7 @@ static void gem_record_rings(struct i915_gpu_state *error)
 		struct intel_engine_cs *engine = i915->engine[i];
 		struct drm_i915_error_engine *ee = &error->engine[i];
 		struct i915_request *request;
+		unsigned long flags;
 
 		ee->engine_id = -1;
 
@@ -1422,10 +1423,11 @@ static void gem_record_rings(struct i915_gpu_state *error)
 		error_record_engine_registers(error, engine, ee);
 		error_record_engine_execlists(engine, ee);
 
+		spin_lock_irqsave(&engine->active.lock, flags);
 		request = intel_engine_find_active_request(engine);
 		if (request) {
 			struct i915_gem_context *ctx = request->gem_context;
-			struct intel_ring *ring;
+			struct intel_ring *ring = request->ring;
 
 			ee->vm = ctx->vm ?: &engine->gt->ggtt->vm;
 
@@ -1455,7 +1457,6 @@ static void gem_record_rings(struct i915_gpu_state *error)
 			ee->rq_post = request->postfix;
 			ee->rq_tail = request->tail;
 
-			ring = request->ring;
 			ee->cpu_ring_head = ring->head;
 			ee->cpu_ring_tail = ring->tail;
 			ee->ringbuffer =
@@ -1463,6 +1464,7 @@ static void gem_record_rings(struct i915_gpu_state *error)
 
 			engine_record_requests(engine, request, ee);
 		}
+		spin_unlock_irqrestore(&engine->active.lock, flags);
 
 		ee->hws_page =
 			i915_error_object_create(i915,
-- 
2.22.0

* [PATCH 07/24] drm/i915: Rely on spinlock protection for GPU error capture
From: Chris Wilson @ 2019-07-15  8:09 UTC
  To: intel-gfx

Trust that we now have adequate protection over the low-level structures
via the engine->active.lock to allow ourselves to capture the GPU error
state without the heavy hammer of stop_machine(). Sadly, this does mean
that we have to forgo some of the lesser-used information (not derived
from the active state) that is not controlled by the active locks.

A useful side-effect is that this allows us to restore error capturing
for Braswell and Broxton.
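
For illustration, the allocation strategy below hinges on a small page pool
whose emergency stash is refilled with may-sleep allocations before each
engine is captured, so the atomic section never depends on an atomic
allocation succeeding. A rough userspace model of that pool, with
malloc()/free() standing in for __get_free_page() and the sizes purely
illustrative:

#include <stdlib.h>

#define FAKE_PAGE_SIZE	4096
#define STASH_PAGES	31

struct pool {
	void *stash[STASH_PAGES];
	unsigned int count;
};

static void pool_init(struct pool *p)
{
	p->count = 0;
}

/* Called where we may sleep, e.g. before capturing each engine. */
static int pool_refill(struct pool *p)
{
	while (p->count < STASH_PAGES) {
		void *page = malloc(FAKE_PAGE_SIZE);
		if (!page)
			return -1;
		p->stash[p->count++] = page;
	}
	return 0;
}

/* Called from the atomic section: try a fresh page, else dip into the stash. */
static void *pool_alloc(struct pool *p)
{
	void *page = malloc(FAKE_PAGE_SIZE);	/* stands in for ATOMIC_MAYFAIL */

	if (page)
		return page;
	return p->count ? p->stash[--p->count] : NULL;
}

static void pool_free(struct pool *p, void *page)
{
	if (p->count < STASH_PAGES)
		p->stash[p->count++] = page;	/* top the stash back up */
	else
		free(page);
}

static void pool_fini(struct pool *p)
{
	while (p->count)
		free(p->stash[--p->count]);
}

The kernel version below additionally distinguishes ALLOW_FAIL and
ATOMIC_MAYFAIL allocations depending on context; the fallback-to-stash
logic is the same.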

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem_gtt.c   |   5 -
 drivers/gpu/drm/i915/i915_gpu_error.c | 503 +++++++++++---------------
 drivers/gpu/drm/i915/i915_gpu_error.h |  17 -
 3 files changed, 207 insertions(+), 318 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 220aba5a94d2..5c9c7cae4817 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2966,11 +2966,6 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
 		ggtt->vm.insert_page    = bxt_vtd_ggtt_insert_page__BKL;
 		if (ggtt->vm.clear_range != nop_clear_range)
 			ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
-
-		/* Prevent recursively calling stop_machine() and deadlocks. */
-		dev_info(dev_priv->drm.dev,
-			 "Disabling error capture for VT-d workaround\n");
-		i915_disable_error_state(dev_priv, -ENODEV);
 	}
 
 	ggtt->invalidate = gen6_ggtt_invalidate;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index c5b89bf4d616..026ee1f5536d 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -30,7 +30,6 @@
 #include <linux/ascii85.h>
 #include <linux/nmi.h>
 #include <linux/scatterlist.h>
-#include <linux/stop_machine.h>
 #include <linux/utsname.h>
 #include <linux/zlib.h>
 
@@ -46,6 +45,9 @@
 #include "i915_scatterlist.h"
 #include "intel_csr.h"
 
+#define ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
+#define ATOMIC_MAYFAIL (GFP_ATOMIC | __GFP_NOWARN)
+
 static inline const struct intel_engine_cs *
 engine_lookup(const struct drm_i915_private *i915, unsigned int id)
 {
@@ -67,26 +69,6 @@ engine_name(const struct drm_i915_private *i915, unsigned int id)
 	return __engine_name(engine_lookup(i915, id));
 }
 
-static const char *tiling_flag(int tiling)
-{
-	switch (tiling) {
-	default:
-	case I915_TILING_NONE: return "";
-	case I915_TILING_X: return " X";
-	case I915_TILING_Y: return " Y";
-	}
-}
-
-static const char *dirty_flag(int dirty)
-{
-	return dirty ? " dirty" : "";
-}
-
-static const char *purgeable_flag(int purgeable)
-{
-	return purgeable ? " purgeable" : "";
-}
-
 static void __sg_set_buf(struct scatterlist *sg,
 			 void *addr, unsigned int len, loff_t it)
 {
@@ -114,7 +96,7 @@ static bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len)
 	if (e->cur == e->end) {
 		struct scatterlist *sgl;
 
-		sgl = (typeof(sgl))__get_free_page(GFP_KERNEL);
+		sgl = (typeof(sgl))__get_free_page(ALLOW_FAIL);
 		if (!sgl) {
 			e->err = -ENOMEM;
 			return false;
@@ -134,7 +116,7 @@ static bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len)
 	}
 
 	e->size = ALIGN(len + 1, SZ_64K);
-	e->buf = kmalloc(e->size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+	e->buf = kmalloc(e->size, ALLOW_FAIL);
 	if (!e->buf) {
 		e->size = PAGE_ALIGN(len + 1);
 		e->buf = kmalloc(e->size, GFP_KERNEL);
@@ -211,47 +193,127 @@ i915_error_printer(struct drm_i915_error_state_buf *e)
 	return p;
 }
 
+/* single threaded page allocator with a reserved stash for emergencies */
+struct pool {
+	void *stash[31];
+	unsigned int count;
+};
+
+static void *__alloc_page(gfp_t gfp)
+{
+	return (void *)__get_free_page(gfp);
+}
+
+static void pool_fini(struct pool *p)
+{
+	while (p->count--)
+		free_page((unsigned long)p->stash[p->count]);
+}
+
+static int pool_refill(struct pool *p, gfp_t gfp)
+{
+	while (p->count < ARRAY_SIZE(p->stash)) {
+		p->stash[p->count] = __alloc_page(gfp);
+		if (!p->stash[p->count])
+			return -ENOMEM;
+
+		p->count++;
+	}
+
+	return 0;
+}
+
+static int pool_init(struct pool *p, gfp_t gfp)
+{
+	int err;
+
+	p->count = 0;
+
+	err = pool_refill(p, gfp);
+	if (err)
+		pool_fini(p);
+
+	return err;
+}
+
+static void *pool_alloc(struct pool *p, gfp_t gfp)
+{
+	void *ptr;
+
+	ptr = __alloc_page(gfp);
+	if (ptr)
+		return ptr;
+
+	if (p->count)
+		return p->stash[--p->count];
+
+	return NULL;
+}
+
+static void pool_free(struct pool *p, void *page)
+{
+	if (p->count < ARRAY_SIZE(p->stash)) {
+		p->stash[p->count++] = page;
+		return;
+	}
+
+	free_page((unsigned long)page);
+}
+
 #ifdef CONFIG_DRM_I915_COMPRESS_ERROR
 
 struct compress {
+	struct pool pool;
 	struct z_stream_s zstream;
 	void *tmp;
 };
 
 static bool compress_init(struct compress *c)
 {
-	struct z_stream_s *zstream = memset(&c->zstream, 0, sizeof(c->zstream));
+	struct z_stream_s *zstream = &c->zstream;
 
-	zstream->workspace =
-		kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
-			GFP_ATOMIC | __GFP_NOWARN);
-	if (!zstream->workspace)
+	if (pool_init(&c->pool, ALLOW_FAIL))
 		return false;
 
-	if (zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) != Z_OK) {
-		kfree(zstream->workspace);
+	zstream->workspace =
+		kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
+			ALLOW_FAIL);
+	if (!zstream->workspace) {
+		pool_fini(&c->pool);
 		return false;
 	}
 
 	c->tmp = NULL;
 	if (i915_has_memcpy_from_wc())
-		c->tmp = (void *)__get_free_page(GFP_ATOMIC | __GFP_NOWARN);
+		c->tmp = pool_alloc(&c->pool, ALLOW_FAIL);
 
 	return true;
 }
 
-static void *compress_next_page(struct drm_i915_error_object *dst)
+static bool compress_start(struct compress *c)
+{
+	struct z_stream_s *zstream = &c->zstream;
+	void *workspace = zstream->workspace;
+
+	memset(zstream, 0, sizeof(*zstream));
+	zstream->workspace = workspace;
+
+	return zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) == Z_OK;
+}
+
+static void *compress_next_page(struct compress *c,
+				struct drm_i915_error_object *dst)
 {
-	unsigned long page;
+	void *page;
 
 	if (dst->page_count >= dst->num_pages)
 		return ERR_PTR(-ENOSPC);
 
-	page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
+	page = pool_alloc(&c->pool, ATOMIC_MAYFAIL);
 	if (!page)
 		return ERR_PTR(-ENOMEM);
 
-	return dst->pages[dst->page_count++] = (void *)page;
+	return dst->pages[dst->page_count++] = page;
 }
 
 static int compress_page(struct compress *c,
@@ -267,7 +329,7 @@ static int compress_page(struct compress *c,
 
 	do {
 		if (zstream->avail_out == 0) {
-			zstream->next_out = compress_next_page(dst);
+			zstream->next_out = compress_next_page(c, dst);
 			if (IS_ERR(zstream->next_out))
 				return PTR_ERR(zstream->next_out);
 
@@ -295,7 +357,7 @@ static int compress_flush(struct compress *c,
 	do {
 		switch (zlib_deflate(zstream, Z_FINISH)) {
 		case Z_OK: /* more space requested */
-			zstream->next_out = compress_next_page(dst);
+			zstream->next_out = compress_next_page(c, dst);
 			if (IS_ERR(zstream->next_out))
 				return PTR_ERR(zstream->next_out);
 
@@ -316,15 +378,17 @@ static int compress_flush(struct compress *c,
 	return 0;
 }
 
-static void compress_fini(struct compress *c,
-			  struct drm_i915_error_object *dst)
+static void compress_finish(struct compress *c)
 {
-	struct z_stream_s *zstream = &c->zstream;
+	zlib_deflateEnd(&c->zstream);
+}
 
-	zlib_deflateEnd(zstream);
-	kfree(zstream->workspace);
+static void compress_fini(struct compress *c)
+{
+	kfree(c->zstream.workspace);
 	if (c->tmp)
-		free_page((unsigned long)c->tmp);
+		pool_free(&c->pool, c->tmp);
+	pool_fini(&c->pool);
 }
 
 static void err_compression_marker(struct drm_i915_error_state_buf *m)
@@ -335,9 +399,18 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m)
 #else
 
 struct compress {
+	struct pool pool;
 };
 
 static bool compress_init(struct compress *c)
+{
+	if (pool_init(&c->pool, ALLOW_FAIL))
+		return false;
+
+	return true;
+}
+
+static bool compress_start(struct compress *c)
 {
 	return true;
 }
@@ -346,14 +419,12 @@ static int compress_page(struct compress *c,
 			 void *src,
 			 struct drm_i915_error_object *dst)
 {
-	unsigned long page;
 	void *ptr;
 
-	page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
-	if (!page)
+	ptr = pool_alloc(&c->pool, ATOMIC_MAYFAIL);
+	if (!ptr)
 		return -ENOMEM;
 
-	ptr = (void *)page;
 	if (!i915_memcpy_from_wc(ptr, src, PAGE_SIZE))
 		memcpy(ptr, src, PAGE_SIZE);
 	dst->pages[dst->page_count++] = ptr;
@@ -367,11 +438,15 @@ static int compress_flush(struct compress *c,
 	return 0;
 }
 
-static void compress_fini(struct compress *c,
-			  struct drm_i915_error_object *dst)
+static void compress_finish(struct compress *c)
 {
 }
 
+static void compress_fini(struct compress *c)
+{
+	pool_fini(&c->pool);
+}
+
 static void err_compression_marker(struct drm_i915_error_state_buf *m)
 {
 	err_puts(m, "~");
@@ -379,36 +454,6 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m)
 
 #endif
 
-static void print_error_buffers(struct drm_i915_error_state_buf *m,
-				const char *name,
-				struct drm_i915_error_buffer *err,
-				int count)
-{
-	err_printf(m, "%s [%d]:\n", name, count);
-
-	while (count--) {
-		err_printf(m, "    %08x_%08x %8u %02x %02x",
-			   upper_32_bits(err->gtt_offset),
-			   lower_32_bits(err->gtt_offset),
-			   err->size,
-			   err->read_domains,
-			   err->write_domain);
-		err_puts(m, tiling_flag(err->tiling));
-		err_puts(m, dirty_flag(err->dirty));
-		err_puts(m, purgeable_flag(err->purgeable));
-		err_puts(m, err->userptr ? " userptr" : "");
-		err_puts(m, i915_cache_level_str(m->i915, err->cache_level));
-
-		if (err->name)
-			err_printf(m, " (name: %d)", err->name);
-		if (err->fence_reg != I915_FENCE_REG_NONE)
-			err_printf(m, " (fence: %d)", err->fence_reg);
-
-		err_puts(m, "\n");
-		err++;
-	}
-}
-
 static void error_print_instdone(struct drm_i915_error_state_buf *m,
 				 const struct drm_i915_error_engine *ee)
 {
@@ -734,33 +779,6 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
 			error_print_engine(m, &error->engine[i], error->epoch);
 	}
 
-	for (i = 0; i < ARRAY_SIZE(error->active_vm); i++) {
-		char buf[128];
-		int len, first = 1;
-
-		if (!error->active_vm[i])
-			break;
-
-		len = scnprintf(buf, sizeof(buf), "Active (");
-		for (j = 0; j < ARRAY_SIZE(error->engine); j++) {
-			if (error->engine[j].vm != error->active_vm[i])
-				continue;
-
-			len += scnprintf(buf + len, sizeof(buf), "%s%s",
-					 first ? "" : ", ",
-					 m->i915->engine[j]->name);
-			first = 0;
-		}
-		scnprintf(buf + len, sizeof(buf), ")");
-		print_error_buffers(m, buf,
-				    error->active_bo[i],
-				    error->active_bo_count[i]);
-	}
-
-	print_error_buffers(m, "Pinned (global)",
-			    error->pinned_bo,
-			    error->pinned_bo_count);
-
 	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
 		const struct drm_i915_error_engine *ee = &error->engine[i];
 
@@ -974,10 +992,6 @@ void __i915_gpu_state_free(struct kref *error_ref)
 		kfree(ee->requests);
 	}
 
-	for (i = 0; i < ARRAY_SIZE(error->active_bo); i++)
-		kfree(error->active_bo[i]);
-	kfree(error->pinned_bo);
-
 	kfree(error->overlay);
 	kfree(error->display);
 
@@ -990,12 +1004,12 @@ void __i915_gpu_state_free(struct kref *error_ref)
 
 static struct drm_i915_error_object *
 i915_error_object_create(struct drm_i915_private *i915,
-			 struct i915_vma *vma)
+			 struct i915_vma *vma,
+			 struct compress *compress)
 {
 	struct i915_ggtt *ggtt = &i915->ggtt;
 	const u64 slot = ggtt->error_capture.start;
 	struct drm_i915_error_object *dst;
-	struct compress compress;
 	unsigned long num_pages;
 	struct sgt_iter iter;
 	dma_addr_t dma;
@@ -1006,22 +1020,21 @@ i915_error_object_create(struct drm_i915_private *i915,
 
 	num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT;
 	num_pages = DIV_ROUND_UP(10 * num_pages, 8); /* worstcase zlib growth */
-	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *),
-		      GFP_ATOMIC | __GFP_NOWARN);
+	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), ATOMIC_MAYFAIL);
 	if (!dst)
 		return NULL;
 
+	if (!compress_start(compress)) {
+		kfree(dst);
+		return NULL;
+	}
+
 	dst->gtt_offset = vma->node.start;
 	dst->gtt_size = vma->node.size;
 	dst->num_pages = num_pages;
 	dst->page_count = 0;
 	dst->unused = 0;
 
-	if (!compress_init(&compress)) {
-		kfree(dst);
-		return NULL;
-	}
-
 	ret = -EINVAL;
 	for_each_sgt_dma(dma, iter, vma->pages) {
 		void __iomem *s;
@@ -1029,69 +1042,23 @@ i915_error_object_create(struct drm_i915_private *i915,
 		ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);
 
 		s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
-		ret = compress_page(&compress, (void  __force *)s, dst);
+		ret = compress_page(compress, (void  __force *)s, dst);
 		io_mapping_unmap_atomic(s);
 		if (ret)
 			break;
 	}
 
-	if (ret || compress_flush(&compress, dst)) {
+	if (ret || compress_flush(compress, dst)) {
 		while (dst->page_count--)
-			free_page((unsigned long)dst->pages[dst->page_count]);
+			pool_free(&compress->pool, dst->pages[dst->page_count]);
 		kfree(dst);
 		dst = NULL;
 	}
+	compress_finish(compress);
 
-	compress_fini(&compress, dst);
 	return dst;
 }
 
-static void capture_bo(struct drm_i915_error_buffer *err,
-		       struct i915_vma *vma)
-{
-	struct drm_i915_gem_object *obj = vma->obj;
-
-	err->size = obj->base.size;
-	err->name = obj->base.name;
-
-	err->gtt_offset = vma->node.start;
-	err->read_domains = obj->read_domains;
-	err->write_domain = obj->write_domain;
-	err->fence_reg = vma->fence ? vma->fence->id : -1;
-	err->tiling = i915_gem_object_get_tiling(obj);
-	err->dirty = obj->mm.dirty;
-	err->purgeable = obj->mm.madv != I915_MADV_WILLNEED;
-	err->userptr = obj->userptr.mm != NULL;
-	err->cache_level = obj->cache_level;
-}
-
-static u32 capture_error_bo(struct drm_i915_error_buffer *err,
-			    int count, struct list_head *head,
-			    unsigned int flags)
-#define ACTIVE_ONLY BIT(0)
-#define PINNED_ONLY BIT(1)
-{
-	struct i915_vma *vma;
-	int i = 0;
-
-	list_for_each_entry(vma, head, vm_link) {
-		if (!vma->obj)
-			continue;
-
-		if (flags & ACTIVE_ONLY && !i915_vma_is_active(vma))
-			continue;
-
-		if (flags & PINNED_ONLY && !i915_vma_is_pinned(vma))
-			continue;
-
-		capture_bo(err++, vma);
-		if (++i == count)
-			break;
-	}
-
-	return i;
-}
-
 /*
  * Generate a semi-unique error code. The code is not meant to have meaning, The
  * code's only purpose is to try to prevent false duplicated bug reports by
@@ -1281,7 +1248,7 @@ static void engine_record_requests(struct intel_engine_cs *engine,
 	if (!count)
 		return;
 
-	ee->requests = kcalloc(count, sizeof(*ee->requests), GFP_ATOMIC);
+	ee->requests = kcalloc(count, sizeof(*ee->requests), ATOMIC_MAYFAIL);
 	if (!ee->requests)
 		return;
 
@@ -1349,8 +1316,10 @@ static void record_context(struct drm_i915_error_context *e,
 	e->active = atomic_read(&ctx->active_count);
 }
 
-static void request_record_user_bo(struct i915_request *request,
-				   struct drm_i915_error_engine *ee)
+static void
+request_record_user_bo(struct i915_request *request,
+		       struct drm_i915_error_engine *ee,
+		       struct compress *compress)
 {
 	struct i915_capture_list *c;
 	struct drm_i915_error_object **bo;
@@ -1362,18 +1331,20 @@ static void request_record_user_bo(struct i915_request *request,
 	if (!max)
 		return;
 
-	bo = kmalloc_array(max, sizeof(*bo), GFP_ATOMIC);
+	bo = kmalloc_array(max, sizeof(*bo), ATOMIC_MAYFAIL);
 	if (!bo) {
 		/* If we can't capture everything, try to capture something. */
 		max = min_t(long, max, PAGE_SIZE / sizeof(*bo));
-		bo = kmalloc_array(max, sizeof(*bo), GFP_ATOMIC);
+		bo = kmalloc_array(max, sizeof(*bo), ATOMIC_MAYFAIL);
 	}
 	if (!bo)
 		return;
 
 	count = 0;
 	for (c = request->capture_list; c; c = c->next) {
-		bo[count] = i915_error_object_create(request->i915, c->vma);
+		bo[count] = i915_error_object_create(request->i915,
+						     c->vma,
+						     compress);
 		if (!bo[count])
 			break;
 		if (++count == max)
@@ -1386,7 +1357,8 @@ static void request_record_user_bo(struct i915_request *request,
 
 static struct drm_i915_error_object *
 capture_object(struct drm_i915_private *dev_priv,
-	       struct drm_i915_gem_object *obj)
+	       struct drm_i915_gem_object *obj,
+	       struct compress *compress)
 {
 	if (obj && i915_gem_object_has_pages(obj)) {
 		struct i915_vma fake = {
@@ -1396,13 +1368,14 @@ capture_object(struct drm_i915_private *dev_priv,
 			.obj = obj,
 		};
 
-		return i915_error_object_create(dev_priv, &fake);
+		return i915_error_object_create(dev_priv, &fake, compress);
 	} else {
 		return NULL;
 	}
 }
 
-static void gem_record_rings(struct i915_gpu_state *error)
+static void
+gem_record_rings(struct i915_gpu_state *error, struct compress *compress)
 {
 	struct drm_i915_private *i915 = error->i915;
 	int i;
@@ -1420,6 +1393,9 @@ static void gem_record_rings(struct i915_gpu_state *error)
 
 		ee->engine_id = i;
 
+		/* Refill our page pool before entering atomic section */
+		pool_refill(&compress->pool, ALLOW_FAIL);
+
 		error_record_engine_registers(error, engine, ee);
 		error_record_engine_execlists(engine, ee);
 
@@ -1429,8 +1405,6 @@ static void gem_record_rings(struct i915_gpu_state *error)
 			struct i915_gem_context *ctx = request->gem_context;
 			struct intel_ring *ring = request->ring;
 
-			ee->vm = ctx->vm ?: &engine->gt->ggtt->vm;
-
 			record_context(&ee->context, ctx);
 
 			/* We need to copy these to an anonymous buffer
@@ -1438,17 +1412,21 @@ static void gem_record_rings(struct i915_gpu_state *error)
 			 * by userspace.
 			 */
 			ee->batchbuffer =
-				i915_error_object_create(i915, request->batch);
+				i915_error_object_create(i915,
+							 request->batch,
+							 compress);
 
 			if (HAS_BROKEN_CS_TLB(i915))
 				ee->wa_batchbuffer =
 				  i915_error_object_create(i915,
-							   engine->gt->scratch);
-			request_record_user_bo(request, ee);
+							   engine->gt->scratch,
+							   compress);
+			request_record_user_bo(request, ee, compress);
 
 			ee->ctx =
 				i915_error_object_create(i915,
-							 request->hw_context->state);
+							 request->hw_context->state,
+							 compress);
 
 			error->simulated |=
 				i915_gem_context_no_error_capture(ctx);
@@ -1460,7 +1438,9 @@ static void gem_record_rings(struct i915_gpu_state *error)
 			ee->cpu_ring_head = ring->head;
 			ee->cpu_ring_tail = ring->tail;
 			ee->ringbuffer =
-				i915_error_object_create(i915, ring->vma);
+				i915_error_object_create(i915,
+							 ring->vma,
+							 compress);
 
 			engine_record_requests(engine, request, ee);
 		}
@@ -1468,89 +1448,21 @@ static void gem_record_rings(struct i915_gpu_state *error)
 
 		ee->hws_page =
 			i915_error_object_create(i915,
-						 engine->status_page.vma);
-
-		ee->wa_ctx = i915_error_object_create(i915, engine->wa_ctx.vma);
-
-		ee->default_state = capture_object(i915, engine->default_state);
-	}
-}
-
-static void gem_capture_vm(struct i915_gpu_state *error,
-			   struct i915_address_space *vm,
-			   int idx)
-{
-	struct drm_i915_error_buffer *active_bo;
-	struct i915_vma *vma;
-	int count;
-
-	count = 0;
-	list_for_each_entry(vma, &vm->bound_list, vm_link)
-		if (i915_vma_is_active(vma))
-			count++;
-
-	active_bo = NULL;
-	if (count)
-		active_bo = kcalloc(count, sizeof(*active_bo), GFP_ATOMIC);
-	if (active_bo)
-		count = capture_error_bo(active_bo,
-					 count, &vm->bound_list,
-					 ACTIVE_ONLY);
-	else
-		count = 0;
+						 engine->status_page.vma,
+						 compress);
 
-	error->active_vm[idx] = vm;
-	error->active_bo[idx] = active_bo;
-	error->active_bo_count[idx] = count;
-}
-
-static void capture_active_buffers(struct i915_gpu_state *error)
-{
-	int cnt = 0, i, j;
-
-	BUILD_BUG_ON(ARRAY_SIZE(error->engine) > ARRAY_SIZE(error->active_bo));
-	BUILD_BUG_ON(ARRAY_SIZE(error->active_bo) != ARRAY_SIZE(error->active_vm));
-	BUILD_BUG_ON(ARRAY_SIZE(error->active_bo) != ARRAY_SIZE(error->active_bo_count));
-
-	/* Scan each engine looking for unique active contexts/vm */
-	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
-		struct drm_i915_error_engine *ee = &error->engine[i];
-		bool found;
-
-		if (!ee->vm)
-			continue;
+		ee->wa_ctx =
+			i915_error_object_create(i915,
+						 engine->wa_ctx.vma,
+						 compress);
 
-		found = false;
-		for (j = 0; j < i && !found; j++)
-			found = error->engine[j].vm == ee->vm;
-		if (!found)
-			gem_capture_vm(error, ee->vm, cnt++);
+		ee->default_state =
+			capture_object(i915, engine->default_state, compress);
 	}
 }
 
-static void capture_pinned_buffers(struct i915_gpu_state *error)
-{
-	struct i915_address_space *vm = &error->i915->ggtt.vm;
-	struct drm_i915_error_buffer *bo;
-	struct i915_vma *vma;
-	int count;
-
-	count = 0;
-	list_for_each_entry(vma, &vm->bound_list, vm_link)
-		count++;
-
-	bo = NULL;
-	if (count)
-		bo = kcalloc(count, sizeof(*bo), GFP_ATOMIC);
-	if (!bo)
-		return;
-
-	error->pinned_bo_count =
-		capture_error_bo(bo, count, &vm->bound_list, PINNED_ONLY);
-	error->pinned_bo = bo;
-}
-
-static void capture_uc_state(struct i915_gpu_state *error)
+static void
+capture_uc_state(struct i915_gpu_state *error, struct compress *compress)
 {
 	struct drm_i915_private *i915 = error->i915;
 	struct i915_error_uc *error_uc = &error->uc;
@@ -1567,9 +1479,11 @@ static void capture_uc_state(struct i915_gpu_state *error)
 	 * As modparams are generally accesible from the userspace make
 	 * explicit copies of the firmware paths.
 	 */
-	error_uc->guc_fw.path = kstrdup(uc->guc.fw.path, GFP_ATOMIC);
-	error_uc->huc_fw.path = kstrdup(uc->huc.fw.path, GFP_ATOMIC);
-	error_uc->guc_log = i915_error_object_create(i915, uc->guc.log.vma);
+	error_uc->guc_fw.path = kstrdup(uc->guc.fw.path, ALLOW_FAIL);
+	error_uc->huc_fw.path = kstrdup(uc->huc.fw.path, ALLOW_FAIL);
+	error_uc->guc_log = i915_error_object_create(i915,
+						     uc->guc.log.vma,
+						     compress);
 }
 
 /* Capture all registers which don't fit into another category. */
@@ -1753,56 +1667,53 @@ static void capture_finish(struct i915_gpu_state *error)
 	ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
 }
 
-static int capture(void *data)
-{
-	struct i915_gpu_state *error = data;
-
-	error->time = ktime_get_real();
-	error->boottime = ktime_get_boottime();
-	error->uptime = ktime_sub(ktime_get(),
-				  error->i915->gt.last_init_time);
-	error->capture = jiffies;
-
-	capture_params(error);
-	capture_gen_state(error);
-	capture_uc_state(error);
-	capture_reg_state(error);
-	gem_record_fences(error);
-	gem_record_rings(error);
-	capture_active_buffers(error);
-	capture_pinned_buffers(error);
-
-	error->overlay = intel_overlay_capture_error_state(error->i915);
-	error->display = intel_display_capture_error_state(error->i915);
-
-	error->epoch = capture_find_epoch(error);
-
-	capture_finish(error);
-	return 0;
-}
-
 #define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))
 
 struct i915_gpu_state *
 i915_capture_gpu_state(struct drm_i915_private *i915)
 {
 	struct i915_gpu_state *error;
+	struct compress compress;
 
 	/* Check if GPU capture has been disabled */
 	error = READ_ONCE(i915->gpu_error.first_error);
 	if (IS_ERR(error))
 		return error;
 
-	error = kzalloc(sizeof(*error), GFP_ATOMIC);
+	error = kzalloc(sizeof(*error), ALLOW_FAIL);
 	if (!error) {
 		i915_disable_error_state(i915, -ENOMEM);
 		return ERR_PTR(-ENOMEM);
 	}
 
+	if (!compress_init(&compress)) {
+		kfree(error);
+		i915_disable_error_state(i915, -ENOMEM);
+		return ERR_PTR(-ENOMEM);
+	}
+
 	kref_init(&error->ref);
 	error->i915 = i915;
 
-	stop_machine(capture, error, NULL);
+	error->time = ktime_get_real();
+	error->boottime = ktime_get_boottime();
+	error->uptime = ktime_sub(ktime_get(), i915->gt.last_init_time);
+	error->capture = jiffies;
+
+	capture_params(error);
+	capture_gen_state(error);
+	capture_uc_state(error, &compress);
+	capture_reg_state(error);
+	gem_record_fences(error);
+	gem_record_rings(error, &compress);
+
+	error->overlay = intel_overlay_capture_error_state(i915);
+	error->display = intel_display_capture_error_state(i915);
+
+	error->epoch = capture_find_epoch(error);
+
+	capture_finish(error);
+	compress_fini(&compress);
 
 	return error;
 }
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 85f06bc5da05..a24c35107d16 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -85,7 +85,6 @@ struct i915_gpu_state {
 		/* Software tracked state */
 		bool idle;
 		unsigned long hangcheck_timestamp;
-		struct i915_address_space *vm;
 		int num_requests;
 		u32 reset_count;
 
@@ -161,22 +160,6 @@ struct i915_gpu_state {
 		} vm_info;
 	} engine[I915_NUM_ENGINES];
 
-	struct drm_i915_error_buffer {
-		u32 size;
-		u32 name;
-		u64 gtt_offset;
-		u32 read_domains;
-		u32 write_domain;
-		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
-		u32 tiling:2;
-		u32 dirty:1;
-		u32 purgeable:1;
-		u32 userptr:1;
-		u32 cache_level:3;
-	} *active_bo[I915_NUM_ENGINES], *pinned_bo;
-	u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count;
-	struct i915_address_space *active_vm[I915_NUM_ENGINES];
-
 	struct scatterlist *sgl, *fit;
 };
 
-- 
2.22.0


* [PATCH 08/24] drm/i915/oa: Reconfigure contexts on the fly
  2019-07-15  8:09 [PATCH 01/24] drm/i915: Hide unshrinkable context objects from the shrinker Chris Wilson
                   ` (5 preceding siblings ...)
  2019-07-15  8:09 ` [PATCH 07/24] drm/i915: Rely on spinlock protection for GPU error capture Chris Wilson
@ 2019-07-15  8:09 ` Chris Wilson
  2019-07-15  8:09 ` [PATCH 09/24] drm/i915: Add to timeline requires the timeline mutex Chris Wilson
                   ` (19 subsequent siblings)
  26 siblings, 0 replies; 33+ messages in thread
From: Chris Wilson @ 2019-07-15  8:09 UTC (permalink / raw)
  To: intel-gfx

Avoid a global idle barrier by reconfiguring each context in place,
rewriting it with MI_STORE_DWORD from the kernel context.

v2: We only need to determine the desired register values once, they are
the same for all contexts.
v3: Don't remove the kernel context from the list of known GEM contexts;
the world is not ready for that yet.
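
For a sense of the mechanism (a simplified standalone sketch, not the
driver code), each context update reduces to emitting one store per
{register, context-image offset, value} triple from the kernel context,
mirroring gen8_store_flex() in the diff below; the opcode constant here is
a placeholder, not the real MI_STORE_DWORD_IMM encoding:

#include <stdint.h>
#include <stddef.h>

struct flex {
	uint32_t reg;		/* mmio offset, only used by the LRI variant */
	uint32_t offset;	/* dword index into the context image */
	uint32_t value;
};

#define FAKE_MI_STORE_DWORD_IMM 0x10000000u	/* placeholder opcode */

/*
 * Emit one GGTT-addressed store per table entry aimed at the remote
 * context's register state page; returns the number of dwords written.
 * The "+ 1" skips the register-offset dword so the value slot is hit,
 * matching the layout assumed by the patch.
 */
static size_t store_flex(uint32_t *cs, uint32_t lrc_state_ggtt,
			 const struct flex *flex, size_t count)
{
	size_t n = 0;

	while (count--) {
		cs[n++] = FAKE_MI_STORE_DWORD_IMM;
		cs[n++] = lrc_state_ggtt + (flex->offset + 1) * sizeof(uint32_t);
		cs[n++] = 0;		/* upper 32 bits of the GGTT address */
		cs[n++] = flex->value;
		flex++;
	}

	return n;
}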

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
---
 drivers/gpu/drm/i915/gt/intel_lrc.c |   7 +-
 drivers/gpu/drm/i915/i915_perf.c    | 251 +++++++++++++++++++++-------
 2 files changed, 197 insertions(+), 61 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 9e0992498087..fc4c77555e06 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1570,9 +1570,12 @@ __execlists_update_reg_state(struct intel_context *ce,
 	regs[CTX_RING_TAIL + 1] = ring->tail;
 
 	/* RPCS */
-	if (engine->class == RENDER_CLASS)
+	if (engine->class == RENDER_CLASS) {
 		regs[CTX_R_PWR_CLK_STATE + 1] =
 			intel_sseu_make_rpcs(engine->i915, &ce->sseu);
+
+		i915_oa_init_reg_state(engine, ce, regs);
+	}
 }
 
 static int
@@ -2992,8 +2995,6 @@ static void execlists_init_reg_state(u32 *regs,
 	if (rcs) {
 		regs[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
 		CTX_REG(regs, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, 0);
-
-		i915_oa_init_reg_state(engine, ce, regs);
 	}
 
 	regs[CTX_END] = MI_BATCH_BUFFER_END;
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 007826ded9b3..78cb9ec8866c 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1636,6 +1636,27 @@ static void hsw_disable_metric_set(struct drm_i915_private *dev_priv)
 				      ~GT_NOA_ENABLE));
 }
 
+static u32 oa_config_flex_reg(const struct i915_oa_config *oa_config,
+			      i915_reg_t reg)
+{
+	u32 mmio = i915_mmio_reg_offset(reg);
+	int i;
+
+	/*
+	 * This arbitrary default will select the 'EU FPU0 Pipeline
+	 * Active' event. In the future it's anticipated that there
+	 * will be an explicit 'No Event' we can select, but not yet...
+	 */
+	if (!oa_config)
+		return 0;
+
+	for (i = 0; i < oa_config->flex_regs_len; i++) {
+		if (i915_mmio_reg_offset(oa_config->flex_regs[i].addr) == mmio)
+			return oa_config->flex_regs[i].value;
+	}
+
+	return 0;
+}
 /*
  * NB: It must always remain pointer safe to run this even if the OA unit
  * has been disabled.
@@ -1669,28 +1690,8 @@ gen8_update_reg_state_unlocked(struct intel_context *ce,
 		GEN8_OA_COUNTER_RESUME);
 
 	for (i = 0; i < ARRAY_SIZE(flex_regs); i++) {
-		u32 state_offset = ctx_flexeu0 + i * 2;
-		u32 mmio = i915_mmio_reg_offset(flex_regs[i]);
-
-		/*
-		 * This arbitrary default will select the 'EU FPU0 Pipeline
-		 * Active' event. In the future it's anticipated that there
-		 * will be an explicit 'No Event' we can select, but not yet...
-		 */
-		u32 value = 0;
-
-		if (oa_config) {
-			u32 j;
-
-			for (j = 0; j < oa_config->flex_regs_len; j++) {
-				if (i915_mmio_reg_offset(oa_config->flex_regs[j].addr) == mmio) {
-					value = oa_config->flex_regs[j].value;
-					break;
-				}
-			}
-		}
-
-		CTX_REG(reg_state, state_offset, flex_regs[i], value);
+		CTX_REG(reg_state, ctx_flexeu0 + i * 2, flex_regs[i],
+			oa_config_flex_reg(oa_config, flex_regs[i]));
 	}
 
 	CTX_REG(reg_state,
@@ -1698,6 +1699,107 @@ gen8_update_reg_state_unlocked(struct intel_context *ce,
 		intel_sseu_make_rpcs(i915, &ce->sseu));
 }
 
+struct flex {
+	i915_reg_t reg;
+	u32 offset;
+	u32 value;
+};
+
+static int
+gen8_store_flex(struct i915_request *rq,
+		struct intel_context *ce,
+		const struct flex *flex, unsigned int count)
+{
+	u32 offset;
+	u32 *cs;
+
+	cs = intel_ring_begin(rq, 4 * count);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
+	offset = i915_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
+	do {
+		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+		*cs++ = offset + (flex->offset + 1) * sizeof(u32);
+		*cs++ = 0;
+		*cs++ = flex->value;
+	} while (flex++, --count);
+
+	intel_ring_advance(rq, cs);
+
+	return 0;
+}
+
+static int
+gen8_load_flex(struct i915_request *rq,
+	       struct intel_context *ce,
+	       const struct flex *flex, unsigned int count)
+{
+	u32 *cs;
+
+	GEM_BUG_ON(!count || count > 63);
+
+	cs = intel_ring_begin(rq, 2 * count + 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
+	*cs++ = MI_LOAD_REGISTER_IMM(count);
+	do {
+		*cs++ = i915_mmio_reg_offset(flex->reg);
+		*cs++ = flex->value;
+	} while (flex++, --count);
+	*cs++ = MI_NOOP;
+
+	intel_ring_advance(rq, cs);
+
+	return 0;
+}
+
+static int gen8_modify_context(struct intel_context *ce,
+			       const struct flex *flex, unsigned int count)
+{
+	struct i915_request *rq;
+	int err;
+
+	lockdep_assert_held(&ce->pin_mutex);
+
+	rq = i915_request_create(ce->engine->kernel_context);
+	if (IS_ERR(rq))
+		return PTR_ERR(rq);
+
+	/* Serialise with the remote context */
+	err = i915_active_request_set(&ce->ring->timeline->last_request, rq);
+	if (err)
+		goto out_add;
+
+	/* Keep the remote context alive until after we finish editing */
+	err = i915_active_ref(&ce->active, rq->fence.context, rq);
+	if (err)
+		goto out_add;
+
+	err = gen8_store_flex(rq, ce, flex, count);
+
+out_add:
+	i915_request_add(rq);
+	return err;
+}
+
+static int gen8_modify_self(struct intel_context *ce,
+			    const struct flex *flex, unsigned int count)
+{
+	struct i915_request *rq;
+	int err;
+
+	rq = i915_request_create(ce);
+	if (IS_ERR(rq))
+		return PTR_ERR(rq);
+
+	err = gen8_load_flex(rq, ce, flex, count);
+
+	i915_request_add(rq);
+	return err;
+}
+
 /*
  * Manages updating the per-context aspects of the OA stream
  * configuration across all contexts.
@@ -1722,15 +1824,43 @@ gen8_update_reg_state_unlocked(struct intel_context *ce,
  *
  * Note: it's only the RCS/Render context that has any OA state.
  */
-static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
+static int gen8_configure_all_contexts(struct drm_i915_private *i915,
 				       const struct i915_oa_config *oa_config)
 {
-	unsigned int map_type = i915_coherent_map_type(dev_priv);
+	/* The MMIO offsets for Flex EU registers aren't contiguous */
+	const u32 ctx_flexeu0 = i915->perf.oa.ctx_flexeu0_offset;
+#define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N))
+	struct flex regs[] = {
+		{
+			GEN8_R_PWR_CLK_STATE,
+			CTX_R_PWR_CLK_STATE,
+		},
+		{
+			GEN8_OACTXCONTROL,
+			i915->perf.oa.ctx_oactxctrl_offset,
+			((i915->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
+			 (i915->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) |
+			 GEN8_OA_COUNTER_RESUME)
+		},
+		{ EU_PERF_CNTL0, ctx_flexeuN(0) },
+		{ EU_PERF_CNTL1, ctx_flexeuN(1) },
+		{ EU_PERF_CNTL2, ctx_flexeuN(2) },
+		{ EU_PERF_CNTL3, ctx_flexeuN(3) },
+		{ EU_PERF_CNTL4, ctx_flexeuN(4) },
+		{ EU_PERF_CNTL5, ctx_flexeuN(5) },
+		{ EU_PERF_CNTL6, ctx_flexeuN(6) },
+	};
+#undef ctx_flexeuN
+	struct intel_engine_cs *engine;
 	struct i915_gem_context *ctx;
-	struct i915_request *rq;
-	int ret;
+	enum intel_engine_id id;
+	int err;
+	int i;
 
-	lockdep_assert_held(&dev_priv->drm.struct_mutex);
+	for (i = 2; i < ARRAY_SIZE(regs); i++)
+		regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg);
+
+	lockdep_assert_held(&i915->drm.struct_mutex);
 
 	/*
 	 * The OA register config is setup through the context image. This image
@@ -1742,58 +1872,63 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
 	 * this might leave small interval of time where the OA unit is
 	 * configured at an invalid sampling period.
 	 *
-	 * So far the best way to work around this issue seems to be draining
-	 * the GPU from any submitted work.
+	 * Note that since we emit all requests from a single ring, there
+	 * is still an implicit global barrier here that may cause a high
+	 * priority context to wait for an otherwise independent low priority
+	 * context. Contexts idle at the time of reconfiguration are not
+	 * trapped behind the barrier.
 	 */
-	ret = i915_gem_wait_for_idle(dev_priv,
-				     I915_WAIT_LOCKED,
-				     MAX_SCHEDULE_TIMEOUT);
-	if (ret)
-		return ret;
-
-	/* Update all contexts now that we've stalled the submission. */
-	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
+	list_for_each_entry(ctx, &i915->contexts.list, link) {
 		struct i915_gem_engines_iter it;
 		struct intel_context *ce;
 
+		if (ctx == i915->kernel_context)
+			continue;
+
 		for_each_gem_engine(ce,
 				    i915_gem_context_lock_engines(ctx),
 				    it) {
-			u32 *regs;
+			GEM_BUG_ON(ce == ce->engine->kernel_context);
 
 			if (ce->engine->class != RENDER_CLASS)
 				continue;
 
-			/* OA settings will be set upon first use */
-			if (!ce->state)
-				continue;
-
-			regs = i915_gem_object_pin_map(ce->state->obj,
-						       map_type);
-			if (IS_ERR(regs)) {
-				i915_gem_context_unlock_engines(ctx);
-				return PTR_ERR(regs);
-			}
+			err = intel_context_lock_pinned(ce);
+			if (err)
+				break;
 
-			ce->state->obj->mm.dirty = true;
-			regs += LRC_STATE_PN * PAGE_SIZE / sizeof(*regs);
+			regs[0].value = intel_sseu_make_rpcs(i915, &ce->sseu);
 
-			gen8_update_reg_state_unlocked(ce, regs, oa_config);
+			/* Otherwise OA settings will be set upon first use */
+			if (intel_context_is_pinned(ce))
+				err = gen8_modify_context(ce, regs, ARRAY_SIZE(regs));
 
-			i915_gem_object_unpin_map(ce->state->obj);
+			intel_context_unlock_pinned(ce);
+			if (err)
+				break;
 		}
 		i915_gem_context_unlock_engines(ctx);
+		if (err)
+			return err;
 	}
 
 	/*
-	 * Apply the configuration by doing one context restore of the edited
-	 * context image.
+	 * After updating all other contexts, we need to modify ourselves.
+	 * If we don't modify the kernel_context, we do not get events while
+	 * idle.
 	 */
-	rq = i915_request_create(dev_priv->engine[RCS0]->kernel_context);
-	if (IS_ERR(rq))
-		return PTR_ERR(rq);
+	for_each_engine(engine, i915, id) {
+		struct intel_context *ce = engine->kernel_context;
 
-	i915_request_add(rq);
+		if (engine->class != RENDER_CLASS)
+			continue;
+
+		regs[0].value = intel_sseu_make_rpcs(i915, &ce->sseu);
+
+		err = gen8_modify_self(ce, regs, ARRAY_SIZE(regs));
+		if (err)
+			return err;
+	}
 
 	return 0;
 }
-- 
2.22.0


* [PATCH 09/24] drm/i915: Add to timeline requires the timeline mutex
  2019-07-15  8:09 [PATCH 01/24] drm/i915: Hide unshrinkable context objects from the shrinker Chris Wilson
                   ` (6 preceding siblings ...)
  2019-07-15  8:09 ` [PATCH 08/24] drm/i915/oa: Reconfigure contexts on the fly Chris Wilson
@ 2019-07-15  8:09 ` Chris Wilson
  2019-07-15  8:09 ` [PATCH 10/24] drm/i915: Teach execbuffer to take the engine wakeref not GT Chris Wilson
                   ` (18 subsequent siblings)
  26 siblings, 0 replies; 33+ messages in thread
From: Chris Wilson @ 2019-07-15  8:09 UTC (permalink / raw)
  To: intel-gfx

Modifying a remote context requires careful serialisation with requests
on that context, and that serialisation requires us to take their
timeline->mutex. Make it so.

Note that while struct_mutex rules, we can't create more than one
request in parallel, but that age is soon coming to an end.

v2: Though it doesn't affect the current users, contexts may share
timelines so check if we already hold the right mutex.
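
The v2 rule can be modelled in isolation: only take the remote timeline's
mutex if the new request was not built on that same timeline, because in
the shared-timeline case we already hold it. A minimal pthread sketch of
that shape (the kernel uses mutex_lock_interruptible_nested() with
SINGLE_DEPTH_NESTING to keep lockdep happy; none of that is modelled
here):

#include <pthread.h>

struct timeline {
	pthread_mutex_t mutex;
	/* last_request and friends elided */
};

static int prepare_remote_request(struct timeline *rq_tl,
				  struct timeline *remote_tl)
{
	int err = 0;

	if (rq_tl != remote_tl)			/* beware timeline sharing */
		pthread_mutex_lock(&remote_tl->mutex);

	/*
	 * With the remote timeline held either way, queue the request
	 * after all other activity on that timeline and keep the remote
	 * context alive until the request is retired.
	 */

	if (rq_tl != remote_tl)
		pthread_mutex_unlock(&remote_tl->mutex);
	return err;
}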

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_context.c | 16 ++-------
 drivers/gpu/drm/i915/gt/intel_context.c     | 38 +++++++++++++++++++++
 drivers/gpu/drm/i915/gt/intel_context.h     |  3 ++
 drivers/gpu/drm/i915/i915_perf.c            |  7 +---
 4 files changed, 44 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 6b5644d5a99b..0551cea321cc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -1174,20 +1174,8 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 
-	/* Queue this switch after all other activity by this context. */
-	ret = i915_active_request_set(&ce->ring->timeline->last_request, rq);
-	if (ret)
-		goto out_add;
-
-	/*
-	 * Guarantee context image and the timeline remains pinned until the
-	 * modifying request is retired by setting the ce activity tracker.
-	 *
-	 * But we only need to take one pin on the account of it. Or in other
-	 * words transfer the pinned ce object to tracked active request.
-	 */
-	GEM_BUG_ON(i915_active_is_idle(&ce->active));
-	ret = i915_active_ref(&ce->active, rq->fence.context, rq);
+	/* Serialise with the remote context */
+	ret = intel_context_prepare_remote_request(ce, rq);
 	if (ret)
 		goto out_add;
 
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index f012ec898aba..8938f798608b 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -239,6 +239,44 @@ void intel_context_exit_engine(struct intel_context *ce)
 	intel_engine_pm_put(ce->engine);
 }
 
+int intel_context_prepare_remote_request(struct intel_context *ce,
+					 struct i915_request *rq)
+{
+	struct intel_timeline *tl = ce->ring->timeline;
+	int err;
+
+	/* Only suitable for use in remotely modifying this context */
+	GEM_BUG_ON(rq->hw_context == ce);
+
+	if (rq->timeline != tl) { /* beware timeline sharing */
+		err = mutex_lock_interruptible_nested(&tl->mutex,
+						      SINGLE_DEPTH_NESTING);
+		if (err)
+			return err;
+	}
+	lockdep_assert_held(&tl->mutex);
+
+	/* Queue this switch after all other activity by this context. */
+	err = i915_active_request_set(&tl->last_request, rq);
+	if (err)
+		goto unlock;
+
+	/*
+	 * Guarantee context image and the timeline remains pinned until the
+	 * modifying request is retired by setting the ce activity tracker.
+	 *
+	 * But we only need to take one pin on the account of it. Or in other
+	 * words transfer the pinned ce object to tracked active request.
+	 */
+	GEM_BUG_ON(i915_active_is_idle(&ce->active));
+	err = i915_active_ref(&ce->active, rq->fence.context, rq);
+
+unlock:
+	if (rq->timeline != tl)
+		mutex_unlock(&tl->mutex);
+	return err;
+}
+
 struct i915_request *intel_context_create_request(struct intel_context *ce)
 {
 	struct i915_request *rq;
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index 40cd8320fcc3..b41c610c2ce6 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -139,6 +139,9 @@ static inline void intel_context_timeline_unlock(struct intel_context *ce)
 	mutex_unlock(&ce->ring->timeline->mutex);
 }
 
+int intel_context_prepare_remote_request(struct intel_context *ce,
+					 struct i915_request *rq);
+
 struct i915_request *intel_context_create_request(struct intel_context *ce);
 
 #endif /* __INTEL_CONTEXT_H__ */
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 78cb9ec8866c..58a71efc25db 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1768,12 +1768,7 @@ static int gen8_modify_context(struct intel_context *ce,
 		return PTR_ERR(rq);
 
 	/* Serialise with the remote context */
-	err = i915_active_request_set(&ce->ring->timeline->last_request, rq);
-	if (err)
-		goto out_add;
-
-	/* Keep the remote context alive until after we finish editing */
-	err = i915_active_ref(&ce->active, rq->fence.context, rq);
+	err = intel_context_prepare_remote_request(ce, rq);
 	if (err)
 		goto out_add;
 
-- 
2.22.0


* [PATCH 10/24] drm/i915: Teach execbuffer to take the engine wakeref not GT
  2019-07-15  8:09 [PATCH 01/24] drm/i915: Hide unshrinkable context objects from the shrinker Chris Wilson
                   ` (7 preceding siblings ...)
  2019-07-15  8:09 ` [PATCH 09/24] drm/i915: Add to timeline requires the timeline mutex Chris Wilson
@ 2019-07-15  8:09 ` Chris Wilson
  2019-07-15  8:09 ` [PATCH 11/24] drm/i915/gt: Track timeline activeness in enter/exit Chris Wilson
                   ` (17 subsequent siblings)
  26 siblings, 0 replies; 33+ messages in thread
From: Chris Wilson @ 2019-07-15  8:09 UTC (permalink / raw)
  To: intel-gfx

In the next patch, we would like to couple into the engine wakeref to
free the batch pool on idling. The caveat here is that we therefore want
to track the engine wakeref more precisely and to hold it instead of the
broader GT wakeref as we process the ioctl.
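
The resulting ordering in eb_pin_context()/eb_unpin_context() is: pin the
context, take the engine wakeref via intel_context_enter() under the
timeline mutex, and undo both in reverse on the way out. A bare pthread
sketch of that pairing, with flags standing in for the real pin and
wakeref counters:

#include <pthread.h>

struct context {
	pthread_mutex_t timeline_mutex;
	int pinned;
	int entered;	/* set while we hold the engine wakeref */
};

static int pin_for_execbuf(struct context *ce)
{
	ce->pinned = 1;				/* intel_context_pin() */

	pthread_mutex_lock(&ce->timeline_mutex);
	ce->entered = 1;			/* intel_context_enter(): take engine wakeref */
	pthread_mutex_unlock(&ce->timeline_mutex);

	return 0;
}

static void unpin_after_execbuf(struct context *ce)
{
	pthread_mutex_lock(&ce->timeline_mutex);
	ce->entered = 0;			/* intel_context_exit(): drop engine wakeref */
	pthread_mutex_unlock(&ce->timeline_mutex);

	ce->pinned = 0;				/* intel_context_unpin() */
}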

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 .../gpu/drm/i915/gem/i915_gem_execbuffer.c    | 36 ++++++++++++-------
 drivers/gpu/drm/i915/gt/intel_context.h       |  7 ++++
 2 files changed, 31 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 8a2047c4e7c3..619f683d4ee9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -2143,13 +2143,35 @@ static int eb_pin_context(struct i915_execbuffer *eb, struct intel_context *ce)
 	if (err)
 		return err;
 
+	/*
+	 * Take a local wakeref for preparing to dispatch the execbuf as
+	 * we expect to access the hardware fairly frequently in the
+	 * process. Upon first dispatch, we acquire another prolonged
+	 * wakeref that we hold until the GPU has been idle for at least
+	 * 100ms.
+	 */
+	err = intel_context_timeline_lock(ce);
+	if (err)
+		goto err_unpin;
+
+	intel_context_enter(ce);
+	intel_context_timeline_unlock(ce);
+
 	eb->engine = ce->engine;
 	eb->context = ce;
 	return 0;
+
+err_unpin:
+	intel_context_unpin(ce);
+	return err;
 }
 
 static void eb_unpin_context(struct i915_execbuffer *eb)
 {
+	__intel_context_timeline_lock(eb->context);
+	intel_context_exit(eb->context);
+	intel_context_timeline_unlock(eb->context);
+
 	intel_context_unpin(eb->context);
 }
 
@@ -2430,18 +2452,9 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	if (unlikely(err))
 		goto err_destroy;
 
-	/*
-	 * Take a local wakeref for preparing to dispatch the execbuf as
-	 * we expect to access the hardware fairly frequently in the
-	 * process. Upon first dispatch, we acquire another prolonged
-	 * wakeref that we hold until the GPU has been idle for at least
-	 * 100ms.
-	 */
-	intel_gt_pm_get(&eb.i915->gt);
-
 	err = i915_mutex_lock_interruptible(dev);
 	if (err)
-		goto err_rpm;
+		goto err_context;
 
 	err = eb_select_engine(&eb, file, args);
 	if (unlikely(err))
@@ -2606,8 +2619,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	eb_unpin_context(&eb);
 err_unlock:
 	mutex_unlock(&dev->struct_mutex);
-err_rpm:
-	intel_gt_pm_put(&eb.i915->gt);
+err_context:
 	i915_gem_context_put(eb.gem_context);
 err_destroy:
 	eb_destroy(&eb);
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index b41c610c2ce6..99bd8210a234 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -126,6 +126,13 @@ static inline void intel_context_put(struct intel_context *ce)
 	kref_put(&ce->ref, ce->ops->destroy);
 }
 
+static inline void
+__intel_context_timeline_lock(struct intel_context *ce)
+	__acquires(&ce->ring->timeline->mutex)
+{
+	mutex_lock(&ce->ring->timeline->mutex);
+}
+
 static inline int __must_check
 intel_context_timeline_lock(struct intel_context *ce)
 	__acquires(&ce->ring->timeline->mutex)
-- 
2.22.0


* [PATCH 11/24] drm/i915/gt: Track timeline activeness in enter/exit
  2019-07-15  8:09 [PATCH 01/24] drm/i915: Hide unshrinkable context objects from the shrinker Chris Wilson
                   ` (8 preceding siblings ...)
  2019-07-15  8:09 ` [PATCH 10/24] drm/i915: Teach execbuffer to take the engine wakeref not GT Chris Wilson
@ 2019-07-15  8:09 ` Chris Wilson
  2019-07-15  8:09 ` [PATCH 12/24] drm/i915/gt: Convert timeline tracking to spinlock Chris Wilson
                   ` (16 subsequent siblings)
  26 siblings, 0 replies; 33+ messages in thread
From: Chris Wilson @ 2019-07-15  8:09 UTC (permalink / raw)
  To: intel-gfx

Lift the movement of the timeline onto/off the active_list into the
enter/exit hooks, shortening the active tracking span compared to the
existing pin/unpin.
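
The accounting being lifted out of pin/unpin is a nested counter that
gates membership of the global active_list; a bare sketch of the shape
(the real list handling and locking live in intel_timeline_enter() and
intel_timeline_exit() below):

struct tl {
	unsigned int active_count;
	/* struct list_head link: membership of gt->timelines.active_list */
};

static void timeline_enter(struct tl *tl)
{
	if (tl->active_count++)
		return;		/* nested enter: already on the list */
	/* first enter: list_add(&tl->link, &active_list) under the lock */
}

static void timeline_exit(struct tl *tl)
{
	if (--tl->active_count)
		return;		/* other users still active */
	/* last exit: list_del(&tl->link) and discard the stale syncmap */
}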

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gem/i915_gem_pm.c        |  1 -
 drivers/gpu/drm/i915/gt/intel_context.c       |  2 +
 drivers/gpu/drm/i915/gt/intel_engine_pm.c     |  1 +
 drivers/gpu/drm/i915/gt/intel_lrc.c           |  4 +
 drivers/gpu/drm/i915/gt/intel_timeline.c      | 98 +++++++------------
 drivers/gpu/drm/i915/gt/intel_timeline.h      |  3 +-
 .../gpu/drm/i915/gt/intel_timeline_types.h    |  1 +
 drivers/gpu/drm/i915/gt/selftest_timeline.c   |  2 -
 8 files changed, 46 insertions(+), 66 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 8faf262278ae..195ee6eedac0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -39,7 +39,6 @@ static void i915_gem_park(struct drm_i915_private *i915)
 		i915_gem_batch_pool_fini(&engine->batch_pool);
 	}
 
-	intel_timelines_park(i915);
 	i915_vma_parked(i915);
 
 	i915_globals_park();
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 8938f798608b..6bea5d9d8dcf 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -232,10 +232,12 @@ int __init i915_global_context_init(void)
 void intel_context_enter_engine(struct intel_context *ce)
 {
 	intel_engine_pm_get(ce->engine);
+	intel_timeline_enter(ce->ring->timeline);
 }
 
 void intel_context_exit_engine(struct intel_context *ce)
 {
+	intel_timeline_exit(ce->ring->timeline);
 	intel_engine_pm_put(ce->engine);
 }
 
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index e74fbf04a68d..072f65e6a09e 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -89,6 +89,7 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
 
 	/* Check again on the next retirement. */
 	engine->wakeref_serial = engine->serial + 1;
+	intel_timeline_enter(rq->timeline);
 
 	i915_request_add_barriers(rq);
 	__i915_request_commit(rq);
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index fc4c77555e06..b5b2b939d5d2 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -3215,6 +3215,8 @@ static void virtual_context_enter(struct intel_context *ce)
 
 	for (n = 0; n < ve->num_siblings; n++)
 		intel_engine_pm_get(ve->siblings[n]);
+
+	intel_timeline_enter(ce->ring->timeline);
 }
 
 static void virtual_context_exit(struct intel_context *ce)
@@ -3222,6 +3224,8 @@ static void virtual_context_exit(struct intel_context *ce)
 	struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
 	unsigned int n;
 
+	intel_timeline_exit(ce->ring->timeline);
+
 	for (n = 0; n < ve->num_siblings; n++)
 		intel_engine_pm_put(ve->siblings[n]);
 }
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 6daa9eb59e19..4af0b9801d91 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -278,64 +278,11 @@ void intel_timelines_init(struct drm_i915_private *i915)
 	timelines_init(&i915->gt);
 }
 
-static void timeline_add_to_active(struct intel_timeline *tl)
-{
-	struct intel_gt_timelines *gt = &tl->gt->timelines;
-
-	mutex_lock(&gt->mutex);
-	list_add(&tl->link, &gt->active_list);
-	mutex_unlock(&gt->mutex);
-}
-
-static void timeline_remove_from_active(struct intel_timeline *tl)
-{
-	struct intel_gt_timelines *gt = &tl->gt->timelines;
-
-	mutex_lock(&gt->mutex);
-	list_del(&tl->link);
-	mutex_unlock(&gt->mutex);
-}
-
-static void timelines_park(struct intel_gt *gt)
-{
-	struct intel_gt_timelines *timelines = &gt->timelines;
-	struct intel_timeline *timeline;
-
-	mutex_lock(&timelines->mutex);
-	list_for_each_entry(timeline, &timelines->active_list, link) {
-		/*
-		 * All known fences are completed so we can scrap
-		 * the current sync point tracking and start afresh,
-		 * any attempt to wait upon a previous sync point
-		 * will be skipped as the fence was signaled.
-		 */
-		i915_syncmap_free(&timeline->sync);
-	}
-	mutex_unlock(&timelines->mutex);
-}
-
-/**
- * intel_timelines_park - called when the driver idles
- * @i915: the drm_i915_private device
- *
- * When the driver is completely idle, we know that all of our sync points
- * have been signaled and our tracking is then entirely redundant. Any request
- * to wait upon an older sync point will be completed instantly as we know
- * the fence is signaled and therefore we will not even look them up in the
- * sync point map.
- */
-void intel_timelines_park(struct drm_i915_private *i915)
-{
-	timelines_park(&i915->gt);
-}
-
 void intel_timeline_fini(struct intel_timeline *timeline)
 {
 	GEM_BUG_ON(timeline->pin_count);
 	GEM_BUG_ON(!list_empty(&timeline->requests));
 
-	i915_syncmap_free(&timeline->sync);
-
 	if (timeline->hwsp_cacheline)
 		cacheline_free(timeline->hwsp_cacheline);
 	else
@@ -370,6 +317,7 @@ int intel_timeline_pin(struct intel_timeline *tl)
 	if (tl->pin_count++)
 		return 0;
 	GEM_BUG_ON(!tl->pin_count);
+	GEM_BUG_ON(tl->active_count);
 
 	err = i915_vma_pin(tl->hwsp_ggtt, 0, 0, PIN_GLOBAL | PIN_HIGH);
 	if (err)
@@ -380,7 +328,6 @@ int intel_timeline_pin(struct intel_timeline *tl)
 		offset_in_page(tl->hwsp_offset);
 
 	cacheline_acquire(tl->hwsp_cacheline);
-	timeline_add_to_active(tl);
 
 	return 0;
 
@@ -389,6 +336,40 @@ int intel_timeline_pin(struct intel_timeline *tl)
 	return err;
 }
 
+void intel_timeline_enter(struct intel_timeline *tl)
+{
+	struct intel_gt_timelines *timelines = &tl->gt->timelines;
+
+	GEM_BUG_ON(!tl->pin_count);
+	if (tl->active_count++)
+		return;
+	GEM_BUG_ON(!tl->active_count); /* overflow? */
+
+	mutex_lock(&timelines->mutex);
+	list_add(&tl->link, &timelines->active_list);
+	mutex_unlock(&timelines->mutex);
+}
+
+void intel_timeline_exit(struct intel_timeline *tl)
+{
+	struct intel_gt_timelines *timelines = &tl->gt->timelines;
+
+	GEM_BUG_ON(!tl->active_count);
+	if (--tl->active_count)
+		return;
+
+	mutex_lock(&timelines->mutex);
+	list_del(&tl->link);
+	mutex_unlock(&timelines->mutex);
+
+	/*
+	 * Since this timeline is idle, all barriers upon which we were waiting
+	 * must also be complete and so we can discard the last used barriers
+	 * without loss of information.
+	 */
+	i915_syncmap_free(&tl->sync);
+}
+
 static u32 timeline_advance(struct intel_timeline *tl)
 {
 	GEM_BUG_ON(!tl->pin_count);
@@ -546,16 +527,9 @@ void intel_timeline_unpin(struct intel_timeline *tl)
 	if (--tl->pin_count)
 		return;
 
-	timeline_remove_from_active(tl);
+	GEM_BUG_ON(tl->active_count);
 	cacheline_release(tl->hwsp_cacheline);
 
-	/*
-	 * Since this timeline is idle, all bariers upon which we were waiting
-	 * must also be complete and so we can discard the last used barriers
-	 * without loss of information.
-	 */
-	i915_syncmap_free(&tl->sync);
-
 	__i915_vma_unpin(tl->hwsp_ggtt);
 }
 
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.h b/drivers/gpu/drm/i915/gt/intel_timeline.h
index e08cebf64833..f583af1ba18d 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.h
@@ -77,9 +77,11 @@ static inline bool intel_timeline_sync_is_later(struct intel_timeline *tl,
 }
 
 int intel_timeline_pin(struct intel_timeline *tl);
+void intel_timeline_enter(struct intel_timeline *tl);
 int intel_timeline_get_seqno(struct intel_timeline *tl,
 			     struct i915_request *rq,
 			     u32 *seqno);
+void intel_timeline_exit(struct intel_timeline *tl);
 void intel_timeline_unpin(struct intel_timeline *tl);
 
 int intel_timeline_read_hwsp(struct i915_request *from,
@@ -87,7 +89,6 @@ int intel_timeline_read_hwsp(struct i915_request *from,
 			     u32 *hwsp_offset);
 
 void intel_timelines_init(struct drm_i915_private *i915);
-void intel_timelines_park(struct drm_i915_private *i915);
 void intel_timelines_fini(struct drm_i915_private *i915);
 
 #endif
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline_types.h b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
index 9a71aea7a338..b820ee76b7f5 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
@@ -58,6 +58,7 @@ struct intel_timeline {
 	 */
 	struct i915_syncmap *sync;
 
+	unsigned int active_count;
 	struct list_head link;
 	struct intel_gt *gt;
 
diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
index f0a840030382..d54113697745 100644
--- a/drivers/gpu/drm/i915/gt/selftest_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
@@ -816,8 +816,6 @@ static int live_hwsp_recycle(void *arg)
 
 			if (err)
 				goto out;
-
-			intel_timelines_park(i915); /* Encourage recycling! */
 		} while (!__igt_timeout(end_time, NULL));
 	}
 
-- 
2.22.0


* [PATCH 12/24] drm/i915/gt: Convert timeline tracking to spinlock
  2019-07-15  8:09 [PATCH 01/24] drm/i915: Hide unshrinkable context objects from the shrinker Chris Wilson
                   ` (9 preceding siblings ...)
  2019-07-15  8:09 ` [PATCH 11/24] drm/i915/gt: Track timeline activeness in enter/exit Chris Wilson
@ 2019-07-15  8:09 ` Chris Wilson
  2019-07-15  8:09 ` [PATCH 13/24] drm/i915/gt: Guard timeline pinning with its own mutex Chris Wilson
                   ` (15 subsequent siblings)
  26 siblings, 0 replies; 33+ messages in thread
From: Chris Wilson @ 2019-07-15  8:09 UTC (permalink / raw)
  To: intel-gfx

Convert the manipulation of the active timeline list to use a spinlock so
that we can perform the updates from underneath a quick interrupt callback.
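
A consequence visible in the intel_reset.c hunk below is that we may no
longer sleep while walking the list: any wait has to drop the spinlock
first and then restart the iteration, since the list may have changed in
the meantime. A standalone sketch of that pattern, with pthread spinlocks
standing in for the kernel spinlock and the grab/wait callbacks left as
placeholders:

#include <pthread.h>

struct node {
	struct node *next;
};

struct active_list {
	pthread_spinlock_t lock;
	struct node head;	/* head.next is the first element */
};

static void drain(struct active_list *a,
		  int (*grab)(struct node *),	  /* true if there is work to wait on */
		  void (*wait_on)(struct node *)) /* may sleep; work completes here */
{
	struct node *n;

	pthread_spin_lock(&a->lock);
	for (n = a->head.next; n; n = n->next) {
		if (!grab(n))
			continue;

		pthread_spin_unlock(&a->lock);	/* never sleep under a spinlock */
		wait_on(n);
		pthread_spin_lock(&a->lock);

		n = &a->head;			/* list may have changed: restart */
	}
	pthread_spin_unlock(&a->lock);
}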

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gt/intel_gt_types.h |  2 +-
 drivers/gpu/drm/i915/gt/intel_reset.c    | 10 ++++++++--
 drivers/gpu/drm/i915/gt/intel_timeline.c | 12 +++++-------
 drivers/gpu/drm/i915/i915_gem.c          | 20 ++++++++++----------
 4 files changed, 24 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index 5fd11e361d03..b13f63e52203 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -40,7 +40,7 @@ struct intel_gt {
 	struct intel_uc uc;
 
 	struct intel_gt_timelines {
-		struct mutex mutex; /* protects list */
+		spinlock_t lock; /* protects active_list */
 		struct list_head active_list;
 
 		/* Pack multiple timelines' seqnos into the same page */
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 7ddedfb16aa2..345d294061a4 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -871,7 +871,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
 	 *
 	 * No more can be submitted until we reset the wedged bit.
 	 */
-	mutex_lock(&timelines->mutex);
+	spin_lock(&timelines->lock);
 	list_for_each_entry(tl, &timelines->active_list, link) {
 		struct i915_request *rq;
 
@@ -879,6 +879,8 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
 		if (!rq)
 			continue;
 
+		spin_unlock(&timelines->lock);
+
 		/*
 		 * All internal dependencies (i915_requests) will have
 		 * been flushed by the set-wedge, but we may be stuck waiting
@@ -888,8 +890,12 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
 		 */
 		dma_fence_default_wait(&rq->fence, false, MAX_SCHEDULE_TIMEOUT);
 		i915_request_put(rq);
+
+		/* Restart iteration after dropping lock */
+		spin_lock(&timelines->lock);
+		tl = list_entry(&timelines->active_list, typeof(*tl), link);
 	}
-	mutex_unlock(&timelines->mutex);
+	spin_unlock(&timelines->lock);
 
 	intel_gt_sanitize(gt, false);
 
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 4af0b9801d91..355dfc52c804 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -266,7 +266,7 @@ static void timelines_init(struct intel_gt *gt)
 {
 	struct intel_gt_timelines *timelines = &gt->timelines;
 
-	mutex_init(&timelines->mutex);
+	spin_lock_init(&timelines->lock);
 	INIT_LIST_HEAD(&timelines->active_list);
 
 	spin_lock_init(&timelines->hwsp_lock);
@@ -345,9 +345,9 @@ void intel_timeline_enter(struct intel_timeline *tl)
 		return;
 	GEM_BUG_ON(!tl->active_count); /* overflow? */
 
-	mutex_lock(&timelines->mutex);
+	spin_lock(&timelines->lock);
 	list_add(&tl->link, &timelines->active_list);
-	mutex_unlock(&timelines->mutex);
+	spin_unlock(&timelines->lock);
 }
 
 void intel_timeline_exit(struct intel_timeline *tl)
@@ -358,9 +358,9 @@ void intel_timeline_exit(struct intel_timeline *tl)
 	if (--tl->active_count)
 		return;
 
-	mutex_lock(&timelines->mutex);
+	spin_lock(&timelines->lock);
 	list_del(&tl->link);
-	mutex_unlock(&timelines->mutex);
+	spin_unlock(&timelines->lock);
 
 	/*
 	 * Since this timeline is idle, all bariers upon which we were waiting
@@ -548,8 +548,6 @@ static void timelines_fini(struct intel_gt *gt)
 
 	GEM_BUG_ON(!list_empty(&timelines->active_list));
 	GEM_BUG_ON(!list_empty(&timelines->hwsp_free_list));
-
-	mutex_destroy(&timelines->mutex);
 }
 
 void intel_timelines_fini(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a207b90924e4..05dbf54281a8 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -909,20 +909,20 @@ static int wait_for_engines(struct intel_gt *gt)
 
 static long
 wait_for_timelines(struct drm_i915_private *i915,
-		   unsigned int flags, long timeout)
+		   unsigned int wait, long timeout)
 {
-	struct intel_gt_timelines *gt = &i915->gt.timelines;
+	struct intel_gt_timelines *timelines = &i915->gt.timelines;
 	struct intel_timeline *tl;
 
-	mutex_lock(&gt->mutex);
-	list_for_each_entry(tl, &gt->active_list, link) {
+	spin_lock(&timelines->lock);
+	list_for_each_entry(tl, &timelines->active_list, link) {
 		struct i915_request *rq;
 
 		rq = i915_active_request_get_unlocked(&tl->last_request);
 		if (!rq)
 			continue;
 
-		mutex_unlock(&gt->mutex);
+		spin_unlock(&timelines->lock);
 
 		/*
 		 * "Race-to-idle".
@@ -933,19 +933,19 @@ wait_for_timelines(struct drm_i915_private *i915,
 		 * want to complete as quickly as possible to avoid prolonged
 		 * stalls, so allow the gpu to boost to maximum clocks.
 		 */
-		if (flags & I915_WAIT_FOR_IDLE_BOOST)
+		if (wait & I915_WAIT_FOR_IDLE_BOOST)
 			gen6_rps_boost(rq);
 
-		timeout = i915_request_wait(rq, flags, timeout);
+		timeout = i915_request_wait(rq, wait, timeout);
 		i915_request_put(rq);
 		if (timeout < 0)
 			return timeout;
 
 		/* restart after reacquiring the lock */
-		mutex_lock(&gt->mutex);
-		tl = list_entry(&gt->active_list, typeof(*tl), link);
+		spin_lock(&timelines->lock);
+		tl = list_entry(&timelines->active_list, typeof(*tl), link);
 	}
-	mutex_unlock(&gt->mutex);
+	spin_unlock(&timelines->lock);
 
 	return timeout;
 }
-- 
2.22.0

* [PATCH 13/24] drm/i915/gt: Guard timeline pinning with its own mutex
  2019-07-15  8:09 [PATCH 01/24] drm/i915: Hide unshrinkable context objects from the shrinker Chris Wilson
                   ` (10 preceding siblings ...)
  2019-07-15  8:09 ` [PATCH 12/24] drm/i915/gt: Convert timeline tracking to spinlock Chris Wilson
@ 2019-07-15  8:09 ` Chris Wilson
  2019-07-15  8:09 ` [PATCH 14/24] drm/i915: Protect request retirement with timeline->mutex Chris Wilson
                   ` (14 subsequent siblings)
  26 siblings, 0 replies; 33+ messages in thread
From: Chris Wilson @ 2019-07-15  8:09 UTC (permalink / raw)
  To: intel-gfx

In preparation for removing struct_mutex from around context retirement,
we need to make timeline pinning safe. Since multiple engines/contexts
can share a single timeline, it needs to be protected by a mutex.
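
A rough standalone sketch of the atomic pin-count scheme the patch adopts for
intel_timeline_pin() (hypothetical struct, with the resource acquisition shown
only as comments; the real slow path pins the HWSP vma):

#include <linux/atomic.h>

struct pinnable {
	atomic_t pin_count;
	/* ... backing resource lives here ... */
};

static int do_pin(struct pinnable *p)
{
	/* Fast path: already pinned, just take another reference. */
	if (atomic_add_unless(&p->pin_count, 1, 0))
		return 0;

	/* Slow path: acquire the backing resource for the first pin. */
	/* err = acquire_resource(p); if (err) return err; */

	/*
	 * Publish the pin last; if another thread raced us through the
	 * slow path, undo our duplicate acquisition.
	 */
	if (atomic_fetch_inc(&p->pin_count)) {
		/* release_resource(p); */
	}

	return 0;
}

static void do_unpin(struct pinnable *p)
{
	if (!atomic_dec_and_test(&p->pin_count))
		return;

	/* Final unpin: release the backing resource. */
	/* release_resource(p); */
}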

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gt/intel_timeline.c      | 27 +++++++++----------
 .../gpu/drm/i915/gt/intel_timeline_types.h    |  2 +-
 drivers/gpu/drm/i915/gt/mock_engine.c         |  6 ++---
 3 files changed, 16 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 355dfc52c804..7b476cd55dac 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -211,9 +211,9 @@ int intel_timeline_init(struct intel_timeline *timeline,
 	void *vaddr;
 
 	kref_init(&timeline->kref);
+	atomic_set(&timeline->pin_count, 0);
 
 	timeline->gt = gt;
-	timeline->pin_count = 0;
 
 	timeline->has_initial_breadcrumb = !hwsp;
 	timeline->hwsp_cacheline = NULL;
@@ -280,7 +280,7 @@ void intel_timelines_init(struct drm_i915_private *i915)
 
 void intel_timeline_fini(struct intel_timeline *timeline)
 {
-	GEM_BUG_ON(timeline->pin_count);
+	GEM_BUG_ON(atomic_read(&timeline->pin_count));
 	GEM_BUG_ON(!list_empty(&timeline->requests));
 
 	if (timeline->hwsp_cacheline)
@@ -314,33 +314,31 @@ int intel_timeline_pin(struct intel_timeline *tl)
 {
 	int err;
 
-	if (tl->pin_count++)
+	if (atomic_add_unless(&tl->pin_count, 1, 0))
 		return 0;
-	GEM_BUG_ON(!tl->pin_count);
-	GEM_BUG_ON(tl->active_count);
 
 	err = i915_vma_pin(tl->hwsp_ggtt, 0, 0, PIN_GLOBAL | PIN_HIGH);
 	if (err)
-		goto unpin;
+		return err;
 
 	tl->hwsp_offset =
 		i915_ggtt_offset(tl->hwsp_ggtt) +
 		offset_in_page(tl->hwsp_offset);
 
 	cacheline_acquire(tl->hwsp_cacheline);
+	if (atomic_fetch_inc(&tl->pin_count)) {
+		cacheline_release(tl->hwsp_cacheline);
+		__i915_vma_unpin(tl->hwsp_ggtt);
+	}
 
 	return 0;
-
-unpin:
-	tl->pin_count = 0;
-	return err;
 }
 
 void intel_timeline_enter(struct intel_timeline *tl)
 {
 	struct intel_gt_timelines *timelines = &tl->gt->timelines;
 
-	GEM_BUG_ON(!tl->pin_count);
+	GEM_BUG_ON(!atomic_read(&tl->pin_count));
 	if (tl->active_count++)
 		return;
 	GEM_BUG_ON(!tl->active_count); /* overflow? */
@@ -372,7 +370,7 @@ void intel_timeline_exit(struct intel_timeline *tl)
 
 static u32 timeline_advance(struct intel_timeline *tl)
 {
-	GEM_BUG_ON(!tl->pin_count);
+	GEM_BUG_ON(!atomic_read(&tl->pin_count));
 	GEM_BUG_ON(tl->seqno & tl->has_initial_breadcrumb);
 
 	return tl->seqno += 1 + tl->has_initial_breadcrumb;
@@ -523,11 +521,10 @@ int intel_timeline_read_hwsp(struct i915_request *from,
 
 void intel_timeline_unpin(struct intel_timeline *tl)
 {
-	GEM_BUG_ON(!tl->pin_count);
-	if (--tl->pin_count)
+	GEM_BUG_ON(!atomic_read(&tl->pin_count));
+	if (!atomic_dec_and_test(&tl->pin_count))
 		return;
 
-	GEM_BUG_ON(tl->active_count);
 	cacheline_release(tl->hwsp_cacheline);
 
 	__i915_vma_unpin(tl->hwsp_ggtt);
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline_types.h b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
index b820ee76b7f5..8dd14a2b8781 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
@@ -25,7 +25,7 @@ struct intel_timeline {
 
 	struct mutex mutex; /* protects the flow of requests */
 
-	unsigned int pin_count;
+	atomic_t pin_count;
 	const u32 *hwsp_seqno;
 	struct i915_vma *hwsp_ggtt;
 	u32 hwsp_offset;
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 490ebd121f4c..a48b36d31e65 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -38,13 +38,13 @@ struct mock_ring {
 
 static void mock_timeline_pin(struct intel_timeline *tl)
 {
-	tl->pin_count++;
+	atomic_inc(&tl->pin_count);
 }
 
 static void mock_timeline_unpin(struct intel_timeline *tl)
 {
-	GEM_BUG_ON(!tl->pin_count);
-	tl->pin_count--;
+	GEM_BUG_ON(!atomic_read(&tl->pin_count));
+	atomic_dec(&tl->pin_count);
 }
 
 static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
-- 
2.22.0

* [PATCH 14/24] drm/i915: Protect request retirement with timeline->mutex
  2019-07-15  8:09 [PATCH 01/24] drm/i915: Hide unshrinkable context objects from the shrinker Chris Wilson
                   ` (11 preceding siblings ...)
  2019-07-15  8:09 ` [PATCH 13/24] drm/i915/gt: Guard timeline pinning with its own mutex Chris Wilson
@ 2019-07-15  8:09 ` Chris Wilson
  2019-07-15  8:09 ` [PATCH 15/24] drm/i915: Replace struct_mutex for batch pool serialisation Chris Wilson
                   ` (13 subsequent siblings)
  26 siblings, 0 replies; 33+ messages in thread
From: Chris Wilson @ 2019-07-15  8:09 UTC (permalink / raw)
  To: intel-gfx

Forgo the struct_mutex requirement for request retirement as we have
been transitioning over to only using the timeline->mutex for
controlling the lifetime of a request on that timeline.
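
For reference, a minimal standalone sketch of in-order retirement serialised
by the timeline mutex (hypothetical types; the real retire drops references
and runs callbacks rather than freeing directly):

#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>

struct timeline {
	struct mutex mutex;		/* protects @requests */
	struct list_head requests;	/* oldest request first */
};

struct request {
	struct list_head link;		/* entry in timeline->requests */
	bool completed;			/* stand-in for a HWSP seqno check */
};

/* Retire completed requests in order; the caller holds tl->mutex. */
static void retire_completed(struct timeline *tl)
{
	struct request *rq, *rn;

	lockdep_assert_held(&tl->mutex);

	list_for_each_entry_safe(rq, rn, &tl->requests, link) {
		if (!rq->completed)
			break;		/* retirement is strictly in order */

		list_del(&rq->link);
		kfree(rq);
	}
}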

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 .../gpu/drm/i915/gem/i915_gem_execbuffer.c    | 192 ++++++++++--------
 drivers/gpu/drm/i915/gt/intel_context.h       |  25 +--
 drivers/gpu/drm/i915/gt/intel_engine_cs.c     |   1 -
 drivers/gpu/drm/i915/gt/intel_engine_types.h  |   2 -
 drivers/gpu/drm/i915/gt/intel_gt.c            |   1 -
 drivers/gpu/drm/i915/gt/intel_gt_types.h      |   2 -
 drivers/gpu/drm/i915/gt/intel_lrc.c           |   1 +
 drivers/gpu/drm/i915/gt/intel_ringbuffer.c    |  13 +-
 drivers/gpu/drm/i915/gt/mock_engine.c         |   1 -
 drivers/gpu/drm/i915/i915_request.c           | 151 +++++++-------
 drivers/gpu/drm/i915/i915_request.h           |   3 -
 11 files changed, 203 insertions(+), 189 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 619f683d4ee9..dd5df180109b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -739,63 +739,6 @@ static int eb_select_context(struct i915_execbuffer *eb)
 	return 0;
 }
 
-static struct i915_request *__eb_wait_for_ring(struct intel_ring *ring)
-{
-	struct i915_request *rq;
-
-	/*
-	 * Completely unscientific finger-in-the-air estimates for suitable
-	 * maximum user request size (to avoid blocking) and then backoff.
-	 */
-	if (intel_ring_update_space(ring) >= PAGE_SIZE)
-		return NULL;
-
-	/*
-	 * Find a request that after waiting upon, there will be at least half
-	 * the ring available. The hysteresis allows us to compete for the
-	 * shared ring and should mean that we sleep less often prior to
-	 * claiming our resources, but not so long that the ring completely
-	 * drains before we can submit our next request.
-	 */
-	list_for_each_entry(rq, &ring->request_list, ring_link) {
-		if (__intel_ring_space(rq->postfix,
-				       ring->emit, ring->size) > ring->size / 2)
-			break;
-	}
-	if (&rq->ring_link == &ring->request_list)
-		return NULL; /* weird, we will check again later for real */
-
-	return i915_request_get(rq);
-}
-
-static int eb_wait_for_ring(const struct i915_execbuffer *eb)
-{
-	struct i915_request *rq;
-	int ret = 0;
-
-	/*
-	 * Apply a light amount of backpressure to prevent excessive hogs
-	 * from blocking waiting for space whilst holding struct_mutex and
-	 * keeping all of their resources pinned.
-	 */
-
-	rq = __eb_wait_for_ring(eb->context->ring);
-	if (rq) {
-		mutex_unlock(&eb->i915->drm.struct_mutex);
-
-		if (i915_request_wait(rq,
-				      I915_WAIT_INTERRUPTIBLE,
-				      MAX_SCHEDULE_TIMEOUT) < 0)
-			ret = -EINTR;
-
-		i915_request_put(rq);
-
-		mutex_lock(&eb->i915->drm.struct_mutex);
-	}
-
-	return ret;
-}
-
 static int eb_lookup_vmas(struct i915_execbuffer *eb)
 {
 	struct radix_tree_root *handles_vma = &eb->gem_context->handles_vma;
@@ -2122,10 +2065,75 @@ static const enum intel_engine_id user_ring_map[] = {
 	[I915_EXEC_VEBOX]	= VECS0
 };
 
-static int eb_pin_context(struct i915_execbuffer *eb, struct intel_context *ce)
+static struct i915_request *eb_throttle(struct intel_context *ce)
+{
+	struct intel_ring *ring = ce->ring;
+	struct intel_timeline *tl = ring->timeline;
+	struct i915_request *rq;
+
+	/*
+	 * Completely unscientific finger-in-the-air estimates for suitable
+	 * maximum user request size (to avoid blocking) and then backoff.
+	 */
+	if (intel_ring_update_space(ring) >= PAGE_SIZE)
+		return NULL;
+
+	/*
+	 * Find a request that after waiting upon, there will be at least half
+	 * the ring available. The hysteresis allows us to compete for the
+	 * shared ring and should mean that we sleep less often prior to
+	 * claiming our resources, but not so long that the ring completely
+	 * drains before we can submit our next request.
+	 */
+	list_for_each_entry(rq, &tl->requests, link) {
+		if (rq->ring != ring)
+			continue;
+
+		if (__intel_ring_space(rq->postfix,
+				       ring->emit, ring->size) > ring->size / 2)
+			break;
+	}
+	if (&rq->link == &tl->requests)
+		return NULL; /* weird, we will check again later for real */
+
+	return i915_request_get(rq);
+}
+
+static int
+__eb_pin_context(struct i915_execbuffer *eb, struct intel_context *ce)
 {
 	int err;
 
+	if (likely(atomic_inc_not_zero(&ce->pin_count)))
+		return 0;
+
+	err = mutex_lock_interruptible(&eb->i915->drm.struct_mutex);
+	if (err)
+		return err;
+
+	err = __intel_context_do_pin(ce);
+	mutex_unlock(&eb->i915->drm.struct_mutex);
+
+	return err;
+}
+
+static void
+__eb_unpin_context(struct i915_execbuffer *eb, struct intel_context *ce)
+{
+	if (likely(atomic_add_unless(&ce->pin_count, -1, 1)))
+		return;
+
+	mutex_lock(&eb->i915->drm.struct_mutex);
+	intel_context_unpin(ce);
+	mutex_unlock(&eb->i915->drm.struct_mutex);
+}
+
+static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
+{
+	struct intel_timeline *tl;
+	struct i915_request *rq;
+	int err;
+
 	/*
 	 * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
 	 * EIO if the GPU is already wedged.
@@ -2139,7 +2147,7 @@ static int eb_pin_context(struct i915_execbuffer *eb, struct intel_context *ce)
 	 * GGTT space, so do this first before we reserve a seqno for
 	 * ourselves.
 	 */
-	err = intel_context_pin(ce);
+	err = __eb_pin_context(eb, ce);
 	if (err)
 		return err;
 
@@ -2150,29 +2158,52 @@ static int eb_pin_context(struct i915_execbuffer *eb, struct intel_context *ce)
 	 * wakeref that we hold until the GPU has been idle for at least
 	 * 100ms.
 	 */
-	err = intel_context_timeline_lock(ce);
-	if (err)
+	tl = intel_context_timeline_lock(ce);
+	if (IS_ERR(tl)) {
+		err = PTR_ERR(tl);
 		goto err_unpin;
+	}
 
 	intel_context_enter(ce);
-	intel_context_timeline_unlock(ce);
+	rq = eb_throttle(ce);
+
+	intel_context_timeline_unlock(tl);
+
+	if (rq) {
+		if (i915_request_wait(rq,
+				      I915_WAIT_INTERRUPTIBLE,
+				      MAX_SCHEDULE_TIMEOUT) < 0) {
+			i915_request_put(rq);
+			err = -EINTR;
+			goto err_exit;
+		}
+
+		i915_request_put(rq);
+	}
 
 	eb->engine = ce->engine;
 	eb->context = ce;
 	return 0;
 
+err_exit:
+	mutex_lock(&tl->mutex);
+	intel_context_exit(ce);
+	intel_context_timeline_unlock(tl);
 err_unpin:
-	intel_context_unpin(ce);
+	__eb_unpin_context(eb, ce);
 	return err;
 }
 
-static void eb_unpin_context(struct i915_execbuffer *eb)
+static void eb_unpin_engine(struct i915_execbuffer *eb)
 {
-	__intel_context_timeline_lock(eb->context);
-	intel_context_exit(eb->context);
-	intel_context_timeline_unlock(eb->context);
+	struct intel_context *ce = eb->context;
+	struct intel_timeline *tl = ce->ring->timeline;
+
+	mutex_lock(&tl->mutex);
+	intel_context_exit(ce);
+	intel_context_timeline_unlock(tl);
 
-	intel_context_unpin(eb->context);
+	__eb_unpin_context(eb, ce);
 }
 
 static unsigned int
@@ -2217,9 +2248,9 @@ eb_select_legacy_ring(struct i915_execbuffer *eb,
 }
 
 static int
-eb_select_engine(struct i915_execbuffer *eb,
-		 struct drm_file *file,
-		 struct drm_i915_gem_execbuffer2 *args)
+eb_pin_engine(struct i915_execbuffer *eb,
+	      struct drm_file *file,
+	      struct drm_i915_gem_execbuffer2 *args)
 {
 	struct intel_context *ce;
 	unsigned int idx;
@@ -2234,7 +2265,7 @@ eb_select_engine(struct i915_execbuffer *eb,
 	if (IS_ERR(ce))
 		return PTR_ERR(ce);
 
-	err = eb_pin_context(eb, ce);
+	err = __eb_pin_engine(eb, ce);
 	intel_context_put(ce);
 
 	return err;
@@ -2452,16 +2483,12 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	if (unlikely(err))
 		goto err_destroy;
 
-	err = i915_mutex_lock_interruptible(dev);
-	if (err)
-		goto err_context;
-
-	err = eb_select_engine(&eb, file, args);
+	err = eb_pin_engine(&eb, file, args);
 	if (unlikely(err))
-		goto err_unlock;
+		goto err_context;
 
-	err = eb_wait_for_ring(&eb); /* may temporarily drop struct_mutex */
-	if (unlikely(err))
+	err = i915_mutex_lock_interruptible(dev);
+	if (err)
 		goto err_engine;
 
 	err = eb_relocate(&eb);
@@ -2615,10 +2642,9 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 err_vma:
 	if (eb.exec)
 		eb_release_vmas(&eb);
-err_engine:
-	eb_unpin_context(&eb);
-err_unlock:
 	mutex_unlock(&dev->struct_mutex);
+err_engine:
+	eb_unpin_engine(&eb);
 err_context:
 	i915_gem_context_put(eb.gem_context);
 err_destroy:
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index 99bd8210a234..8429a97a3911 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -12,6 +12,7 @@
 #include "i915_active.h"
 #include "intel_context_types.h"
 #include "intel_engine_types.h"
+#include "intel_timeline_types.h"
 
 void intel_context_init(struct intel_context *ce,
 			struct i915_gem_context *ctx,
@@ -126,24 +127,24 @@ static inline void intel_context_put(struct intel_context *ce)
 	kref_put(&ce->ref, ce->ops->destroy);
 }
 
-static inline void
-__intel_context_timeline_lock(struct intel_context *ce)
-	__acquires(&ce->ring->timeline->mutex)
-{
-	mutex_lock(&ce->ring->timeline->mutex);
-}
-
-static inline int __must_check
+static inline struct intel_timeline *__must_check
 intel_context_timeline_lock(struct intel_context *ce)
 	__acquires(&ce->ring->timeline->mutex)
 {
-	return mutex_lock_interruptible(&ce->ring->timeline->mutex);
+	struct intel_timeline *tl = ce->ring->timeline;
+	int err;
+
+	err = mutex_lock_interruptible(&tl->mutex);
+	if (err)
+		return ERR_PTR(err);
+
+	return tl;
 }
 
-static inline void intel_context_timeline_unlock(struct intel_context *ce)
-	__releases(&ce->ring->timeline->mutex)
+static inline void intel_context_timeline_unlock(struct intel_timeline *tl)
+	__releases(&tl->mutex)
 {
-	mutex_unlock(&ce->ring->timeline->mutex);
+	mutex_unlock(&tl->mutex);
 }
 
 int intel_context_prepare_remote_request(struct intel_context *ce,
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 5f8909546d36..1f5ac045a8d8 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -745,7 +745,6 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
 				engine->status_page.vma))
 		goto out_frame;
 
-	INIT_LIST_HEAD(&frame->ring.request_list);
 	frame->ring.timeline = &frame->timeline;
 	frame->ring.vaddr = frame->cs;
 	frame->ring.size = sizeof(frame->cs);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 6822fbb44d2d..87eab7d13c8c 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -70,8 +70,6 @@ struct intel_ring {
 	void *vaddr;
 
 	struct intel_timeline *timeline;
-	struct list_head request_list;
-	struct list_head active_link;
 
 	/*
 	 * As we have two types of rings, one global to the engine used
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index f7e69db4019d..90ed79285ff8 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -14,7 +14,6 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
 	gt->i915 = i915;
 	gt->uncore = &i915->uncore;
 
-	INIT_LIST_HEAD(&gt->active_rings);
 	INIT_LIST_HEAD(&gt->closed_vma);
 
 	spin_lock_init(&gt->closed_lock);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index b13f63e52203..bfdabf49b1e7 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -48,8 +48,6 @@ struct intel_gt {
 		struct list_head hwsp_free_list;
 	} timelines;
 
-	struct list_head active_rings;
-
 	struct intel_wakeref wakeref;
 
 	struct list_head closed_vma;
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index b5b2b939d5d2..18d9ddc5cd58 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1553,6 +1553,7 @@ static void execlists_context_unpin(struct intel_context *ce)
 {
 	i915_gem_context_unpin_hw_id(ce->gem_context);
 	i915_gem_object_unpin_map(ce->state->obj);
+	intel_ring_reset(ce->ring, ce->ring->tail);
 }
 
 static void
diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
index f39c74740d8d..a0924f207d2c 100644
--- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
@@ -1237,7 +1237,7 @@ void intel_ring_unpin(struct intel_ring *ring)
 	GEM_TRACE("ring:%llx unpin\n", ring->timeline->fence_context);
 
 	/* Discard any unused bytes beyond that submitted to hw. */
-	intel_ring_reset(ring, ring->tail);
+	intel_ring_reset(ring, ring->emit);
 
 	i915_vma_unset_ggtt_write(vma);
 	if (i915_vma_is_map_and_fenceable(vma))
@@ -1302,7 +1302,6 @@ intel_engine_create_ring(struct intel_engine_cs *engine,
 		return ERR_PTR(-ENOMEM);
 
 	kref_init(&ring->ref);
-	INIT_LIST_HEAD(&ring->request_list);
 	ring->timeline = intel_timeline_get(timeline);
 
 	ring->size = size;
@@ -1828,21 +1827,25 @@ static int ring_request_alloc(struct i915_request *request)
 
 static noinline int wait_for_space(struct intel_ring *ring, unsigned int bytes)
 {
+	struct intel_timeline *tl = ring->timeline;
 	struct i915_request *target;
 	long timeout;
 
 	if (intel_ring_update_space(ring) >= bytes)
 		return 0;
 
-	GEM_BUG_ON(list_empty(&ring->request_list));
-	list_for_each_entry(target, &ring->request_list, ring_link) {
+	GEM_BUG_ON(list_empty(&tl->requests));
+	list_for_each_entry(target, &tl->requests, link) {
+		if (target->ring != ring)
+			continue;
+
 		/* Would completion of this request free enough space? */
 		if (bytes <= __intel_ring_space(target->postfix,
 						ring->emit, ring->size))
 			break;
 	}
 
-	if (WARN_ON(&target->ring_link == &ring->request_list))
+	if (GEM_WARN_ON(&target->link == &tl->requests))
 		return -ENOSPC;
 
 	timeout = i915_request_wait(target,
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index a48b36d31e65..5bcb461b8372 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -68,7 +68,6 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
 	ring->base.timeline = &ring->timeline;
 	atomic_set(&ring->base.pin_count, 1);
 
-	INIT_LIST_HEAD(&ring->base.request_list);
 	intel_ring_update_space(&ring->base);
 
 	return &ring->base;
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 8ac7d14ec8c9..92313a59563c 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -180,40 +180,6 @@ i915_request_remove_from_client(struct i915_request *request)
 	spin_unlock(&file_priv->mm.lock);
 }
 
-static void advance_ring(struct i915_request *request)
-{
-	struct intel_ring *ring = request->ring;
-	unsigned int tail;
-
-	/*
-	 * We know the GPU must have read the request to have
-	 * sent us the seqno + interrupt, so use the position
-	 * of tail of the request to update the last known position
-	 * of the GPU head.
-	 *
-	 * Note this requires that we are always called in request
-	 * completion order.
-	 */
-	GEM_BUG_ON(!list_is_first(&request->ring_link, &ring->request_list));
-	if (list_is_last(&request->ring_link, &ring->request_list)) {
-		/*
-		 * We may race here with execlists resubmitting this request
-		 * as we retire it. The resubmission will move the ring->tail
-		 * forwards (to request->wa_tail). We either read the
-		 * current value that was written to hw, or the value that
-		 * is just about to be. Either works, if we miss the last two
-		 * noops - they are safe to be replayed on a reset.
-		 */
-		tail = READ_ONCE(request->tail);
-		list_del(&ring->active_link);
-	} else {
-		tail = request->postfix;
-	}
-	list_del_init(&request->ring_link);
-
-	ring->head = tail;
-}
-
 static void free_capture_list(struct i915_request *request)
 {
 	struct i915_capture_list *capture;
@@ -231,7 +197,7 @@ static bool i915_request_retire(struct i915_request *rq)
 {
 	struct i915_active_request *active, *next;
 
-	lockdep_assert_held(&rq->i915->drm.struct_mutex);
+	lockdep_assert_held(&rq->timeline->mutex);
 	if (!i915_request_completed(rq))
 		return false;
 
@@ -243,7 +209,17 @@ static bool i915_request_retire(struct i915_request *rq)
 	GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
 	trace_i915_request_retire(rq);
 
-	advance_ring(rq);
+	/*
+	 * We know the GPU must have read the request to have
+	 * sent us the seqno + interrupt, so use the position
+	 * of tail of the request to update the last known position
+	 * of the GPU head.
+	 *
+	 * Note this requires that we are always called in request
+	 * completion order.
+	 */
+	GEM_BUG_ON(!list_is_first(&rq->link, &rq->timeline->requests));
+	rq->ring->head = rq->postfix;
 
 	/*
 	 * Walk through the active list, calling retire on each. This allows
@@ -320,7 +296,7 @@ static bool i915_request_retire(struct i915_request *rq)
 
 void i915_request_retire_upto(struct i915_request *rq)
 {
-	struct intel_ring *ring = rq->ring;
+	struct intel_timeline * const tl = rq->timeline;
 	struct i915_request *tmp;
 
 	GEM_TRACE("%s fence %llx:%lld, current %d\n",
@@ -328,15 +304,11 @@ void i915_request_retire_upto(struct i915_request *rq)
 		  rq->fence.context, rq->fence.seqno,
 		  hwsp_seqno(rq));
 
-	lockdep_assert_held(&rq->i915->drm.struct_mutex);
+	lockdep_assert_held(&tl->mutex);
 	GEM_BUG_ON(!i915_request_completed(rq));
 
-	if (list_empty(&rq->ring_link))
-		return;
-
 	do {
-		tmp = list_first_entry(&ring->request_list,
-				       typeof(*tmp), ring_link);
+		tmp = list_first_entry(&tl->requests, typeof(*tmp), link);
 	} while (i915_request_retire(tmp) && tmp != rq);
 }
 
@@ -563,29 +535,28 @@ semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 	return NOTIFY_DONE;
 }
 
-static void ring_retire_requests(struct intel_ring *ring)
+static void retire_requests(struct intel_timeline *tl)
 {
 	struct i915_request *rq, *rn;
 
-	list_for_each_entry_safe(rq, rn, &ring->request_list, ring_link)
+	list_for_each_entry_safe(rq, rn, &tl->requests, link)
 		if (!i915_request_retire(rq))
 			break;
 }
 
 static noinline struct i915_request *
-request_alloc_slow(struct intel_context *ce, gfp_t gfp)
+request_alloc_slow(struct intel_timeline *tl, gfp_t gfp)
 {
-	struct intel_ring *ring = ce->ring;
 	struct i915_request *rq;
 
-	if (list_empty(&ring->request_list))
+	if (list_empty(&tl->requests))
 		goto out;
 
 	if (!gfpflags_allow_blocking(gfp))
 		goto out;
 
 	/* Move our oldest request to the slab-cache (if not in use!) */
-	rq = list_first_entry(&ring->request_list, typeof(*rq), ring_link);
+	rq = list_first_entry(&tl->requests, typeof(*rq), link);
 	i915_request_retire(rq);
 
 	rq = kmem_cache_alloc(global.slab_requests,
@@ -594,11 +565,11 @@ request_alloc_slow(struct intel_context *ce, gfp_t gfp)
 		return rq;
 
 	/* Ratelimit ourselves to prevent oom from malicious clients */
-	rq = list_last_entry(&ring->request_list, typeof(*rq), ring_link);
+	rq = list_last_entry(&tl->requests, typeof(*rq), link);
 	cond_synchronize_rcu(rq->rcustate);
 
 	/* Retire our old requests in the hope that we free some */
-	ring_retire_requests(ring);
+	retire_requests(tl);
 
 out:
 	return kmem_cache_alloc(global.slab_requests, gfp);
@@ -649,7 +620,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
 	rq = kmem_cache_alloc(global.slab_requests,
 			      gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
 	if (unlikely(!rq)) {
-		rq = request_alloc_slow(ce, gfp);
+		rq = request_alloc_slow(tl, gfp);
 		if (!rq) {
 			ret = -ENOMEM;
 			goto err_unreserve;
@@ -741,15 +712,15 @@ struct i915_request *
 i915_request_create(struct intel_context *ce)
 {
 	struct i915_request *rq;
-	int err;
+	struct intel_timeline *tl;
 
-	err = intel_context_timeline_lock(ce);
-	if (err)
-		return ERR_PTR(err);
+	tl = intel_context_timeline_lock(ce);
+	if (IS_ERR(tl))
+		return ERR_CAST(tl);
 
 	/* Move our oldest request to the slab-cache (if not in use!) */
-	rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
-	if (!list_is_last(&rq->ring_link, &ce->ring->request_list))
+	rq = list_first_entry(&tl->requests, typeof(*rq), link);
+	if (!list_is_last(&rq->link, &tl->requests))
 		i915_request_retire(rq);
 
 	intel_context_enter(ce);
@@ -759,22 +730,22 @@ i915_request_create(struct intel_context *ce)
 		goto err_unlock;
 
 	/* Check that we do not interrupt ourselves with a new request */
-	rq->cookie = lockdep_pin_lock(&ce->ring->timeline->mutex);
+	rq->cookie = lockdep_pin_lock(&tl->mutex);
 
 	return rq;
 
 err_unlock:
-	intel_context_timeline_unlock(ce);
+	intel_context_timeline_unlock(tl);
 	return rq;
 }
 
 static int
 i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
 {
-	if (list_is_first(&signal->ring_link, &signal->ring->request_list))
+	if (list_is_first(&signal->link, &signal->ring->timeline->requests))
 		return 0;
 
-	signal = list_prev_entry(signal, ring_link);
+	signal = list_prev_entry(signal, link);
 	if (intel_timeline_sync_is_later(rq->timeline, &signal->fence))
 		return 0;
 
@@ -1167,6 +1138,7 @@ struct i915_request *__i915_request_commit(struct i915_request *rq)
 	 */
 	GEM_BUG_ON(rq->reserved_space > ring->space);
 	rq->reserved_space = 0;
+	rq->emitted_jiffies = jiffies;
 
 	/*
 	 * Record the position of the start of the breadcrumb so that
@@ -1180,11 +1152,6 @@ struct i915_request *__i915_request_commit(struct i915_request *rq)
 
 	prev = __i915_request_add_to_timeline(rq);
 
-	list_add_tail(&rq->ring_link, &ring->request_list);
-	if (list_is_first(&rq->ring_link, &ring->request_list))
-		list_add(&ring->active_link, &rq->i915->gt.active_rings);
-	rq->emitted_jiffies = jiffies;
-
 	/*
 	 * Let the backend know a new request has arrived that may need
 	 * to adjust the existing execution schedule due to a high priority
@@ -1237,10 +1204,11 @@ struct i915_request *__i915_request_commit(struct i915_request *rq)
 
 void i915_request_add(struct i915_request *rq)
 {
+	struct intel_timeline * const tl = rq->timeline;
 	struct i915_request *prev;
 
-	lockdep_assert_held(&rq->timeline->mutex);
-	lockdep_unpin_lock(&rq->timeline->mutex, rq->cookie);
+	lockdep_assert_held(&tl->mutex);
+	lockdep_unpin_lock(&tl->mutex, rq->cookie);
 
 	trace_i915_request_add(rq);
 
@@ -1263,10 +1231,10 @@ void i915_request_add(struct i915_request *rq)
 	 * work on behalf of others -- but instead we should benefit from
 	 * improved resource management. (Well, that's the theory at least.)
 	 */
-	if (prev && i915_request_completed(prev))
+	if (prev && i915_request_completed(prev) && prev->timeline == tl)
 		i915_request_retire_upto(prev);
 
-	mutex_unlock(&rq->timeline->mutex);
+	mutex_unlock(&tl->mutex);
 }
 
 static unsigned long local_clock_us(unsigned int *cpu)
@@ -1486,18 +1454,43 @@ long i915_request_wait(struct i915_request *rq,
 
 bool i915_retire_requests(struct drm_i915_private *i915)
 {
-	struct intel_ring *ring, *tmp;
+	struct intel_gt_timelines *timelines = &i915->gt.timelines;
+	struct intel_timeline *tl, *tn;
+	LIST_HEAD(free);
+
+	spin_lock(&timelines->lock);
+	list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
+		if (!mutex_trylock(&tl->mutex))
+			continue;
+
+		intel_timeline_get(tl);
+		GEM_BUG_ON(!tl->active_count);
+		tl->active_count++; /* pin the list element */
+		spin_unlock(&timelines->lock);
 
-	lockdep_assert_held(&i915->drm.struct_mutex);
+		retire_requests(tl);
 
-	list_for_each_entry_safe(ring, tmp,
-				 &i915->gt.active_rings, active_link) {
-		intel_ring_get(ring); /* last rq holds reference! */
-		ring_retire_requests(ring);
-		intel_ring_put(ring);
+		spin_lock(&timelines->lock);
+
+		/* Restart iteration after dropping lock */
+		list_safe_reset_next(tl, tn, link);
+		if (!--tl->active_count)
+			list_del(&tl->link);
+
+		mutex_unlock(&tl->mutex);
+
+		/* Defer the final release to after the spinlock */
+		if (refcount_dec_and_test(&tl->kref.refcount)) {
+			GEM_BUG_ON(tl->active_count);
+			list_add(&tl->link, &free);
+		}
 	}
+	spin_unlock(&timelines->lock);
+
+	list_for_each_entry_safe(tl, tn, &free, link)
+		__intel_timeline_free(&tl->kref);
 
-	return !list_empty(&i915->gt.active_rings);
+	return !list_empty(&timelines->active_list);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 313df3c37158..22e506e960e0 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -223,9 +223,6 @@ struct i915_request {
 	/** timeline->request entry for this request */
 	struct list_head link;
 
-	/** ring->request_list entry for this request */
-	struct list_head ring_link;
-
 	struct drm_i915_file_private *file_priv;
 	/** file_priv list entry for this request */
 	struct list_head client_link;
-- 
2.22.0

* [PATCH 15/24] drm/i915: Replace struct_mutex for batch pool serialisation
  2019-07-15  8:09 [PATCH 01/24] drm/i915: Hide unshrinkable context objects from the shrinker Chris Wilson
                   ` (12 preceding siblings ...)
  2019-07-15  8:09 ` [PATCH 14/24] drm/i915: Protect request retirement with timeline->mutex Chris Wilson
@ 2019-07-15  8:09 ` Chris Wilson
  2019-07-15  8:09 ` [PATCH 16/24] drm/i915/gt: Mark context->active_count as protected by timeline->mutex Chris Wilson
                   ` (12 subsequent siblings)
  26 siblings, 0 replies; 33+ messages in thread
From: Chris Wilson @ 2019-07-15  8:09 UTC (permalink / raw)
  To: intel-gfx; +Cc: Matthew Auld

Switch to tracking activity via i915_active on individual nodes, only
keeping a list of retired objects in the cache, and reaping the cache
when the engine itself idles.
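
The cache itself is just a few power-of-two size buckets; a standalone sketch
of the lookup, mirroring the bucket_for_size() helper added below (bucket
count shown here for illustration):

#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/mm.h>

#define POOL_BUCKETS 4

static struct list_head cache_list[POOL_BUCKETS];

/* Map a page-aligned size (>= PAGE_SIZE) to a power-of-two bucket. */
static struct list_head *size_to_bucket(size_t sz)
{
	int n;

	/*
	 * One bucket per power-of-two size in pages, with everything
	 * larger lumped into the last bucket:
	 * (1 page, 2 pages, 4 pages, 8+ pages).
	 */
	n = fls(sz >> PAGE_SHIFT) - 1;
	if (n >= POOL_BUCKETS)
		n = POOL_BUCKETS - 1;

	return &cache_list[n];
}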

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
---
 drivers/gpu/drm/i915/Makefile                 |   2 +-
 .../gpu/drm/i915/gem/i915_gem_execbuffer.c    |  58 +++---
 drivers/gpu/drm/i915/gem/i915_gem_object.c    |   1 -
 .../gpu/drm/i915/gem/i915_gem_object_types.h  |   1 -
 drivers/gpu/drm/i915/gem/i915_gem_pm.c        |   4 +-
 drivers/gpu/drm/i915/gt/intel_engine.h        |   1 -
 drivers/gpu/drm/i915/gt/intel_engine_cs.c     |  11 +-
 drivers/gpu/drm/i915/gt/intel_engine_pm.c     |   2 +
 drivers/gpu/drm/i915/gt/intel_engine_pool.c   | 177 ++++++++++++++++++
 drivers/gpu/drm/i915/gt/intel_engine_pool.h   |  34 ++++
 .../gpu/drm/i915/gt/intel_engine_pool_types.h |  29 +++
 drivers/gpu/drm/i915/gt/intel_engine_types.h  |   6 +-
 drivers/gpu/drm/i915/gt/mock_engine.c         |   3 +
 drivers/gpu/drm/i915/i915_debugfs.c           |  68 -------
 drivers/gpu/drm/i915/i915_gem_batch_pool.c    | 132 -------------
 drivers/gpu/drm/i915/i915_gem_batch_pool.h    |  26 ---
 16 files changed, 290 insertions(+), 265 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/gt/intel_engine_pool.c
 create mode 100644 drivers/gpu/drm/i915/gt/intel_engine_pool.h
 create mode 100644 drivers/gpu/drm/i915/gt/intel_engine_pool_types.h
 delete mode 100644 drivers/gpu/drm/i915/i915_gem_batch_pool.c
 delete mode 100644 drivers/gpu/drm/i915/i915_gem_batch_pool.h

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index fafc3763dc2d..cdc2d462db8b 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -72,6 +72,7 @@ obj-y += gt/
 gt-y += \
 	gt/intel_breadcrumbs.o \
 	gt/intel_context.o \
+	gt/intel_engine_pool.o \
 	gt/intel_engine_cs.o \
 	gt/intel_engine_pm.o \
 	gt/intel_engine_user.o \
@@ -126,7 +127,6 @@ i915-y += \
 	  $(gem-y) \
 	  i915_active.o \
 	  i915_cmd_parser.o \
-	  i915_gem_batch_pool.o \
 	  i915_gem_evict.o \
 	  i915_gem_fence_reg.o \
 	  i915_gem_gtt.o \
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index dd5df180109b..3506aafada6c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -16,6 +16,7 @@
 
 #include "gem/i915_gem_ioctls.h"
 #include "gt/intel_context.h"
+#include "gt/intel_engine_pool.h"
 #include "gt/intel_gt.h"
 #include "gt/intel_gt_pm.h"
 
@@ -1145,25 +1146,26 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 			     unsigned int len)
 {
 	struct reloc_cache *cache = &eb->reloc_cache;
-	struct drm_i915_gem_object *obj;
+	struct intel_engine_pool_node *pool;
 	struct i915_request *rq;
 	struct i915_vma *batch;
 	u32 *cmd;
 	int err;
 
-	obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, PAGE_SIZE);
-	if (IS_ERR(obj))
-		return PTR_ERR(obj);
+	pool = intel_engine_pool_get(&eb->engine->pool, PAGE_SIZE);
+	if (IS_ERR(pool))
+		return PTR_ERR(pool);
 
-	cmd = i915_gem_object_pin_map(obj,
+	cmd = i915_gem_object_pin_map(pool->obj,
 				      cache->has_llc ?
 				      I915_MAP_FORCE_WB :
 				      I915_MAP_FORCE_WC);
-	i915_gem_object_unpin_pages(obj);
-	if (IS_ERR(cmd))
-		return PTR_ERR(cmd);
+	if (IS_ERR(cmd)) {
+		err = PTR_ERR(cmd);
+		goto out_pool;
+	}
 
-	batch = i915_vma_instance(obj, vma->vm, NULL);
+	batch = i915_vma_instance(pool->obj, vma->vm, NULL);
 	if (IS_ERR(batch)) {
 		err = PTR_ERR(batch);
 		goto err_unmap;
@@ -1179,6 +1181,10 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 		goto err_unpin;
 	}
 
+	err = intel_engine_pool_mark_active(pool, rq);
+	if (err)
+		goto err_request;
+
 	err = reloc_move_to_gpu(rq, vma);
 	if (err)
 		goto err_request;
@@ -1204,7 +1210,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 	cache->rq_size = 0;
 
 	/* Return with batch mapping (cmd) still pinned */
-	return 0;
+	goto out_pool;
 
 skip_request:
 	i915_request_skip(rq, err);
@@ -1213,7 +1219,9 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 err_unpin:
 	i915_vma_unpin(batch);
 err_unmap:
-	i915_gem_object_unpin_map(obj);
+	i915_gem_object_unpin_map(pool->obj);
+out_pool:
+	intel_engine_pool_put(pool);
 	return err;
 }
 
@@ -1957,18 +1965,17 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
 
 static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
 {
-	struct drm_i915_gem_object *shadow_batch_obj;
+	struct intel_engine_pool_node *pool;
 	struct i915_vma *vma;
 	int err;
 
-	shadow_batch_obj = i915_gem_batch_pool_get(&eb->engine->batch_pool,
-						   PAGE_ALIGN(eb->batch_len));
-	if (IS_ERR(shadow_batch_obj))
-		return ERR_CAST(shadow_batch_obj);
+	pool = intel_engine_pool_get(&eb->engine->pool, eb->batch_len);
+	if (IS_ERR(pool))
+		return ERR_CAST(pool);
 
 	err = intel_engine_cmd_parser(eb->engine,
 				      eb->batch->obj,
-				      shadow_batch_obj,
+				      pool->obj,
 				      eb->batch_start_offset,
 				      eb->batch_len,
 				      is_master);
@@ -1977,12 +1984,12 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
 			vma = NULL;
 		else
 			vma = ERR_PTR(err);
-		goto out;
+		goto err;
 	}
 
-	vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
+	vma = i915_gem_object_ggtt_pin(pool->obj, NULL, 0, 0, 0);
 	if (IS_ERR(vma))
-		goto out;
+		goto err;
 
 	eb->vma[eb->buffer_count] = i915_vma_get(vma);
 	eb->flags[eb->buffer_count] =
@@ -1990,8 +1997,11 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
 	vma->exec_flags = &eb->flags[eb->buffer_count];
 	eb->buffer_count++;
 
-out:
-	i915_gem_object_unpin_pages(shadow_batch_obj);
+	vma->private = pool;
+	return vma;
+
+err:
+	intel_engine_pool_put(pool);
 	return vma;
 }
 
@@ -2615,6 +2625,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	 * to explicitly hold another reference here.
 	 */
 	eb.request->batch = eb.batch;
+	if (eb.batch->private)
+		intel_engine_pool_mark_active(eb.batch->private, eb.request);
 
 	trace_i915_request_queue(eb.request, eb.batch_flags);
 	err = eb_submit(&eb);
@@ -2639,6 +2651,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 err_batch_unpin:
 	if (eb.batch_flags & I915_DISPATCH_SECURE)
 		i915_vma_unpin(eb.batch);
+	if (eb.batch->private)
+		intel_engine_pool_put(eb.batch->private);
 err_vma:
 	if (eb.exec)
 		eb_release_vmas(&eb);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 4ea97fca9c35..eccd7f4768f8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -66,7 +66,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 	INIT_LIST_HEAD(&obj->mm.link);
 
 	INIT_LIST_HEAD(&obj->lut_list);
-	INIT_LIST_HEAD(&obj->batch_pool_link);
 
 	init_rcu_head(&obj->rcu);
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 34b51fad02de..d474c6ac4100 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -114,7 +114,6 @@ struct drm_i915_gem_object {
 	unsigned int userfault_count;
 	struct list_head userfault_link;
 
-	struct list_head batch_pool_link;
 	I915_SELFTEST_DECLARE(struct list_head st_link);
 
 	/*
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 195ee6eedac0..cb7a85ac69d0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -34,10 +34,8 @@ static void i915_gem_park(struct drm_i915_private *i915)
 
 	lockdep_assert_held(&i915->drm.struct_mutex);
 
-	for_each_engine(engine, i915, id) {
+	for_each_engine(engine, i915, id)
 		call_idle_barriers(engine); /* cleanup after wedging */
-		i915_gem_batch_pool_fini(&engine->batch_pool);
-	}
 
 	i915_vma_parked(i915);
 
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 30856383e4c5..52f29e618696 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -9,7 +9,6 @@
 #include <linux/random.h>
 #include <linux/seqlock.h>
 
-#include "i915_gem_batch_pool.h"
 #include "i915_pmu.h"
 #include "i915_reg.h"
 #include "i915_request.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 1f5ac045a8d8..8007146a6cf3 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -32,6 +32,7 @@
 
 #include "intel_engine.h"
 #include "intel_engine_pm.h"
+#include "intel_engine_pool.h"
 #include "intel_engine_user.h"
 #include "intel_context.h"
 #include "intel_lrc.h"
@@ -493,11 +494,6 @@ int intel_engines_init(struct drm_i915_private *i915)
 	return err;
 }
 
-static void intel_engine_init_batch_pool(struct intel_engine_cs *engine)
-{
-	i915_gem_batch_pool_init(&engine->batch_pool, engine);
-}
-
 void intel_engine_init_execlists(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -623,10 +619,11 @@ static int intel_engine_setup_common(struct intel_engine_cs *engine)
 	intel_engine_init_breadcrumbs(engine);
 	intel_engine_init_execlists(engine);
 	intel_engine_init_hangcheck(engine);
-	intel_engine_init_batch_pool(engine);
 	intel_engine_init_cmd_parser(engine);
 	intel_engine_init__pm(engine);
 
+	intel_engine_pool_init(&engine->pool);
+
 	/* Use the whole device by default */
 	engine->sseu =
 		intel_sseu_from_device_info(&RUNTIME_INFO(engine->i915)->sseu);
@@ -869,9 +866,9 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 
 	cleanup_status_page(engine);
 
+	intel_engine_pool_fini(&engine->pool);
 	intel_engine_fini_breadcrumbs(engine);
 	intel_engine_cleanup_cmd_parser(engine);
-	i915_gem_batch_pool_fini(&engine->batch_pool);
 
 	if (engine->default_state)
 		i915_gem_object_put(engine->default_state);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 072f65e6a09e..76c828d11b05 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -7,6 +7,7 @@
 #include "i915_drv.h"
 
 #include "intel_engine.h"
+#include "intel_engine_pool.h"
 #include "intel_engine_pm.h"
 #include "intel_gt.h"
 #include "intel_gt_pm.h"
@@ -117,6 +118,7 @@ static int __engine_park(struct intel_wakeref *wf)
 	GEM_TRACE("%s\n", engine->name);
 
 	intel_engine_disarm_breadcrumbs(engine);
+	intel_engine_pool_park(&engine->pool);
 
 	/* Must be reset upon idling, or we may miss the busy wakeup. */
 	GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.c b/drivers/gpu/drm/i915/gt/intel_engine_pool.c
new file mode 100644
index 000000000000..03d90b49584a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pool.c
@@ -0,0 +1,177 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#include "gem/i915_gem_object.h"
+
+#include "i915_drv.h"
+#include "intel_engine_pm.h"
+#include "intel_engine_pool.h"
+
+static struct intel_engine_cs *to_engine(struct intel_engine_pool *pool)
+{
+	return container_of(pool, struct intel_engine_cs, pool);
+}
+
+static struct list_head *
+bucket_for_size(struct intel_engine_pool *pool, size_t sz)
+{
+	int n;
+
+	/*
+	 * Compute a power-of-two bucket, but throw everything greater than
+	 * 16KiB into the same bucket: i.e. the buckets hold objects of
+	 * (1 page, 2 pages, 4 pages, 8+ pages).
+	 */
+	n = fls(sz >> PAGE_SHIFT) - 1;
+	if (n >= ARRAY_SIZE(pool->cache_list))
+		n = ARRAY_SIZE(pool->cache_list) - 1;
+
+	return &pool->cache_list[n];
+}
+
+static void node_free(struct intel_engine_pool_node *node)
+{
+	i915_gem_object_put(node->obj);
+	i915_active_fini(&node->active);
+	kfree(node);
+}
+
+static int pool_active(struct i915_active *ref)
+{
+	struct intel_engine_pool_node *node =
+		container_of(ref, typeof(*node), active);
+	struct reservation_object *resv = node->obj->base.resv;
+	int err;
+
+	if (reservation_object_trylock(resv)) {
+		reservation_object_add_excl_fence(resv, NULL);
+		reservation_object_unlock(resv);
+	}
+
+	err = i915_gem_object_pin_pages(node->obj);
+	if (err)
+		return err;
+
+	/* Hide this pinned object from the shrinker until retired */
+	i915_gem_object_make_unshrinkable(node->obj);
+
+	return 0;
+}
+
+static void pool_retire(struct i915_active *ref)
+{
+	struct intel_engine_pool_node *node =
+		container_of(ref, typeof(*node), active);
+	struct intel_engine_pool *pool = node->pool;
+	struct list_head *list = bucket_for_size(pool, node->obj->base.size);
+	unsigned long flags;
+
+	GEM_BUG_ON(!intel_engine_pm_is_awake(to_engine(pool)));
+
+	i915_gem_object_unpin_pages(node->obj);
+
+	/* Return this object to the shrinker pool */
+	i915_gem_object_make_purgeable(node->obj);
+
+	spin_lock_irqsave(&pool->lock, flags);
+	list_add(&node->link, list);
+	spin_unlock_irqrestore(&pool->lock, flags);
+}
+
+static struct intel_engine_pool_node *
+node_create(struct intel_engine_pool *pool, size_t sz)
+{
+	struct intel_engine_cs *engine = to_engine(pool);
+	struct intel_engine_pool_node *node;
+	struct drm_i915_gem_object *obj;
+
+	node = kmalloc(sizeof(*node),
+		       GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+	if (!node)
+		return ERR_PTR(-ENOMEM);
+
+	node->pool = pool;
+	i915_active_init(engine->i915, &node->active, pool_active, pool_retire);
+
+	obj = i915_gem_object_create_internal(engine->i915, sz);
+	if (IS_ERR(obj)) {
+		i915_active_fini(&node->active);
+		kfree(node);
+		return ERR_CAST(obj);
+	}
+
+	node->obj = obj;
+	return node;
+}
+
+struct intel_engine_pool_node *
+intel_engine_pool_get(struct intel_engine_pool *pool, size_t size)
+{
+	struct intel_engine_pool_node *node;
+	struct list_head *list;
+	unsigned long flags;
+	int ret;
+
+	GEM_BUG_ON(!intel_engine_pm_is_awake(to_engine(pool)));
+
+	size = PAGE_ALIGN(size);
+	list = bucket_for_size(pool, size);
+
+	spin_lock_irqsave(&pool->lock, flags);
+	list_for_each_entry(node, list, link) {
+		if (node->obj->base.size < size)
+			continue;
+		list_del(&node->link);
+		break;
+	}
+	spin_unlock_irqrestore(&pool->lock, flags);
+
+	if (&node->link == list) {
+		node = node_create(pool, size);
+		if (IS_ERR(node))
+			return node;
+	}
+
+	ret = i915_active_acquire(&node->active);
+	if (ret) {
+		node_free(node);
+		return ERR_PTR(ret);
+	}
+
+	return node;
+}
+
+void intel_engine_pool_init(struct intel_engine_pool *pool)
+{
+	int n;
+
+	spin_lock_init(&pool->lock);
+	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
+		INIT_LIST_HEAD(&pool->cache_list[n]);
+}
+
+void intel_engine_pool_park(struct intel_engine_pool *pool)
+{
+	int n;
+
+	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
+		struct list_head *list = &pool->cache_list[n];
+		struct intel_engine_pool_node *node, *nn;
+
+		list_for_each_entry_safe(node, nn, list, link)
+			node_free(node);
+
+		INIT_LIST_HEAD(list);
+	}
+}
+
+void intel_engine_pool_fini(struct intel_engine_pool *pool)
+{
+	int n;
+
+	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
+		GEM_BUG_ON(!list_empty(&pool->cache_list[n]));
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.h b/drivers/gpu/drm/i915/gt/intel_engine_pool.h
new file mode 100644
index 000000000000..f7a0a660c1c9
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pool.h
@@ -0,0 +1,34 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#ifndef INTEL_ENGINE_POOL_H
+#define INTEL_ENGINE_POOL_H
+
+#include "intel_engine_pool_types.h"
+#include "i915_active.h"
+#include "i915_request.h"
+
+struct intel_engine_pool_node *
+intel_engine_pool_get(struct intel_engine_pool *pool, size_t size);
+
+static inline int
+intel_engine_pool_mark_active(struct intel_engine_pool_node *node,
+			      struct i915_request *rq)
+{
+	return i915_active_ref(&node->active, rq->fence.context, rq);
+}
+
+static inline void
+intel_engine_pool_put(struct intel_engine_pool_node *node)
+{
+	i915_active_release(&node->active);
+}
+
+void intel_engine_pool_init(struct intel_engine_pool *pool);
+void intel_engine_pool_park(struct intel_engine_pool *pool);
+void intel_engine_pool_fini(struct intel_engine_pool *pool);
+
+#endif /* INTEL_ENGINE_POOL_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool_types.h b/drivers/gpu/drm/i915/gt/intel_engine_pool_types.h
new file mode 100644
index 000000000000..e31ee361b76f
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pool_types.h
@@ -0,0 +1,29 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#ifndef INTEL_ENGINE_POOL_TYPES_H
+#define INTEL_ENGINE_POOL_TYPES_H
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+#include "i915_active_types.h"
+
+struct drm_i915_gem_object;
+
+struct intel_engine_pool {
+	spinlock_t lock;
+	struct list_head cache_list[4];
+};
+
+struct intel_engine_pool_node {
+	struct i915_active active;
+	struct drm_i915_gem_object *obj;
+	struct list_head link;
+	struct intel_engine_pool *pool;
+};
+
+#endif /* INTEL_ENGINE_POOL_TYPES_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 87eab7d13c8c..1da8d2f34c56 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -17,12 +17,12 @@
 #include <linux/types.h>
 
 #include "i915_gem.h"
-#include "i915_gem_batch_pool.h"
 #include "i915_pmu.h"
 #include "i915_priolist_types.h"
 #include "i915_selftest.h"
-#include "gt/intel_timeline_types.h"
+#include "intel_engine_pool_types.h"
 #include "intel_sseu.h"
+#include "intel_timeline_types.h"
 #include "intel_wakeref.h"
 #include "intel_workarounds_types.h"
 
@@ -355,7 +355,7 @@ struct intel_engine_cs {
 	 * when the command parser is enabled. Prevents the client from
 	 * modifying the batch contents after software parsing.
 	 */
-	struct i915_gem_batch_pool batch_pool;
+	struct intel_engine_pool pool;
 
 	struct intel_hw_status_page status_page;
 	struct i915_ctx_workarounds wa_ctx;
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 5bcb461b8372..b94d57bf2c48 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -27,6 +27,7 @@
 #include "i915_drv.h"
 #include "intel_context.h"
 #include "intel_engine_pm.h"
+#include "intel_engine_pool.h"
 
 #include "mock_engine.h"
 #include "selftests/mock_request.h"
@@ -291,6 +292,8 @@ int mock_engine_init(struct intel_engine_cs *engine)
 	intel_engine_init_execlists(engine);
 	intel_engine_init__pm(engine);
 
+	intel_engine_pool_init(&engine->pool);
+
 	engine->kernel_context =
 		i915_gem_context_get_engine(i915->kernel_context, engine->id);
 	if (IS_ERR(engine->kernel_context))
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 8bdf1d100627..4fa227f01a60 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -295,27 +295,6 @@ static int per_file_stats(int id, void *ptr, void *data)
 			   stats.closed); \
 } while (0)
 
-static void print_batch_pool_stats(struct seq_file *m,
-				   struct drm_i915_private *dev_priv)
-{
-	struct drm_i915_gem_object *obj;
-	struct intel_engine_cs *engine;
-	struct file_stats stats = {};
-	enum intel_engine_id id;
-	int j;
-
-	for_each_engine(engine, dev_priv, id) {
-		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
-			list_for_each_entry(obj,
-					    &engine->batch_pool.cache_list[j],
-					    batch_pool_link)
-				per_file_stats(0, obj, &stats);
-		}
-	}
-
-	print_file_stats(m, "[k]batch pool", stats);
-}
-
 static void print_context_stats(struct seq_file *m,
 				struct drm_i915_private *i915)
 {
@@ -374,58 +353,12 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
 	if (ret)
 		return ret;
 
-	print_batch_pool_stats(m, i915);
 	print_context_stats(m, i915);
 	mutex_unlock(&i915->drm.struct_mutex);
 
 	return 0;
 }
 
-static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
-{
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
-	struct drm_i915_gem_object *obj;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	int total = 0;
-	int ret, j;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-
-	for_each_engine(engine, dev_priv, id) {
-		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
-			int count;
-
-			count = 0;
-			list_for_each_entry(obj,
-					    &engine->batch_pool.cache_list[j],
-					    batch_pool_link)
-				count++;
-			seq_printf(m, "%s cache[%d]: %d objects\n",
-				   engine->name, j, count);
-
-			list_for_each_entry(obj,
-					    &engine->batch_pool.cache_list[j],
-					    batch_pool_link) {
-				seq_puts(m, "   ");
-				describe_obj(m, obj);
-				seq_putc(m, '\n');
-			}
-
-			total += count;
-		}
-	}
-
-	seq_printf(m, "total: %d\n", total);
-
-	mutex_unlock(&dev->struct_mutex);
-
-	return 0;
-}
-
 static void gen8_display_interrupt_info(struct seq_file *m)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -4353,7 +4286,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_gem_objects", i915_gem_object_info, 0},
 	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
 	{"i915_gem_interrupt", i915_interrupt_info, 0},
-	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
 	{"i915_guc_info", i915_guc_info, 0},
 	{"i915_guc_load_status", i915_guc_load_status_info, 0},
 	{"i915_guc_log_dump", i915_guc_log_dump, 0},
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
deleted file mode 100644
index b17f23991253..000000000000
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2014-2018 Intel Corporation
- */
-
-#include "i915_gem_batch_pool.h"
-#include "i915_drv.h"
-
-/**
- * DOC: batch pool
- *
- * In order to submit batch buffers as 'secure', the software command parser
- * must ensure that a batch buffer cannot be modified after parsing. It does
- * this by copying the user provided batch buffer contents to a kernel owned
- * buffer from which the hardware will actually execute, and by carefully
- * managing the address space bindings for such buffers.
- *
- * The batch pool framework provides a mechanism for the driver to manage a
- * set of scratch buffers to use for this purpose. The framework can be
- * extended to support other uses cases should they arise.
- */
-
-/**
- * i915_gem_batch_pool_init() - initialize a batch buffer pool
- * @pool: the batch buffer pool
- * @engine: the associated request submission engine
- */
-void i915_gem_batch_pool_init(struct i915_gem_batch_pool *pool,
-			      struct intel_engine_cs *engine)
-{
-	int n;
-
-	pool->engine = engine;
-
-	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
-		INIT_LIST_HEAD(&pool->cache_list[n]);
-}
-
-/**
- * i915_gem_batch_pool_fini() - clean up a batch buffer pool
- * @pool: the pool to clean up
- *
- * Note: Callers must hold the struct_mutex.
- */
-void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
-{
-	int n;
-
-	lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);
-
-	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
-		struct drm_i915_gem_object *obj, *next;
-
-		list_for_each_entry_safe(obj, next,
-					 &pool->cache_list[n],
-					 batch_pool_link)
-			i915_gem_object_put(obj);
-
-		INIT_LIST_HEAD(&pool->cache_list[n]);
-	}
-}
-
-/**
- * i915_gem_batch_pool_get() - allocate a buffer from the pool
- * @pool: the batch buffer pool
- * @size: the minimum desired size of the returned buffer
- *
- * Returns an inactive buffer from @pool with at least @size bytes,
- * with the pages pinned. The caller must i915_gem_object_unpin_pages()
- * on the returned object.
- *
- * Note: Callers must hold the struct_mutex
- *
- * Return: the buffer object or an error pointer
- */
-struct drm_i915_gem_object *
-i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
-			size_t size)
-{
-	struct drm_i915_gem_object *obj;
-	struct list_head *list;
-	int n, ret;
-
-	lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);
-
-	/* Compute a power-of-two bucket, but throw everything greater than
-	 * 16KiB into the same bucket: i.e. the the buckets hold objects of
-	 * (1 page, 2 pages, 4 pages, 8+ pages).
-	 */
-	n = fls(size >> PAGE_SHIFT) - 1;
-	if (n >= ARRAY_SIZE(pool->cache_list))
-		n = ARRAY_SIZE(pool->cache_list) - 1;
-	list = &pool->cache_list[n];
-
-	list_for_each_entry(obj, list, batch_pool_link) {
-		struct reservation_object *resv = obj->base.resv;
-
-		/* The batches are strictly LRU ordered */
-		if (!reservation_object_test_signaled_rcu(resv, true))
-			break;
-
-		/*
-		 * The object is now idle, clear the array of shared
-		 * fences before we add a new request. Although, we
-		 * remain on the same engine, we may be on a different
-		 * timeline and so may continually grow the array,
-		 * trapping a reference to all the old fences, rather
-		 * than replace the existing fence.
-		 */
-		if (rcu_access_pointer(resv->fence)) {
-			reservation_object_lock(resv, NULL);
-			reservation_object_add_excl_fence(resv, NULL);
-			reservation_object_unlock(resv);
-		}
-
-		if (obj->base.size >= size)
-			goto found;
-	}
-
-	obj = i915_gem_object_create_internal(pool->engine->i915, size);
-	if (IS_ERR(obj))
-		return obj;
-
-found:
-	ret = i915_gem_object_pin_pages(obj);
-	if (ret)
-		return ERR_PTR(ret);
-
-	list_move_tail(&obj->batch_pool_link, list);
-	return obj;
-}
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.h b/drivers/gpu/drm/i915/i915_gem_batch_pool.h
deleted file mode 100644
index feeeeeaa54d8..000000000000
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2014-2018 Intel Corporation
- */
-
-#ifndef I915_GEM_BATCH_POOL_H
-#define I915_GEM_BATCH_POOL_H
-
-#include <linux/types.h>
-
-struct drm_i915_gem_object;
-struct intel_engine_cs;
-
-struct i915_gem_batch_pool {
-	struct intel_engine_cs *engine;
-	struct list_head cache_list[4];
-};
-
-void i915_gem_batch_pool_init(struct i915_gem_batch_pool *pool,
-			      struct intel_engine_cs *engine);
-void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool);
-struct drm_i915_gem_object *
-i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, size_t size);
-
-#endif /* I915_GEM_BATCH_POOL_H */
-- 
2.22.0


* [PATCH 16/24] drm/i915/gt: Mark context->active_count as protected by timeline->mutex
  2019-07-15  8:09 [PATCH 01/24] drm/i915: Hide unshrinkable context objects from the shrinker Chris Wilson
                   ` (13 preceding siblings ...)
  2019-07-15  8:09 ` [PATCH 15/24] drm/i915: Replace struct_mutex for batch pool serialisation Chris Wilson
@ 2019-07-15  8:09 ` Chris Wilson
  2019-07-15  8:09 ` [PATCH 17/24] drm/i915: Forgo last_fence active request tracking Chris Wilson
                   ` (11 subsequent siblings)
  26 siblings, 0 replies; 33+ messages in thread
From: Chris Wilson @ 2019-07-15  8:09 UTC (permalink / raw)
  To: intel-gfx

We use timeline->mutex to protect modifications to
context->active_count, and the associated enable/disable callbacks.
Due to complications with the engine-pm barrier, there is a path where
we used a "superlock" to provide serialised protection and so could not
unconditionally assert with lockdep that it was always held. However,
we can mark the mutex as taken (noting that we may be nested underneath
ourselves), which means we can be reassured that the right
timeline->mutex is always treated as held and let lockdep roam free.
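
As a purely illustrative sketch (not part of this patch), the
lockdep-marking pattern relied on here looks like the following:
mutex_acquire()/mutex_release() tell lockdep that the mutex is held on
this path without actually taking it, so lockdep_assert_held() stays
quiet:

#include <linux/lockdep.h>
#include <linux/mutex.h>

static void example_mark_held(struct mutex *mutex)
{
        /* Pretend to lockdep that @mutex is taken (nested, subclass 2) */
        mutex_acquire(&mutex->dep_map, 2, 0, _THIS_IP_);
}

static void example_mark_released(struct mutex *mutex)
{
        mutex_release(&mutex->dep_map, 0, _THIS_IP_);
}

static void example_marked_section(struct mutex *mutex)
{
        example_mark_held(mutex);
        lockdep_assert_held(mutex); /* satisfied on this path */
        example_mark_released(mutex);
}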

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/gt/intel_context.h       |  3 +++
 drivers/gpu/drm/i915/gt/intel_context_types.h |  2 +-
 drivers/gpu/drm/i915/gt/intel_engine_pm.c     | 12 ++++++++++++
 drivers/gpu/drm/i915/gt/intel_timeline.c      |  4 ++++
 4 files changed, 20 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index 8429a97a3911..02bab98f2c72 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -88,17 +88,20 @@ void intel_context_exit_engine(struct intel_context *ce);
 
 static inline void intel_context_enter(struct intel_context *ce)
 {
+	lockdep_assert_held(&ce->ring->timeline->mutex);
 	if (!ce->active_count++)
 		ce->ops->enter(ce);
 }
 
 static inline void intel_context_mark_active(struct intel_context *ce)
 {
+	lockdep_assert_held(&ce->ring->timeline->mutex);
 	++ce->active_count;
 }
 
 static inline void intel_context_exit(struct intel_context *ce)
 {
+	lockdep_assert_held(&ce->ring->timeline->mutex);
 	GEM_BUG_ON(!ce->active_count);
 	if (!--ce->active_count)
 		ce->ops->exit(ce);
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 4c0e211c715d..c00419e38a77 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -53,7 +53,7 @@ struct intel_context {
 	u32 *lrc_reg_state;
 	u64 lrc_desc;
 
-	unsigned int active_count; /* notionally protected by timeline->mutex */
+	unsigned int active_count; /* protected by timeline->mutex */
 
 	atomic_t pin_count;
 	struct mutex pin_mutex; /* guards pinning and associated on-gpuing */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 76c828d11b05..1218b7d53b88 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -59,6 +59,16 @@ void intel_engine_park(struct intel_engine_cs *engine)
 	}
 }
 
+static inline void __timeline_mark_lock(struct intel_context *ce)
+{
+	mutex_acquire(&ce->ring->timeline->mutex.dep_map, 2, 0, _THIS_IP_);
+}
+
+static inline void __timeline_mark_unlock(struct intel_context *ce)
+{
+	mutex_release(&ce->ring->timeline->mutex.dep_map, 0, _THIS_IP_);
+}
+
 static bool switch_to_kernel_context(struct intel_engine_cs *engine)
 {
 	struct i915_request *rq;
@@ -83,6 +93,7 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
 	 * retiring the last request, thus all rings should be empty and
 	 * all timelines idle.
 	 */
+	__timeline_mark_lock(engine->kernel_context);
 	rq = __i915_request_create(engine->kernel_context, GFP_NOWAIT);
 	if (IS_ERR(rq))
 		/* Context switch failed, hope for the best! Maybe reset? */
@@ -94,6 +105,7 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
 
 	i915_request_add_barriers(rq);
 	__i915_request_commit(rq);
+	__timeline_mark_unlock(engine->kernel_context);
 
 	return false;
 }
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 7b476cd55dac..eafd94d5e211 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -338,6 +338,8 @@ void intel_timeline_enter(struct intel_timeline *tl)
 {
 	struct intel_gt_timelines *timelines = &tl->gt->timelines;
 
+	lockdep_assert_held(&tl->mutex);
+
 	GEM_BUG_ON(!atomic_read(&tl->pin_count));
 	if (tl->active_count++)
 		return;
@@ -352,6 +354,8 @@ void intel_timeline_exit(struct intel_timeline *tl)
 {
 	struct intel_gt_timelines *timelines = &tl->gt->timelines;
 
+	lockdep_assert_held(&tl->mutex);
+
 	GEM_BUG_ON(!tl->active_count);
 	if (--tl->active_count)
 		return;
-- 
2.22.0


* [PATCH 17/24] drm/i915: Forgo last_fence active request tracking
  2019-07-15  8:09 [PATCH 01/24] drm/i915: Hide unshrinkable context objects from the shrinker Chris Wilson
                   ` (14 preceding siblings ...)
  2019-07-15  8:09 ` [PATCH 16/24] drm/i915/gt: Mark context->active_count as protected by timeline->mutex Chris Wilson
@ 2019-07-15  8:09 ` Chris Wilson
  2019-07-15  8:09 ` [PATCH 18/24] drm/i915/overlay: Switch to using i915_active tracking Chris Wilson
                   ` (10 subsequent siblings)
  26 siblings, 0 replies; 33+ messages in thread
From: Chris Wilson @ 2019-07-15  8:09 UTC (permalink / raw)
  To: intel-gfx; +Cc: Matthew Auld

We were using the last_fence to track the last request that used this
vma that might be interpreted by a fence register and forced ourselves
to wait for this request before modifying any fence register that
overlapped our vma. Due to the requirement that we track any XY_BLT
command, linear or tiled, this in effect means that we have to track the
vma for its whole active lifespan anyway, so we can forgo the explicit
last_fence tracking and just use vma->active.

Another solution would be to pipeline the register updates, which would
help resolve some long-running stalls for gen3 (but only gen2 and gen3!).
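
As a rough sketch (illustration only, using the helpers visible in the
diff below), the fence update path now simply waits on the vma's
existing activity tracker instead of retiring a dedicated last_fence:

static int example_quiesce_for_fence(struct i915_vma *vma)
{
        /* Previously: i915_active_request_retire(&vma->last_fence, ...) */
        return i915_active_wait(&vma->active);
}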

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c       |  4 +---
 drivers/gpu/drm/i915/i915_gem_fence_reg.c |  6 ++----
 drivers/gpu/drm/i915/i915_gem_gtt.c       |  1 -
 drivers/gpu/drm/i915/i915_vma.c           | 13 -------------
 drivers/gpu/drm/i915/i915_vma.h           |  1 -
 5 files changed, 3 insertions(+), 22 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 4fa227f01a60..8f639ed6034a 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -210,9 +210,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 			}
 		}
 		if (vma->fence)
-			seq_printf(m, " , fence: %d%s",
-				   vma->fence->id,
-				   i915_active_request_isset(&vma->last_fence) ? "*" : "");
+			seq_printf(m, " , fence: %d", vma->fence->id);
 		seq_puts(m, ")");
 
 		spin_lock(&obj->vma.lock);
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index bcac359ec661..c9654f1a468f 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -230,16 +230,14 @@ static int fence_update(struct i915_fence_reg *fence,
 			 i915_gem_object_get_tiling(vma->obj)))
 			return -EINVAL;
 
-		ret = i915_active_request_retire(&vma->last_fence,
-					     &vma->obj->base.dev->struct_mutex);
+		ret = i915_active_wait(&vma->active);
 		if (ret)
 			return ret;
 	}
 
 	old = xchg(&fence->vma, NULL);
 	if (old) {
-		ret = i915_active_request_retire(&old->last_fence,
-					     &old->obj->base.dev->struct_mutex);
+		ret = i915_active_wait(&old->active);
 		if (ret) {
 			fence->vma = old;
 			return ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 5c9c7cae4817..44f87f8aca0e 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1864,7 +1864,6 @@ static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
 		return ERR_PTR(-ENOMEM);
 
 	i915_active_init(i915, &vma->active, NULL, NULL);
-	INIT_ACTIVE_REQUEST(&vma->last_fence);
 
 	vma->vm = &ggtt->vm;
 	vma->ops = &pd_vma_ops;
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 1d003ec9da19..85b4a906be1e 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -119,7 +119,6 @@ vma_create(struct drm_i915_gem_object *obj,
 
 	i915_active_init(vm->i915, &vma->active,
 			 __i915_vma_active, __i915_vma_retire);
-	INIT_ACTIVE_REQUEST(&vma->last_fence);
 
 	/* Declare ourselves safe for use inside shrinkers */
 	if (IS_ENABLED(CONFIG_LOCKDEP)) {
@@ -801,8 +800,6 @@ static void __i915_vma_destroy(struct i915_vma *vma)
 	GEM_BUG_ON(vma->node.allocated);
 	GEM_BUG_ON(vma->fence);
 
-	GEM_BUG_ON(i915_active_request_isset(&vma->last_fence));
-
 	mutex_lock(&vma->vm->mutex);
 	list_del(&vma->vm_link);
 	mutex_unlock(&vma->vm->mutex);
@@ -938,9 +935,6 @@ int i915_vma_move_to_active(struct i915_vma *vma,
 	obj->read_domains |= I915_GEM_GPU_DOMAINS;
 	obj->mm.dirty = true;
 
-	if (flags & EXEC_OBJECT_NEEDS_FENCE)
-		__i915_active_request_set(&vma->last_fence, rq);
-
 	export_fence(vma, rq, flags);
 
 	GEM_BUG_ON(!i915_vma_is_active(vma));
@@ -973,14 +967,7 @@ int i915_vma_unbind(struct i915_vma *vma)
 		 * before we are finished).
 		 */
 		__i915_vma_pin(vma);
-
 		ret = i915_active_wait(&vma->active);
-		if (ret)
-			goto unpin;
-
-		ret = i915_active_request_retire(&vma->last_fence,
-					      &vma->vm->i915->drm.struct_mutex);
-unpin:
 		__i915_vma_unpin(vma);
 		if (ret)
 			return ret;
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index a24bd6787ef7..d3d184946393 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -111,7 +111,6 @@ struct i915_vma {
 #define I915_VMA_GGTT_WRITE	BIT(14)
 
 	struct i915_active active;
-	struct i915_active_request last_fence;
 
 	/**
 	 * Support different GGTT views into the same object.
-- 
2.22.0


* [PATCH 18/24] drm/i915/overlay: Switch to using i915_active tracking
  2019-07-15  8:09 [PATCH 01/24] drm/i915: Hide unshrinkable context objects from the shrinker Chris Wilson
                   ` (15 preceding siblings ...)
  2019-07-15  8:09 ` [PATCH 17/24] drm/i915: Forgo last_fence active request tracking Chris Wilson
@ 2019-07-15  8:09 ` Chris Wilson
  2019-07-15  8:09 ` [PATCH 19/24] drm/i915: Extract intel_frontbuffer active tracking Chris Wilson
                   ` (9 subsequent siblings)
  26 siblings, 0 replies; 33+ messages in thread
From: Chris Wilson @ 2019-07-15  8:09 UTC (permalink / raw)
  To: intel-gfx

Remove the raw i915_active_request tracking in favour of the
higher-level i915_active tracking, for the sole purpose of making the
lockless transition easier in later patches.
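
A condensed sketch (not taken verbatim from the patch) of the resulting
pattern: the overlay owns a single i915_active, each flip request is
added to it, and the stored completion handler runs from the retire
callback once the tracker idles:

static int example_overlay_op(struct intel_overlay *overlay,
                              void (*done)(struct intel_overlay *))
{
        struct i915_request *rq;
        int err;

        overlay->flip_complete = done;

        rq = i915_request_create(overlay->context);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        /* Track the request on the overlay-wide i915_active */
        err = i915_active_ref(&overlay->last_flip, rq->fence.context, rq);
        i915_request_add(rq);
        if (err)
                return err;

        /* Synchronous paths then just wait for the tracker to idle */
        return i915_active_wait(&overlay->last_flip);
}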

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/display/intel_overlay.c | 129 +++++++++----------
 drivers/gpu/drm/i915/i915_active.h           |  19 ---
 2 files changed, 64 insertions(+), 84 deletions(-)

diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 07929726b780..9b3f4f6644f3 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -191,7 +191,8 @@ struct intel_overlay {
 	struct overlay_registers __iomem *regs;
 	u32 flip_addr;
 	/* flip handling */
-	struct i915_active_request last_flip;
+	struct i915_active last_flip;
+	void (*flip_complete)(struct intel_overlay *ovl);
 };
 
 static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
@@ -217,30 +218,25 @@ static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
 				  PCI_DEVFN(0, 0), I830_CLOCK_GATE, val);
 }
 
-static void intel_overlay_submit_request(struct intel_overlay *overlay,
-					 struct i915_request *rq,
-					 i915_active_retire_fn retire)
+static struct i915_request *
+alloc_request(struct intel_overlay *overlay, void (*fn)(struct intel_overlay *))
 {
-	GEM_BUG_ON(i915_active_request_peek(&overlay->last_flip,
-					    &overlay->i915->drm.struct_mutex));
-	i915_active_request_set_retire_fn(&overlay->last_flip, retire,
-					  &overlay->i915->drm.struct_mutex);
-	__i915_active_request_set(&overlay->last_flip, rq);
-	i915_request_add(rq);
-}
+	struct i915_request *rq;
+	int err;
 
-static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
-					 struct i915_request *rq,
-					 i915_active_retire_fn retire)
-{
-	intel_overlay_submit_request(overlay, rq, retire);
-	return i915_active_request_retire(&overlay->last_flip,
-					  &overlay->i915->drm.struct_mutex);
-}
+	overlay->flip_complete = fn;
 
-static struct i915_request *alloc_request(struct intel_overlay *overlay)
-{
-	return i915_request_create(overlay->context);
+	rq = i915_request_create(overlay->context);
+	if (IS_ERR(rq))
+		return rq;
+
+	err = i915_active_ref(&overlay->last_flip, rq->fence.context, rq);
+	if (err) {
+		i915_request_add(rq);
+		return ERR_PTR(err);
+	}
+
+	return rq;
 }
 
 /* overlay needs to be disable in OCMD reg */
@@ -252,7 +248,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 
 	WARN_ON(overlay->active);
 
-	rq = alloc_request(overlay);
+	rq = alloc_request(overlay, NULL);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 
@@ -273,7 +269,9 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 	*cs++ = MI_NOOP;
 	intel_ring_advance(rq, cs);
 
-	return intel_overlay_do_wait_request(overlay, rq, NULL);
+	i915_request_add(rq);
+
+	return i915_active_wait(&overlay->last_flip);
 }
 
 static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
@@ -317,7 +315,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 	if (tmp & (1 << 17))
 		DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
 
-	rq = alloc_request(overlay);
+	rq = alloc_request(overlay, NULL);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 
@@ -332,8 +330,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 	intel_ring_advance(rq, cs);
 
 	intel_overlay_flip_prepare(overlay, vma);
-
-	intel_overlay_submit_request(overlay, rq, NULL);
+	i915_request_add(rq);
 
 	return 0;
 }
@@ -354,20 +351,13 @@ static void intel_overlay_release_old_vma(struct intel_overlay *overlay)
 }
 
 static void
-intel_overlay_release_old_vid_tail(struct i915_active_request *active,
-				   struct i915_request *rq)
+intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
 {
-	struct intel_overlay *overlay =
-		container_of(active, typeof(*overlay), last_flip);
-
 	intel_overlay_release_old_vma(overlay);
 }
 
-static void intel_overlay_off_tail(struct i915_active_request *active,
-				   struct i915_request *rq)
+static void intel_overlay_off_tail(struct intel_overlay *overlay)
 {
-	struct intel_overlay *overlay =
-		container_of(active, typeof(*overlay), last_flip);
 	struct drm_i915_private *dev_priv = overlay->i915;
 
 	intel_overlay_release_old_vma(overlay);
@@ -380,6 +370,16 @@ static void intel_overlay_off_tail(struct i915_active_request *active,
 		i830_overlay_clock_gating(dev_priv, true);
 }
 
+static void
+intel_overlay_last_flip_retire(struct i915_active *active)
+{
+	struct intel_overlay *overlay =
+		container_of(active, typeof(*overlay), last_flip);
+
+	if (overlay->flip_complete)
+		overlay->flip_complete(overlay);
+}
+
 /* overlay needs to be disabled in OCMD reg */
 static int intel_overlay_off(struct intel_overlay *overlay)
 {
@@ -394,7 +394,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 	 * of the hw. Do it in both cases */
 	flip_addr |= OFC_UPDATE;
 
-	rq = alloc_request(overlay);
+	rq = alloc_request(overlay, intel_overlay_off_tail);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 
@@ -417,17 +417,16 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 	intel_ring_advance(rq, cs);
 
 	intel_overlay_flip_prepare(overlay, NULL);
+	i915_request_add(rq);
 
-	return intel_overlay_do_wait_request(overlay, rq,
-					     intel_overlay_off_tail);
+	return i915_active_wait(&overlay->last_flip);
 }
 
 /* recover from an interruption due to a signal
  * We have to be careful not to repeat work forever an make forward progess. */
 static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
 {
-	return i915_active_request_retire(&overlay->last_flip,
-					  &overlay->i915->drm.struct_mutex);
+	return i915_active_wait(&overlay->last_flip);
 }
 
 /* Wait for pending overlay flip and release old frame.
@@ -437,43 +436,40 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
 static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 {
 	struct drm_i915_private *dev_priv = overlay->i915;
+	struct i915_request *rq;
 	u32 *cs;
-	int ret;
 
 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
-	/* Only wait if there is actually an old frame to release to
+	/*
+	 * Only wait if there is actually an old frame to release to
 	 * guarantee forward progress.
 	 */
 	if (!overlay->old_vma)
 		return 0;
 
-	if (I915_READ(GEN2_ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
-		/* synchronous slowpath */
-		struct i915_request *rq;
+	if (!(I915_READ(GEN2_ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT)) {
+		intel_overlay_release_old_vid_tail(overlay);
+		return 0;
+	}
 
-		rq = alloc_request(overlay);
-		if (IS_ERR(rq))
-			return PTR_ERR(rq);
+	rq = alloc_request(overlay, intel_overlay_release_old_vid_tail);
+	if (IS_ERR(rq))
+		return PTR_ERR(rq);
 
-		cs = intel_ring_begin(rq, 2);
-		if (IS_ERR(cs)) {
-			i915_request_add(rq);
-			return PTR_ERR(cs);
-		}
+	cs = intel_ring_begin(rq, 2);
+	if (IS_ERR(cs)) {
+		i915_request_add(rq);
+		return PTR_ERR(cs);
+	}
 
-		*cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
-		*cs++ = MI_NOOP;
-		intel_ring_advance(rq, cs);
+	*cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
+	*cs++ = MI_NOOP;
+	intel_ring_advance(rq, cs);
 
-		ret = intel_overlay_do_wait_request(overlay, rq,
-						    intel_overlay_release_old_vid_tail);
-		if (ret)
-			return ret;
-	} else
-		intel_overlay_release_old_vid_tail(&overlay->last_flip, NULL);
+	i915_request_add(rq);
 
-	return 0;
+	return i915_active_wait(&overlay->last_flip);
 }
 
 void intel_overlay_reset(struct drm_i915_private *dev_priv)
@@ -1375,7 +1371,9 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
 	overlay->contrast = 75;
 	overlay->saturation = 146;
 
-	INIT_ACTIVE_REQUEST(&overlay->last_flip);
+	i915_active_init(dev_priv,
+			 &overlay->last_flip,
+			 NULL, intel_overlay_last_flip_retire);
 
 	ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv));
 	if (ret)
@@ -1409,6 +1407,7 @@ void intel_overlay_cleanup(struct drm_i915_private *dev_priv)
 	WARN_ON(overlay->active);
 
 	i915_gem_object_put(overlay->reg_bo);
+	i915_active_fini(&overlay->last_flip);
 
 	kfree(overlay);
 }
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index 134166d31251..911a8338007a 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -89,25 +89,6 @@ int __must_check
 i915_active_request_set(struct i915_active_request *active,
 			struct i915_request *rq);
 
-/**
- * i915_active_request_set_retire_fn - updates the retirement callback
- * @active - the active tracker
- * @fn - the routine called when the request is retired
- * @mutex - struct_mutex used to guard retirements
- *
- * i915_active_request_set_retire_fn() updates the function pointer that
- * is called when the final request associated with the @active tracker
- * is retired.
- */
-static inline void
-i915_active_request_set_retire_fn(struct i915_active_request *active,
-				  i915_active_retire_fn fn,
-				  struct mutex *mutex)
-{
-	lockdep_assert_held(mutex);
-	active->retire = fn ?: i915_active_retire_noop;
-}
-
 /**
  * i915_active_request_raw - return the active request
  * @active - the active tracker
-- 
2.22.0


* [PATCH 19/24] drm/i915: Extract intel_frontbuffer active tracking
  2019-07-15  8:09 [PATCH 01/24] drm/i915: Hide unshrinkable context objects from the shrinker Chris Wilson
                   ` (16 preceding siblings ...)
  2019-07-15  8:09 ` [PATCH 18/24] drm/i915/overlay: Switch to using i915_active tracking Chris Wilson
@ 2019-07-15  8:09 ` Chris Wilson
  2019-07-15  8:09 ` [PATCH 20/24] drm/i915: Markup expected timeline locks for i915_active Chris Wilson
                   ` (8 subsequent siblings)
  26 siblings, 0 replies; 33+ messages in thread
From: Chris Wilson @ 2019-07-15  8:09 UTC (permalink / raw)
  To: intel-gfx

Move the active tracking for the frontbuffer operations out of the
i915_gem_object and into its own first-class (refcounted) object. In the
process of disentangling, we switch from low-level request tracking to
the easier i915_active -- the plan being that this avoids any potential
atomic callbacks, as the frontbuffer tracking wishes to sleep as it
flushes.
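
A loose usage sketch (hypothetical helper, not part of the patch) of the
new object's lifecycle: callers take a reference with
intel_frontbuffer_get(), move their plane bit across with
intel_frontbuffer_track(), and drop the reference with
intel_frontbuffer_put() once the framebuffer goes away:

static int example_bind_plane(struct drm_i915_gem_object *obj,
                              struct intel_frontbuffer **slot,
                              unsigned int frontbuffer_bit)
{
        struct intel_frontbuffer *front;

        front = intel_frontbuffer_get(obj); /* refcounted get-or-create */
        if (!front)
                return -ENOMEM;

        /* Clear the bit on the old buffer, set it on the new one */
        intel_frontbuffer_track(*slot, front, frontbuffer_bit);
        if (*slot)
                intel_frontbuffer_put(*slot);
        *slot = front;

        return 0;
}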

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/display/intel_display.c  |  70 +++--
 drivers/gpu/drm/i915/display/intel_fbdev.c    |  40 ++-
 .../gpu/drm/i915/display/intel_frontbuffer.c  | 255 +++++++++++++-----
 .../gpu/drm/i915/display/intel_frontbuffer.h  |  70 +++--
 drivers/gpu/drm/i915/display/intel_overlay.c  |   8 +-
 drivers/gpu/drm/i915/gem/i915_gem_clflush.c   |   2 +-
 drivers/gpu/drm/i915/gem/i915_gem_domain.c    |  14 +-
 drivers/gpu/drm/i915/gem/i915_gem_mman.c      |   4 -
 drivers/gpu/drm/i915/gem/i915_gem_object.c    |  27 +-
 drivers/gpu/drm/i915/gem/i915_gem_object.h    |   2 +-
 .../gpu/drm/i915/gem/i915_gem_object_types.h  |   8 +-
 drivers/gpu/drm/i915/i915_debugfs.c           |   5 -
 drivers/gpu/drm/i915/i915_drv.h               |   4 -
 drivers/gpu/drm/i915/i915_gem.c               |  47 +---
 drivers/gpu/drm/i915/i915_vma.c               |   6 +-
 drivers/gpu/drm/i915/intel_drv.h              |   1 +
 16 files changed, 306 insertions(+), 257 deletions(-)

diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index e25b82d07d4f..219ae5a60b31 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -3049,12 +3049,13 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct drm_i915_gem_object *obj = NULL;
 	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
 	struct drm_framebuffer *fb = &plane_config->fb->base;
 	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
 	u32 size_aligned = round_up(plane_config->base + plane_config->size,
 				    PAGE_SIZE);
+	struct drm_i915_gem_object *obj;
+	bool ret = false;
 
 	size_aligned -= base_aligned;
 
@@ -3096,7 +3097,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
 		break;
 	default:
 		MISSING_CASE(plane_config->tiling);
-		return false;
+		goto out;
 	}
 
 	mode_cmd.pixel_format = fb->format->format;
@@ -3108,16 +3109,15 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
 
 	if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
 		DRM_DEBUG_KMS("intel fb init failed\n");
-		goto out_unref_obj;
+		goto out;
 	}
 
 
 	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
-	return true;
-
-out_unref_obj:
+	ret = true;
+out:
 	i915_gem_object_put(obj);
-	return false;
+	return ret;
 }
 
 static void
@@ -3174,6 +3174,12 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
 	intel_disable_plane(plane, crtc_state);
 }
 
+static struct intel_frontbuffer *
+to_intel_frontbuffer(struct drm_framebuffer *fb)
+{
+	return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
+}
+
 static void
 intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
 			     struct intel_initial_plane_config *plane_config)
@@ -3181,7 +3187,6 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
 	struct drm_device *dev = intel_crtc->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_crtc *c;
-	struct drm_i915_gem_object *obj;
 	struct drm_plane *primary = intel_crtc->base.primary;
 	struct drm_plane_state *plane_state = primary->state;
 	struct intel_plane *intel_plane = to_intel_plane(primary);
@@ -3257,8 +3262,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
 		return;
 	}
 
-	obj = intel_fb_obj(fb);
-	intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
+	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
 
 	plane_state->src_x = 0;
 	plane_state->src_y = 0;
@@ -3273,14 +3277,14 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
 	intel_state->base.src = drm_plane_state_src(plane_state);
 	intel_state->base.dst = drm_plane_state_dest(plane_state);
 
-	if (i915_gem_object_is_tiled(obj))
+	if (plane_config->tiling)
 		dev_priv->preserve_bios_swizzle = true;
 
 	plane_state->fb = fb;
 	plane_state->crtc = &intel_crtc->base;
 
 	atomic_or(to_intel_plane(primary)->frontbuffer_bit,
-		  &obj->frontbuffer_bits);
+		  &to_intel_frontbuffer(fb)->bits);
 }
 
 static int skl_max_plane_width(const struct drm_framebuffer *fb,
@@ -14129,9 +14133,9 @@ static void intel_atomic_track_fbs(struct intel_atomic_state *state)
 
 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
 					     new_plane_state, i)
-		i915_gem_track_fb(intel_fb_obj(old_plane_state->base.fb),
-				  intel_fb_obj(new_plane_state->base.fb),
-				  plane->frontbuffer_bit);
+		intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->base.fb),
+					to_intel_frontbuffer(new_plane_state->base.fb),
+					plane->frontbuffer_bit);
 }
 
 static int intel_atomic_commit(struct drm_device *dev,
@@ -14415,7 +14419,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 		return ret;
 
 	fb_obj_bump_render_priority(obj);
-	intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
+	intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_DIRTYFB);
 
 	if (!new_state->fence) { /* implicit fencing */
 		struct dma_fence *fence;
@@ -14678,13 +14682,12 @@ intel_legacy_cursor_update(struct drm_plane *plane,
 			   struct drm_modeset_acquire_ctx *ctx)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-	int ret;
 	struct drm_plane_state *old_plane_state, *new_plane_state;
 	struct intel_plane *intel_plane = to_intel_plane(plane);
-	struct drm_framebuffer *old_fb;
 	struct intel_crtc_state *crtc_state =
 		to_intel_crtc_state(crtc->state);
 	struct intel_crtc_state *new_crtc_state;
+	int ret;
 
 	/*
 	 * When crtc is inactive or there is a modeset pending,
@@ -14752,11 +14755,10 @@ intel_legacy_cursor_update(struct drm_plane *plane,
 	if (ret)
 		goto out_unlock;
 
-	intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_FLIP);
-
-	old_fb = old_plane_state->fb;
-	i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb),
-			  intel_plane->frontbuffer_bit);
+	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_FLIP);
+	intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->fb),
+				to_intel_frontbuffer(fb),
+				intel_plane->frontbuffer_bit);
 
 	/* Swap plane state */
 	plane->state = new_plane_state;
@@ -15536,15 +15538,9 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
 {
 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 
 	drm_framebuffer_cleanup(fb);
-
-	i915_gem_object_lock(obj);
-	WARN_ON(!obj->framebuffer_references--);
-	i915_gem_object_unlock(obj);
-
-	i915_gem_object_put(obj);
+	intel_frontbuffer_put(intel_fb->frontbuffer);
 
 	kfree(intel_fb);
 }
@@ -15572,7 +15568,7 @@ static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 
 	i915_gem_object_flush_if_display(obj);
-	intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
+	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
 
 	return 0;
 }
@@ -15594,8 +15590,11 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
 	int ret = -EINVAL;
 	int i;
 
+	intel_fb->frontbuffer = intel_frontbuffer_get(obj);
+	if (!intel_fb->frontbuffer)
+		return -ENOMEM;
+
 	i915_gem_object_lock(obj);
-	obj->framebuffer_references++;
 	tiling = i915_gem_object_get_tiling(obj);
 	stride = i915_gem_object_get_stride(obj);
 	i915_gem_object_unlock(obj);
@@ -15712,9 +15711,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
 	return 0;
 
 err:
-	i915_gem_object_lock(obj);
-	obj->framebuffer_references--;
-	i915_gem_object_unlock(obj);
+	intel_frontbuffer_put(intel_fb->frontbuffer);
 	return ret;
 }
 
@@ -15732,8 +15729,7 @@ intel_user_framebuffer_create(struct drm_device *dev,
 		return ERR_PTR(-ENOENT);
 
 	fb = intel_framebuffer_create(obj, &mode_cmd);
-	if (IS_ERR(fb))
-		i915_gem_object_put(obj);
+	i915_gem_object_put(obj);
 
 	return fb;
 }
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 1edd44ee32b2..4b57cdd76699 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -47,13 +47,14 @@
 #include "intel_fbdev.h"
 #include "intel_frontbuffer.h"
 
-static void intel_fbdev_invalidate(struct intel_fbdev *ifbdev)
+static struct intel_frontbuffer *to_frontbuffer(struct intel_fbdev *ifbdev)
 {
-	struct drm_i915_gem_object *obj = intel_fb_obj(&ifbdev->fb->base);
-	unsigned int origin =
-		ifbdev->vma_flags & PLANE_HAS_FENCE ? ORIGIN_GTT : ORIGIN_CPU;
+	return ifbdev->fb->frontbuffer;
+}
 
-	intel_fb_obj_invalidate(obj, origin);
+static void intel_fbdev_invalidate(struct intel_fbdev *ifbdev)
+{
+	intel_frontbuffer_invalidate(to_frontbuffer(ifbdev), ORIGIN_CPU);
 }
 
 static int intel_fbdev_set_par(struct fb_info *info)
@@ -120,7 +121,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_mode_fb_cmd2 mode_cmd = {};
 	struct drm_i915_gem_object *obj;
-	int size, ret;
+	int size;
 
 	/* we don't do packed 24bpp */
 	if (sizes->surface_bpp == 24)
@@ -147,24 +148,16 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
 		obj = i915_gem_object_create_shmem(dev_priv, size);
 	if (IS_ERR(obj)) {
 		DRM_ERROR("failed to allocate framebuffer\n");
-		ret = PTR_ERR(obj);
-		goto err;
+		return PTR_ERR(obj);
 	}
 
 	fb = intel_framebuffer_create(obj, &mode_cmd);
-	if (IS_ERR(fb)) {
-		ret = PTR_ERR(fb);
-		goto err_obj;
-	}
+	i915_gem_object_put(obj);
+	if (IS_ERR(fb))
+		return PTR_ERR(fb);
 
 	ifbdev->fb = to_intel_framebuffer(fb);
-
 	return 0;
-
-err_obj:
-	i915_gem_object_put(obj);
-err:
-	return ret;
 }
 
 static int intelfb_create(struct drm_fb_helper *helper,
@@ -180,7 +173,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
 	const struct i915_ggtt_view view = {
 		.type = I915_GGTT_VIEW_NORMAL,
 	};
-	struct drm_framebuffer *fb;
 	intel_wakeref_t wakeref;
 	struct fb_info *info;
 	struct i915_vma *vma;
@@ -226,8 +218,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
 		goto out_unlock;
 	}
 
-	fb = &ifbdev->fb->base;
-	intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_DIRTYFB);
+	intel_frontbuffer_flush(to_frontbuffer(ifbdev), ORIGIN_DIRTYFB);
 
 	info = drm_fb_helper_alloc_fbi(helper);
 	if (IS_ERR(info)) {
@@ -236,7 +227,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
 		goto out_unpin;
 	}
 
-	ifbdev->helper.fb = fb;
+	ifbdev->helper.fb = &ifbdev->fb->base;
 
 	info->fbops = &intelfb_ops;
 
@@ -262,13 +253,14 @@ static int intelfb_create(struct drm_fb_helper *helper,
 	 * If the object is stolen however, it will be full of whatever
 	 * garbage was left in there.
 	 */
-	if (intel_fb_obj(fb)->stolen && !prealloc)
+	if (vma->obj->stolen && !prealloc)
 		memset_io(info->screen_base, 0, info->screen_size);
 
 	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
 
 	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x\n",
-		      fb->width, fb->height, i915_ggtt_offset(vma));
+		      ifbdev->fb->base.width, ifbdev->fb->base.height,
+		      i915_ggtt_offset(vma));
 	ifbdev->vma = vma;
 	ifbdev->vma_flags = flags;
 
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index 44273c10cea5..d11031811a48 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -30,11 +30,11 @@
  * Many features require us to track changes to the currently active
  * frontbuffer, especially rendering targeted at the frontbuffer.
  *
- * To be able to do so GEM tracks frontbuffers using a bitmask for all possible
- * frontbuffer slots through i915_gem_track_fb(). The function in this file are
- * then called when the contents of the frontbuffer are invalidated, when
- * frontbuffer rendering has stopped again to flush out all the changes and when
- * the frontbuffer is exchanged with a flip. Subsystems interested in
+ * To be able to do so we track frontbuffers using a bitmask for all possible
+ * frontbuffer slots through intel_frontbuffer_track(). The functions in this
+ * file are then called when the contents of the frontbuffer are invalidated,
+ * when frontbuffer rendering has stopped again to flush out all the changes
+ * and when the frontbuffer is exchanged with a flip. Subsystems interested in
  * frontbuffer changes (e.g. PSR, FBC, DRRS) should directly put their callbacks
  * into the relevant places and filter for the frontbuffer slots that they are
  * interested int.
@@ -63,28 +63,9 @@
 #include "intel_frontbuffer.h"
 #include "intel_psr.h"
 
-void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-			       enum fb_op_origin origin,
-			       unsigned int frontbuffer_bits)
-{
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-
-	if (origin == ORIGIN_CS) {
-		spin_lock(&dev_priv->fb_tracking.lock);
-		dev_priv->fb_tracking.busy_bits |= frontbuffer_bits;
-		dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
-		spin_unlock(&dev_priv->fb_tracking.lock);
-	}
-
-	might_sleep();
-	intel_psr_invalidate(dev_priv, frontbuffer_bits, origin);
-	intel_edp_drrs_invalidate(dev_priv, frontbuffer_bits);
-	intel_fbc_invalidate(dev_priv, frontbuffer_bits, origin);
-}
-
 /**
- * intel_frontbuffer_flush - flush frontbuffer
- * @dev_priv: i915 device
+ * frontbuffer_flush - flush frontbuffer
+ * @i915: i915 device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  * @origin: which operation caused the flush
  *
@@ -94,45 +75,27 @@ void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
  *
  * Can be called without any locks held.
  */
-static void intel_frontbuffer_flush(struct drm_i915_private *dev_priv,
-				    unsigned frontbuffer_bits,
-				    enum fb_op_origin origin)
+static void frontbuffer_flush(struct drm_i915_private *i915,
+			      unsigned int frontbuffer_bits,
+			      enum fb_op_origin origin)
 {
 	/* Delay flushing when rings are still busy.*/
-	spin_lock(&dev_priv->fb_tracking.lock);
-	frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
-	spin_unlock(&dev_priv->fb_tracking.lock);
+	spin_lock(&i915->fb_tracking.lock);
+	frontbuffer_bits &= ~i915->fb_tracking.busy_bits;
+	spin_unlock(&i915->fb_tracking.lock);
 
 	if (!frontbuffer_bits)
 		return;
 
 	might_sleep();
-	intel_edp_drrs_flush(dev_priv, frontbuffer_bits);
-	intel_psr_flush(dev_priv, frontbuffer_bits, origin);
-	intel_fbc_flush(dev_priv, frontbuffer_bits, origin);
-}
-
-void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
-			  enum fb_op_origin origin,
-			  unsigned int frontbuffer_bits)
-{
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-
-	if (origin == ORIGIN_CS) {
-		spin_lock(&dev_priv->fb_tracking.lock);
-		/* Filter out new bits since rendering started. */
-		frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
-		dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
-		spin_unlock(&dev_priv->fb_tracking.lock);
-	}
-
-	if (frontbuffer_bits)
-		intel_frontbuffer_flush(dev_priv, frontbuffer_bits, origin);
+	intel_edp_drrs_flush(i915, frontbuffer_bits);
+	intel_psr_flush(i915, frontbuffer_bits, origin);
+	intel_fbc_flush(i915, frontbuffer_bits, origin);
 }
 
 /**
  * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
- * @dev_priv: i915 device
+ * @i915: i915 device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  *
  * This function gets called after scheduling a flip on @obj. The actual
@@ -142,19 +105,19 @@ void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
  *
  * Can be called without any locks held.
  */
-void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
+void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
 				    unsigned frontbuffer_bits)
 {
-	spin_lock(&dev_priv->fb_tracking.lock);
-	dev_priv->fb_tracking.flip_bits |= frontbuffer_bits;
+	spin_lock(&i915->fb_tracking.lock);
+	i915->fb_tracking.flip_bits |= frontbuffer_bits;
 	/* Remove stale busy bits due to the old buffer. */
-	dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
-	spin_unlock(&dev_priv->fb_tracking.lock);
+	i915->fb_tracking.busy_bits &= ~frontbuffer_bits;
+	spin_unlock(&i915->fb_tracking.lock);
 }
 
 /**
  * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flip
- * @dev_priv: i915 device
+ * @i915: i915 device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  *
  * This function gets called after the flip has been latched and will complete
@@ -162,23 +125,22 @@ void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
  *
  * Can be called without any locks held.
  */
-void intel_frontbuffer_flip_complete(struct drm_i915_private *dev_priv,
+void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
 				     unsigned frontbuffer_bits)
 {
-	spin_lock(&dev_priv->fb_tracking.lock);
+	spin_lock(&i915->fb_tracking.lock);
 	/* Mask any cancelled flips. */
-	frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
-	dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
-	spin_unlock(&dev_priv->fb_tracking.lock);
+	frontbuffer_bits &= i915->fb_tracking.flip_bits;
+	i915->fb_tracking.flip_bits &= ~frontbuffer_bits;
+	spin_unlock(&i915->fb_tracking.lock);
 
 	if (frontbuffer_bits)
-		intel_frontbuffer_flush(dev_priv,
-					frontbuffer_bits, ORIGIN_FLIP);
+		frontbuffer_flush(i915, frontbuffer_bits, ORIGIN_FLIP);
 }
 
 /**
  * intel_frontbuffer_flip - synchronous frontbuffer flip
- * @dev_priv: i915 device
+ * @i915: i915 device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  *
  * This function gets called after scheduling a flip on @obj. This is for
@@ -187,13 +149,160 @@ void intel_frontbuffer_flip_complete(struct drm_i915_private *dev_priv,
  *
  * Can be called without any locks held.
  */
-void intel_frontbuffer_flip(struct drm_i915_private *dev_priv,
+void intel_frontbuffer_flip(struct drm_i915_private *i915,
 			    unsigned frontbuffer_bits)
 {
-	spin_lock(&dev_priv->fb_tracking.lock);
+	spin_lock(&i915->fb_tracking.lock);
 	/* Remove stale busy bits due to the old buffer. */
-	dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
-	spin_unlock(&dev_priv->fb_tracking.lock);
+	i915->fb_tracking.busy_bits &= ~frontbuffer_bits;
+	spin_unlock(&i915->fb_tracking.lock);
 
-	intel_frontbuffer_flush(dev_priv, frontbuffer_bits, ORIGIN_FLIP);
+	frontbuffer_flush(i915, frontbuffer_bits, ORIGIN_FLIP);
+}
+
+void __intel_fb_invalidate(struct intel_frontbuffer *front,
+			   enum fb_op_origin origin,
+			   unsigned int frontbuffer_bits)
+{
+	struct drm_i915_private *i915 = to_i915(front->obj->base.dev);
+
+	if (origin == ORIGIN_CS) {
+		spin_lock(&i915->fb_tracking.lock);
+		i915->fb_tracking.busy_bits |= frontbuffer_bits;
+		i915->fb_tracking.flip_bits &= ~frontbuffer_bits;
+		spin_unlock(&i915->fb_tracking.lock);
+	}
+
+	might_sleep();
+	intel_psr_invalidate(i915, frontbuffer_bits, origin);
+	intel_edp_drrs_invalidate(i915, frontbuffer_bits);
+	intel_fbc_invalidate(i915, frontbuffer_bits, origin);
+}
+
+void __intel_fb_flush(struct intel_frontbuffer *front,
+		      enum fb_op_origin origin,
+		      unsigned int frontbuffer_bits)
+{
+	struct drm_i915_private *i915 = to_i915(front->obj->base.dev);
+
+	if (origin == ORIGIN_CS) {
+		spin_lock(&i915->fb_tracking.lock);
+		/* Filter out new bits since rendering started. */
+		frontbuffer_bits &= i915->fb_tracking.busy_bits;
+		i915->fb_tracking.busy_bits &= ~frontbuffer_bits;
+		spin_unlock(&i915->fb_tracking.lock);
+	}
+
+	if (frontbuffer_bits)
+		frontbuffer_flush(i915, frontbuffer_bits, origin);
+}
+
+static int frontbuffer_active(struct i915_active *ref)
+{
+	struct intel_frontbuffer *front =
+		container_of(ref, typeof(*front), write);
+
+	kref_get(&front->ref);
+	return 0;
+}
+
+static void frontbuffer_retire(struct i915_active *ref)
+{
+	struct intel_frontbuffer *front =
+		container_of(ref, typeof(*front), write);
+
+	intel_frontbuffer_flush(front, ORIGIN_CS);
+	intel_frontbuffer_put(front);
+}
+
+static void frontbuffer_release(struct kref *ref)
+	__releases(&to_i915(front->obj->base.dev)->fb_tracking.lock)
+{
+	struct intel_frontbuffer *front =
+		container_of(ref, typeof(*front), ref);
+
+	front->obj->frontbuffer = NULL;
+	spin_unlock(&to_i915(front->obj->base.dev)->fb_tracking.lock);
+
+	i915_gem_object_put(front->obj);
+	kfree(front);
+}
+
+struct intel_frontbuffer *
+intel_frontbuffer_get(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	struct intel_frontbuffer *front;
+
+	spin_lock(&i915->fb_tracking.lock);
+	front = obj->frontbuffer;
+	if (front)
+		kref_get(&front->ref);
+	spin_unlock(&i915->fb_tracking.lock);
+	if (front)
+		return front;
+
+	front = kmalloc(sizeof(*front), GFP_KERNEL);
+	if (!front)
+		return NULL;
+
+	front->obj = obj;
+	kref_init(&front->ref);
+	atomic_set(&front->bits, 0);
+	i915_active_init(i915, &front->write,
+			 frontbuffer_active, frontbuffer_retire);
+
+	spin_lock(&i915->fb_tracking.lock);
+	if (obj->frontbuffer) {
+		kfree(front);
+		front = obj->frontbuffer;
+		kref_get(&front->ref);
+	} else {
+		i915_gem_object_get(obj);
+		obj->frontbuffer = front;
+	}
+	spin_unlock(&i915->fb_tracking.lock);
+
+	return front;
+}
+
+void intel_frontbuffer_put(struct intel_frontbuffer *front)
+{
+	kref_put_lock(&front->ref,
+		      frontbuffer_release,
+		      &to_i915(front->obj->base.dev)->fb_tracking.lock);
+}
+
+/**
+ * intel_frontbuffer_track - update frontbuffer tracking
+ * @old: current buffer for the frontbuffer slots
+ * @new: new buffer for the frontbuffer slots
+ * @frontbuffer_bits: bitmask of frontbuffer slots
+ *
+ * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
+ * from @old and setting them in @new. Both @old and @new can be NULL.
+ */
+void intel_frontbuffer_track(struct intel_frontbuffer *old,
+			     struct intel_frontbuffer *new,
+			     unsigned int frontbuffer_bits)
+{
+	/*
+	 * Control of individual bits within the mask are guarded by
+	 * the owning plane->mutex, i.e. we can never see concurrent
+	 * manipulation of individual bits. But since the bitfield as a whole
+	 * is updated using RMW, we need to use atomics in order to update
+	 * the bits.
+	 */
+	BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
+		     BITS_PER_TYPE(atomic_t));
+
+	if (old) {
+		WARN_ON(!(atomic_read(&old->bits) & frontbuffer_bits));
+		atomic_andnot(frontbuffer_bits, &old->bits);
+	}
+
+	if (new) {
+		WARN_ON(atomic_read(&new->bits) & frontbuffer_bits);
+		atomic_or(frontbuffer_bits, &new->bits);
+	}
 }
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.h b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
index 5727320c8084..adc64d61a4a5 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.h
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
@@ -24,7 +24,10 @@
 #ifndef __INTEL_FRONTBUFFER_H__
 #define __INTEL_FRONTBUFFER_H__
 
-#include "gem/i915_gem_object.h"
+#include <linux/atomic.h>
+#include <linux/kref.h>
+
+#include "i915_active.h"
 
 struct drm_i915_private;
 struct drm_i915_gem_object;
@@ -37,23 +40,30 @@ enum fb_op_origin {
 	ORIGIN_DIRTYFB,
 };
 
-void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
+struct intel_frontbuffer {
+	struct kref ref;
+	atomic_t bits;
+	struct i915_active write;
+	struct drm_i915_gem_object *obj;
+};
+
+void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
 				    unsigned frontbuffer_bits);
-void intel_frontbuffer_flip_complete(struct drm_i915_private *dev_priv,
+void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
 				     unsigned frontbuffer_bits);
-void intel_frontbuffer_flip(struct drm_i915_private *dev_priv,
+void intel_frontbuffer_flip(struct drm_i915_private *i915,
 			    unsigned frontbuffer_bits);
 
-void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-			       enum fb_op_origin origin,
-			       unsigned int frontbuffer_bits);
-void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
-			  enum fb_op_origin origin,
-			  unsigned int frontbuffer_bits);
+struct intel_frontbuffer *
+intel_frontbuffer_get(struct drm_i915_gem_object *obj);
+
+void __intel_fb_invalidate(struct intel_frontbuffer *front,
+			   enum fb_op_origin origin,
+			   unsigned int frontbuffer_bits);
 
 /**
- * intel_fb_obj_invalidate - invalidate frontbuffer object
- * @obj: GEM object to invalidate
+ * intel_frontbuffer_invalidate - invalidate frontbuffer object
+ * @front: GEM object to invalidate
  * @origin: which operation caused the invalidation
  *
  * This function gets called every time rendering on the given object starts and
@@ -62,37 +72,53 @@ void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
  * until the rendering completes or a flip on this frontbuffer plane is
  * scheduled.
  */
-static inline bool intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-					   enum fb_op_origin origin)
+static inline bool intel_frontbuffer_invalidate(struct intel_frontbuffer *front,
+						enum fb_op_origin origin)
 {
 	unsigned int frontbuffer_bits;
 
-	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
+	if (!front)
+		return false;
+
+	frontbuffer_bits = atomic_read(&front->bits);
 	if (!frontbuffer_bits)
 		return false;
 
-	__intel_fb_obj_invalidate(obj, origin, frontbuffer_bits);
+	__intel_fb_invalidate(front, origin, frontbuffer_bits);
 	return true;
 }
 
+void __intel_fb_flush(struct intel_frontbuffer *front,
+		      enum fb_op_origin origin,
+		      unsigned int frontbuffer_bits);
+
 /**
- * intel_fb_obj_flush - flush frontbuffer object
- * @obj: GEM object to flush
+ * intel_frontbuffer_flush - flush frontbuffer object
+ * @front: GEM object to flush
  * @origin: which operation caused the flush
  *
  * This function gets called every time rendering on the given object has
  * completed and frontbuffer caching can be started again.
  */
-static inline void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
-				      enum fb_op_origin origin)
+static inline void intel_frontbuffer_flush(struct intel_frontbuffer *front,
+					   enum fb_op_origin origin)
 {
 	unsigned int frontbuffer_bits;
 
-	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
+	if (!front)
+		return;
+
+	frontbuffer_bits = atomic_read(&front->bits);
 	if (!frontbuffer_bits)
 		return;
 
-	__intel_fb_obj_flush(obj, origin, frontbuffer_bits);
+	__intel_fb_flush(front, origin, frontbuffer_bits);
 }
 
+void intel_frontbuffer_track(struct intel_frontbuffer *old,
+			     struct intel_frontbuffer *new,
+			     unsigned int frontbuffer_bits);
+
+void intel_frontbuffer_put(struct intel_frontbuffer *front);
+
 #endif /* __INTEL_FRONTBUFFER_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 9b3f4f6644f3..1a15fa34205c 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -281,9 +281,9 @@ static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
 
 	WARN_ON(overlay->old_vma);
 
-	i915_gem_track_fb(overlay->vma ? overlay->vma->obj : NULL,
-			  vma ? vma->obj : NULL,
-			  INTEL_FRONTBUFFER_OVERLAY(pipe));
+	intel_frontbuffer_track(overlay->vma ? overlay->vma->obj->frontbuffer : NULL,
+				vma ? vma->obj->frontbuffer : NULL,
+				INTEL_FRONTBUFFER_OVERLAY(pipe));
 
 	intel_frontbuffer_flip_prepare(overlay->i915,
 				       INTEL_FRONTBUFFER_OVERLAY(pipe));
@@ -768,7 +768,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 		ret = PTR_ERR(vma);
 		goto out_pin_section;
 	}
-	intel_fb_obj_flush(new_bo, ORIGIN_DIRTYFB);
+	intel_frontbuffer_flush(new_bo->frontbuffer, ORIGIN_DIRTYFB);
 
 	ret = i915_vma_put_fence(vma);
 	if (ret)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
index 5295285d5843..a65d401f891c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
@@ -48,7 +48,7 @@ static void __i915_do_clflush(struct drm_i915_gem_object *obj)
 {
 	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
 	drm_clflush_sg(obj->mm.pages);
-	intel_fb_obj_flush(obj, ORIGIN_CPU);
+	intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
 }
 
 static void i915_clflush_work(struct work_struct *work)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 2e3ce2a69653..a1afc2690e9e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -551,13 +551,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 	return 0;
 }
 
-static inline enum fb_op_origin
-fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
-{
-	return (domain == I915_GEM_DOMAIN_GTT ?
-		obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
-}
-
 /**
  * Called when user space prepares to use an object with the CPU, either
  * through the mmap ioctl's mapping or a GTT mapping.
@@ -661,9 +654,8 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 
 	i915_gem_object_unlock(obj);
 
-	if (write_domain != 0)
-		intel_fb_obj_invalidate(obj,
-					fb_write_origin(obj, write_domain));
+	if (write_domain)
+		intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
 
 out_unpin:
 	i915_gem_object_unpin_pages(obj);
@@ -783,7 +775,7 @@ int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
 	}
 
 out:
-	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
+	intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
 	obj->mm.dirty = true;
 	/* return with the pages pinned */
 	return 0;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index a564c1e4231b..71d10ae90922 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -101,9 +101,6 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 		up_write(&mm->mmap_sem);
 		if (IS_ERR_VALUE(addr))
 			goto err;
-
-		/* This may race, but that's ok, it only gets set */
-		WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
 	}
 	i915_gem_object_put(obj);
 
@@ -283,7 +280,6 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 		 * Userspace is now writing through an untracked VMA, abandon
 		 * all hope that the hardware is able to track future writes.
 		 */
-		obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
 
 		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
 		if (IS_ERR(vma) && !view.type) {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index eccd7f4768f8..cfed28cc6185 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -45,16 +45,6 @@ void i915_gem_object_free(struct drm_i915_gem_object *obj)
 	return kmem_cache_free(global.slab_objects, obj);
 }
 
-static void
-frontbuffer_retire(struct i915_active_request *active,
-		   struct i915_request *request)
-{
-	struct drm_i915_gem_object *obj =
-		container_of(active, typeof(*obj), frontbuffer_write);
-
-	intel_fb_obj_flush(obj, ORIGIN_CS);
-}
-
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
 			  const struct drm_i915_gem_object_ops *ops)
 {
@@ -71,10 +61,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 
 	obj->ops = ops;
 
-	obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
-	i915_active_request_init(&obj->frontbuffer_write,
-				 NULL, frontbuffer_retire);
-
 	obj->mm.madv = I915_MADV_WILLNEED;
 	INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
 	mutex_init(&obj->mm.get_page.lock);
@@ -186,7 +172,6 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 
 		GEM_BUG_ON(atomic_read(&obj->bind_count));
 		GEM_BUG_ON(obj->userfault_count);
-		GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
 		GEM_BUG_ON(!list_empty(&obj->lut_list));
 
 		atomic_set(&obj->mm.pages_pin_count, 0);
@@ -259,6 +244,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 
+	GEM_BUG_ON(i915_gem_object_is_framebuffer(obj));
+
 	/*
 	 * Before we free the object, make sure any pure RCU-only
 	 * read-side critical sections are complete, e.g.
@@ -290,13 +277,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 		queue_work(i915->wq, &i915->mm.free_work);
 }
 
-static inline enum fb_op_origin
-fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
-{
-	return (domain == I915_GEM_DOMAIN_GTT ?
-		obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
-}
-
 static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
 {
 	return !(obj->cache_level == I915_CACHE_NONE ||
@@ -319,8 +299,7 @@ i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
 		for_each_ggtt_vma(vma, obj)
 			intel_gt_flush_ggtt_writes(vma->vm->gt);
 
-		intel_fb_obj_flush(obj,
-				   fb_write_origin(obj, I915_GEM_DOMAIN_GTT));
+		intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
 
 		for_each_ggtt_vma(vma, obj) {
 			if (vma->iomap)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 3714cf234d64..abc23e7e13a7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -161,7 +161,7 @@ i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
 static inline bool
 i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
 {
-	return READ_ONCE(obj->framebuffer_references);
+	return READ_ONCE(obj->frontbuffer);
 }
 
 static inline unsigned int
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index d474c6ac4100..ede0eb4218a8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -13,6 +13,7 @@
 #include "i915_selftest.h"
 
 struct drm_i915_gem_object;
+struct intel_frontbuffer;
 
 /*
  * struct i915_lut_handle tracks the fast lookups from handle to vma used
@@ -141,9 +142,7 @@ struct drm_i915_gem_object {
 	 */
 	u16 write_domain;
 
-	atomic_t frontbuffer_bits;
-	unsigned int frontbuffer_ggtt_origin; /* write once */
-	struct i915_active_request frontbuffer_write;
+	struct intel_frontbuffer *frontbuffer;
 
 	/** Current tiling stride for the object, if it's tiled. */
 	unsigned int tiling_and_stride;
@@ -224,9 +223,6 @@ struct drm_i915_gem_object {
 		bool quirked:1;
 	} mm;
 
-	/** References from framebuffers, locks out tiling changes. */
-	unsigned int framebuffer_references;
-
 	/** Record of address bit 17 of each page at last unbind. */
 	unsigned long *bit_17;
 
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 8f639ed6034a..b6b80963f992 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -136,7 +136,6 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	struct intel_engine_cs *engine;
 	struct i915_vma *vma;
-	unsigned int frontbuffer_bits;
 	int pin_count = 0;
 
 	seq_printf(m, "%pK: %c%c%c%c %8zdKiB %02x %02x %s%s%s",
@@ -226,10 +225,6 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 	engine = i915_gem_object_last_write_engine(obj);
 	if (engine)
 		seq_printf(m, " (%s)", engine->name);
-
-	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
-	if (frontbuffer_bits)
-		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
 }
 
 struct file_stats {
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 86b0a16632bc..8fbedbbb39dc 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2473,10 +2473,6 @@ int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
 		      u32 handle, u64 *offset);
 int i915_gem_mmap_gtt_version(void);
 
-void i915_gem_track_fb(struct drm_i915_gem_object *old,
-		       struct drm_i915_gem_object *new,
-		       unsigned frontbuffer_bits);
-
 int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
 
 static inline u32 i915_reset_count(struct i915_gpu_error *error)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 05dbf54281a8..46ed90e59a8d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -138,17 +138,19 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
 	void *vaddr = obj->phys_handle->vaddr + args->offset;
 	char __user *user_data = u64_to_user_ptr(args->data_ptr);
 
-	/* We manually control the domain here and pretend that it
+	/*
+	 * We manually control the domain here and pretend that it
 	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
 	 */
-	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
+	intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
+
 	if (copy_from_user(vaddr, user_data, args->size))
 		return -EFAULT;
 
 	drm_clflush_virt_range(vaddr, args->size);
 	intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
 
-	intel_fb_obj_flush(obj, ORIGIN_CPU);
+	intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
 	return 0;
 }
 
@@ -596,7 +598,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 		goto out_unpin;
 	}
 
-	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
+	intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
 
 	user_data = u64_to_user_ptr(args->data_ptr);
 	offset = args->offset;
@@ -637,7 +639,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 		user_data += page_length;
 		offset += page_length;
 	}
-	intel_fb_obj_flush(obj, ORIGIN_CPU);
+	intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
 
 	i915_gem_object_unlock_fence(obj, fence);
 out_unpin:
@@ -730,7 +732,7 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
 		offset = 0;
 	}
 
-	intel_fb_obj_flush(obj, ORIGIN_CPU);
+	intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
 	i915_gem_object_unlock_fence(obj, fence);
 
 	return ret;
@@ -1776,39 +1778,6 @@ int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
 	return ret;
 }
 
-/**
- * i915_gem_track_fb - update frontbuffer tracking
- * @old: current GEM buffer for the frontbuffer slots
- * @new: new GEM buffer for the frontbuffer slots
- * @frontbuffer_bits: bitmask of frontbuffer slots
- *
- * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
- * from @old and setting them in @new. Both @old and @new can be NULL.
- */
-void i915_gem_track_fb(struct drm_i915_gem_object *old,
-		       struct drm_i915_gem_object *new,
-		       unsigned frontbuffer_bits)
-{
-	/* Control of individual bits within the mask are guarded by
-	 * the owning plane->mutex, i.e. we can never see concurrent
-	 * manipulation of individual bits. But since the bitfield as a whole
-	 * is updated using RMW, we need to use atomics in order to update
-	 * the bits.
-	 */
-	BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
-		     BITS_PER_TYPE(atomic_t));
-
-	if (old) {
-		WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
-		atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
-	}
-
-	if (new) {
-		WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
-		atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
-	}
-}
-
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/mock_gem_device.c"
 #include "selftests/i915_gem.c"
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 85b4a906be1e..cc1b93fce145 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -927,8 +927,10 @@ int i915_vma_move_to_active(struct i915_vma *vma,
 	if (flags & EXEC_OBJECT_WRITE) {
 		obj->write_domain = I915_GEM_DOMAIN_RENDER;
 
-		if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
-			__i915_active_request_set(&obj->frontbuffer_write, rq);
+		if (intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CS))
+			i915_active_ref(&obj->frontbuffer->write,
+					rq->fence.context,
+					rq);
 
 		obj->read_domains = 0;
 	}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index e8ecbd55476e..a129a32d9040 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -69,6 +69,7 @@ enum intel_output_type {
 
 struct intel_framebuffer {
 	struct drm_framebuffer base;
+	struct intel_frontbuffer *frontbuffer;
 	struct intel_rotation_info rot_info;
 
 	/* for each plane in the normal GTT view */
-- 
2.22.0


* [PATCH 20/24] drm/i915: Markup expected timeline locks for i915_active
  2019-07-15  8:09 [PATCH 01/24] drm/i915: Hide unshrinkable context objects from the shrinker Chris Wilson
                   ` (17 preceding siblings ...)
  2019-07-15  8:09 ` [PATCH 19/24] drm/i915: Extract intel_frontbuffer active tracking Chris Wilson
@ 2019-07-15  8:09 ` Chris Wilson
  2019-07-15  8:09 ` [PATCH 21/24] drm/i915: Remove logical HW ID Chris Wilson
                   ` (7 subsequent siblings)
  26 siblings, 0 replies; 33+ messages in thread
From: Chris Wilson @ 2019-07-15  8:09 UTC (permalink / raw)
  To: intel-gfx

As every i915_active_request should be serialised by a dedicated lock,
i915_active consists of a tree of locks, one for each node. Mark up
each i915_active_request with the lock that is supposed to be guarding
it, so that we can verify that the serialised updates are indeed
serialised.
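
For illustration, a condensed sketch of the pattern introduced below
(names taken from the diff; the lock annotation is only compiled in
under CONFIG_DRM_I915_DEBUG_GEM):

	/* Each tracker records the mutex expected to serialise it ... */
	i915_active_request_init(&node->base, &tl->mutex, NULL, node_retire);

	/* ... so every update can assert that the expected lock is held. */
	lockdep_assert_held(active->lock);

Callers now pass the request's intel_timeline to i915_active_ref()
rather than a bare u64 context id; the timeline is what supplies the
mutex to check against.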

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/display/intel_overlay.c  |  2 +-
 .../gpu/drm/i915/gem/i915_gem_client_blt.c    |  2 +-
 drivers/gpu/drm/i915/gem/i915_gem_context.c   |  2 +-
 drivers/gpu/drm/i915/gt/intel_context.c       |  2 +-
 drivers/gpu/drm/i915/gt/intel_engine_pool.h   |  2 +-
 drivers/gpu/drm/i915/gt/intel_timeline.c      |  7 +++----
 drivers/gpu/drm/i915/gt/selftest_timeline.c   |  4 ++++
 .../gpu/drm/i915/gt/selftests/mock_timeline.c |  2 +-
 drivers/gpu/drm/i915/i915_active.c            | 20 +++++++++++++------
 drivers/gpu/drm/i915/i915_active.h            | 12 +++++++++--
 drivers/gpu/drm/i915/i915_active_types.h      |  3 +++
 drivers/gpu/drm/i915/i915_vma.c               |  4 ++--
 drivers/gpu/drm/i915/selftests/i915_active.c  |  3 +--
 13 files changed, 43 insertions(+), 22 deletions(-)

diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 1a15fa34205c..d5e06b463567 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -230,7 +230,7 @@ alloc_request(struct intel_overlay *overlay, void (*fn)(struct intel_overlay *))
 	if (IS_ERR(rq))
 		return rq;
 
-	err = i915_active_ref(&overlay->last_flip, rq->fence.context, rq);
+	err = i915_active_ref(&overlay->last_flip, rq->timeline, rq);
 	if (err) {
 		i915_request_add(rq);
 		return ERR_PTR(err);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
index 6f537e8e4dea..119d24aba362 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
@@ -197,7 +197,7 @@ static void clear_pages_worker(struct work_struct *work)
 	 * keep track of the GPU activity within this vma/request, and
 	 * propagate the signal from the request to w->dma.
 	 */
-	err = i915_active_ref(&vma->active, rq->fence.context, rq);
+	err = i915_active_ref(&vma->active, rq->timeline, rq);
 	if (err)
 		goto out_request;
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 0551cea321cc..e9f5e19265b9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -938,7 +938,7 @@ static int context_barrier_task(struct i915_gem_context *ctx,
 		if (emit)
 			err = emit(rq, data);
 		if (err == 0)
-			err = i915_active_ref(&cb->base, rq->fence.context, rq);
+			err = i915_active_ref(&cb->base, rq->timeline, rq);
 
 		i915_request_add(rq);
 		if (err)
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 6bea5d9d8dcf..726b4eec2122 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -271,7 +271,7 @@ int intel_context_prepare_remote_request(struct intel_context *ce,
 	 * words transfer the pinned ce object to tracked active request.
 	 */
 	GEM_BUG_ON(i915_active_is_idle(&ce->active));
-	err = i915_active_ref(&ce->active, rq->fence.context, rq);
+	err = i915_active_ref(&ce->active, rq->timeline, rq);
 
 unlock:
 	if (rq->timeline != tl)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.h b/drivers/gpu/drm/i915/gt/intel_engine_pool.h
index f7a0a660c1c9..8d069efd9457 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pool.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pool.h
@@ -18,7 +18,7 @@ static inline int
 intel_engine_pool_mark_active(struct intel_engine_pool_node *node,
 			      struct i915_request *rq)
 {
-	return i915_active_ref(&node->active, rq->fence.context, rq);
+	return i915_active_ref(&node->active, rq->timeline, rq);
 }
 
 static inline void
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index eafd94d5e211..02fbe11b671b 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -254,7 +254,7 @@ int intel_timeline_init(struct intel_timeline *timeline,
 
 	mutex_init(&timeline->mutex);
 
-	INIT_ACTIVE_REQUEST(&timeline->last_request);
+	INIT_ACTIVE_REQUEST(&timeline->last_request, &timeline->mutex);
 	INIT_LIST_HEAD(&timeline->requests);
 
 	i915_syncmap_init(&timeline->sync);
@@ -440,8 +440,7 @@ __intel_timeline_get_seqno(struct intel_timeline *tl,
 	 * free it after the current request is retired, which ensures that
 	 * all writes into the cacheline from previous requests are complete.
 	 */
-	err = i915_active_ref(&tl->hwsp_cacheline->active,
-			      tl->fence_context, rq);
+	err = i915_active_ref(&tl->hwsp_cacheline->active, tl, rq);
 	if (err)
 		goto err_cacheline;
 
@@ -492,7 +491,7 @@ int intel_timeline_get_seqno(struct intel_timeline *tl,
 static int cacheline_ref(struct intel_timeline_cacheline *cl,
 			 struct i915_request *rq)
 {
-	return i915_active_ref(&cl->active, rq->fence.context, rq);
+	return i915_active_ref(&cl->active, rq->timeline, rq);
 }
 
 int intel_timeline_read_hwsp(struct i915_request *from,
diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
index d54113697745..321481403165 100644
--- a/drivers/gpu/drm/i915/gt/selftest_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
@@ -689,7 +689,9 @@ static int live_hwsp_wrap(void *arg)
 
 		tl->seqno = -4u;
 
+		mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING);
 		err = intel_timeline_get_seqno(tl, rq, &seqno[0]);
+		mutex_unlock(&tl->mutex);
 		if (err) {
 			i915_request_add(rq);
 			goto out;
@@ -704,7 +706,9 @@ static int live_hwsp_wrap(void *arg)
 		}
 		hwsp_seqno[0] = tl->hwsp_seqno;
 
+		mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING);
 		err = intel_timeline_get_seqno(tl, rq, &seqno[1]);
+		mutex_unlock(&tl->mutex);
 		if (err) {
 			i915_request_add(rq);
 			goto out;
diff --git a/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c
index 5c549205828a..598170efcaf6 100644
--- a/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c
@@ -15,7 +15,7 @@ void mock_timeline_init(struct intel_timeline *timeline, u64 context)
 
 	mutex_init(&timeline->mutex);
 
-	INIT_ACTIVE_REQUEST(&timeline->last_request);
+	INIT_ACTIVE_REQUEST(&timeline->last_request, &timeline->mutex);
 	INIT_LIST_HEAD(&timeline->requests);
 
 	i915_syncmap_init(&timeline->sync);
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 13f304a29fc8..ab313c931abf 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -131,10 +131,11 @@ node_retire(struct i915_active_request *base, struct i915_request *rq)
 }
 
 static struct i915_active_request *
-active_instance(struct i915_active *ref, u64 idx)
+active_instance(struct i915_active *ref, struct intel_timeline *tl)
 {
 	struct active_node *node, *prealloc;
 	struct rb_node **p, *parent;
+	u64 idx = tl->fence_context;
 
 	/*
 	 * We track the most recently used timeline to skip a rbtree search
@@ -173,7 +174,7 @@ active_instance(struct i915_active *ref, u64 idx)
 	}
 
 	node = prealloc;
-	i915_active_request_init(&node->base, NULL, node_retire);
+	i915_active_request_init(&node->base, &tl->mutex, NULL, node_retire);
 	node->ref = ref;
 	node->timeline = idx;
 
@@ -206,18 +207,20 @@ void __i915_active_init(struct drm_i915_private *i915,
 }
 
 int i915_active_ref(struct i915_active *ref,
-		    u64 timeline,
+		    struct intel_timeline *tl,
 		    struct i915_request *rq)
 {
 	struct i915_active_request *active;
 	int err;
 
+	lockdep_assert_held(&tl->mutex);
+
 	/* Prevent reaping in case we malloc/wait while building the tree */
 	err = i915_active_acquire(ref);
 	if (err)
 		return err;
 
-	active = active_instance(ref, timeline);
+	active = active_instance(ref, tl);
 	if (!active) {
 		err = -ENOMEM;
 		goto out;
@@ -353,6 +356,7 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
 	GEM_BUG_ON(!engine->mask);
 	for_each_engine_masked(engine, i915, engine->mask, tmp) {
 		struct intel_context *kctx = engine->kernel_context;
+		struct intel_timeline *tl = kctx->ring->timeline;
 		struct active_node *node;
 
 		node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
@@ -361,9 +365,9 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
 			goto unwind;
 		}
 
-		i915_active_request_init(&node->base,
+		i915_active_request_init(&node->base, &tl->mutex,
 					 (void *)engine, node_retire);
-		node->timeline = kctx->ring->timeline->fence_context;
+		node->timeline = tl->fence_context;
 		node->ref = ref;
 		atomic_inc(&ref->count);
 
@@ -441,6 +445,10 @@ int i915_active_request_set(struct i915_active_request *active,
 {
 	int err;
 
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+	lockdep_assert_held(active->lock);
+#endif
+
 	/* Must maintain ordering wrt previous active requests */
 	err = i915_request_await_active_request(rq, active);
 	if (err)
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index 911a8338007a..164c2b074cfc 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -58,15 +58,20 @@ void i915_active_retire_noop(struct i915_active_request *active,
  */
 static inline void
 i915_active_request_init(struct i915_active_request *active,
+			 struct mutex *lock,
 			 struct i915_request *rq,
 			 i915_active_retire_fn retire)
 {
 	RCU_INIT_POINTER(active->request, rq);
 	INIT_LIST_HEAD(&active->link);
 	active->retire = retire ?: i915_active_retire_noop;
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+	active->lock = lock;
+#endif
 }
 
-#define INIT_ACTIVE_REQUEST(name) i915_active_request_init((name), NULL, NULL)
+#define INIT_ACTIVE_REQUEST(name, lock) \
+	i915_active_request_init((name), (lock), NULL, NULL)
 
 /**
  * i915_active_request_set - updates the tracker to watch the current request
@@ -81,6 +86,9 @@ static inline void
 __i915_active_request_set(struct i915_active_request *active,
 			  struct i915_request *request)
 {
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+	lockdep_assert_held(active->lock);
+#endif
 	list_move(&active->link, &request->active_list);
 	rcu_assign_pointer(active->request, request);
 }
@@ -362,7 +370,7 @@ void __i915_active_init(struct drm_i915_private *i915,
 } while (0)
 
 int i915_active_ref(struct i915_active *ref,
-		    u64 timeline,
+		    struct intel_timeline *tl,
 		    struct i915_request *rq);
 
 int i915_active_wait(struct i915_active *ref);
diff --git a/drivers/gpu/drm/i915/i915_active_types.h b/drivers/gpu/drm/i915/i915_active_types.h
index 5b0a3024ce24..4311c2fab6e6 100644
--- a/drivers/gpu/drm/i915/i915_active_types.h
+++ b/drivers/gpu/drm/i915/i915_active_types.h
@@ -24,6 +24,9 @@ struct i915_active_request {
 	struct i915_request __rcu *request;
 	struct list_head link;
 	i915_active_retire_fn retire;
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+	struct mutex *lock;
+#endif
 };
 
 struct active_node;
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index cc1b93fce145..78d975a6c293 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -919,7 +919,7 @@ int i915_vma_move_to_active(struct i915_vma *vma,
 	 * add the active reference first and queue for it to be dropped
 	 * *last*.
 	 */
-	err = i915_active_ref(&vma->active, rq->fence.context, rq);
+	err = i915_active_ref(&vma->active, rq->timeline, rq);
 	if (unlikely(err))
 		return err;
 
@@ -929,7 +929,7 @@ int i915_vma_move_to_active(struct i915_vma *vma,
 
 		if (intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CS))
 			i915_active_ref(&obj->frontbuffer->write,
-					rq->fence.context,
+					rq->timeline,
 					rq);
 
 		obj->read_domains = 0;
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index e5cd5d47e380..77d844ac8b71 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -110,8 +110,7 @@ __live_active_setup(struct drm_i915_private *i915)
 						       submit,
 						       GFP_KERNEL);
 		if (err >= 0)
-			err = i915_active_ref(&active->base,
-					      rq->fence.context, rq);
+			err = i915_active_ref(&active->base, rq->timeline, rq);
 		i915_request_add(rq);
 		if (err) {
 			pr_err("Failed to track active ref!\n");
-- 
2.22.0


* [PATCH 21/24] drm/i915: Remove logical HW ID
  2019-07-15  8:09 [PATCH 01/24] drm/i915: Hide unshrinkable context objects from the shrinker Chris Wilson
                   ` (18 preceding siblings ...)
  2019-07-15  8:09 ` [PATCH 20/24] drm/i915: Markup expected timeline locks for i915_active Chris Wilson
@ 2019-07-15  8:09 ` Chris Wilson
  2019-07-19 20:56   ` Kumar Valsan, Prathap
  2019-07-15  8:09 ` [PATCH 22/24] drm/i915: Move context management under GEM Chris Wilson
                   ` (6 subsequent siblings)
  26 siblings, 1 reply; 33+ messages in thread
From: Chris Wilson @ 2019-07-15  8:09 UTC (permalink / raw)
  To: intel-gfx

We only need to keep a unique tag for the active lifetime of the
context, and for as long as we need to identify that context. The HW
uses the tag to determine if it should use a lite-restore (why not the
LRCA?) and passes the tag back for various status identifies. The only
status we need to track is for OA, so when using perf, we assign the
specific context a unique tag.
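
In outline (a condensed sketch of the execlists_schedule_in() hunk
below, with comments added for illustration):

	if (ce->hw_tag) {
		/* e.g. the perf/OA context keeps a fixed, recognisable tag */
		ce->lrc_desc |= (u64)ce->hw_tag << 32;
	} else {
		/* otherwise borrow a rolling per-engine tag for this submission */
		ce->lrc_desc &= ~GENMASK_ULL(47, 37);
		ce->lrc_desc |= (u64)(rq->engine->hw_tag++ % NUM_HW_TAG) <<
				GEN11_SW_CTX_ID_SHIFT;
	}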

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gem/i915_gem_context.c   | 151 ------------------
 drivers/gpu/drm/i915/gem/i915_gem_context.h   |  15 --
 .../gpu/drm/i915/gem/i915_gem_context_types.h |  18 ---
 .../drm/i915/gem/selftests/i915_gem_context.c |  13 +-
 .../gpu/drm/i915/gem/selftests/mock_context.c |   8 -
 drivers/gpu/drm/i915/gt/intel_context_types.h |   1 +
 drivers/gpu/drm/i915/gt/intel_engine_types.h  |   5 +-
 drivers/gpu/drm/i915/gt/intel_lrc.c           |  28 ++--
 drivers/gpu/drm/i915/gvt/kvmgt.c              |  17 --
 drivers/gpu/drm/i915/i915_debugfs.c           |   3 -
 drivers/gpu/drm/i915/i915_gpu_error.c         |   7 +-
 drivers/gpu/drm/i915/i915_gpu_error.h         |   1 -
 drivers/gpu/drm/i915/i915_perf.c              |  28 ++--
 drivers/gpu/drm/i915/i915_trace.h             |  38 ++---
 .../gpu/drm/i915/selftests/i915_gem_evict.c   |   4 +-
 drivers/gpu/drm/i915/selftests/i915_vma.c     |   2 +-
 16 files changed, 51 insertions(+), 288 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index e9f5e19265b9..2669e038661e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -167,95 +167,6 @@ lookup_user_engine(struct i915_gem_context *ctx,
 	return i915_gem_context_get_engine(ctx, idx);
 }
 
-static inline int new_hw_id(struct drm_i915_private *i915, gfp_t gfp)
-{
-	unsigned int max;
-
-	lockdep_assert_held(&i915->contexts.mutex);
-
-	if (INTEL_GEN(i915) >= 11)
-		max = GEN11_MAX_CONTEXT_HW_ID;
-	else if (USES_GUC_SUBMISSION(i915))
-		/*
-		 * When using GuC in proxy submission, GuC consumes the
-		 * highest bit in the context id to indicate proxy submission.
-		 */
-		max = MAX_GUC_CONTEXT_HW_ID;
-	else
-		max = MAX_CONTEXT_HW_ID;
-
-	return ida_simple_get(&i915->contexts.hw_ida, 0, max, gfp);
-}
-
-static int steal_hw_id(struct drm_i915_private *i915)
-{
-	struct i915_gem_context *ctx, *cn;
-	LIST_HEAD(pinned);
-	int id = -ENOSPC;
-
-	lockdep_assert_held(&i915->contexts.mutex);
-
-	list_for_each_entry_safe(ctx, cn,
-				 &i915->contexts.hw_id_list, hw_id_link) {
-		if (atomic_read(&ctx->hw_id_pin_count)) {
-			list_move_tail(&ctx->hw_id_link, &pinned);
-			continue;
-		}
-
-		GEM_BUG_ON(!ctx->hw_id); /* perma-pinned kernel context */
-		list_del_init(&ctx->hw_id_link);
-		id = ctx->hw_id;
-		break;
-	}
-
-	/*
-	 * Remember how far we got up on the last repossesion scan, so the
-	 * list is kept in a "least recently scanned" order.
-	 */
-	list_splice_tail(&pinned, &i915->contexts.hw_id_list);
-	return id;
-}
-
-static int assign_hw_id(struct drm_i915_private *i915, unsigned int *out)
-{
-	int ret;
-
-	lockdep_assert_held(&i915->contexts.mutex);
-
-	/*
-	 * We prefer to steal/stall ourselves and our users over that of the
-	 * entire system. That may be a little unfair to our users, and
-	 * even hurt high priority clients. The choice is whether to oomkill
-	 * something else, or steal a context id.
-	 */
-	ret = new_hw_id(i915, GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
-	if (unlikely(ret < 0)) {
-		ret = steal_hw_id(i915);
-		if (ret < 0) /* once again for the correct errno code */
-			ret = new_hw_id(i915, GFP_KERNEL);
-		if (ret < 0)
-			return ret;
-	}
-
-	*out = ret;
-	return 0;
-}
-
-static void release_hw_id(struct i915_gem_context *ctx)
-{
-	struct drm_i915_private *i915 = ctx->i915;
-
-	if (list_empty(&ctx->hw_id_link))
-		return;
-
-	mutex_lock(&i915->contexts.mutex);
-	if (!list_empty(&ctx->hw_id_link)) {
-		ida_simple_remove(&i915->contexts.hw_ida, ctx->hw_id);
-		list_del_init(&ctx->hw_id_link);
-	}
-	mutex_unlock(&i915->contexts.mutex);
-}
-
 static void __free_engines(struct i915_gem_engines *e, unsigned int count)
 {
 	while (count--) {
@@ -309,7 +220,6 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
 	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
 	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
 
-	release_hw_id(ctx);
 	if (ctx->vm)
 		i915_vm_put(ctx->vm);
 
@@ -381,12 +291,6 @@ static void context_close(struct i915_gem_context *ctx)
 	i915_gem_context_set_closed(ctx);
 	ctx->file_priv = ERR_PTR(-EBADF);
 
-	/*
-	 * This context will never again be assinged to HW, so we can
-	 * reuse its ID for the next context.
-	 */
-	release_hw_id(ctx);
-
 	/*
 	 * The LUT uses the VMA as a backpointer to unref the object,
 	 * so we need to clear the LUT before we close all the VMA (inside
@@ -449,7 +353,6 @@ __create_context(struct drm_i915_private *i915)
 	RCU_INIT_POINTER(ctx->engines, e);
 
 	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
-	INIT_LIST_HEAD(&ctx->hw_id_link);
 
 	/* NB: Mark all slices as needing a remap so that when the context first
 	 * loads it will restore whatever remap state already exists. If there
@@ -572,13 +475,6 @@ i915_gem_context_create_gvt(struct drm_device *dev)
 	if (IS_ERR(ctx))
 		goto out;
 
-	ret = i915_gem_context_pin_hw_id(ctx);
-	if (ret) {
-		context_close(ctx);
-		ctx = ERR_PTR(ret);
-		goto out;
-	}
-
 	ctx->file_priv = ERR_PTR(-EBADF);
 	i915_gem_context_set_closed(ctx); /* not user accessible */
 	i915_gem_context_clear_bannable(ctx);
@@ -609,18 +505,11 @@ struct i915_gem_context *
 i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
 {
 	struct i915_gem_context *ctx;
-	int err;
 
 	ctx = i915_gem_create_context(i915, 0);
 	if (IS_ERR(ctx))
 		return ctx;
 
-	err = i915_gem_context_pin_hw_id(ctx);
-	if (err) {
-		destroy_kernel_context(&ctx);
-		return ERR_PTR(err);
-	}
-
 	i915_gem_context_clear_bannable(ctx);
 	ctx->sched.priority = I915_USER_PRIORITY(prio);
 	ctx->ring_size = PAGE_SIZE;
@@ -660,15 +549,6 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
 		DRM_ERROR("Failed to create default global context\n");
 		return PTR_ERR(ctx);
 	}
-	/*
-	 * For easy recognisablity, we want the kernel context to be 0 and then
-	 * all user contexts will have non-zero hw_id. Kernel contexts are
-	 * permanently pinned, so that we never suffer a stall and can
-	 * use them from any allocation context (e.g. for evicting other
-	 * contexts and from inside the shrinker).
-	 */
-	GEM_BUG_ON(ctx->hw_id);
-	GEM_BUG_ON(!atomic_read(&ctx->hw_id_pin_count));
 	dev_priv->kernel_context = ctx;
 
 	DRM_DEBUG_DRIVER("%s context support initialized\n",
@@ -682,10 +562,6 @@ void i915_gem_contexts_fini(struct drm_i915_private *i915)
 	lockdep_assert_held(&i915->drm.struct_mutex);
 
 	destroy_kernel_context(&i915->kernel_context);
-
-	/* Must free all deferred contexts (via flush_workqueue) first */
-	GEM_BUG_ON(!list_empty(&i915->contexts.hw_id_list));
-	ida_destroy(&i915->contexts.hw_ida);
 }
 
 static int context_idr_cleanup(int id, void *p, void *data)
@@ -2366,33 +2242,6 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
 	return ret;
 }
 
-int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx)
-{
-	struct drm_i915_private *i915 = ctx->i915;
-	int err = 0;
-
-	mutex_lock(&i915->contexts.mutex);
-
-	GEM_BUG_ON(i915_gem_context_is_closed(ctx));
-
-	if (list_empty(&ctx->hw_id_link)) {
-		GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count));
-
-		err = assign_hw_id(i915, &ctx->hw_id);
-		if (err)
-			goto out_unlock;
-
-		list_add_tail(&ctx->hw_id_link, &i915->contexts.hw_id_list);
-	}
-
-	GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == ~0u);
-	atomic_inc(&ctx->hw_id_pin_count);
-
-out_unlock:
-	mutex_unlock(&i915->contexts.mutex);
-	return err;
-}
-
 /* GEM context-engines iterator: for_each_gem_engine() */
 struct intel_context *
 i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index 106e2ccf7a4c..6fb3ad7e03fc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -112,21 +112,6 @@ i915_gem_context_clear_user_engines(struct i915_gem_context *ctx)
 	clear_bit(CONTEXT_USER_ENGINES, &ctx->flags);
 }
 
-int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx);
-static inline int i915_gem_context_pin_hw_id(struct i915_gem_context *ctx)
-{
-	if (atomic_inc_not_zero(&ctx->hw_id_pin_count))
-		return 0;
-
-	return __i915_gem_context_pin_hw_id(ctx);
-}
-
-static inline void i915_gem_context_unpin_hw_id(struct i915_gem_context *ctx)
-{
-	GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == 0u);
-	atomic_dec(&ctx->hw_id_pin_count);
-}
-
 static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
 {
 	return !ctx->file_priv;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index 0ee61482ef94..80cd3ab355e0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -147,24 +147,6 @@ struct i915_gem_context {
 #define CONTEXT_FORCE_SINGLE_SUBMISSION	2
 #define CONTEXT_USER_ENGINES		3
 
-	/**
-	 * @hw_id: - unique identifier for the context
-	 *
-	 * The hardware needs to uniquely identify the context for a few
-	 * functions like fault reporting, PASID, scheduling. The
-	 * &drm_i915_private.context_hw_ida is used to assign a unqiue
-	 * id for the lifetime of the context.
-	 *
-	 * @hw_id_pin_count: - number of times this context had been pinned
-	 * for use (should be, at most, once per engine).
-	 *
-	 * @hw_id_link: - all contexts with an assigned id are tracked
-	 * for possible repossession.
-	 */
-	unsigned int hw_id;
-	atomic_t hw_id_pin_count;
-	struct list_head hw_id_link;
-
 	struct mutex mutex;
 
 	struct i915_sched_attr sched;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index db7856f0f31e..a7b6da29f3df 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -524,9 +524,9 @@ static int igt_ctx_exec(void *arg)
 
 			err = gpu_fill(obj, ctx, engine, dw);
 			if (err) {
-				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
+				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
 				       ndwords, dw, max_dwords(obj),
-				       engine->name, ctx->hw_id,
+				       engine->name,
 				       yesno(!!ctx->vm), err);
 				goto out_unlock;
 			}
@@ -643,9 +643,9 @@ static int igt_shared_ctx_exec(void *arg)
 
 			err = gpu_fill(obj, ctx, engine, dw);
 			if (err) {
-				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
+				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
 				       ndwords, dw, max_dwords(obj),
-				       engine->name, ctx->hw_id,
+				       engine->name,
 				       yesno(!!ctx->vm), err);
 				kernel_context_close(ctx);
 				goto out_test;
@@ -1219,10 +1219,9 @@ static int igt_ctx_readonly(void *arg)
 
 			err = gpu_fill(obj, ctx, engine, dw);
 			if (err) {
-				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
+				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
 				       ndwords, dw, max_dwords(obj),
-				       engine->name, ctx->hw_id,
-				       yesno(!!ctx->vm), err);
+				       engine->name, yesno(!!ctx->vm), err);
 				goto out_unlock;
 			}
 
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
index be8974ccff24..0104f16b1327 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
@@ -13,7 +13,6 @@ mock_context(struct drm_i915_private *i915,
 {
 	struct i915_gem_context *ctx;
 	struct i915_gem_engines *e;
-	int ret;
 
 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 	if (!ctx)
@@ -30,13 +29,8 @@ mock_context(struct drm_i915_private *i915,
 	RCU_INIT_POINTER(ctx->engines, e);
 
 	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
-	INIT_LIST_HEAD(&ctx->hw_id_link);
 	mutex_init(&ctx->mutex);
 
-	ret = i915_gem_context_pin_hw_id(ctx);
-	if (ret < 0)
-		goto err_engines;
-
 	if (name) {
 		struct i915_ppgtt *ppgtt;
 
@@ -54,8 +48,6 @@ mock_context(struct drm_i915_private *i915,
 
 	return ctx;
 
-err_engines:
-	free_engines(rcu_access_pointer(ctx->engines));
 err_free:
 	kfree(ctx);
 	return NULL;
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index c00419e38a77..36394d4061e4 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -52,6 +52,7 @@ struct intel_context {
 
 	u32 *lrc_reg_state;
 	u64 lrc_desc;
+	u32 hw_tag;
 
 	unsigned int active_count; /* protected by timeline->mutex */
 
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 1da8d2f34c56..121abe5b08e7 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -270,10 +270,13 @@ struct intel_engine_cs {
 
 	u8 class;
 	u8 instance;
+
+	u32 uabi_capabilities;
 	u32 context_size;
 	u32 mmio_base;
 
-	u32 uabi_capabilities;
+	unsigned int hw_tag;
+#define NUM_HW_TAG (256)
 
 	struct rb_node uabi_node;
 
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 18d9ddc5cd58..ae15e0dd98ac 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -412,9 +412,6 @@ lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
 	struct i915_gem_context *ctx = ce->gem_context;
 	u64 desc;
 
-	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (BIT(GEN8_CTX_ID_WIDTH)));
-	BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > (BIT(GEN11_SW_CTX_ID_WIDTH)));
-
 	desc = ctx->desc_template;				/* bits  0-11 */
 	GEM_BUG_ON(desc & GENMASK_ULL(63, 12));
 
@@ -428,20 +425,11 @@ lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
 	 * anything below.
 	 */
 	if (INTEL_GEN(engine->i915) >= 11) {
-		GEM_BUG_ON(ctx->hw_id >= BIT(GEN11_SW_CTX_ID_WIDTH));
-		desc |= (u64)ctx->hw_id << GEN11_SW_CTX_ID_SHIFT;
-								/* bits 37-47 */
-
 		desc |= (u64)engine->instance << GEN11_ENGINE_INSTANCE_SHIFT;
 								/* bits 48-53 */
 
-		/* TODO: decide what to do with SW counter (bits 55-60) */
-
 		desc |= (u64)engine->class << GEN11_ENGINE_CLASS_SHIFT;
 								/* bits 61-63 */
-	} else {
-		GEM_BUG_ON(ctx->hw_id >= BIT(GEN8_CTX_ID_WIDTH));
-		desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT;	/* bits 32-52 */
 	}
 
 	return desc;
@@ -537,6 +525,15 @@ execlists_schedule_in(struct i915_request *rq, int idx)
 		intel_context_get(ce);
 		ce->inflight = rq->engine;
 
+		if (ce->hw_tag) {
+			ce->lrc_desc |= (u64)ce->hw_tag << 32;
+		} else {
+			ce->lrc_desc &= ~GENMASK_ULL(47, 37);
+			ce->lrc_desc |=
+				(u64)(rq->engine->hw_tag++ % NUM_HW_TAG) <<
+				GEN11_SW_CTX_ID_SHIFT;
+		}
+
 		execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
 		intel_engine_context_in(ce->inflight);
 	}
@@ -1551,7 +1548,6 @@ static void execlists_context_destroy(struct kref *kref)
 
 static void execlists_context_unpin(struct intel_context *ce)
 {
-	i915_gem_context_unpin_hw_id(ce->gem_context);
 	i915_gem_object_unpin_map(ce->state->obj);
 	intel_ring_reset(ce->ring, ce->ring->tail);
 }
@@ -1606,18 +1602,12 @@ __execlists_context_pin(struct intel_context *ce,
 		goto unpin_active;
 	}
 
-	ret = i915_gem_context_pin_hw_id(ce->gem_context);
-	if (ret)
-		goto unpin_map;
-
 	ce->lrc_desc = lrc_descriptor(ce, engine);
 	ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
 	__execlists_update_reg_state(ce, engine);
 
 	return 0;
 
-unpin_map:
-	i915_gem_object_unpin_map(ce->state->obj);
 unpin_active:
 	intel_context_active_release(ce);
 err:
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 144301b778df..941d0f08b047 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -1566,27 +1566,10 @@ vgpu_id_show(struct device *dev, struct device_attribute *attr,
 	return sprintf(buf, "\n");
 }
 
-static ssize_t
-hw_id_show(struct device *dev, struct device_attribute *attr,
-	   char *buf)
-{
-	struct mdev_device *mdev = mdev_from_dev(dev);
-
-	if (mdev) {
-		struct intel_vgpu *vgpu = (struct intel_vgpu *)
-			mdev_get_drvdata(mdev);
-		return sprintf(buf, "%u\n",
-			       vgpu->submission.shadow[0]->gem_context->hw_id);
-	}
-	return sprintf(buf, "\n");
-}
-
 static DEVICE_ATTR_RO(vgpu_id);
-static DEVICE_ATTR_RO(hw_id);
 
 static struct attribute *intel_vgpu_attrs[] = {
 	&dev_attr_vgpu_id.attr,
-	&dev_attr_hw_id.attr,
 	NULL
 };
 
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index b6b80963f992..b7ec9f3cdc2c 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1581,9 +1581,6 @@ static int i915_context_status(struct seq_file *m, void *unused)
 		struct intel_context *ce;
 
 		seq_puts(m, "HW context ");
-		if (!list_empty(&ctx->hw_id_link))
-			seq_printf(m, "%x [pin %u]", ctx->hw_id,
-				   atomic_read(&ctx->hw_id_pin_count));
 		if (ctx->pid) {
 			struct task_struct *task;
 
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 026ee1f5536d..75671faf263d 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -506,9 +506,9 @@ static void error_print_context(struct drm_i915_error_state_buf *m,
 				const char *header,
 				const struct drm_i915_error_context *ctx)
 {
-	err_printf(m, "%s%s[%d] hw_id %d, prio %d, guilty %d active %d\n",
-		   header, ctx->comm, ctx->pid, ctx->hw_id,
-		   ctx->sched_attr.priority, ctx->guilty, ctx->active);
+	err_printf(m, "%s%s[%d] prio %d, guilty %d active %d\n",
+		   header, ctx->comm, ctx->pid, ctx->sched_attr.priority,
+		   ctx->guilty, ctx->active);
 }
 
 static void error_print_engine(struct drm_i915_error_state_buf *m,
@@ -1310,7 +1310,6 @@ static void record_context(struct drm_i915_error_context *e,
 		rcu_read_unlock();
 	}
 
-	e->hw_id = ctx->hw_id;
 	e->sched_attr = ctx->sched;
 	e->guilty = atomic_read(&ctx->guilty_count);
 	e->active = atomic_read(&ctx->active_count);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index a24c35107d16..1d951eae7e2e 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -117,7 +117,6 @@ struct i915_gpu_state {
 		struct drm_i915_error_context {
 			char comm[TASK_COMM_LEN];
 			pid_t pid;
-			u32 hw_id;
 			int active;
 			int guilty;
 			struct i915_sched_attr sched_attr;
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 58a71efc25db..df02e0fe0701 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1293,19 +1293,14 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
 			i915->perf.oa.specific_ctx_id_mask =
 				(1U << GEN8_CTX_ID_WIDTH) - 1;
 			i915->perf.oa.specific_ctx_id =
-				upper_32_bits(ce->lrc_desc);
-			i915->perf.oa.specific_ctx_id &=
 				i915->perf.oa.specific_ctx_id_mask;
 		}
 		break;
 
 	case 11: {
 		i915->perf.oa.specific_ctx_id_mask =
-			((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32) |
-			((1U << GEN11_ENGINE_INSTANCE_WIDTH) - 1) << (GEN11_ENGINE_INSTANCE_SHIFT - 32) |
-			((1 << GEN11_ENGINE_CLASS_WIDTH) - 1) << (GEN11_ENGINE_CLASS_SHIFT - 32);
-		i915->perf.oa.specific_ctx_id = upper_32_bits(ce->lrc_desc);
-		i915->perf.oa.specific_ctx_id &=
+			((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
+		i915->perf.oa.specific_ctx_id =
 			i915->perf.oa.specific_ctx_id_mask;
 		break;
 	}
@@ -1314,6 +1309,8 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
 		MISSING_CASE(INTEL_GEN(i915));
 	}
 
+	ce->hw_tag = i915->perf.oa.specific_ctx_id_mask;
+
 	DRM_DEBUG_DRIVER("filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
 			 i915->perf.oa.specific_ctx_id,
 			 i915->perf.oa.specific_ctx_id_mask);
@@ -1330,18 +1327,19 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
  */
 static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
 {
-	struct drm_i915_private *dev_priv = stream->dev_priv;
+	struct drm_i915_private *i915 = stream->dev_priv;
 	struct intel_context *ce;
 
-	dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
-	dev_priv->perf.oa.specific_ctx_id_mask = 0;
-
-	ce = fetch_and_zero(&dev_priv->perf.oa.pinned_ctx);
-	if (ce) {
-		mutex_lock(&dev_priv->drm.struct_mutex);
+	ce = fetch_and_zero(&i915->perf.oa.pinned_ctx);
+	if (ce) {
+		mutex_lock(&i915->drm.struct_mutex);
+		ce->hw_tag = 0;
 		intel_context_unpin(ce);
-		mutex_unlock(&dev_priv->drm.struct_mutex);
+		mutex_unlock(&i915->drm.struct_mutex);
 	}
+
+	i915->perf.oa.specific_ctx_id = INVALID_CTX_ID;
+	i915->perf.oa.specific_ctx_id_mask = 0;
 }
 
 static void
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index da18b8d6b80c..fbf6dd0eb5c9 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -665,7 +665,6 @@ TRACE_EVENT(i915_request_queue,
 
 	    TP_STRUCT__entry(
 			     __field(u32, dev)
-			     __field(u32, hw_id)
 			     __field(u64, ctx)
 			     __field(u16, class)
 			     __field(u16, instance)
@@ -675,7 +674,6 @@ TRACE_EVENT(i915_request_queue,
 
 	    TP_fast_assign(
 			   __entry->dev = rq->i915->drm.primary->index;
-			   __entry->hw_id = rq->gem_context->hw_id;
 			   __entry->class = rq->engine->uabi_class;
 			   __entry->instance = rq->engine->instance;
 			   __entry->ctx = rq->fence.context;
@@ -683,10 +681,9 @@ TRACE_EVENT(i915_request_queue,
 			   __entry->flags = flags;
 			   ),
 
-	    TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, flags=0x%x",
+	    TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, flags=0x%x",
 		      __entry->dev, __entry->class, __entry->instance,
-		      __entry->hw_id, __entry->ctx, __entry->seqno,
-		      __entry->flags)
+		      __entry->ctx, __entry->seqno, __entry->flags)
 );
 
 DECLARE_EVENT_CLASS(i915_request,
@@ -695,7 +692,6 @@ DECLARE_EVENT_CLASS(i915_request,
 
 	    TP_STRUCT__entry(
 			     __field(u32, dev)
-			     __field(u32, hw_id)
 			     __field(u64, ctx)
 			     __field(u16, class)
 			     __field(u16, instance)
@@ -704,16 +700,15 @@ DECLARE_EVENT_CLASS(i915_request,
 
 	    TP_fast_assign(
 			   __entry->dev = rq->i915->drm.primary->index;
-			   __entry->hw_id = rq->gem_context->hw_id;
 			   __entry->class = rq->engine->uabi_class;
 			   __entry->instance = rq->engine->instance;
 			   __entry->ctx = rq->fence.context;
 			   __entry->seqno = rq->fence.seqno;
 			   ),
 
-	    TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u",
+	    TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u",
 		      __entry->dev, __entry->class, __entry->instance,
-		      __entry->hw_id, __entry->ctx, __entry->seqno)
+		      __entry->ctx, __entry->seqno)
 );
 
 DEFINE_EVENT(i915_request, i915_request_add,
@@ -738,7 +733,6 @@ TRACE_EVENT(i915_request_in,
 
 	    TP_STRUCT__entry(
 			     __field(u32, dev)
-			     __field(u32, hw_id)
 			     __field(u64, ctx)
 			     __field(u16, class)
 			     __field(u16, instance)
@@ -749,7 +743,6 @@ TRACE_EVENT(i915_request_in,
 
 	    TP_fast_assign(
 			   __entry->dev = rq->i915->drm.primary->index;
-			   __entry->hw_id = rq->gem_context->hw_id;
 			   __entry->class = rq->engine->uabi_class;
 			   __entry->instance = rq->engine->instance;
 			   __entry->ctx = rq->fence.context;
@@ -758,9 +751,9 @@ TRACE_EVENT(i915_request_in,
 			   __entry->port = port;
 			   ),
 
-	    TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, prio=%u, port=%u",
+	    TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, prio=%u, port=%u",
 		      __entry->dev, __entry->class, __entry->instance,
-		      __entry->hw_id, __entry->ctx, __entry->seqno,
+		      __entry->ctx, __entry->seqno,
 		      __entry->prio, __entry->port)
 );
 
@@ -770,7 +763,6 @@ TRACE_EVENT(i915_request_out,
 
 	    TP_STRUCT__entry(
 			     __field(u32, dev)
-			     __field(u32, hw_id)
 			     __field(u64, ctx)
 			     __field(u16, class)
 			     __field(u16, instance)
@@ -780,7 +772,6 @@ TRACE_EVENT(i915_request_out,
 
 	    TP_fast_assign(
 			   __entry->dev = rq->i915->drm.primary->index;
-			   __entry->hw_id = rq->gem_context->hw_id;
 			   __entry->class = rq->engine->uabi_class;
 			   __entry->instance = rq->engine->instance;
 			   __entry->ctx = rq->fence.context;
@@ -788,10 +779,9 @@ TRACE_EVENT(i915_request_out,
 			   __entry->completed = i915_request_completed(rq);
 			   ),
 
-		    TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, completed?=%u",
+		    TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, completed?=%u",
 			      __entry->dev, __entry->class, __entry->instance,
-			      __entry->hw_id, __entry->ctx, __entry->seqno,
-			      __entry->completed)
+			      __entry->ctx, __entry->seqno, __entry->completed)
 );
 
 #else
@@ -829,7 +819,6 @@ TRACE_EVENT(i915_request_wait_begin,
 
 	    TP_STRUCT__entry(
 			     __field(u32, dev)
-			     __field(u32, hw_id)
 			     __field(u64, ctx)
 			     __field(u16, class)
 			     __field(u16, instance)
@@ -845,7 +834,6 @@ TRACE_EVENT(i915_request_wait_begin,
 	     */
 	    TP_fast_assign(
 			   __entry->dev = rq->i915->drm.primary->index;
-			   __entry->hw_id = rq->gem_context->hw_id;
 			   __entry->class = rq->engine->uabi_class;
 			   __entry->instance = rq->engine->instance;
 			   __entry->ctx = rq->fence.context;
@@ -853,9 +841,9 @@ TRACE_EVENT(i915_request_wait_begin,
 			   __entry->flags = flags;
 			   ),
 
-	    TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, flags=0x%x",
+	    TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, flags=0x%x",
 		      __entry->dev, __entry->class, __entry->instance,
-		      __entry->hw_id, __entry->ctx, __entry->seqno,
+		      __entry->ctx, __entry->seqno,
 		      __entry->flags)
 );
 
@@ -958,19 +946,17 @@ DECLARE_EVENT_CLASS(i915_context,
 	TP_STRUCT__entry(
 			__field(u32, dev)
 			__field(struct i915_gem_context *, ctx)
-			__field(u32, hw_id)
 			__field(struct i915_address_space *, vm)
 	),
 
 	TP_fast_assign(
 			__entry->dev = ctx->i915->drm.primary->index;
 			__entry->ctx = ctx;
-			__entry->hw_id = ctx->hw_id;
 			__entry->vm = ctx->vm;
 	),
 
-	TP_printk("dev=%u, ctx=%p, ctx_vm=%p, hw_id=%u",
-		  __entry->dev, __entry->ctx, __entry->vm, __entry->hw_id)
+	TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
+		  __entry->dev, __entry->ctx, __entry->vm)
 )
 
 DEFINE_EVENT(i915_context, i915_context_create,
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index b6449d0a8c17..07ab822b224f 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -475,8 +475,8 @@ static int igt_evict_contexts(void *arg)
 			if (IS_ERR(rq)) {
 				/* When full, fail_if_busy will trigger EBUSY */
 				if (PTR_ERR(rq) != -EBUSY) {
-					pr_err("Unexpected error from request alloc (ctx hw id %u, on %s): %d\n",
-					       ctx->hw_id, engine->name,
+					pr_err("Unexpected error from request alloc (on %s): %d\n",
+					       engine->name,
 					       (int)PTR_ERR(rq));
 					err = PTR_ERR(rq);
 				}
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index fbc79b14823a..85008f4fc78c 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -170,7 +170,7 @@ static int igt_vma_create(void *arg)
 		}
 
 		nc = 0;
-		for_each_prime_number(num_ctx, MAX_CONTEXT_HW_ID) {
+		for_each_prime_number(num_ctx, NUM_HW_TAG) {
 			for (; nc < num_ctx; nc++) {
 				ctx = mock_context(i915, "mock");
 				if (!ctx)
-- 
2.22.0


* [PATCH 22/24] drm/i915: Move context management under GEM
  2019-07-15  8:09 [PATCH 01/24] drm/i915: Hide unshrinkable context objects from the shrinker Chris Wilson
                   ` (19 preceding siblings ...)
  2019-07-15  8:09 ` [PATCH 21/24] drm/i915: Remove logical HW ID Chris Wilson
@ 2019-07-15  8:09 ` Chris Wilson
  2019-07-15  8:09 ` [PATCH 23/24] drm/i915: Make debugfs/per_file_stats scale better Chris Wilson
                   ` (5 subsequent siblings)
  26 siblings, 0 replies; 33+ messages in thread
From: Chris Wilson @ 2019-07-15  8:09 UTC (permalink / raw)
  To: intel-gfx

Keep track of the GEM contexts underneath i915->gem.contexts and assign
them their own lock for the purposes of list management.
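
In outline (a condensed sketch of the locking pattern in the diff
below): the context list is published and unlinked under the new
gem.contexts.mutex instead of struct_mutex.

	/* creation */
	mutex_lock(&i915->gem.contexts.mutex);
	list_add_tail(&ctx->link, &i915->gem.contexts.list);
	mutex_unlock(&i915->gem.contexts.mutex);

	/* free */
	mutex_lock(&ctx->i915->gem.contexts.mutex);
	list_del(&ctx->link);
	mutex_unlock(&ctx->i915->gem.contexts.mutex);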

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_context.c   | 116 +++++++-----------
 drivers/gpu/drm/i915/gem/i915_gem_context.h   |   4 +-
 .../gpu/drm/i915/gem/selftests/mock_context.c |   2 +-
 drivers/gpu/drm/i915/i915_debugfs.c           |  20 +--
 drivers/gpu/drm/i915/i915_drv.c               |   2 -
 drivers/gpu/drm/i915/i915_drv.h               |  24 ++--
 drivers/gpu/drm/i915/i915_gem.c               |   8 +-
 drivers/gpu/drm/i915/i915_perf.c              |   8 +-
 drivers/gpu/drm/i915/i915_sysfs.c             |  38 +++---
 .../gpu/drm/i915/selftests/mock_gem_device.c  |   4 +-
 10 files changed, 92 insertions(+), 134 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 2669e038661e..3ee352d389c8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -217,9 +217,12 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
 
 static void i915_gem_context_free(struct i915_gem_context *ctx)
 {
-	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
 	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
 
+	mutex_lock(&ctx->i915->gem.contexts.mutex);
+	list_del(&ctx->link);
+	mutex_unlock(&ctx->i915->gem.contexts.mutex);
+
 	if (ctx->vm)
 		i915_vm_put(ctx->vm);
 
@@ -232,56 +235,40 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
 	kfree(ctx->name);
 	put_pid(ctx->pid);
 
-	list_del(&ctx->link);
 	mutex_destroy(&ctx->mutex);
 
 	kfree_rcu(ctx, rcu);
 }
 
-static void contexts_free(struct drm_i915_private *i915)
+static void contexts_free_all(struct llist_node *list)
 {
-	struct llist_node *freed = llist_del_all(&i915->contexts.free_list);
 	struct i915_gem_context *ctx, *cn;
 
-	lockdep_assert_held(&i915->drm.struct_mutex);
-
-	llist_for_each_entry_safe(ctx, cn, freed, free_link)
+	llist_for_each_entry_safe(ctx, cn, list, free_link)
 		i915_gem_context_free(ctx);
 }
 
-static void contexts_free_first(struct drm_i915_private *i915)
+static void contexts_flush_free(struct i915_gem_contexts *gc)
 {
-	struct i915_gem_context *ctx;
-	struct llist_node *freed;
-
-	lockdep_assert_held(&i915->drm.struct_mutex);
-
-	freed = llist_del_first(&i915->contexts.free_list);
-	if (!freed)
-		return;
-
-	ctx = container_of(freed, typeof(*ctx), free_link);
-	i915_gem_context_free(ctx);
+	contexts_free_all(llist_del_all(&gc->free_list));
 }
 
 static void contexts_free_worker(struct work_struct *work)
 {
-	struct drm_i915_private *i915 =
-		container_of(work, typeof(*i915), contexts.free_work);
+	struct i915_gem_contexts *gc =
+		container_of(work, typeof(*gc), free_work);
 
-	mutex_lock(&i915->drm.struct_mutex);
-	contexts_free(i915);
-	mutex_unlock(&i915->drm.struct_mutex);
+	contexts_flush_free(gc);
 }
 
 void i915_gem_context_release(struct kref *ref)
 {
 	struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
-	struct drm_i915_private *i915 = ctx->i915;
+	struct i915_gem_contexts *gc = &ctx->i915->gem.contexts;
 
 	trace_i915_context_free(ctx);
-	if (llist_add(&ctx->free_link, &i915->contexts.free_list))
-		queue_work(i915->wq, &i915->contexts.free_work);
+	if (llist_add(&ctx->free_link, &gc->free_list))
+		queue_work(ctx->i915->wq, &gc->free_work);
 }
 
 static void context_close(struct i915_gem_context *ctx)
@@ -339,7 +326,6 @@ __create_context(struct drm_i915_private *i915)
 		return ERR_PTR(-ENOMEM);
 
 	kref_init(&ctx->ref);
-	list_add_tail(&ctx->link, &i915->contexts.list);
 	ctx->i915 = i915;
 	ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
 	mutex_init(&ctx->mutex);
@@ -369,6 +355,10 @@ __create_context(struct drm_i915_private *i915)
 	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
 		ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
 
+	mutex_lock(&i915->gem.contexts.mutex);
+	list_add_tail(&ctx->link, &i915->gem.contexts.list);
+	mutex_unlock(&i915->gem.contexts.mutex);
+
 	return ctx;
 
 err_free:
@@ -399,27 +389,25 @@ static void __assign_ppgtt(struct i915_gem_context *ctx,
 }
 
 static struct i915_gem_context *
-i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
+i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
 {
 	struct i915_gem_context *ctx;
 
-	lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
 	if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &&
-	    !HAS_EXECLISTS(dev_priv))
+	    !HAS_EXECLISTS(i915))
 		return ERR_PTR(-EINVAL);
 
-	/* Reap the most stale context */
-	contexts_free_first(dev_priv);
+	/* Reap the stale contexts */
+	contexts_flush_free(&i915->gem.contexts);
 
-	ctx = __create_context(dev_priv);
+	ctx = __create_context(i915);
 	if (IS_ERR(ctx))
 		return ctx;
 
-	if (HAS_FULL_PPGTT(dev_priv)) {
+	if (HAS_FULL_PPGTT(i915)) {
 		struct i915_ppgtt *ppgtt;
 
-		ppgtt = i915_ppgtt_create(dev_priv);
+		ppgtt = i915_ppgtt_create(i915);
 		if (IS_ERR(ppgtt)) {
 			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
 					 PTR_ERR(ppgtt));
@@ -434,7 +422,7 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
 	if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
 		struct intel_timeline *timeline;
 
-		timeline = intel_timeline_create(&dev_priv->gt, NULL);
+		timeline = intel_timeline_create(&i915->gt, NULL);
 		if (IS_ERR(timeline)) {
 			context_close(ctx);
 			return ERR_CAST(timeline);
@@ -462,18 +450,13 @@ struct i915_gem_context *
 i915_gem_context_create_gvt(struct drm_device *dev)
 {
 	struct i915_gem_context *ctx;
-	int ret;
 
 	if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
 		return ERR_PTR(-ENODEV);
 
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		return ERR_PTR(ret);
-
 	ctx = i915_gem_create_context(to_i915(dev), 0);
 	if (IS_ERR(ctx))
-		goto out;
+		return ctx;
 
 	ctx->file_priv = ERR_PTR(-EBADF);
 	i915_gem_context_set_closed(ctx); /* not user accessible */
@@ -483,8 +466,6 @@ i915_gem_context_create_gvt(struct drm_device *dev)
 		ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
 
 	GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
-out:
-	mutex_unlock(&dev->struct_mutex);
 	return ctx;
 }
 
@@ -519,48 +500,40 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
 	return ctx;
 }
 
-static void init_contexts(struct drm_i915_private *i915)
+static void init_contexts(struct i915_gem_contexts *gc)
 {
-	mutex_init(&i915->contexts.mutex);
-	INIT_LIST_HEAD(&i915->contexts.list);
-
-	/* Using the simple ida interface, the max is limited by sizeof(int) */
-	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
-	BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > INT_MAX);
-	ida_init(&i915->contexts.hw_ida);
-	INIT_LIST_HEAD(&i915->contexts.hw_id_list);
+	mutex_init(&gc->mutex);
+	INIT_LIST_HEAD(&gc->list);
 
-	INIT_WORK(&i915->contexts.free_work, contexts_free_worker);
-	init_llist_head(&i915->contexts.free_list);
+	INIT_WORK(&gc->free_work, contexts_free_worker);
+	init_llist_head(&gc->free_list);
 }
 
-int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
+int i915_gem_init_contexts(struct drm_i915_private *i915)
 {
 	struct i915_gem_context *ctx;
 
 	/* Reassure ourselves we are only called once */
-	GEM_BUG_ON(dev_priv->kernel_context);
+	GEM_BUG_ON(i915->kernel_context);
 
-	init_contexts(dev_priv);
+	init_contexts(&i915->gem.contexts);
 
 	/* lowest priority; idle task */
-	ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN);
+	ctx = i915_gem_context_create_kernel(i915, I915_PRIORITY_MIN);
 	if (IS_ERR(ctx)) {
 		DRM_ERROR("Failed to create default global context\n");
 		return PTR_ERR(ctx);
 	}
-	dev_priv->kernel_context = ctx;
+	i915->kernel_context = ctx;
 
 	DRM_DEBUG_DRIVER("%s context support initialized\n",
-			 DRIVER_CAPS(dev_priv)->has_logical_contexts ?
+			 DRIVER_CAPS(i915)->has_logical_contexts ?
 			 "logical" : "fake");
 	return 0;
 }
 
-void i915_gem_contexts_fini(struct drm_i915_private *i915)
+void i915_gem_fini_contexts(struct drm_i915_private *i915)
 {
-	lockdep_assert_held(&i915->drm.struct_mutex);
-
 	destroy_kernel_context(&i915->kernel_context);
 }
 
@@ -620,9 +593,7 @@ int i915_gem_context_open(struct drm_i915_private *i915,
 	idr_init(&file_priv->context_idr);
 	idr_init_base(&file_priv->vm_idr, 1);
 
-	mutex_lock(&i915->drm.struct_mutex);
 	ctx = i915_gem_create_context(i915, 0);
-	mutex_unlock(&i915->drm.struct_mutex);
 	if (IS_ERR(ctx)) {
 		err = PTR_ERR(ctx);
 		goto err;
@@ -2004,12 +1975,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 		return -EIO;
 	}
 
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		return ret;
-
 	ext_data.ctx = i915_gem_create_context(i915, args->flags);
-	mutex_unlock(&dev->struct_mutex);
 	if (IS_ERR(ext_data.ctx))
 		return PTR_ERR(ext_data.ctx);
 
@@ -2207,7 +2173,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
 int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
 				       void *data, struct drm_file *file)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
 	struct drm_i915_reset_stats *args = data;
 	struct i915_gem_context *ctx;
 	int ret;
@@ -2229,7 +2195,7 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
 	 */
 
 	if (capable(CAP_SYS_ADMIN))
-		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
+		args->reset_count = i915_reset_count(&i915->gpu_error);
 	else
 		args->reset_count = 0;
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index 6fb3ad7e03fc..738ca18f085b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -118,8 +118,8 @@ static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
 }
 
 /* i915_gem_context.c */
-int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv);
-void i915_gem_contexts_fini(struct drm_i915_private *dev_priv);
+int __must_check i915_gem_init_contexts(struct drm_i915_private *i915);
+void i915_gem_fini_contexts(struct drm_i915_private *i915);
 
 int i915_gem_context_open(struct drm_i915_private *i915,
 			  struct drm_file *file);
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
index 0104f16b1327..e36af5a5ce42 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
@@ -65,7 +65,7 @@ void mock_context_close(struct i915_gem_context *ctx)
 
 void mock_init_contexts(struct drm_i915_private *i915)
 {
-	init_contexts(i915);
+	init_contexts(&i915->gem.contexts);
 }
 
 struct i915_gem_context *
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index b7ec9f3cdc2c..bcf617ac2bc0 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -242,8 +242,6 @@ static int per_file_stats(int id, void *ptr, void *data)
 	struct file_stats *stats = data;
 	struct i915_vma *vma;
 
-	lockdep_assert_held(&obj->base.dev->struct_mutex);
-
 	stats->count++;
 	stats->total += obj->base.size;
 	if (!atomic_read(&obj->bind_count))
@@ -251,6 +249,7 @@ static int per_file_stats(int id, void *ptr, void *data)
 	if (obj->base.name || obj->base.dma_buf)
 		stats->shared += obj->base.size;
 
+	spin_lock(&obj->vma.lock);
 	list_for_each_entry(vma, &obj->vma.list, obj_link) {
 		if (!drm_mm_node_allocated(&vma->node))
 			continue;
@@ -270,6 +269,7 @@ static int per_file_stats(int id, void *ptr, void *data)
 		if (i915_vma_is_closed(vma))
 			stats->closed += vma->node.size;
 	}
+	spin_unlock(&obj->vma.lock);
 
 	return 0;
 }
@@ -294,7 +294,8 @@ static void print_context_stats(struct seq_file *m,
 	struct file_stats kstats = {};
 	struct i915_gem_context *ctx;
 
-	list_for_each_entry(ctx, &i915->contexts.list, link) {
+	lockdep_assert_held(&i915->gem.contexts.mutex);
+	list_for_each_entry(ctx, &i915->gem.contexts.list, link) {
 		struct i915_gem_engines_iter it;
 		struct intel_context *ce;
 
@@ -342,12 +343,12 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
 
 	seq_putc(m, '\n');
 
-	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
+	ret = mutex_lock_interruptible(&i915->gem.contexts.mutex);
 	if (ret)
 		return ret;
 
 	print_context_stats(m, i915);
-	mutex_unlock(&i915->drm.struct_mutex);
+	mutex_unlock(&i915->gem.contexts.mutex);
 
 	return 0;
 }
@@ -1567,16 +1568,15 @@ static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
 
 static int i915_context_status(struct seq_file *m, void *unused)
 {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
+	struct drm_i915_private *i915 = node_to_i915(m->private);
 	struct i915_gem_context *ctx;
 	int ret;
 
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	ret = mutex_lock_interruptible(&i915->gem.contexts.mutex);
 	if (ret)
 		return ret;
 
-	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
+	list_for_each_entry(ctx, &i915->gem.contexts.list, link) {
 		struct i915_gem_engines_iter it;
 		struct intel_context *ce;
 
@@ -1613,7 +1613,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
 		seq_putc(m, '\n');
 	}
 
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&i915->gem.contexts.mutex);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 7c209743e478..0244421241fd 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -2056,10 +2056,8 @@ static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 
-	mutex_lock(&dev->struct_mutex);
 	i915_gem_context_close(file);
 	i915_gem_release(dev, file);
-	mutex_unlock(&dev->struct_mutex);
 
 	kfree(file_priv);
 }
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8fbedbbb39dc..fbaaaa7c12ad 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1565,23 +1565,6 @@ struct drm_i915_private {
 	struct mutex av_mutex;
 	int audio_power_refcount;
 
-	struct {
-		struct mutex mutex;
-		struct list_head list;
-		struct llist_head free_list;
-		struct work_struct free_work;
-
-		/* The hw wants to have a stable context identifier for the
-		 * lifetime of the context (for OA, PASID, faults, etc).
-		 * This is limited in execlists to 21 bits.
-		 */
-		struct ida hw_ida;
-#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
-#define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */
-#define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */
-		struct list_head hw_id_list;
-	} contexts;
-
 	u32 fdi_rx_config;
 
 	/* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
@@ -1840,6 +1823,13 @@ struct drm_i915_private {
 		 * off the idle_work.
 		 */
 		struct work_struct idle_work;
+
+		struct i915_gem_contexts {
+			struct mutex mutex;
+			struct list_head list;
+			struct llist_head free_list;
+			struct work_struct free_work;
+		} contexts;
 	} gem;
 
 	/* For i945gm vblank irq vs. C3 workaround */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 46ed90e59a8d..6ca700189a86 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1469,7 +1469,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 		goto err_unlock;
 	}
 
-	ret = i915_gem_contexts_init(dev_priv);
+	ret = i915_gem_init_contexts(dev_priv);
 	if (ret) {
 		GEM_BUG_ON(ret == -EIO);
 		goto err_scratch;
@@ -1557,7 +1557,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 	}
 err_context:
 	if (ret != -EIO)
-		i915_gem_contexts_fini(dev_priv);
+		i915_gem_fini_contexts(dev_priv);
 err_scratch:
 	i915_gem_fini_scratch(dev_priv);
 err_ggtt:
@@ -1624,7 +1624,7 @@ void i915_gem_driver_release(struct drm_i915_private *dev_priv)
 {
 	mutex_lock(&dev_priv->drm.struct_mutex);
 	intel_engines_cleanup(dev_priv);
-	i915_gem_contexts_fini(dev_priv);
+	i915_gem_fini_contexts(dev_priv);
 	i915_gem_fini_scratch(dev_priv);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 
@@ -1638,7 +1638,7 @@ void i915_gem_driver_release(struct drm_i915_private *dev_priv)
 
 	i915_gem_drain_freed_objects(dev_priv);
 
-	WARN_ON(!list_empty(&dev_priv->contexts.list));
+	WARN_ON(!list_empty(&dev_priv->gem.contexts.list));
 }
 
 void i915_gem_init_mmio(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index df02e0fe0701..60c10dce1cb0 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1871,7 +1871,8 @@ static int gen8_configure_all_contexts(struct drm_i915_private *i915,
 	 * context. Contexts idle at the time of reconfiguration are not
 	 * trapped behind the barrier.
 	 */
-	list_for_each_entry(ctx, &i915->contexts.list, link) {
+	mutex_lock(&i915->gem.contexts.mutex);
+	list_for_each_entry(ctx, &i915->gem.contexts.list, link) {
 		struct i915_gem_engines_iter it;
 		struct intel_context *ce;
 
@@ -1902,8 +1903,11 @@ static int gen8_configure_all_contexts(struct drm_i915_private *i915,
 		}
 		i915_gem_context_unlock_engines(ctx);
 		if (err)
-			return err;
+			break;
 	}
+	mutex_unlock(&i915->gem.contexts.mutex);
+	if (err)
+		return err;
 
 	/*
 	 * After updating all other contexts, we need to modify ourselves.
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index ecac1c386109..6836e689f025 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -142,9 +142,9 @@ static const struct attribute_group media_rc6_attr_group = {
 };
 #endif
 
-static int l3_access_valid(struct drm_i915_private *dev_priv, loff_t offset)
+static int l3_access_valid(struct drm_i915_private *i915, loff_t offset)
 {
-	if (!HAS_L3_DPF(dev_priv))
+	if (!HAS_L3_DPF(i915))
 		return -EPERM;
 
 	if (offset % 4 != 0)
@@ -162,31 +162,30 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
 	     loff_t offset, size_t count)
 {
 	struct device *kdev = kobj_to_dev(kobj);
-	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
-	struct drm_device *dev = &dev_priv->drm;
+	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
 	int slice = (int)(uintptr_t)attr->private;
 	int ret;
 
 	count = round_down(count, 4);
 
-	ret = l3_access_valid(dev_priv, offset);
+	ret = l3_access_valid(i915, offset);
 	if (ret)
 		return ret;
 
 	count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);
 
-	ret = i915_mutex_lock_interruptible(dev);
+	ret = mutex_lock_interruptible(&i915->gem.contexts.mutex);
 	if (ret)
 		return ret;
 
-	if (dev_priv->l3_parity.remap_info[slice])
+	if (i915->l3_parity.remap_info[slice])
 		memcpy(buf,
-		       dev_priv->l3_parity.remap_info[slice] + (offset/4),
+		       i915->l3_parity.remap_info[slice] + offset / 4,
 		       count);
 	else
 		memset(buf, 0, count);
 
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&i915->gem.contexts.mutex);
 
 	return count;
 }
@@ -197,22 +196,23 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
 	      loff_t offset, size_t count)
 {
 	struct device *kdev = kobj_to_dev(kobj);
-	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
-	struct drm_device *dev = &dev_priv->drm;
+	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
 	struct i915_gem_context *ctx;
 	int slice = (int)(uintptr_t)attr->private;
 	u32 **remap_info;
 	int ret;
 
-	ret = l3_access_valid(dev_priv, offset);
+	count = round_down(count, 4);
+
+	ret = l3_access_valid(i915, offset);
 	if (ret)
 		return ret;
 
-	ret = i915_mutex_lock_interruptible(dev);
+	ret = mutex_lock_interruptible(&i915->gem.contexts.mutex);
 	if (ret)
 		return ret;
 
-	remap_info = &dev_priv->l3_parity.remap_info[slice];
+	remap_info = &i915->l3_parity.remap_info[slice];
 	if (!*remap_info) {
 		*remap_info = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
 		if (!*remap_info) {
@@ -221,20 +221,20 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
 		}
 	}
 
-	/* TODO: Ideally we really want a GPU reset here to make sure errors
+	/*
+	 * TODO: Ideally we really want a GPU reset here to make sure errors
 	 * aren't propagated. Since I cannot find a stable way to reset the GPU
 	 * at this point it is left as a TODO.
 	*/
 	memcpy(*remap_info + (offset/4), buf, count);
 
 	/* NB: We defer the remapping until we switch to the context */
-	list_for_each_entry(ctx, &dev_priv->contexts.list, link)
-		ctx->remap_slice |= (1<<slice);
+	list_for_each_entry(ctx, &i915->gem.contexts.list, link)
+		ctx->remap_slice |= BIT(slice);
 
 	ret = count;
-
 out:
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&i915->gem.contexts.mutex);
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index fd4cc4809eb8..107049afe201 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -65,7 +65,7 @@ static void mock_device_release(struct drm_device *dev)
 	mutex_lock(&i915->drm.struct_mutex);
 	for_each_engine(engine, i915, id)
 		mock_engine_free(engine);
-	i915_gem_contexts_fini(i915);
+	i915_gem_fini_contexts(i915);
 	mutex_unlock(&i915->drm.struct_mutex);
 
 	intel_timelines_fini(i915);
@@ -220,7 +220,7 @@ struct drm_i915_private *mock_gem_device(void)
 	return i915;
 
 err_context:
-	i915_gem_contexts_fini(i915);
+	i915_gem_fini_contexts(i915);
 err_engine:
 	mock_engine_free(i915->engine[RCS0]);
 err_unlock:
-- 
2.22.0


* [PATCH 23/24] drm/i915: Make debugfs/per_file_stats scale better
  2019-07-15  8:09 [PATCH 01/24] drm/i915: Hide unshrinkable context objects from the shrinker Chris Wilson
                   ` (20 preceding siblings ...)
  2019-07-15  8:09 ` [PATCH 22/24] drm/i915: Move context management under GEM Chris Wilson
@ 2019-07-15  8:09 ` Chris Wilson
  2019-07-15  8:09 ` [PATCH 24/24] drm/i915/gt: Extract GT runtime power management from intel_pm.c Chris Wilson
                   ` (4 subsequent siblings)
  26 siblings, 0 replies; 33+ messages in thread
From: Chris Wilson @ 2019-07-15  8:09 UTC (permalink / raw)
  To: intel-gfx

Currently we walk the entire obj->vma list for each object within a file
to find the vma matching this context. Since we know we are searching
for a particular vma bound to a user context, we can use the rbtree to
search for it rather than repeatedly walking everything.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
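The lookup pattern being adopted, condensed into a single illustrative helper (lookup_vma is not a real function; its body follows the per_file_stats() hunk below and assumes obj->vma.tree is ordered by the same key that i915_vma_compare() uses):

static struct i915_vma *
lookup_vma(struct drm_i915_gem_object *obj, struct i915_address_space *vm)
{
	struct rb_node *p = obj->vma.tree.rb_node;

	while (p) {
		struct i915_vma *vma = rb_entry(p, struct i915_vma, obj_node);
		long cmp = i915_vma_compare(vma, vm, NULL);

		if (cmp == 0)
			return vma;	/* the one vma bound to this vm */
		p = cmp < 0 ? p->rb_right : p->rb_left;
	}

	return NULL;
}
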
 drivers/gpu/drm/i915/i915_debugfs.c | 57 ++++++++++++++++++-----------
 1 file changed, 36 insertions(+), 21 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index bcf617ac2bc0..8126dffe7800 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -231,7 +231,6 @@ struct file_stats {
 	struct i915_address_space *vm;
 	unsigned long count;
 	u64 total, unbound;
-	u64 global, shared;
 	u64 active, inactive;
 	u64 closed;
 };
@@ -246,28 +245,46 @@ static int per_file_stats(int id, void *ptr, void *data)
 	stats->total += obj->base.size;
 	if (!atomic_read(&obj->bind_count))
 		stats->unbound += obj->base.size;
-	if (obj->base.name || obj->base.dma_buf)
-		stats->shared += obj->base.size;
 
 	spin_lock(&obj->vma.lock);
-	list_for_each_entry(vma, &obj->vma.list, obj_link) {
-		if (!drm_mm_node_allocated(&vma->node))
-			continue;
-
-		if (i915_vma_is_ggtt(vma)) {
-			stats->global += vma->node.size;
-		} else {
-			if (vma->vm != stats->vm)
+	if (!stats->vm) {
+		for_each_ggtt_vma(vma, obj) {
+			if (!drm_mm_node_allocated(&vma->node))
 				continue;
-		}
 
-		if (i915_vma_is_active(vma))
-			stats->active += vma->node.size;
-		else
-			stats->inactive += vma->node.size;
+			if (i915_vma_is_active(vma))
+				stats->active += vma->node.size;
+			else
+				stats->inactive += vma->node.size;
 
-		if (i915_vma_is_closed(vma))
-			stats->closed += vma->node.size;
+			if (i915_vma_is_closed(vma))
+				stats->closed += vma->node.size;
+		}
+	} else {
+		struct rb_node *p = obj->vma.tree.rb_node;
+
+		while (p) {
+			long cmp;
+
+			vma = rb_entry(p, typeof(*vma), obj_node);
+			cmp = i915_vma_compare(vma, stats->vm, NULL);
+			if (cmp == 0) {
+				if (drm_mm_node_allocated(&vma->node)) {
+					if (i915_vma_is_active(vma))
+						stats->active += vma->node.size;
+					else
+						stats->inactive += vma->node.size;
+
+					if (i915_vma_is_closed(vma))
+						stats->closed += vma->node.size;
+				}
+				break;
+			}
+			if (cmp < 0)
+				p = p->rb_right;
+			else
+				p = p->rb_left;
+		}
 	}
 	spin_unlock(&obj->vma.lock);
 
@@ -276,14 +293,12 @@ static int per_file_stats(int id, void *ptr, void *data)
 
 #define print_file_stats(m, name, stats) do { \
 	if (stats.count) \
-		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound, %llu closed)\n", \
+		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu unbound, %llu closed)\n", \
 			   name, \
 			   stats.count, \
 			   stats.total, \
 			   stats.active, \
 			   stats.inactive, \
-			   stats.global, \
-			   stats.shared, \
 			   stats.unbound, \
 			   stats.closed); \
 } while (0)
-- 
2.22.0


* [PATCH 24/24] drm/i915/gt: Extract GT runtime power management from intel_pm.c
  2019-07-15  8:09 [PATCH 01/24] drm/i915: Hide unshrinkable context objects from the shrinker Chris Wilson
                   ` (21 preceding siblings ...)
  2019-07-15  8:09 ` [PATCH 23/24] drm/i915: Make debugfs/per_file_stats scale better Chris Wilson
@ 2019-07-15  8:09 ` Chris Wilson
  2019-07-15 10:11 ` ✗ Fi.CI.CHECKPATCH: warning for series starting with [01/24] drm/i915: Hide unshrinkable context objects from the shrinker Patchwork
                   ` (3 subsequent siblings)
  26 siblings, 0 replies; 33+ messages in thread
From: Chris Wilson @ 2019-07-15  8:09 UTC (permalink / raw)
  To: intel-gfx

The GT power wells and power management are GT-centric and should be
managed from within the intel_gt. Carve the rps worker, rps setup,
rc6 setup and llc setup out of intel_pm.c (and its surrounds) and give
them a new home in gt/.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
---
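The ordering of the new power-up path, condensed from intel_gt_pm_enable() in the diff below (sketch_gt_pm_enable is an illustrative name, the per-engine kernel-context reload is elided, and the comments are editorial):

static void sketch_gt_pm_enable(struct intel_gt *gt)
{
	intel_gt_pm_get(gt);		/* keep the GT awake while programming */
	intel_rps_enable(&gt->rps);	/* frequency scaling */
	intel_llc_enable(&gt->llc);	/* ring/IA frequency table */
	/* ... per-engine kernel context reload elided ... */
	intel_rc6_enable(&gt->rc6);	/* sleep states last */
	intel_gt_pm_put(gt);
}
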
 drivers/gpu/drm/i915/Makefile                 |    7 +-
 drivers/gpu/drm/i915/display/intel_display.c  |    8 +-
 drivers/gpu/drm/i915/gem/i915_gem_pm.c        |    1 +
 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c   |    8 +-
 drivers/gpu/drm/i915/gt/intel_engine_pm.c     |    1 +
 drivers/gpu/drm/i915/gt/intel_gt.c            |   50 +-
 drivers/gpu/drm/i915/gt/intel_gt.h            |    9 +-
 drivers/gpu/drm/i915/gt/intel_gt_irq.c        |  456 +++
 drivers/gpu/drm/i915/gt/intel_gt_irq.h        |   44 +
 drivers/gpu/drm/i915/gt/intel_gt_pm.c         |   49 +-
 drivers/gpu/drm/i915/gt/intel_gt_pm.h         |    2 +
 drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c     |  109 +
 drivers/gpu/drm/i915/gt/intel_gt_pm_irq.h     |   22 +
 drivers/gpu/drm/i915/gt/intel_gt_types.h      |   11 +-
 drivers/gpu/drm/i915/gt/intel_llc.c           |  120 +
 drivers/gpu/drm/i915/gt/intel_llc.h           |   15 +
 drivers/gpu/drm/i915/gt/intel_llc_types.h     |   13 +
 drivers/gpu/drm/i915/gt/intel_rc6.c           |  665 ++++
 drivers/gpu/drm/i915/gt/intel_rc6.h           |   24 +
 drivers/gpu/drm/i915/gt/intel_rc6_types.h     |   26 +
 drivers/gpu/drm/i915/gt/intel_ringbuffer.c    |   14 +-
 drivers/gpu/drm/i915/gt/intel_rps.c           | 1870 ++++++++++
 drivers/gpu/drm/i915/gt/intel_rps.h           |   37 +
 drivers/gpu/drm/i915/gt/intel_rps_types.h     |   93 +
 drivers/gpu/drm/i915/gt/uc/intel_guc.c        |   87 +
 .../gpu/drm/i915/gt/uc/intel_guc_submission.c |    4 +-
 drivers/gpu/drm/i915/i915_debugfs.c           |  109 +-
 drivers/gpu/drm/i915/i915_drv.c               |   22 +-
 drivers/gpu/drm/i915/i915_drv.h               |  111 -
 drivers/gpu/drm/i915/i915_gem.c               |   38 +-
 drivers/gpu/drm/i915/i915_irq.c               | 1111 +-----
 drivers/gpu/drm/i915/i915_irq.h               |   64 +-
 drivers/gpu/drm/i915/i915_pmu.c               |   22 +-
 drivers/gpu/drm/i915/i915_request.c           |    7 +-
 drivers/gpu/drm/i915/i915_sysfs.c             |   78 +-
 drivers/gpu/drm/i915/intel_pm.c               | 3014 ++---------------
 drivers/gpu/drm/i915/intel_pm.h               |   25 -
 37 files changed, 4104 insertions(+), 4242 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/gt/intel_gt_irq.c
 create mode 100644 drivers/gpu/drm/i915/gt/intel_gt_irq.h
 create mode 100644 drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c
 create mode 100644 drivers/gpu/drm/i915/gt/intel_gt_pm_irq.h
 create mode 100644 drivers/gpu/drm/i915/gt/intel_llc.c
 create mode 100644 drivers/gpu/drm/i915/gt/intel_llc.h
 create mode 100644 drivers/gpu/drm/i915/gt/intel_llc_types.h
 create mode 100644 drivers/gpu/drm/i915/gt/intel_rc6.c
 create mode 100644 drivers/gpu/drm/i915/gt/intel_rc6.h
 create mode 100644 drivers/gpu/drm/i915/gt/intel_rc6_types.h
 create mode 100644 drivers/gpu/drm/i915/gt/intel_rps.c
 create mode 100644 drivers/gpu/drm/i915/gt/intel_rps.h
 create mode 100644 drivers/gpu/drm/i915/gt/intel_rps_types.h

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index cdc2d462db8b..44b217079d2e 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -77,13 +77,18 @@ gt-y += \
 	gt/intel_engine_pm.o \
 	gt/intel_engine_user.o \
 	gt/intel_gt.o \
+	gt/intel_gt_irq.o \
 	gt/intel_gt_pm.o \
+	gt/intel_gt_pm_irq.o \
 	gt/intel_hangcheck.o \
+	gt/intel_llc.o \
 	gt/intel_lrc.o \
+	gt/intel_mocs.o \
+	gt/intel_rc6.o \
 	gt/intel_renderstate.o \
 	gt/intel_reset.o \
 	gt/intel_ringbuffer.o \
-	gt/intel_mocs.o \
+	gt/intel_rps.o \
 	gt/intel_sseu.o \
 	gt/intel_timeline.o \
 	gt/intel_workarounds.o
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 219ae5a60b31..99e9d9b9e334 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -56,6 +56,8 @@
 #include "display/intel_tv.h"
 #include "display/intel_vdsc.h"
 
+#include "gt/intel_rps.h"
+
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include "intel_acpi.h"
@@ -14250,7 +14252,7 @@ static int do_rps_boost(struct wait_queue_entry *_wait,
 	 * vblank without our intervention, so leave RPS alone.
 	 */
 	if (!i915_request_started(rq))
-		gen6_rps_boost(rq);
+		intel_rps_boost(rq);
 	i915_request_put(rq);
 
 	drm_crtc_vblank_put(wait->crtc);
@@ -14449,7 +14451,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 	 * maximum clocks following a vblank miss (see do_rps_boost()).
 	 */
 	if (!intel_state->rps_interactive) {
-		intel_rps_mark_interactive(dev_priv, true);
+		intel_rps_mark_interactive(&dev_priv->gt.rps, true);
 		intel_state->rps_interactive = true;
 	}
 
@@ -14474,7 +14476,7 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
 	struct drm_i915_private *dev_priv = to_i915(plane->dev);
 
 	if (intel_state->rps_interactive) {
-		intel_rps_mark_interactive(dev_priv, false);
+		intel_rps_mark_interactive(&dev_priv->gt.rps, false);
 		intel_state->rps_interactive = false;
 	}
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index cb7a85ac69d0..514bbeb82ca8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -135,6 +135,7 @@ static bool switch_to_kernel_context_sync(struct intel_gt *gt)
 
 bool i915_gem_load_power_context(struct drm_i915_private *i915)
 {
+	intel_gt_pm_enable(&i915->gt);
 	return switch_to_kernel_context_sync(&i915->gt);
 }
 
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index c092bdf5f0bf..eea868f1658d 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -34,9 +34,9 @@ static void irq_enable(struct intel_engine_cs *engine)
 		return;
 
 	/* Caller disables interrupts */
-	spin_lock(&engine->i915->irq_lock);
+	spin_lock(&engine->gt->irq_lock);
 	engine->irq_enable(engine);
-	spin_unlock(&engine->i915->irq_lock);
+	spin_unlock(&engine->gt->irq_lock);
 }
 
 static void irq_disable(struct intel_engine_cs *engine)
@@ -45,9 +45,9 @@ static void irq_disable(struct intel_engine_cs *engine)
 		return;
 
 	/* Caller disables interrupts */
-	spin_lock(&engine->i915->irq_lock);
+	spin_lock(&engine->gt->irq_lock);
 	engine->irq_disable(engine);
-	spin_unlock(&engine->i915->irq_lock);
+	spin_unlock(&engine->gt->irq_lock);
 }
 
 static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 1218b7d53b88..19ec0e30c3fd 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -11,6 +11,7 @@
 #include "intel_engine_pm.h"
 #include "intel_gt.h"
 #include "intel_gt_pm.h"
+#include "intel_rc6.h"
 
 static int __engine_unpark(struct intel_wakeref *wf)
 {
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 90ed79285ff8..03feb8b43773 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -7,6 +7,8 @@
 
 #include "intel_gt.h"
 #include "intel_gt_pm.h"
+#include "intel_rc6.h"
+#include "intel_rps.h"
 #include "intel_uncore.h"
 
 void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
@@ -14,6 +16,8 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
 	gt->i915 = i915;
 	gt->uncore = &i915->uncore;
 
+	spin_lock_init(&gt->irq_lock);
+
 	INIT_LIST_HEAD(&gt->closed_vma);
 
 	spin_lock_init(&gt->closed_lock);
@@ -26,6 +30,9 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
 void intel_gt_init_hw(struct drm_i915_private *i915)
 {
 	i915->gt.ggtt = &i915->ggtt;
+
+	/* BIOS often leaves RC6 enabled, but disable it for hw init */
+	intel_gt_pm_disable(&i915->gt);
 }
 
 static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
@@ -205,7 +212,12 @@ void intel_gt_chipset_flush(struct intel_gt *gt)
 		intel_gtt_chipset_flush();
 }
 
-int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
+void intel_gt_driver_register(struct intel_gt *gt)
+{
+	intel_rps_driver_register(&gt->rps);
+}
+
+static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
 {
 	struct drm_i915_private *i915 = gt->i915;
 	struct drm_i915_gem_object *obj;
@@ -238,12 +250,46 @@ int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
 	return ret;
 }
 
-void intel_gt_fini_scratch(struct intel_gt *gt)
+static void intel_gt_fini_scratch(struct intel_gt *gt)
 {
 	i915_vma_unpin_and_release(&gt->scratch, 0);
 }
 
+int intel_gt_init(struct intel_gt *gt)
+{
+	int err;
+
+	err = intel_gt_init_scratch(gt, IS_GEN(gt->i915, 2) ? SZ_256K : SZ_4K);
+	if (err)
+		return err;
+
+	intel_rc6_init(&gt->rc6);
+	intel_rps_init(&gt->rps);
+
+	return 0;
+}
+
+void intel_gt_driver_remove(struct intel_gt *gt)
+{
+	GEM_BUG_ON(gt->awake);
+	intel_gt_pm_disable(gt);
+}
+
+void intel_gt_driver_unregister(struct intel_gt *gt)
+{
+	intel_rps_driver_unregister(&gt->rps);
+}
+
 void intel_gt_cleanup_early(struct intel_gt *gt)
 {
 	intel_gt_fini_reset(gt);
 }
+
+void intel_gt_driver_release(struct intel_gt *gt)
+{
+	/* Paranoia: make sure we have disabled everything before we exit. */
+	intel_gt_pm_disable(gt);
+
+	intel_rc6_fini(&gt->rc6);
+	intel_gt_fini_scratch(gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index 640bb0531f5b..40d33df7a177 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -29,6 +29,12 @@ static inline struct intel_gt *huc_to_gt(struct intel_huc *huc)
 
 void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915);
 void intel_gt_init_hw(struct drm_i915_private *i915);
+int intel_gt_init(struct intel_gt *gt);
+void intel_gt_driver_register(struct intel_gt *gt);
+
+void intel_gt_driver_unregister(struct intel_gt *gt);
+void intel_gt_driver_remove(struct intel_gt *gt);
+void intel_gt_driver_release(struct intel_gt *gt);
 
 void intel_gt_cleanup_early(struct intel_gt *gt);
 
@@ -41,9 +47,6 @@ void intel_gt_chipset_flush(struct intel_gt *gt);
 
 void intel_gt_init_hangcheck(struct intel_gt *gt);
 
-int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size);
-void intel_gt_fini_scratch(struct intel_gt *gt);
-
 static inline u32 intel_gt_scratch_offset(const struct intel_gt *gt,
 					  enum intel_gt_scratch_field field)
 {
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
new file mode 100644
index 000000000000..c87a1777bd88
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -0,0 +1,456 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/sched/clock.h>
+
+#include "i915_drv.h"
+#include "intel_gt.h"
+#include "intel_gt_irq.h"
+#include "intel_uncore.h"
+#include "intel_rps.h"
+
+static void guc_irq_handler(struct intel_guc *guc, u16 iir)
+{
+	if (iir & GUC_INTR_GUC2HOST)
+		intel_guc_to_host_event_handler(guc);
+}
+
+static void
+cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
+{
+	bool tasklet = false;
+
+	if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
+		tasklet = true;
+
+	if (iir & GT_RENDER_USER_INTERRUPT) {
+		intel_engine_breadcrumbs_irq(engine);
+		tasklet |= intel_engine_needs_breadcrumb_tasklet(engine);
+	}
+
+	if (tasklet)
+		tasklet_hi_schedule(&engine->execlists.tasklet);
+}
+
+static u32
+gen11_gt_engine_identity(struct intel_gt *gt,
+			 const unsigned int bank, const unsigned int bit)
+{
+	void __iomem * const regs = gt->uncore->regs;
+	u32 timeout_ts;
+	u32 ident;
+
+	lockdep_assert_held(&gt->irq_lock);
+
+	raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));
+
+	/*
+	 * NB: Specs do not specify how long to spin wait,
+	 * so we do ~100us as an educated guess.
+	 */
+	timeout_ts = (local_clock() >> 10) + 100;
+	do {
+		ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
+	} while (!(ident & GEN11_INTR_DATA_VALID) &&
+		 !time_after32(local_clock() >> 10, timeout_ts));
+
+	if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
+		DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
+			  bank, bit, ident);
+		return 0;
+	}
+
+	raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
+		      GEN11_INTR_DATA_VALID);
+
+	return ident;
+}
+
+static void
+gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
+			const u16 iir)
+{
+	if (instance == OTHER_GUC_INSTANCE)
+		return guc_irq_handler(&gt->uc.guc, iir);
+
+	if (instance == OTHER_GTPM_INSTANCE)
+		return gen11_rps_irq_handler(&gt->rps, iir);
+
+	WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
+		  instance, iir);
+}
+
+static void
+gen11_engine_irq_handler(struct intel_gt *gt, const u8 class,
+			 const u8 instance, const u16 iir)
+{
+	struct intel_engine_cs *engine;
+
+	if (instance <= MAX_ENGINE_INSTANCE)
+		engine = gt->engine_class[class][instance];
+	else
+		engine = NULL;
+
+	if (likely(engine))
+		return cs_irq_handler(engine, iir);
+
+	WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
+		  class, instance);
+}
+
+static void
+gen11_gt_identity_handler(struct intel_gt *gt, const u32 identity)
+{
+	const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
+	const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
+	const u16 intr = GEN11_INTR_ENGINE_INTR(identity);
+
+	if (unlikely(!intr))
+		return;
+
+	if (class <= COPY_ENGINE_CLASS)
+		return gen11_engine_irq_handler(gt, class, instance, intr);
+
+	if (class == OTHER_CLASS)
+		return gen11_other_irq_handler(gt, instance, intr);
+
+	WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
+		  class, instance, intr);
+}
+
+static void
+gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank)
+{
+	void __iomem * const regs = gt->uncore->regs;
+	unsigned long intr_dw;
+	unsigned int bit;
+
+	lockdep_assert_held(&gt->irq_lock);
+
+	intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
+
+	for_each_set_bit(bit, &intr_dw, 32) {
+		const u32 ident = gen11_gt_engine_identity(gt, bank, bit);
+
+		gen11_gt_identity_handler(gt, ident);
+	}
+
+	/* Clear must be after shared has been served for engine */
+	raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
+}
+
+void gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl)
+{
+	unsigned int bank;
+
+	spin_lock(&gt->irq_lock);
+
+	for (bank = 0; bank < 2; bank++) {
+		if (master_ctl & GEN11_GT_DW_IRQ(bank))
+			gen11_gt_bank_handler(gt, bank);
+	}
+
+	spin_unlock(&gt->irq_lock);
+}
+
+bool gen11_gt_reset_one_iir(struct intel_gt *gt,
+			    const unsigned int bank,
+			    const unsigned int bit)
+{
+	void __iomem * const regs = gt->uncore->regs;
+	u32 dw;
+
+	lockdep_assert_held(&gt->irq_lock);
+
+	dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
+	if (dw & BIT(bit)) {
+		/*
+		 * According to the BSpec, DW_IIR bits cannot be cleared without
+		 * first servicing the Selector & Shared IIR registers.
+		 */
+		gen11_gt_engine_identity(gt, bank, bit);
+
+		/*
+		 * We locked GT INT DW by reading it. If we want to (try
+		 * to) recover from this successfully, we need to clear
+		 * our bit, otherwise we are locking the register for
+		 * everybody.
+		 */
+		raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));
+
+		return true;
+	}
+
+	return false;
+}
+
+void gen11_gt_irq_reset(struct intel_gt *gt)
+{
+	struct intel_uncore *uncore = gt->uncore;
+
+	/* Disable RCS, BCS, VCS and VECS class engines. */
+	intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0);
+	intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE,	  0);
+
+	/* Restore masks irqs on RCS, BCS, VCS and VECS engines. */
+	intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK,	~0);
+	intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK,	~0);
+	intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK,	~0);
+	intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK,	~0);
+	intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK,	~0);
+
+	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
+	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);
+	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
+	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK,  ~0);
+}
+
+void gen11_gt_irq_postinstall(struct intel_gt *gt)
+{
+	const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT;
+	struct intel_uncore *uncore = gt->uncore;
+	const u32 dmask = irqs << 16 | irqs;
+	const u32 smask = irqs << 16;
+
+	BUILD_BUG_ON(irqs & 0xffff0000);
+
+	/* Enable RCS, BCS, VCS and VECS class interrupts. */
+	intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask);
+	intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask);
+
+	/* Unmask irqs on RCS, BCS, VCS and VECS engines. */
+	intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~smask);
+	intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~smask);
+	intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~dmask);
+	intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~dmask);
+	intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~dmask);
+
+	/*
+	 * RPS interrupts will get enabled/disabled on demand when RPS itself
+	 * is enabled/disabled.
+	 */
+	gt->pm_ier = 0x0;
+	gt->pm_imr = ~gt->pm_ier;
+	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
+	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);
+
+	/* Same thing for GuC interrupts */
+	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
+	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK,  ~0);
+}
+
+void gen5_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
+{
+	if (gt_iir & GT_RENDER_USER_INTERRUPT)
+		intel_engine_breadcrumbs_irq(gt->engine_class[RENDER_CLASS][0]);
+	if (gt_iir & ILK_BSD_USER_INTERRUPT)
+		intel_engine_breadcrumbs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0]);
+}
+
+static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir)
+{
+	if (!HAS_L3_DPF(gt->i915))
+		return;
+
+	spin_lock(&gt->irq_lock);
+	gen5_gt_disable_irq(gt, GT_PARITY_ERROR(gt->i915));
+	spin_unlock(&gt->irq_lock);
+
+	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
+		gt->i915->l3_parity.which_slice |= 1 << 1;
+
+	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
+		gt->i915->l3_parity.which_slice |= 1 << 0;
+
+	schedule_work(&gt->i915->l3_parity.error_work);
+}
+
+void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
+{
+	if (gt_iir & GT_RENDER_USER_INTERRUPT)
+		intel_engine_breadcrumbs_irq(gt->engine_class[RENDER_CLASS][0]);
+	if (gt_iir & GT_BSD_USER_INTERRUPT)
+		intel_engine_breadcrumbs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0]);
+	if (gt_iir & GT_BLT_USER_INTERRUPT)
+		intel_engine_breadcrumbs_irq(gt->engine_class[COPY_ENGINE_CLASS][0]);
+
+	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
+		      GT_BSD_CS_ERROR_INTERRUPT |
+		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
+		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
+
+	if (gt_iir & GT_PARITY_ERROR(gt->i915))
+		gen7_parity_error_irq_handler(gt, gt_iir);
+}
+
+void gen8_gt_irq_ack(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4])
+{
+	void __iomem * const regs = gt->uncore->regs;
+
+	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
+		gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0));
+		if (likely(gt_iir[0]))
+			raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
+	}
+
+	if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
+		gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
+		if (likely(gt_iir[1]))
+			raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
+	}
+
+	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
+		gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2));
+		if (likely(gt_iir[2]))
+			raw_reg_write(regs, GEN8_GT_IIR(2), gt_iir[2]);
+	}
+
+	if (master_ctl & GEN8_GT_VECS_IRQ) {
+		gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3));
+		if (likely(gt_iir[3]))
+			raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]);
+	}
+}
+
+void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4])
+{
+	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
+		cs_irq_handler(gt->engine_class[RENDER_CLASS][0],
+			       gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
+		cs_irq_handler(gt->engine_class[COPY_ENGINE_CLASS][0],
+			       gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
+	}
+
+	if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
+		cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][0],
+			       gt_iir[1] >> GEN8_VCS0_IRQ_SHIFT);
+		cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][1],
+			       gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT);
+	}
+
+	if (master_ctl & GEN8_GT_VECS_IRQ) {
+		cs_irq_handler(gt->engine_class[VIDEO_ENHANCEMENT_CLASS][0],
+			       gt_iir[3] >> GEN8_VECS_IRQ_SHIFT);
+	}
+
+	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
+		gen6_rps_irq_handler(&gt->rps, gt_iir[2]);
+		guc_irq_handler(&gt->uc.guc, gt_iir[2] >> 16);
+	}
+}
+
+void gen8_gt_irq_reset(struct intel_gt *gt)
+{
+	struct intel_uncore *uncore = gt->uncore;
+
+	GEN8_IRQ_RESET_NDX(uncore, GT, 0);
+	GEN8_IRQ_RESET_NDX(uncore, GT, 1);
+	GEN8_IRQ_RESET_NDX(uncore, GT, 2);
+	GEN8_IRQ_RESET_NDX(uncore, GT, 3);
+}
+
+void gen8_gt_irq_postinstall(struct intel_gt *gt)
+{
+	struct intel_uncore *uncore = gt->uncore;
+
+	/* These are interrupts we'll toggle with the ring mask register */
+	u32 gt_interrupts[] = {
+		(GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
+		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
+		 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
+		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT),
+
+		(GT_RENDER_USER_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
+		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
+		 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
+		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT),
+
+		0,
+
+		(GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
+		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)
+	};
+
+	gt->pm_ier = 0x0;
+	gt->pm_imr = ~gt->pm_ier;
+	GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
+	GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
+	/*
+	 * RPS interrupts will get enabled/disabled on demand when RPS itself
+	 * is enabled/disabled. Same will be the case for GuC interrupts.
+	 */
+	GEN8_IRQ_INIT_NDX(uncore, GT, 2, gt->pm_imr, gt->pm_ier);
+	GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
+}
+
+static void gen5_gt_update_irq(struct intel_gt *gt,
+			       u32 interrupt_mask,
+			       u32 enabled_irq_mask)
+{
+	lockdep_assert_held(&gt->irq_lock);
+
+	GEM_BUG_ON(enabled_irq_mask & ~interrupt_mask);
+
+	gt->gt_imr &= ~interrupt_mask;
+	gt->gt_imr |= (~enabled_irq_mask & interrupt_mask);
+	intel_uncore_write(gt->uncore, GTIMR, gt->gt_imr);
+}
+
+void gen5_gt_enable_irq(struct intel_gt *gt, u32 mask)
+{
+	gen5_gt_update_irq(gt, mask, mask);
+	intel_uncore_posting_read_fw(gt->uncore, GTIMR);
+}
+
+void gen5_gt_disable_irq(struct intel_gt *gt, u32 mask)
+{
+	gen5_gt_update_irq(gt, mask, 0);
+}
+
+void gen5_gt_irq_reset(struct intel_gt *gt)
+{
+	struct intel_uncore *uncore = gt->uncore;
+
+	GEN3_IRQ_RESET(uncore, GT);
+	if (INTEL_GEN(gt->i915) >= 6)
+		GEN3_IRQ_RESET(uncore, GEN6_PM);
+}
+
+void gen5_gt_irq_postinstall(struct intel_gt *gt)
+{
+	struct intel_uncore *uncore = gt->uncore;
+	u32 pm_irqs = 0;
+	u32 gt_irqs = 0;
+
+	gt->gt_imr = ~0;
+	if (HAS_L3_DPF(gt->i915)) {
+		/* L3 parity interrupt is always unmasked. */
+		gt->gt_imr = ~GT_PARITY_ERROR(gt->i915);
+		gt_irqs |= GT_PARITY_ERROR(gt->i915);
+	}
+
+	gt_irqs |= GT_RENDER_USER_INTERRUPT;
+	if (IS_GEN(gt->i915, 5))
+		gt_irqs |= ILK_BSD_USER_INTERRUPT;
+	else
+		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
+
+	GEN3_IRQ_INIT(uncore, GT, gt->gt_imr, gt_irqs);
+
+	if (INTEL_GEN(gt->i915) >= 6) {
+		/*
+		 * RPS interrupts will get enabled/disabled on demand when RPS
+		 * itself is enabled/disabled.
+		 */
+		if (HAS_ENGINE(gt->i915, VECS0)) {
+			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
+			gt->pm_ier |= PM_VEBOX_USER_INTERRUPT;
+		}
+
+		gt->pm_imr = 0xffffffff;
+		GEN3_IRQ_INIT(uncore, GEN6_PM, gt->pm_imr, pm_irqs);
+	}
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.h b/drivers/gpu/drm/i915/gt/intel_gt_irq.h
new file mode 100644
index 000000000000..8f37593712c9
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.h
@@ -0,0 +1,44 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_GT_IRQ_H
+#define INTEL_GT_IRQ_H
+
+#include <linux/types.h>
+
+struct intel_gt;
+
+#define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
+		      GEN8_GT_BCS_IRQ | \
+		      GEN8_GT_VCS0_IRQ | \
+		      GEN8_GT_VCS1_IRQ | \
+		      GEN8_GT_VECS_IRQ | \
+		      GEN8_GT_PM_IRQ | \
+		      GEN8_GT_GUC_IRQ)
+
+void gen11_gt_irq_reset(struct intel_gt *gt);
+void gen11_gt_irq_postinstall(struct intel_gt *gt);
+void gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl);
+
+bool gen11_gt_reset_one_iir(struct intel_gt *gt,
+			    const unsigned int bank,
+			    const unsigned int bit);
+
+void gen5_gt_irq_handler(struct intel_gt *gt, u32 gt_iir);
+
+void gen5_gt_irq_postinstall(struct intel_gt *gt);
+void gen5_gt_irq_reset(struct intel_gt *gt);
+void gen5_gt_disable_irq(struct intel_gt *gt, u32 mask);
+void gen5_gt_enable_irq(struct intel_gt *gt, u32 mask);
+
+void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir);
+
+void gen8_gt_irq_ack(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4]);
+void gen8_gt_irq_reset(struct intel_gt *gt);
+void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4]);
+void gen8_gt_irq_postinstall(struct intel_gt *gt);
+
+#endif /* INTEL_GT_IRQ_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 61ed912341f1..50078f75c199 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -9,7 +9,10 @@
 #include "intel_engine_pm.h"
 #include "intel_gt.h"
 #include "intel_gt_pm.h"
+#include "intel_llc.h"
 #include "intel_pm.h"
+#include "intel_rc6.h"
+#include "intel_rps.h"
 #include "intel_wakeref.h"
 
 static void pm_notify(struct drm_i915_private *i915, int state)
@@ -38,12 +41,7 @@ static int intel_gt_unpark(struct intel_wakeref *wf)
 	gt->awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
 	GEM_BUG_ON(!gt->awake);
 
-	intel_enable_gt_powersave(i915);
-
-	i915_update_gfx_val(i915);
-	if (INTEL_GEN(i915) >= 6)
-		gen6_rps_busy(i915);
-
+	intel_rps_unpark(&gt->rps);
 	i915_pmu_gt_unparked(i915);
 
 	intel_gt_queue_hangcheck(gt);
@@ -62,17 +60,16 @@ void intel_gt_pm_get(struct intel_gt *gt)
 
 static int intel_gt_park(struct intel_wakeref *wf)
 {
-	struct drm_i915_private *i915 =
-		container_of(wf, typeof(*i915), gt.wakeref);
-	intel_wakeref_t wakeref = fetch_and_zero(&i915->gt.awake);
+	struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
+	struct drm_i915_private *i915 = gt->i915;
+	intel_wakeref_t wakeref = fetch_and_zero(&gt->awake);
 
 	GEM_TRACE("\n");
 
 	pm_notify(i915, INTEL_GT_PARK);
 
 	i915_pmu_gt_parked(i915);
-	if (INTEL_GEN(i915) >= 6)
-		gen6_rps_idle(i915);
+	intel_rps_park(&gt->rps);
 
 	GEM_BUG_ON(!wakeref);
 	intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);
@@ -125,6 +122,36 @@ void intel_gt_sanitize(struct intel_gt *gt, bool force)
 		__intel_engine_reset(engine, false);
 }
 
+void intel_gt_pm_enable(struct intel_gt *gt)
+{
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+
+	/* Powersaving is controlled by the host when inside a VM */
+	if (intel_vgpu_active(gt->i915))
+		return;
+
+	intel_gt_pm_get(gt);
+	intel_rps_enable(&gt->rps);
+	intel_llc_enable(&gt->llc);
+
+	for_each_engine(engine, gt->i915, id) {
+		intel_engine_pm_get(engine);
+		engine->serial++; /* force kernel context reload */
+		intel_engine_pm_put(engine);
+	}
+
+	intel_rc6_enable(&gt->rc6);
+	intel_gt_pm_put(gt);
+}
+
+void intel_gt_pm_disable(struct intel_gt *gt)
+{
+	intel_rc6_disable(&gt->rc6);
+	intel_llc_disable(&gt->llc);
+	intel_rps_disable(&gt->rps);
+}
+
 int intel_gt_resume(struct intel_gt *gt)
 {
 	struct intel_engine_cs *engine;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index ba960e1fc209..a0edd53503a4 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -20,6 +20,8 @@ void intel_gt_pm_get(struct intel_gt *gt);
 void intel_gt_pm_put(struct intel_gt *gt);
 
 void intel_gt_pm_init_early(struct intel_gt *gt);
+void intel_gt_pm_enable(struct intel_gt *gt);
+void intel_gt_pm_disable(struct intel_gt *gt);
 
 void intel_gt_sanitize(struct intel_gt *gt, bool force);
 int intel_gt_resume(struct intel_gt *gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c
new file mode 100644
index 000000000000..babe866126d7
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c
@@ -0,0 +1,109 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_gt.h"
+#include "intel_gt_irq.h"
+#include "intel_gt_pm_irq.h"
+
+static void write_pm_imr(struct intel_gt *gt)
+{
+	struct drm_i915_private *i915 = gt->i915;
+	struct intel_uncore *uncore = gt->uncore;
+	u32 mask = gt->pm_imr;
+	i915_reg_t reg;
+
+	if (INTEL_GEN(i915) >= 11) {
+		reg = GEN11_GPM_WGBOXPERF_INTR_MASK;
+		mask <<= 16; /* pm is in upper half */
+	} else if (INTEL_GEN(i915) >= 8) {
+		reg = GEN8_GT_IMR(2);
+	} else {
+		reg = GEN6_PMIMR;
+	}
+
+	intel_uncore_write(uncore, reg, mask);
+}
+
+static void gen6_gt_pm_update_irq(struct intel_gt *gt,
+				  u32 interrupt_mask,
+				  u32 enabled_irq_mask)
+{
+	u32 new_val;
+
+	WARN_ON(enabled_irq_mask & ~interrupt_mask);
+
+	lockdep_assert_held(&gt->irq_lock);
+
+	new_val = gt->pm_imr;
+	new_val &= ~interrupt_mask;
+	new_val |= ~enabled_irq_mask & interrupt_mask;
+
+	if (new_val != gt->pm_imr) {
+		gt->pm_imr = new_val;
+		write_pm_imr(gt);
+	}
+}
+
+void gen6_gt_pm_unmask_irq(struct intel_gt *gt, u32 mask)
+{
+	gen6_gt_pm_update_irq(gt, mask, mask);
+}
+
+void gen6_gt_pm_mask_irq(struct intel_gt *gt, u32 mask)
+{
+	gen6_gt_pm_update_irq(gt, mask, 0);
+}
+
+void gen6_gt_pm_reset_iir(struct intel_gt *gt, u32 reset_mask)
+{
+	struct intel_uncore *uncore = gt->uncore;
+	i915_reg_t reg = INTEL_GEN(gt->i915) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
+
+	lockdep_assert_held(&gt->irq_lock);
+
+	intel_uncore_write(uncore, reg, reset_mask);
+	intel_uncore_write(uncore, reg, reset_mask);
+	intel_uncore_posting_read(uncore, reg);
+}
+
+static void write_pm_ier(struct intel_gt *gt)
+{
+	struct drm_i915_private *i915 = gt->i915;
+	struct intel_uncore *uncore = gt->uncore;
+	u32 mask = gt->pm_ier;
+	i915_reg_t reg;
+
+	if (INTEL_GEN(i915) >= 11) {
+		reg = GEN11_GPM_WGBOXPERF_INTR_ENABLE;
+		mask <<= 16; /* pm is in upper half */
+	} else if (INTEL_GEN(i915) >= 8) {
+		reg = GEN8_GT_IER(2);
+	} else {
+		reg = GEN6_PMIER;
+	}
+
+	intel_uncore_write(uncore, reg, mask);
+}
+
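+/*
+ * Note the ordering below: on enable the IER is updated before the IMR is
+ * unmasked, and on disable the IMR is masked before the IER is cleared, so
+ * a PM interrupt is never left unmasked while it is not enabled.
+ */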
+void gen6_gt_pm_enable_irq(struct intel_gt *gt, u32 enable_mask)
+{
+	lockdep_assert_held(&gt->irq_lock);
+
+	gt->pm_ier |= enable_mask;
+	write_pm_ier(gt);
+	gen6_gt_pm_unmask_irq(gt, enable_mask);
+}
+
+void gen6_gt_pm_disable_irq(struct intel_gt *gt, u32 disable_mask)
+{
+	lockdep_assert_held(&gt->irq_lock);
+
+	gt->pm_ier &= ~disable_mask;
+	gen6_gt_pm_mask_irq(gt, disable_mask);
+	write_pm_ier(gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.h b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.h
new file mode 100644
index 000000000000..b29816a04809
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.h
@@ -0,0 +1,22 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_GT_PM_IRQ_H
+#define INTEL_GT_PM_IRQ_H
+
+#include <linux/types.h>
+
+struct intel_gt;
+
+void gen6_gt_pm_unmask_irq(struct intel_gt *gt, u32 mask);
+void gen6_gt_pm_mask_irq(struct intel_gt *gt, u32 mask);
+
+void gen6_gt_pm_enable_irq(struct intel_gt *gt, u32 enable_mask);
+void gen6_gt_pm_disable_irq(struct intel_gt *gt, u32 disable_mask);
+
+void gen6_gt_pm_reset_iir(struct intel_gt *gt, u32 reset_mask);
+
+#endif /* INTEL_GT_PM_IRQ_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index bfdabf49b1e7..d43752f5145d 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -16,7 +16,10 @@
 #include "uc/intel_uc.h"
 
 #include "i915_vma.h"
+#include "intel_llc_types.h"
 #include "intel_reset_types.h"
+#include "intel_rc6_types.h"
+#include "intel_rps_types.h"
 #include "intel_wakeref.h"
 
 struct drm_i915_private;
@@ -65,14 +68,20 @@ struct intel_gt {
 	 */
 	intel_wakeref_t awake;
 
+	struct intel_llc llc;
+	struct intel_rc6 rc6;
+	struct intel_rps rps;
+
 	struct blocking_notifier_head pm_notifications;
 
 	ktime_t last_init_time;
 
 	struct i915_vma *scratch;
 
-	u32 pm_imr;
+	spinlock_t irq_lock;
+	u32 gt_imr;
 	u32 pm_ier;
+	u32 pm_imr;
 
 	u32 pm_guc_events;
 
diff --git a/drivers/gpu/drm/i915/gt/intel_llc.c b/drivers/gpu/drm/i915/gt/intel_llc.c
new file mode 100644
index 000000000000..a345e60466ba
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_llc.c
@@ -0,0 +1,120 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/cpufreq.h>
+
+#include "intel_drv.h"
+#include "intel_gt.h"
+#include "intel_llc.h"
+#include "intel_sideband.h"
+
+static struct intel_gt *llc_to_gt(struct intel_llc *llc)
+{
+	return container_of(llc, struct intel_gt, llc);
+}
+
+static unsigned int cpu_max_MHz(void)
+{
+	struct cpufreq_policy *policy;
+	unsigned int max_khz;
+
+	policy = cpufreq_cpu_get(0);
+	if (policy) {
+		max_khz = policy->cpuinfo.max_freq;
+		cpufreq_cpu_put(policy);
+	} else {
+		/*
+		 * Default to measured freq if none found, PCU will ensure we
+		 * don't go over
+		 */
+		max_khz = tsc_khz;
+	}
+
+	return max_khz / 1000;
+}
+
+static void gen6_update_ring_freq(struct intel_llc *llc)
+{
+	struct drm_i915_private *i915 = llc_to_gt(llc)->i915;
+	struct intel_rps *rps = &llc_to_gt(llc)->rps;
+	unsigned int max_ia_freq, min_ring_freq;
+	unsigned int max_gpu_freq, min_gpu_freq;
+	const int min_freq = 15;
+	const int scale = 180;
+	unsigned int gpu_freq;
+
+	if (rps->max_freq <= rps->min_freq)
+		return;
+
+	max_ia_freq = cpu_max_MHz();
+
+	min_ring_freq = intel_uncore_read(llc_to_gt(llc)->uncore, DCLK) & 0xf;
+	/* convert DDR frequency from units of 266.6MHz to bandwidth */
+	min_ring_freq = mult_frac(min_ring_freq, 8, 3);
+
+	min_gpu_freq = rps->min_freq;
+	max_gpu_freq = rps->max_freq;
+	if (IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) {
+		/* Convert GT frequency to 50 HZ units */
+		min_gpu_freq /= GEN9_FREQ_SCALER;
+		max_gpu_freq /= GEN9_FREQ_SCALER;
+	}
+
+	/*
+	 * For each potential GPU frequency, load a ring frequency we'd like
+	 * to use for memory access.  We do this by specifying the IA frequency
+	 * the PCU should use as a reference to determine the ring frequency.
+	 */
+	for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
+		const int diff = max_gpu_freq - gpu_freq;
+		unsigned int ia_freq = 0, ring_freq = 0;
+
+		if (IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) {
+			/*
+			 * ring_freq = 2 * GT. ring_freq is in 100MHz units
+			 * No floor required for ring frequency on SKL.
+			 */
+			ring_freq = gpu_freq;
+		} else if (INTEL_GEN(i915) >= 8) {
+			/* max(2 * GT, DDR). NB: GT is 50MHz units */
+			ring_freq = max(min_ring_freq, gpu_freq);
+		} else if (IS_HASWELL(i915)) {
+			ring_freq = mult_frac(gpu_freq, 5, 4);
+			ring_freq = max(min_ring_freq, ring_freq);
+			/* leave ia_freq as the default, chosen by cpufreq */
+		} else {
+			/* On older processors, there is no separate ring
+			 * clock domain, so in order to boost the bandwidth
+			 * of the ring, we need to upclock the CPU (ia_freq).
+			 *
+			 * For GPU frequencies less than 750MHz,
+			 * just use the lowest ring freq.
+			 */
+			if (gpu_freq < min_freq)
+				ia_freq = 800;
+			else
+				ia_freq = max_ia_freq - diff * scale / 2;
+			ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
+		}
+
+		sandybridge_pcode_write(i915,
+					GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
+					ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
+					ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
+					gpu_freq);
+	}
+}
+
+void intel_llc_enable(struct intel_llc *llc)
+{
+	if (HAS_LLC(llc_to_gt(llc)->i915))
+		gen6_update_ring_freq(llc);
+}
+
+void intel_llc_disable(struct intel_llc *llc)
+{
+	/* Currently there is no HW configuration to be done to disable. */
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_llc.h b/drivers/gpu/drm/i915/gt/intel_llc.h
new file mode 100644
index 000000000000..ef09a890d2b7
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_llc.h
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_LLC_H
+#define INTEL_LLC_H
+
+struct intel_llc;
+
+void intel_llc_enable(struct intel_llc *llc);
+void intel_llc_disable(struct intel_llc *llc);
+
+#endif /* INTEL_LLC_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_llc_types.h b/drivers/gpu/drm/i915/gt/intel_llc_types.h
new file mode 100644
index 000000000000..ecad4687b930
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_llc_types.h
@@ -0,0 +1,13 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_LLC_TYPES_H
+#define INTEL_LLC_TYPES_H
+
+struct intel_llc {
+};
+
+#endif /* INTEL_LLC_TYPES_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
new file mode 100644
index 000000000000..2fc09aaab6cd
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -0,0 +1,665 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/pm_runtime.h>
+
+#include "i915_drv.h"
+#include "intel_gt.h"
+#include "intel_rc6.h"
+#include "intel_sideband.h"
+
+/**
+ * DOC: RC6
+ *
+ * RC6 is a special power stage which allows the GPU to enter a very
+ * low-voltage mode when idle, dropping as low as 0V while in this stage.
+ * The stage is entered automatically when the GPU is idle and RC6 support
+ * is enabled, and the GPU wakes up automatically as soon as a new workload
+ * arrives.
+ *
+ * There are different RC6 modes available on Intel GPUs, which differ in
+ * the latency required to enter and leave RC6 and in the voltage consumed
+ * by the GPU in the different states.
+ *
+ * The combination of the following flags defines which states the GPU is
+ * allowed to enter: RC6 is the normal RC6 state, RC6p is the deep RC6, and
+ * RC6pp is the deepest RC6. Their support by hardware varies according to
+ * the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
+ * the one which brings the most power savings; deeper states save more
+ * power, but require higher latency to switch to and wake up.
+ */
+
+static struct intel_gt *rc6_to_gt(struct intel_rc6 *rc6)
+{
+	return container_of(rc6, struct intel_gt, rc6);
+}
+
+static struct intel_uncore *rc6_to_uncore(struct intel_rc6 *rc)
+{
+	return rc6_to_gt(rc)->uncore;
+}
+
+static struct drm_i915_private *rc6_to_i915(struct intel_rc6 *rc)
+{
+	return rc6_to_gt(rc)->i915;
+}
+
+static void gen11_rc6_enable(struct intel_rc6 *rc6)
+{
+	struct intel_uncore *uncore = rc6_to_uncore(rc6);
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+
+	/* 1a: Software RC state - RC0 */
+	intel_uncore_write_fw(uncore, GEN6_RC_STATE, 0);
+
+	/* 2a: Disable RC states. */
+	intel_uncore_write_fw(uncore, GEN6_RC_CONTROL, 0);
+
+	/* 2b: Program RC6 thresholds.*/
+	intel_uncore_write_fw(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
+	intel_uncore_write_fw(uncore, GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
+
+	intel_uncore_write_fw(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+	intel_uncore_write_fw(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+	for_each_engine(engine, rc6_to_gt(rc6)->i915, id)
+		intel_uncore_write_fw(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
+
+	intel_uncore_write_fw(uncore, GUC_MAX_IDLE_COUNT, 0xA);
+
+	intel_uncore_write_fw(uncore, GEN6_RC_SLEEP, 0);
+
+	intel_uncore_write_fw(uncore, GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+
+	/*
+	 * 2c: Program Coarse Power Gating Policies.
+	 *
+	 * Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we
+	 * use instead is a more conservative estimate for the maximum time
+	 * it takes us to service a CS interrupt and submit a new ELSP - that
+	 * is the time which the GPU is idle waiting for the CPU to select the
+	 * next request to execute. If the idle hysteresis is less than that
+	 * interrupt service latency, the hardware will automatically gate
+	 * the power well and we will then incur the wake up cost on top of
+	 * the service latency. A similar guide from plane_state is that we
+	 * do not want the enable hysteresis to be less than the wakeup latency.
+	 *
+	 * igt/gem_exec_nop/sequential provides a rough estimate for the
+	 * service latency, and puts it around 10us for Broadwell (and other
+	 * big core) and around 40us for Broxton (and other low power cores).
+	 * [Note that for legacy ringbuffer submission, this is less than 1us!]
+	 * However, the wakeup latency on Broxton is closer to 100us. To be
+	 * conservative, we have to factor in a context switch on top (due
+	 * to ksoftirqd).
+	 */
+	intel_uncore_write_fw(uncore, GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250);
+	intel_uncore_write_fw(uncore, GEN9_RENDER_PG_IDLE_HYSTERESIS, 250);
+
+	/* 3a: Enable RC6 */
+	intel_uncore_write_fw(uncore, GEN6_RC_CONTROL,
+			      GEN6_RC_CTL_HW_ENABLE |
+			      GEN6_RC_CTL_RC6_ENABLE |
+			      GEN6_RC_CTL_EI_MODE(1));
+
+	intel_uncore_write_fw(uncore, GEN9_PG_ENABLE,
+			      GEN9_RENDER_PG_ENABLE |
+			      GEN9_MEDIA_PG_ENABLE |
+			      GEN11_MEDIA_SAMPLER_PG_ENABLE);
+}
+
+static void gen9_rc6_enable(struct intel_rc6 *rc6)
+{
+	struct intel_uncore *uncore = rc6_to_uncore(rc6);
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	u32 rc6_mode;
+
+	/* 1a: Software RC state - RC0 */
+	intel_uncore_write_fw(uncore, GEN6_RC_STATE, 0);
+
+	/* 2a: Disable RC states. */
+	intel_uncore_write_fw(uncore, GEN6_RC_CONTROL, 0);
+
+	/* 2b: Program RC6 thresholds.*/
+	if (INTEL_GEN(rc6_to_i915(rc6)) >= 10) {
+		intel_uncore_write_fw(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
+		intel_uncore_write_fw(uncore, GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
+	} else if (IS_SKYLAKE(rc6_to_i915(rc6))) {
+		/*
+		 * WaRsDoubleRc6WrlWithCoarsePowerGating:skl Doubling WRL only
+		 * when CPG is enabled
+		 */
+		intel_uncore_write_fw(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
+	} else {
+		intel_uncore_write_fw(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
+	}
+
+	intel_uncore_write_fw(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+	intel_uncore_write_fw(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+	for_each_engine(engine, rc6_to_gt(rc6)->i915, id)
+		intel_uncore_write_fw(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
+
+	intel_uncore_write_fw(uncore, GUC_MAX_IDLE_COUNT, 0xA);
+
+	intel_uncore_write_fw(uncore, GEN6_RC_SLEEP, 0);
+
+	/*
+	 * 2c: Program Coarse Power Gating Policies.
+	 *
+	 * Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we
+	 * use instead is a more conservative estimate for the maximum time
+	 * it takes us to service a CS interrupt and submit a new ELSP - that
+	 * is the time which the GPU is idle waiting for the CPU to select the
+	 * next request to execute. If the idle hysteresis is less than that
+	 * interrupt service latency, the hardware will automatically gate
+	 * the power well and we will then incur the wake up cost on top of
+	 * the service latency. A similar guide from plane_state is that we
+	 * do not want the enable hysteresis to be less than the wakeup latency.
+	 *
+	 * igt/gem_exec_nop/sequential provides a rough estimate for the
+	 * service latency, and puts it around 10us for Broadwell (and other
+	 * big core) and around 40us for Broxton (and other low power cores).
+	 * [Note that for legacy ringbuffer submission, this is less than 1us!]
+	 * However, the wakeup latency on Broxton is closer to 100us. To be
+	 * conservative, we have to factor in a context switch on top (due
+	 * to ksoftirqd).
+	 */
+	intel_uncore_write_fw(uncore, GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250);
+	intel_uncore_write_fw(uncore, GEN9_RENDER_PG_IDLE_HYSTERESIS, 250);
+
+	/* 3a: Enable RC6 */
+	intel_uncore_write_fw(uncore, GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
+
+	/* WaRsUseTimeoutMode:cnl (pre-prod) */
+	if (IS_CNL_REVID(rc6_to_i915(rc6), CNL_REVID_A0, CNL_REVID_C0))
+		rc6_mode = GEN7_RC_CTL_TO_MODE;
+	else
+		rc6_mode = GEN6_RC_CTL_EI_MODE(1);
+
+	intel_uncore_write_fw(uncore, GEN6_RC_CONTROL,
+			      GEN6_RC_CTL_HW_ENABLE |
+			      GEN6_RC_CTL_RC6_ENABLE |
+			      rc6_mode);
+
+	intel_uncore_write_fw(uncore, GEN9_PG_ENABLE,
+			      GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE);
+}
+
+static void gen8_rc6_enable(struct intel_rc6 *rc6)
+{
+	struct intel_uncore *uncore = rc6_to_uncore(rc6);
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+
+	/* 1a: Software RC state - RC0 */
+	intel_uncore_write_fw(uncore, GEN6_RC_STATE, 0);
+
+	/* 2a: Disable RC states. */
+	intel_uncore_write_fw(uncore, GEN6_RC_CONTROL, 0);
+
+	/* 2b: Program RC6 thresholds.*/
+	intel_uncore_write_fw(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
+	intel_uncore_write_fw(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+	intel_uncore_write_fw(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+	for_each_engine(engine, rc6_to_gt(rc6)->i915, id)
+		intel_uncore_write_fw(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
+	intel_uncore_write_fw(uncore, GEN6_RC_SLEEP, 0);
+	intel_uncore_write_fw(uncore, GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
+
+	/* 3: Enable RC6 */
+	intel_uncore_write_fw(uncore, GEN6_RC_CONTROL,
+			      GEN6_RC_CTL_HW_ENABLE |
+			      GEN7_RC_CTL_TO_MODE |
+			      GEN6_RC_CTL_RC6_ENABLE);
+}
+
+static void gen6_rc6_enable(struct intel_rc6 *rc6)
+{
+	struct intel_uncore *uncore = rc6_to_uncore(rc6);
+	struct drm_i915_private *i915 = rc6_to_i915(rc6);
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	u32 rc6vids, rc6_mask;
+	int ret;
+
+	intel_uncore_write_fw(uncore, GEN6_RC_STATE, 0);
+
+	/* disable the counters and set deterministic thresholds */
+	intel_uncore_write_fw(uncore, GEN6_RC_CONTROL, 0);
+
+	intel_uncore_write_fw(uncore, GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
+	intel_uncore_write_fw(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
+	intel_uncore_write_fw(uncore, GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
+	intel_uncore_write_fw(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000);
+	intel_uncore_write_fw(uncore, GEN6_RC_IDLE_HYSTERSIS, 25);
+
+	for_each_engine(engine, i915, id)
+		intel_uncore_write_fw(uncore, RING_MAX_IDLE(engine->mmio_base),
+				      10);
+
+	intel_uncore_write_fw(uncore, GEN6_RC_SLEEP, 0);
+	intel_uncore_write_fw(uncore, GEN6_RC1e_THRESHOLD, 1000);
+	if (IS_IVYBRIDGE(i915))
+		intel_uncore_write_fw(uncore, GEN6_RC6_THRESHOLD, 125000);
+	else
+		intel_uncore_write_fw(uncore, GEN6_RC6_THRESHOLD, 50000);
+	intel_uncore_write_fw(uncore, GEN6_RC6p_THRESHOLD, 150000);
+	intel_uncore_write_fw(uncore, GEN6_RC6pp_THRESHOLD, 64000); /* unused */
+
+	/* We don't use those on Haswell */
+	rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
+	if (HAS_RC6p(i915))
+		rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
+	if (HAS_RC6pp(i915))
+		rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
+	intel_uncore_write_fw(uncore, GEN6_RC_CONTROL,
+			      rc6_mask |
+			      GEN6_RC_CTL_EI_MODE(1) |
+			      GEN6_RC_CTL_HW_ENABLE);
+
+	rc6vids = 0;
+	ret = sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS,
+				     &rc6vids, NULL);
+	if (IS_GEN(i915, 6) && ret) {
+		DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
+	} else if (IS_GEN(i915, 6) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
+		DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
+				 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
+		rc6vids &= 0xffff00;
+		rc6vids |= GEN6_ENCODE_RC6_VID(450);
+		ret = sandybridge_pcode_write(i915, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
+		if (ret)
+			DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
+	}
+}
+
+/* Check that the pcbr address is not empty. */
+static void chv_rc6_init(struct intel_rc6 *rc6)
+{
+	struct intel_uncore *uncore = rc6_to_uncore(rc6);
+	resource_size_t pctx_paddr, paddr;
+	resource_size_t pctx_size = 32 * SZ_1K;
+	u32 pcbr;
+
+	pcbr = intel_uncore_read(uncore, VLV_PCBR);
+	if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
+		DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
+		paddr = rc6_to_i915(rc6)->dsm.end + 1 - pctx_size;
+		GEM_BUG_ON(paddr > U32_MAX);
+
+		pctx_paddr = (paddr & ~4095);
+		intel_uncore_write(uncore, VLV_PCBR, pctx_paddr);
+	}
+}
+
+static void vlv_rc6_init(struct intel_rc6 *rc6)
+{
+	struct drm_i915_private *i915 = rc6_to_i915(rc6);
+	struct intel_uncore *uncore = rc6_to_uncore(rc6);
+	struct drm_i915_gem_object *pctx;
+	resource_size_t pctx_paddr;
+	resource_size_t pctx_size = 24 * SZ_1K;
+	u32 pcbr;
+
+	pcbr = intel_uncore_read(uncore, VLV_PCBR);
+	if (pcbr) {
+		/* BIOS set it up already, grab the pre-alloc'd space */
+		resource_size_t pcbr_offset;
+
+		pcbr_offset = (pcbr & ~4095) - i915->dsm.start;
+		pctx = i915_gem_object_create_stolen_for_preallocated(i915,
+								      pcbr_offset,
+								      I915_GTT_OFFSET_NONE,
+								      pctx_size);
+		goto out;
+	}
+
+	DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
+
+	/*
+	 * From the Gunit register HAS:
+	 * The Gfx driver is expected to program this register and ensure
+	 * proper allocation within Gfx stolen memory.  For example, this
+	 * register should be programmed such that the PCBR range does not
+	 * overlap with other ranges, such as the frame buffer, protected
+	 * memory, or any other relevant ranges.
+	 */
+	pctx = i915_gem_object_create_stolen(i915, pctx_size);
+	if (!pctx) {
+		DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
+		goto out;
+	}
+
+	GEM_BUG_ON(range_overflows_t(u64,
+				     i915->dsm.start,
+				     pctx->stolen->start,
+				     U32_MAX));
+	pctx_paddr = i915->dsm.start + pctx->stolen->start;
+	intel_uncore_write(uncore, VLV_PCBR, pctx_paddr);
+
+out:
+	rc6->pctx = pctx;
+}
+
+static void chv_rc6_enable(struct intel_rc6 *rc6)
+{
+	struct intel_uncore *uncore = rc6_to_uncore(rc6);
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+
+	/*  Disable RC states. */
+	intel_uncore_write_fw(uncore, GEN6_RC_CONTROL, 0);
+
+	/* 2a: Program RC6 thresholds.*/
+	intel_uncore_write_fw(uncore,
+			      GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
+	intel_uncore_write_fw(uncore, /* 12500 * 1280ns */
+			      GEN6_RC_EVALUATION_INTERVAL, 125000);
+	intel_uncore_write_fw(uncore, /* 25 * 1280ns */
+			      GEN6_RC_IDLE_HYSTERSIS, 25);
+
+	for_each_engine(engine, rc6_to_gt(rc6)->i915, id)
+		intel_uncore_write_fw(uncore, RING_MAX_IDLE(engine->mmio_base),
+				      10);
+	intel_uncore_write_fw(uncore, GEN6_RC_SLEEP, 0);
+
+	/* TO threshold set to 500 us ( 0x186 * 1.28 us) */
+	intel_uncore_write_fw(uncore, GEN6_RC6_THRESHOLD, 0x186);
+
+	/* Allows RC6 residency counter to work */
+	intel_uncore_write_fw(uncore, VLV_COUNTER_CONTROL,
+			      _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
+						 VLV_MEDIA_RC6_COUNT_EN |
+						 VLV_RENDER_RC6_COUNT_EN));
+
+	/* 3: Enable RC6 */
+	intel_uncore_write_fw(uncore, GEN6_RC_CONTROL, GEN7_RC_CTL_TO_MODE);
+}
+
+static void vlv_rc6_enable(struct intel_rc6 *rc6)
+{
+	struct intel_uncore *uncore = rc6_to_uncore(rc6);
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+
+	/*  Disable RC states. */
+	intel_uncore_write_fw(uncore, GEN6_RC_CONTROL, 0);
+
+	intel_uncore_write_fw(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
+	intel_uncore_write_fw(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000);
+	intel_uncore_write_fw(uncore, GEN6_RC_IDLE_HYSTERSIS, 25);
+
+	for_each_engine(engine, rc6_to_gt(rc6)->i915, id)
+		intel_uncore_write_fw(uncore,
+				      RING_MAX_IDLE(engine->mmio_base), 10);
+
+	intel_uncore_write_fw(uncore, GEN6_RC6_THRESHOLD, 0x557);
+
+	/* Allows RC6 residency counter to work */
+	intel_uncore_write_fw(uncore, VLV_COUNTER_CONTROL,
+			      _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
+						 VLV_MEDIA_RC0_COUNT_EN |
+						 VLV_RENDER_RC0_COUNT_EN |
+						 VLV_MEDIA_RC6_COUNT_EN |
+						 VLV_RENDER_RC6_COUNT_EN));
+
+	intel_uncore_write_fw(uncore, GEN6_RC_CONTROL,
+			      GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL);
+}
+
+void intel_rc6_init(struct intel_rc6 *rc6)
+{
+	struct drm_i915_private *i915 = rc6_to_i915(rc6);
+
+	if (IS_CHERRYVIEW(i915))
+		chv_rc6_init(rc6);
+	else if (IS_VALLEYVIEW(i915))
+		vlv_rc6_init(rc6);
+}
+
+static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6)
+{
+	struct intel_uncore *uncore = rc6_to_uncore(rc6);
+	struct drm_i915_private *i915 = rc6_to_i915(rc6);
+	u32 rc6_ctx_base, rc_ctl, rc_sw_target;
+	bool enable_rc6 = true;
+
+	rc_ctl = intel_uncore_read(uncore, GEN6_RC_CONTROL);
+	rc_sw_target = intel_uncore_read(uncore, GEN6_RC_STATE);
+	rc_sw_target &= RC_SW_TARGET_STATE_MASK;
+	rc_sw_target >>= RC_SW_TARGET_STATE_SHIFT;
+	DRM_DEBUG_DRIVER("BIOS enabled RC states: "
+			 "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
+			 onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
+			 onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
+			 rc_sw_target);
+
+	if (!(intel_uncore_read(uncore, RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
+		DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n");
+		enable_rc6 = false;
+	}
+
+	/*
+	 * The exact context size is not known for BXT, so assume a page size
+	 * for this check.
+	 */
+	rc6_ctx_base =
+		intel_uncore_read(uncore, RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
+	if (!(rc6_ctx_base >= i915->dsm_reserved.start &&
+	      rc6_ctx_base + PAGE_SIZE < i915->dsm_reserved.end)) {
+		DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
+		enable_rc6 = false;
+	}
+
+	if (!((intel_uncore_read(uncore, PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1 &&
+	      (intel_uncore_read(uncore, PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1 &&
+	      (intel_uncore_read(uncore, PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1 &&
+	      (intel_uncore_read(uncore, PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1)) {
+		DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n");
+		enable_rc6 = false;
+	}
+
+	if (!intel_uncore_read(uncore, GEN8_PUSHBUS_CONTROL) ||
+	    !intel_uncore_read(uncore, GEN8_PUSHBUS_ENABLE) ||
+	    !intel_uncore_read(uncore, GEN8_PUSHBUS_SHIFT)) {
+		DRM_DEBUG_DRIVER("Pushbus not setup properly.\n");
+		enable_rc6 = false;
+	}
+
+	if (!intel_uncore_read(uncore, GEN6_GFXPAUSE)) {
+		DRM_DEBUG_DRIVER("GFX pause not setup properly.\n");
+		enable_rc6 = false;
+	}
+
+	if (!intel_uncore_read(uncore, GEN8_MISC_CTRL0)) {
+		DRM_DEBUG_DRIVER("GPM control not setup properly.\n");
+		enable_rc6 = false;
+	}
+
+	return enable_rc6;
+}
+
+void intel_rc6_enable(struct intel_rc6 *rc6)
+{
+	struct drm_i915_private *i915 = rc6_to_i915(rc6);
+	struct intel_uncore *uncore = rc6_to_uncore(rc6);
+
+	if (IS_GEN9_LP(i915) && !bxt_check_bios_rc6_setup(rc6)) {
+		DRM_INFO("RC6 and powersaving disabled by BIOS\n");
+		pm_runtime_get(&i915->drm.pdev->dev);
+		return;
+	}
+
+	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+
+	if (IS_CHERRYVIEW(i915))
+		chv_rc6_enable(rc6);
+	else if (IS_VALLEYVIEW(i915))
+		vlv_rc6_enable(rc6);
+	else if (INTEL_GEN(i915) >= 11)
+		gen11_rc6_enable(rc6);
+	else if (INTEL_GEN(i915) >= 9)
+		gen9_rc6_enable(rc6);
+	else if (IS_BROADWELL(i915))
+		gen8_rc6_enable(rc6);
+	else if (INTEL_GEN(i915) >= 6)
+		gen6_rc6_enable(rc6);
+
+	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
+
+	rc6->enabled = true;
+}
+
+void intel_rc6_disable(struct intel_rc6 *rc6)
+{
+	struct drm_i915_private *i915 = rc6_to_i915(rc6);
+	struct intel_uncore *uncore = rc6_to_uncore(rc6);
+
+	if (INTEL_GEN(i915) < 6)
+		return;
+
+	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+	intel_uncore_write_fw(uncore, GEN6_RC_CONTROL, 0);
+	if (INTEL_GEN(i915) >= 9)
+		intel_uncore_write_fw(uncore, GEN9_PG_ENABLE, 0);
+	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
+
+	rc6->enabled = false;
+}
+
+void intel_rc6_fini(struct intel_rc6 *rc6)
+{
+	struct drm_i915_gem_object *pctx;
+
+	pctx = fetch_and_zero(&rc6->pctx);
+	if (pctx)
+		i915_gem_object_put(pctx);
+}
+
+static u64 vlv_residency_raw(struct intel_uncore *uncore, const i915_reg_t reg)
+{
+	u32 lower, upper, tmp;
+	int loop = 2;
+
+	/*
+	 * The registers accessed do not need forcewake. We borrow the
+	 * uncore lock to prevent concurrent access to the range reg.
+	 */
+	lockdep_assert_held(&uncore->lock);
+
+	/*
+	 * vlv and chv residency counters are 40 bits in width.
+	 * With a control bit, we can choose between upper or lower
+	 * 32bit window into this counter.
+	 *
+	 * Although we always use the counter in high-range mode elsewhere,
+	 * userspace may attempt to read the value before rc6 is initialised,
+	 * before we have set the default VLV_COUNTER_CONTROL value. So always
+	 * set the high bit to be safe.
+	 */
+	intel_uncore_write_fw(uncore, VLV_COUNTER_CONTROL,
+			      _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
+	upper = intel_uncore_read_fw(uncore, reg);
+	do {
+		tmp = upper;
+
+		intel_uncore_write_fw(uncore, VLV_COUNTER_CONTROL,
+				      _MASKED_BIT_DISABLE(VLV_COUNT_RANGE_HIGH));
+		lower = intel_uncore_read_fw(uncore, reg);
+
+		intel_uncore_write_fw(uncore, VLV_COUNTER_CONTROL,
+				      _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
+		upper = intel_uncore_read_fw(uncore, reg);
+	} while (upper != tmp && --loop);
+
+	/*
+	 * Everywhere else we always use VLV_COUNTER_CONTROL with the
+	 * VLV_COUNT_RANGE_HIGH bit set - so it is safe to leave it set
+	 * now.
+	 */
+
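+	/*
+	 * Reconstruct the 40-bit value: the upper window supplies bits
+	 * [39:8] and the lower window bits [31:0]; the retry loop above
+	 * ensures the overlapping bits agree before combining them.
+	 */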
+	return lower | (u64)upper << 8;
+}
+
+u64 intel_rc6_residency_ns(struct intel_rc6 *rc6, const i915_reg_t reg)
+{
+	struct drm_i915_private *i915 = rc6_to_i915(rc6);
+	struct intel_uncore *uncore = rc6_to_uncore(rc6);
+	u64 time_hw, prev_hw, overflow_hw;
+	unsigned int fw_domains;
+	unsigned long flags;
+	unsigned int i;
+	u32 mul, div;
+
+	if (!rc6->enabled)
+		return 0;
+
+	/*
+	 * Store previous hw counter values for counter wrap-around handling.
+	 *
+	 * There are only four interesting registers and they live next to each
+	 * other so we can use the relative address, compared to the smallest
+	 * one as the index into driver storage.
+	 */
+	i = (i915_mmio_reg_offset(reg) -
+	     i915_mmio_reg_offset(GEN6_GT_GFX_RC6_LOCKED)) / sizeof(u32);
+	if (WARN_ON_ONCE(i >= ARRAY_SIZE(rc6->cur_residency)))
+		return 0;
+
+	fw_domains = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
+
+	spin_lock_irqsave(&uncore->lock, flags);
+	intel_uncore_forcewake_get__locked(uncore, fw_domains);
+
+	/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
+	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+		mul = 1000000;
+		div = i915->czclk_freq;
+		overflow_hw = BIT_ULL(40);
+		time_hw = vlv_residency_raw(uncore, reg);
+	} else {
+		/* 833.33ns units on Gen9LP, 1.28us elsewhere. */
+		if (IS_GEN9_LP(i915)) {
+			mul = 10000;
+			div = 12;
+		} else {
+			mul = 1280;
+			div = 1;
+		}
+
+		overflow_hw = BIT_ULL(32);
+		time_hw = intel_uncore_read_fw(uncore, reg);
+	}
+
+	/*
+	 * Counter wrap handling.
+	 *
+	 * Note this relies on a sufficient frequency of queries, otherwise the
+	 * counters can still wrap between samples.
+	 */
+	prev_hw = rc6->prev_hw_residency[i];
+	rc6->prev_hw_residency[i] = time_hw;
+
+	/* RC6 delta from last sample. */
+	if (time_hw >= prev_hw)
+		time_hw -= prev_hw;
+	else
+		time_hw += overflow_hw - prev_hw;
+
+	/* Add delta to RC6 extended raw driver copy. */
+	time_hw += rc6->cur_residency[i];
+	rc6->cur_residency[i] = time_hw;
+
+	intel_uncore_forcewake_put__locked(uncore, fw_domains);
+	spin_unlock_irqrestore(&uncore->lock, flags);
+
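+	/* Convert the accumulated raw counter into ns (mul/div ns per hw unit). */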
+	return mul_u64_u32_div(time_hw, mul, div);
+}
+
+u64 intel_rc6_residency_us(struct intel_rc6 *rc6, i915_reg_t reg)
+{
+	return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(rc6, reg), 1000);
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.h b/drivers/gpu/drm/i915/gt/intel_rc6.h
new file mode 100644
index 000000000000..caa7e10e9067
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.h
@@ -0,0 +1,24 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_RC6_H
+#define INTEL_RC6_H
+
+#include "i915_reg.h"
+
+struct intel_engine_cs;
+struct intel_rc6;
+
+void intel_rc6_init(struct intel_rc6 *rc6);
+void intel_rc6_fini(struct intel_rc6 *rc6);
+
+void intel_rc6_enable(struct intel_rc6 *rc6);
+void intel_rc6_disable(struct intel_rc6 *rc6);
+
+u64 intel_rc6_residency_ns(struct intel_rc6 *rc6, i915_reg_t reg);
+u64 intel_rc6_residency_us(struct intel_rc6 *rc6, i915_reg_t reg);
+
+#endif /* INTEL_RC6_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6_types.h b/drivers/gpu/drm/i915/gt/intel_rc6_types.h
new file mode 100644
index 000000000000..35e7e15c6aff
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_rc6_types.h
@@ -0,0 +1,26 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_RC6_TYPES_H
+#define INTEL_RC6_TYPES_H
+
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "intel_engine_types.h"
+
+struct drm_i915_gem_object;
+
+struct intel_rc6 {
+	bool enabled;
+
+	u64 prev_hw_residency[4];
+	u64 cur_residency[4];
+
+	struct drm_i915_gem_object *pctx;
+};
+
+#endif /* INTEL_RC6_TYPES_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
index a0924f207d2c..3aecd4a8a256 100644
--- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
@@ -37,6 +37,8 @@
 #include "i915_trace.h"
 #include "intel_context.h"
 #include "intel_gt.h"
+#include "intel_gt_irq.h"
+#include "intel_gt_pm_irq.h"
 #include "intel_renderstate.h"
 #include "intel_reset.h"
 #include "intel_workarounds.h"
@@ -962,13 +964,13 @@ static u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
 static void
 gen5_irq_enable(struct intel_engine_cs *engine)
 {
-	gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
+	gen5_gt_enable_irq(engine->gt, engine->irq_enable_mask);
 }
 
 static void
 gen5_irq_disable(struct intel_engine_cs *engine)
 {
-	gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
+	gen5_gt_disable_irq(engine->gt, engine->irq_enable_mask);
 }
 
 static void
@@ -1029,14 +1031,14 @@ gen6_irq_enable(struct intel_engine_cs *engine)
 	/* Flush/delay to ensure the RING_IMR is active before the GT IMR */
 	ENGINE_POSTING_READ(engine, RING_IMR);
 
-	gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
+	gen5_gt_enable_irq(engine->gt, engine->irq_enable_mask);
 }
 
 static void
 gen6_irq_disable(struct intel_engine_cs *engine)
 {
 	ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
-	gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
+	gen5_gt_disable_irq(engine->gt, engine->irq_enable_mask);
 }
 
 static void
@@ -1047,14 +1049,14 @@ hsw_vebox_irq_enable(struct intel_engine_cs *engine)
 	/* Flush/delay to ensure the RING_IMR is active before the GT IMR */
 	ENGINE_POSTING_READ(engine, RING_IMR);
 
-	gen6_unmask_pm_irq(engine->gt, engine->irq_enable_mask);
+	gen6_gt_pm_unmask_irq(engine->gt, engine->irq_enable_mask);
 }
 
 static void
 hsw_vebox_irq_disable(struct intel_engine_cs *engine)
 {
 	ENGINE_WRITE(engine, RING_IMR, ~0);
-	gen6_mask_pm_irq(engine->gt, engine->irq_enable_mask);
+	gen6_gt_pm_mask_irq(engine->gt, engine->irq_enable_mask);
 }
 
 static int
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
new file mode 100644
index 000000000000..f5984cee7451
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -0,0 +1,1870 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_gt.h"
+#include "intel_gt_irq.h"
+#include "intel_gt_pm_irq.h"
+#include "intel_rps.h"
+#include "intel_sideband.h"
+#include "../../../platform/x86/intel_ips.h"
+
+/*
+ * Lock protecting IPS related data structures
+ */
+static DEFINE_SPINLOCK(mchdev_lock);
+
+static struct intel_gt *rps_to_gt(struct intel_rps *rps)
+{
+	return container_of(rps, struct intel_gt, rps);
+}
+
+static struct drm_i915_private *rps_to_i915(struct intel_rps *rps)
+{
+	return rps_to_gt(rps)->i915;
+}
+
+static struct intel_uncore *rps_to_uncore(struct intel_rps *rps)
+{
+	return rps_to_gt(rps)->uncore;
+}
+
+static u32 rps_pm_sanitize_mask(struct intel_rps *rps, u32 mask)
+{
+	return mask & ~rps->pm_intrmsk_mbz;
+}
+
+static u32 rps_pm_mask(struct intel_rps *rps, u8 val)
+{
+	u32 mask = 0;
+
+	/* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
+	if (val > rps->min_freq_softlimit)
+		mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
+	if (val < rps->max_freq_softlimit)
+		mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
+
+	mask &= rps->pm_events;
+
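+	/*
+	 * GEN6_PMINTRMSK disables the events whose bits are set, so invert
+	 * the mask to leave only the interrupts we care about unmasked.
+	 */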
+	return rps_pm_sanitize_mask(rps, ~mask);
+}
+
+static void rps_reset_ei(struct intel_rps *rps)
+{
+	memset(&rps->ei, 0, sizeof(rps->ei));
+}
+
+static void rps_enable_interrupts(struct intel_rps *rps)
+{
+	struct intel_gt *gt = rps_to_gt(rps);
+
+	rps_reset_ei(rps);
+
+	if (IS_VALLEYVIEW(gt->i915))
+		/* WaGsvRC0ResidencyMethod:vlv */
+		rps->pm_events = GEN6_PM_RP_UP_EI_EXPIRED;
+	else
+		rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
+				  GEN6_PM_RP_DOWN_THRESHOLD |
+				  GEN6_PM_RP_DOWN_TIMEOUT);
+
+	spin_lock_irq(&gt->irq_lock);
+	gen6_gt_pm_enable_irq(gt, rps->pm_events);
+	spin_unlock_irq(&gt->irq_lock);
+
+	intel_uncore_write(gt->uncore, GEN6_PMINTRMSK,
+			   rps_pm_mask(rps, rps->cur_freq));
+}
+
+static void gen6_rps_reset_interrupts(struct intel_rps *rps)
+{
+	gen6_gt_pm_reset_iir(rps_to_gt(rps), GEN6_PM_RPS_EVENTS);
+}
+
+static void gen11_rps_reset_interrupts(struct intel_rps *rps)
+{
+	while (gen11_gt_reset_one_iir(rps_to_gt(rps), 0, GEN11_GTPM))
+		;
+}
+
+static void rps_reset_interrupts(struct intel_rps *rps)
+{
+	struct intel_gt *gt = rps_to_gt(rps);
+
+	spin_lock_irq(&gt->irq_lock);
+	if (INTEL_GEN(gt->i915) >= 11)
+		gen11_rps_reset_interrupts(rps);
+	else
+		gen6_rps_reset_interrupts(rps);
+
+	rps->pm_iir = 0;
+	spin_unlock_irq(&gt->irq_lock);
+}
+
+static void rps_disable_interrupts(struct intel_rps *rps)
+{
+	struct intel_gt *gt = rps_to_gt(rps);
+
+	rps->pm_events = 0;
+
+	intel_uncore_write(gt->uncore, GEN6_PMINTRMSK,
+			   rps_pm_sanitize_mask(rps, ~0u));
+
+	spin_lock_irq(&gt->irq_lock);
+	gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS);
+	spin_unlock_irq(&gt->irq_lock);
+
+	intel_synchronize_irq(gt->i915);
+
+	/*
+	 * Now that we will not be generating any more work, flush any
+	 * outstanding tasks. As we are called on the RPS idle path,
+	 * we will reset the GPU to minimum frequencies, so the current
+	 * state of the worker can be discarded.
+	 */
+	cancel_work_sync(&rps->work);
+
+	rps_reset_interrupts(rps);
+}
+
+static const struct cparams {
+	u16 i;
+	u16 t;
+	u16 m;
+	u16 c;
+} cparams[] = {
+	{ 1, 1333, 301, 28664 },
+	{ 1, 1066, 294, 24460 },
+	{ 1, 800, 294, 25192 },
+	{ 0, 1333, 276, 27605 },
+	{ 0, 1066, 276, 27605 },
+	{ 0, 800, 231, 23784 },
+};
+
+static void gen5_rps_init(struct intel_rps *rps)
+{
+	struct drm_i915_private *i915 = rps_to_i915(rps);
+	struct intel_uncore *uncore = rps_to_uncore(rps);
+	u8 fmax, fmin, fstart;
+	u32 rgvmodectl;
+	int c_m, i;
+
+	if (i915->fsb_freq <= 3200)
+		c_m = 0;
+	else if (i915->fsb_freq <= 4800)
+		c_m = 1;
+	else
+		c_m = 2;
+
+	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
+		if (cparams[i].i == c_m && cparams[i].t == i915->mem_freq) {
+			rps->ips.m = cparams[i].m;
+			rps->ips.c = cparams[i].c;
+			break;
+		}
+	}
+
+	rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
+
+	/* Set up min, max, and cur for interrupt handling */
+	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
+	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
+	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
+		MEMMODE_FSTART_SHIFT;
+	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
+			 fmax, fmin, fstart);
+
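+	/*
+	 * The delay codes count in the opposite direction to frequency (a
+	 * smaller code means a faster clock), so store them negated to keep
+	 * the usual min < max ordering; gen5_rps_set() negates the value
+	 * again before writing the register.
+	 */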
+	rps->min_freq = -fstart;
+	rps->max_freq = -fmin;
+
+	rps->idle_freq = rps->min_freq;
+	rps->cur_freq = rps->idle_freq;
+}
+
+static unsigned long
+__ips_chipset_val(struct intel_ips *ips)
+{
+	struct intel_uncore *uncore =
+		rps_to_uncore(container_of(ips, struct intel_rps, ips));
+	unsigned long now = jiffies_to_msecs(jiffies), dt;
+	unsigned long result;
+	u64 total, delta;
+
+	lockdep_assert_held(&mchdev_lock);
+
+	/*
+	 * Prevent division-by-zero if we are asking too fast.
+	 * Also, we don't get interesting results if we are polling
+	 * faster than once in 10ms, so just return the saved value
+	 * in such cases.
+	 */
+	dt = now - ips->last_time1;
+	if (dt <= 10)
+		return ips->chipset_power;
+
+	/* FIXME: handle per-counter overflow */
+	total = intel_uncore_read(uncore, DMIEC);
+	total += intel_uncore_read(uncore, DDREC);
+	total += intel_uncore_read(uncore, CSIEC);
+
+	delta = total - ips->last_count1;
+
+	result = div_u64(div_u64(ips->m * delta, dt) + ips->c, 10);
+
+	ips->last_count1 = total;
+	ips->last_time1 = now;
+
+	ips->chipset_power = result;
+
+	return result;
+}
+
+static unsigned long ips_mch_val(struct intel_uncore *uncore)
+{
+	unsigned int m, x, b;
+	u32 tsfs;
+
+	tsfs = intel_uncore_read(uncore, TSFS);
+	x = intel_uncore_read8(uncore, TR1);
+
+	b = tsfs & TSFS_INTR_MASK;
+	m = (tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT;
+
+	return m * x / 127 - b;
+}
+
+static int _pxvid_to_vd(u8 pxvid)
+{
+	if (pxvid == 0)
+		return 0;
+
+	if (pxvid >= 8 && pxvid < 31)
+		pxvid = 31;
+
+	return (pxvid + 2) * 125;
+}
+
+static u32 pvid_to_extvid(struct drm_i915_private *i915, u8 pxvid)
+{
+	const int vd = _pxvid_to_vd(pxvid);
+
+	if (INTEL_INFO(i915)->is_mobile)
+		return max(vd - 1125, 0);
+
+	return vd;
+}
+
+static void __gen5_ips_update(struct intel_ips *ips)
+{
+	struct intel_uncore *uncore =
+		rps_to_uncore(container_of(ips, struct intel_rps, ips));
+	u64 now, delta, dt;
+	u32 count;
+
+	lockdep_assert_held(&mchdev_lock);
+
+	now = ktime_get_raw_ns();
+	dt = now - ips->last_time2;
+	do_div(dt, NSEC_PER_MSEC);
+
+	/* Don't divide by 0 */
+	if (dt <= 10)
+		return;
+
+	count = intel_uncore_read(uncore, GFXEC);
+	delta = count - ips->last_count2;
+
+	ips->last_count2 = count;
+	ips->last_time2 = now;
+
+	/* More magic constants... */
+	ips->gfx_power = div_u64(delta * 1181, dt * 10);
+}
+
+static void gen5_rps_update(struct intel_rps *rps)
+{
+	spin_lock_irq(&mchdev_lock);
+	__gen5_ips_update(&rps->ips);
+	spin_unlock_irq(&mchdev_lock);
+}
+
+static bool gen5_rps_set(struct intel_rps *rps, u8 val)
+{
+	struct intel_uncore *uncore = rps_to_uncore(rps);
+	u16 rgvswctl;
+
+	lockdep_assert_held(&mchdev_lock);
+
+	rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
+	if (rgvswctl & MEMCTL_CMD_STS) {
+		DRM_DEBUG("gpu busy, RCS change rejected\n");
+		return false; /* still busy with another command */
+	}
+
+	val = -val;
+
+	rgvswctl =
+		(MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
+		(val << MEMCTL_FREQ_SHIFT) |
+		MEMCTL_SFCAVM;
+	intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
+	intel_uncore_posting_read16(uncore, MEMSWCTL);
+
+	rgvswctl |= MEMCTL_CMD_STS;
+	intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
+
+	return true;
+}
+
+static unsigned long intel_pxfreq(u32 vidfreq)
+{
+	int div = (vidfreq & 0x3f0000) >> 16;
+	int post = (vidfreq & 0x3000) >> 12;
+	int pre = (vidfreq & 0x7);
+
+	if (!pre)
+		return 0;
+
+	return div * 133333 / (pre << post);
+}
+
+static unsigned int init_emon(struct intel_uncore *uncore)
+{
+	u8 pxw[16];
+	int i;
+
+	/* Disable to program */
+	intel_uncore_write(uncore, ECR, 0);
+	intel_uncore_posting_read(uncore, ECR);
+
+	/* Program energy weights for various events */
+	intel_uncore_write(uncore, SDEW, 0x15040d00);
+	intel_uncore_write(uncore, CSIEW0, 0x007f0000);
+	intel_uncore_write(uncore, CSIEW1, 0x1e220004);
+	intel_uncore_write(uncore, CSIEW2, 0x04000004);
+
+	for (i = 0; i < 5; i++)
+		intel_uncore_write(uncore, PEW(i), 0);
+	for (i = 0; i < 3; i++)
+		intel_uncore_write(uncore, DEW(i), 0);
+
+	/* Program P-state weights to account for frequency power adjustment */
+	for (i = 0; i < 16; i++) {
+		u32 pxvidfreq = intel_uncore_read(uncore, PXVFREQ(i));
+		unsigned int freq = intel_pxfreq(pxvidfreq);
+		unsigned int vid =
+			(pxvidfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;
+		unsigned int val;
+
+		val = vid * vid * freq / 1000 * 255;
+		val /= 127 * 127 * 900;
+
+		pxw[i] = val;
+	}
+	/* Render standby states get 0 weight */
+	pxw[14] = 0;
+	pxw[15] = 0;
+
+	for (i = 0; i < 4; i++) {
+		intel_uncore_write(uncore, PXW(i),
+				   pxw[i * 4 + 0] << 24 |
+				   pxw[i * 4 + 1] << 16 |
+				   pxw[i * 4 + 2] <<  8 |
+				   pxw[i * 4 + 3] <<  0);
+	}
+
+	/* Adjust magic regs to magic values (more experimental results) */
+	intel_uncore_write(uncore, OGW0, 0);
+	intel_uncore_write(uncore, OGW1, 0);
+	intel_uncore_write(uncore, EG0, 0x00007f00);
+	intel_uncore_write(uncore, EG1, 0x0000000e);
+	intel_uncore_write(uncore, EG2, 0x000e0000);
+	intel_uncore_write(uncore, EG3, 0x68000300);
+	intel_uncore_write(uncore, EG4, 0x42000000);
+	intel_uncore_write(uncore, EG5, 0x00140031);
+	intel_uncore_write(uncore, EG6, 0);
+	intel_uncore_write(uncore, EG7, 0);
+
+	for (i = 0; i < 8; i++)
+		intel_uncore_write(uncore, PXWL(i), 0);
+
+	/* Enable PMON + select events */
+	intel_uncore_write(uncore, ECR, 0x80000019);
+
+	return intel_uncore_read(uncore, LCFUSE02) & LCFUSE_HIV_MASK;
+}
+
+static bool gen5_rps_enable(struct intel_rps *rps)
+{
+	struct intel_uncore *uncore = rps_to_uncore(rps);
+	u8 fstart, vstart;
+	u32 rgvmodectl;
+
+	spin_lock_irq(&mchdev_lock);
+
+	rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
+
+	/* Enable temp reporting */
+	intel_uncore_write16(uncore, PMMISC,
+			     intel_uncore_read16(uncore, PMMISC) | MCPPCE_EN);
+	intel_uncore_write16(uncore, TSC1,
+			     intel_uncore_read16(uncore, TSC1) | TSE);
+
+	/* 100ms RC evaluation intervals */
+	intel_uncore_write(uncore, RCUPEI, 100000);
+	intel_uncore_write(uncore, RCDNEI, 100000);
+
+	/* Set max/min thresholds to 90ms and 80ms respectively */
+	intel_uncore_write(uncore, RCBMAXAVG, 90000);
+	intel_uncore_write(uncore, RCBMINAVG, 80000);
+
+	intel_uncore_write(uncore, MEMIHYST, 1);
+
+	/* Set up min, max, and cur for interrupt handling */
+	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
+		MEMMODE_FSTART_SHIFT;
+
+	vstart = (intel_uncore_read(uncore, PXVFREQ(fstart)) &
+		  PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;
+
+	intel_uncore_write(uncore,
+			   MEMINTREN,
+			   MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
+
+	intel_uncore_write(uncore, VIDSTART, vstart);
+	intel_uncore_posting_read(uncore, VIDSTART);
+
+	rgvmodectl |= MEMMODE_SWMODE_EN;
+	intel_uncore_write(uncore, MEMMODECTL, rgvmodectl);
+
+	if (wait_for_atomic((intel_uncore_read(uncore, MEMSWCTL) &
+			     MEMCTL_CMD_STS) == 0, 10))
+		DRM_ERROR("stuck trying to change perf mode\n");
+	mdelay(1);
+
+	gen5_rps_set(rps, rps->cur_freq);
+
+	rps->ips.last_count1 = intel_uncore_read(uncore, DMIEC);
+	rps->ips.last_count1 += intel_uncore_read(uncore, DDREC);
+	rps->ips.last_count1 += intel_uncore_read(uncore, CSIEC);
+	rps->ips.last_time1 = jiffies_to_msecs(jiffies);
+
+	rps->ips.last_count2 = intel_uncore_read(uncore, GFXEC);
+	rps->ips.last_time2 = ktime_get_raw_ns();
+
+	spin_unlock_irq(&mchdev_lock);
+
+	rps->ips.corr = init_emon(uncore);
+
+	return true;
+}
+
+static void gen5_rps_disable(struct intel_rps *rps)
+{
+	struct intel_uncore *uncore = rps_to_uncore(rps);
+	u16 rgvswctl;
+
+	spin_lock_irq(&mchdev_lock);
+
+	rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
+
+	/* Ack interrupts, disable EFC interrupt */
+	intel_uncore_write(uncore, MEMINTREN,
+			   intel_uncore_read(uncore, MEMINTREN) &
+			   ~MEMINT_EVAL_CHG_EN);
+	intel_uncore_write(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
+	intel_uncore_write(uncore, DEIER,
+			   intel_uncore_read(uncore, DEIER) & ~DE_PCU_EVENT);
+	intel_uncore_write(uncore, DEIIR, DE_PCU_EVENT);
+	intel_uncore_write(uncore, DEIMR,
+			   intel_uncore_read(uncore, DEIMR) | DE_PCU_EVENT);
+
+	/* Go back to the starting frequency */
+	gen5_rps_set(rps, rps->idle_freq);
+	mdelay(1);
+	rgvswctl |= MEMCTL_CMD_STS;
+	intel_uncore_write(uncore, MEMSWCTL, rgvswctl);
+	mdelay(1);
+
+	spin_unlock_irq(&mchdev_lock);
+}
+
+static u32 rps_limits(struct intel_rps *rps, u8 val)
+{
+	u32 limits;
+
+	/*
+	 * Only set the down limit when we've reached the lowest level to avoid
+	 * getting more interrupts, otherwise leave this clear. This prevents a
+	 * race in the hw when coming out of rc6: There's a tiny window where
+	 * the hw runs at the minimal clock before selecting the desired
+	 * frequency, if the down threshold expires in that window we will not
+	 * receive a down interrupt.
+	 */
+	if (INTEL_GEN(rps_to_i915(rps)) >= 9) {
+		limits = rps->max_freq_softlimit << 23;
+		if (val <= rps->min_freq_softlimit)
+			limits |= rps->min_freq_softlimit << 14;
+	} else {
+		limits = rps->max_freq_softlimit << 24;
+		if (val <= rps->min_freq_softlimit)
+			limits |= rps->min_freq_softlimit << 16;
+	}
+
+	return limits;
+}
+
+static void rps_set_power(struct intel_rps *rps, int new_power)
+{
+	struct intel_uncore *uncore = rps_to_uncore(rps);
+	struct drm_i915_private *i915 = rps_to_i915(rps);
+	u32 threshold_up = 0, threshold_down = 0; /* in % */
+	u32 ei_up = 0, ei_down = 0;
+
+	lockdep_assert_held(&rps->power.mutex);
+
+	if (new_power == rps->power.mode)
+		return;
+
+	/* Note the units here are not exactly 1us, but 1280ns. */
+	switch (new_power) {
+	case LOW_POWER:
+		/* Upclock if more than 95% busy over 16ms */
+		ei_up = 16000;
+		threshold_up = 95;
+
+		/* Downclock if less than 85% busy over 32ms */
+		ei_down = 32000;
+		threshold_down = 85;
+		break;
+
+	case BETWEEN:
+		/* Upclock if more than 90% busy over 13ms */
+		ei_up = 13000;
+		threshold_up = 90;
+
+		/* Downclock if less than 75% busy over 32ms */
+		ei_down = 32000;
+		threshold_down = 75;
+		break;
+
+	case HIGH_POWER:
+		/* Upclock if more than 85% busy over 10ms */
+		ei_up = 10000;
+		threshold_up = 85;
+
+		/* Downclock if less than 60% busy over 32ms */
+		ei_down = 32000;
+		threshold_down = 60;
+		break;
+	}
+
+	/* When byt can survive without system hang with dynamic
+	 * sw freq adjustments, this restriction can be lifted.
+	 */
+	if (IS_VALLEYVIEW(i915))
+		goto skip_hw_write;
+
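+	/*
+	 * Both the evaluation intervals and the busyness thresholds are
+	 * programmed in the same hardware units, so each threshold register
+	 * is written as threshold% of its evaluation interval.
+	 */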
+	intel_uncore_write(uncore, GEN6_RP_UP_EI,
+			   GT_INTERVAL_FROM_US(i915, ei_up));
+	intel_uncore_write(uncore, GEN6_RP_UP_THRESHOLD,
+			   GT_INTERVAL_FROM_US(i915,
+					       ei_up * threshold_up / 100));
+
+	intel_uncore_write(uncore, GEN6_RP_DOWN_EI,
+			   GT_INTERVAL_FROM_US(i915, ei_down));
+	intel_uncore_write(uncore, GEN6_RP_DOWN_THRESHOLD,
+			   GT_INTERVAL_FROM_US(i915,
+					       ei_down * threshold_down / 100));
+
+	intel_uncore_write(uncore, GEN6_RP_CONTROL,
+			   (INTEL_GEN(i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
+			   GEN6_RP_MEDIA_HW_NORMAL_MODE |
+			   GEN6_RP_MEDIA_IS_GFX |
+			   GEN6_RP_ENABLE |
+			   GEN6_RP_UP_BUSY_AVG |
+			   GEN6_RP_DOWN_IDLE_AVG);
+
+skip_hw_write:
+	rps->power.mode = new_power;
+	rps->power.up_threshold = threshold_up;
+	rps->power.down_threshold = threshold_down;
+}
+
+static void gen6_rps_set_thresholds(struct intel_rps *rps, u8 val)
+{
+	int new_power;
+
+	new_power = rps->power.mode;
+	switch (rps->power.mode) {
+	case LOW_POWER:
+		if (val > rps->efficient_freq + 1 &&
+		    val > rps->cur_freq)
+			new_power = BETWEEN;
+		break;
+
+	case BETWEEN:
+		if (val <= rps->efficient_freq &&
+		    val < rps->cur_freq)
+			new_power = LOW_POWER;
+		else if (val >= rps->rp0_freq &&
+			 val > rps->cur_freq)
+			new_power = HIGH_POWER;
+		break;
+
+	case HIGH_POWER:
+		if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 &&
+		    val < rps->cur_freq)
+			new_power = BETWEEN;
+		break;
+	}
+	/* Max/min bins are special */
+	if (val <= rps->min_freq_softlimit)
+		new_power = LOW_POWER;
+	if (val >= rps->max_freq_softlimit)
+		new_power = HIGH_POWER;
+
+	mutex_lock(&rps->power.mutex);
+	if (rps->power.interactive)
+		new_power = HIGH_POWER;
+	rps_set_power(rps, new_power);
+	mutex_unlock(&rps->power.mutex);
+}
+
+void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive)
+{
+	if (!rps->enabled)
+		return;
+
+	mutex_lock(&rps->power.mutex);
+	if (interactive) {
+		if (!rps->power.interactive++ && rps->active)
+			rps_set_power(rps, HIGH_POWER);
+	} else {
+		GEM_BUG_ON(!rps->power.interactive);
+		rps->power.interactive--;
+	}
+	mutex_unlock(&rps->power.mutex);
+}
+
+static int gen6_rps_set(struct intel_rps *rps, u8 val)
+{
+	struct intel_uncore *uncore = rps_to_uncore(rps);
+	struct drm_i915_private *i915 = rps_to_i915(rps);
+	u32 swreq;
+
+	if (INTEL_GEN(i915) >= 9)
+		swreq = GEN9_FREQUENCY(val);
+	else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
+		swreq = HSW_FREQUENCY(val);
+	else
+		swreq = (GEN6_FREQUENCY(val) |
+			 GEN6_OFFSET(0) |
+			 GEN6_AGGRESSIVE_TURBO);
+	intel_uncore_write(uncore, GEN6_RPNSWREQ, swreq);
+
+	return 0;
+}
+
+static int vlv_rps_set(struct intel_rps *rps, u8 val)
+{
+	struct drm_i915_private *i915 = rps_to_i915(rps);
+	int err;
+
+	vlv_punit_get(i915);
+	err = vlv_punit_write(i915, PUNIT_REG_GPU_FREQ_REQ, val);
+	vlv_punit_put(i915);
+
+	return err;
+}
+
+static int rps_set(struct intel_rps *rps, u8 val)
+{
+	struct drm_i915_private *i915 = rps_to_i915(rps);
+	int err;
+
+	if (INTEL_GEN(i915) < 6)
+		return 0;
+
+	if (val == rps->last_freq)
+		return 0;
+
+	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+		err = vlv_rps_set(rps, val);
+	else
+		err = gen6_rps_set(rps, val);
+	if (err)
+		return err;
+
+	gen6_rps_set_thresholds(rps, val);
+	rps->last_freq = val;
+
+	return 0;
+}
+
+void intel_rps_unpark(struct intel_rps *rps)
+{
+	u8 freq;
+
+	if (!rps->enabled)
+		return;
+
+	/*
+	 * Use the user's desired frequency as a guide, but for better
+	 * performance, jump directly to RPe as our starting frequency.
+	 */
+	mutex_lock(&rps->lock);
+	rps->active = true;
+	freq = max(rps->cur_freq, rps->efficient_freq);
+	freq = clamp(freq, rps->min_freq_softlimit, rps->max_freq_softlimit);
+	intel_rps_set(rps, freq);
+	rps->last_adj = 0;
+	mutex_unlock(&rps->lock);
+
+	if (INTEL_GEN(rps_to_i915(rps)) >= 6)
+		rps_enable_interrupts(rps);
+
+	if (IS_GEN(rps_to_i915(rps), 5))
+		gen5_rps_update(rps);
+}
+
+void intel_rps_park(struct intel_rps *rps)
+{
+	struct drm_i915_private *i915 = rps_to_i915(rps);
+
+	if (!rps->enabled)
+		return;
+
+	if (INTEL_GEN(i915) >= 6)
+		rps_disable_interrupts(rps);
+
+	rps->active = false;
+	if (rps->last_freq <= rps->idle_freq)
+		return;
+
+	/*
+	 * The punit delays the write of the frequency and voltage until it
+	 * determines the GPU is awake. During normal usage we don't want to
+	 * waste power changing the frequency if the GPU is sleeping (rc6).
+	 * However, the GPU and driver is now idle and we do not want to delay
+	 * switching to minimum voltage (reducing power whilst idle) as we do
+	 * not expect to be woken in the near future and so must flush the
+	 * change by waking the device.
+	 *
+	 * We choose to take the media powerwell (either would do to trick the
+	 * punit into committing the voltage change) as that takes a lot less
+	 * power than the render powerwell.
+	 */
+	intel_uncore_forcewake_get(rps_to_uncore(rps), FORCEWAKE_MEDIA);
+	rps_set(rps, rps->idle_freq);
+	intel_uncore_forcewake_put(rps_to_uncore(rps), FORCEWAKE_MEDIA);
+}
+
+void intel_rps_boost(struct i915_request *rq)
+{
+	struct intel_rps *rps = &rq->engine->gt->rps;
+	unsigned long flags;
+
+	if (i915_request_signaled(rq) || !rps->active)
+		return;
+
+	/* Serializes with i915_request_retire() */
+	spin_lock_irqsave(&rq->lock, flags);
+	if (!i915_request_has_waitboost(rq) &&
+	    !dma_fence_is_signaled_locked(&rq->fence)) {
+		rq->flags |= I915_REQUEST_WAITBOOST;
+
+		if (!atomic_fetch_inc(&rps->num_waiters) &&
+		    READ_ONCE(rps->cur_freq) < rps->boost_freq)
+			schedule_work(&rps->work);
+
+		atomic_inc(&rps->boosts);
+	}
+	spin_unlock_irqrestore(&rq->lock, flags);
+}
+
+int intel_rps_set(struct intel_rps *rps, u8 val)
+{
+	int err = 0;
+
+	lockdep_assert_held(&rps->lock);
+	GEM_BUG_ON(val > rps->max_freq);
+	GEM_BUG_ON(val < rps->min_freq);
+
+	if (rps->active) {
+		err = rps_set(rps, val);
+
+		/*
+		 * Make sure we continue to get interrupts
+		 * until we hit the minimum or maximum frequencies.
+		 */
+		if (INTEL_GEN(rps_to_i915(rps)) >= 6) {
+			struct intel_uncore *uncore = rps_to_uncore(rps);
+
+			intel_uncore_write(uncore, GEN6_RP_INTERRUPT_LIMITS,
+					   rps_limits(rps, val));
+
+			intel_uncore_write(uncore, GEN6_PMINTRMSK,
+					   rps_pm_mask(rps, val));
+		}
+	}
+
+	if (err == 0) {
+		trace_intel_gpu_freq_change(intel_gpu_freq(rps, val));
+		rps->cur_freq = val;
+	}
+
+	return err;
+}
+
+static void gen6_rps_init(struct intel_rps *rps)
+{
+	struct drm_i915_private *i915 = rps_to_i915(rps);
+	struct intel_uncore *uncore = rps_to_uncore(rps);
+
+	/* All of these values are in units of 50MHz */
+
+	/* static values from HW: RP0 > RP1 > RPn (min_freq) */
+	if (IS_GEN9_LP(i915)) {
+		u32 rp_state_cap = intel_uncore_read(uncore, BXT_RP_STATE_CAP);
+
+		rps->rp0_freq = (rp_state_cap >> 16) & 0xff;
+		rps->rp1_freq = (rp_state_cap >>  8) & 0xff;
+		rps->min_freq = (rp_state_cap >>  0) & 0xff;
+	} else {
+		u32 rp_state_cap = intel_uncore_read(uncore, GEN6_RP_STATE_CAP);
+
+		rps->rp0_freq = (rp_state_cap >>  0) & 0xff;
+		rps->rp1_freq = (rp_state_cap >>  8) & 0xff;
+		rps->min_freq = (rp_state_cap >> 16) & 0xff;
+	}
+
+	/* hw_max = RP0 until we check for overclocking */
+	rps->max_freq = rps->rp0_freq;
+
+	rps->efficient_freq = rps->rp1_freq;
+	if (IS_HASWELL(i915) || IS_BROADWELL(i915) ||
+	    IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) {
+		u32 ddcc_status = 0;
+
+		if (sandybridge_pcode_read(i915,
+					   HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
+					   &ddcc_status, NULL) == 0)
+			rps->efficient_freq =
+				clamp_t(u8,
+					(ddcc_status >> 8) & 0xff,
+					rps->min_freq,
+					rps->max_freq);
+	}
+
+	if (IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) {
+		/* Store the frequency values in 16.66 MHz units, which is
+		 * the natural hardware unit for SKL.
+		 */
+		rps->rp0_freq *= GEN9_FREQ_SCALER;
+		rps->rp1_freq *= GEN9_FREQ_SCALER;
+		rps->min_freq *= GEN9_FREQ_SCALER;
+		rps->max_freq *= GEN9_FREQ_SCALER;
+		rps->efficient_freq *= GEN9_FREQ_SCALER;
+	}
+}
+
+static bool rps_reset(struct intel_rps *rps)
+{
+	/* force a reset */
+	rps->power.mode = -1;
+	rps->last_freq = -1;
+
+	if (rps_set(rps, rps->min_freq)) {
+		DRM_ERROR("Failed to reset RPS to initial values\n");
+		return false;
+	}
+
+	rps->cur_freq = rps->min_freq;
+	return true;
+}
+
+/* See the Gen9_GT_PM_Programming_Guide doc for the below */
+static bool gen9_rps_enable(struct intel_rps *rps)
+{
+	struct drm_i915_private *i915 = rps_to_i915(rps);
+	struct intel_uncore *uncore = rps_to_uncore(rps);
+
+	/* Program defaults and thresholds for RPS */
+	if (IS_GEN(i915, 9))
+		intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
+				      GEN9_FREQUENCY(rps->rp1_freq));
+
+	/* 1 second timeout */
+	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT,
+			      GT_INTERVAL_FROM_US(i915, 1000000));
+
+	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 0xa);
+
+	return rps_reset(rps);
+}
+
+static bool gen8_rps_enable(struct intel_rps *rps)
+{
+	struct intel_uncore *uncore = rps_to_uncore(rps);
+
+	intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
+			      HSW_FREQUENCY(rps->rp1_freq));
+
+	/* NB: Docs say 1s, and 1000000 - which aren't equivalent */
+	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT,
+			      100000000 / 128); /* 1 second timeout */
+
+	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);
+
+	return rps_reset(rps);
+}
+
+static bool gen6_rps_enable(struct intel_rps *rps)
+{
+	struct intel_uncore *uncore = rps_to_uncore(rps);
+
+	/* Power down if completely idle for over 50ms */
+	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 50000);
+	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);
+
+	return rps_reset(rps);
+}
+
+static int chv_rps_max_freq(struct intel_rps *rps)
+{
+	struct drm_i915_private *i915 = rps_to_i915(rps);
+	u32 val;
+
+	val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);
+
+	switch (RUNTIME_INFO(i915)->sseu.eu_total) {
+	case 8:
+		/* (2 * 4) config */
+		val >>= FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT;
+		break;
+	case 12:
+		/* (2 * 6) config */
+		val >>= FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT;
+		break;
+	case 16:
+		/* (2 * 8) config */
+	default:
+		/* Setting (2 * 8) Min RP0 for any other combination */
+		val >>= FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT;
+		break;
+	}
+
+	return val & FB_GFX_FREQ_FUSE_MASK;
+}
+
+static int chv_rps_rpe_freq(struct intel_rps *rps)
+{
+	struct drm_i915_private *i915 = rps_to_i915(rps);
+	u32 val;
+
+	val = vlv_punit_read(i915, PUNIT_GPU_DUTYCYCLE_REG);
+	val >>= PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT;
+
+	return val & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
+}
+
+static int chv_rps_guar_freq(struct intel_rps *rps)
+{
+	struct drm_i915_private *i915 = rps_to_i915(rps);
+	u32 val;
+
+	val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);
+
+	return val & FB_GFX_FREQ_FUSE_MASK;
+}
+
+static u32 chv_rps_min_freq(struct intel_rps *rps)
+{
+	struct drm_i915_private *i915 = rps_to_i915(rps);
+	u32 val;
+
+	val = vlv_punit_read(i915, FB_GFX_FMIN_AT_VMIN_FUSE);
+	val >>= FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT;
+
+	return val & FB_GFX_FREQ_FUSE_MASK;
+}
+
+static bool chv_rps_enable(struct intel_rps *rps)
+{
+	struct intel_uncore *uncore = rps_to_uncore(rps);
+	struct drm_i915_private *i915 = rps_to_i915(rps);
+	u32 val;
+
+	/* 1: Program defaults and thresholds for RPS */
+	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000);
+	intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400);
+	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000);
+	intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000);
+	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000);
+
+	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);
+
+	/* 2: Enable RPS */
+	intel_uncore_write_fw(uncore, GEN6_RP_CONTROL,
+			      GEN6_RP_MEDIA_HW_NORMAL_MODE |
+			      GEN6_RP_MEDIA_IS_GFX |
+			      GEN6_RP_ENABLE |
+			      GEN6_RP_UP_BUSY_AVG |
+			      GEN6_RP_DOWN_IDLE_AVG);
+
+	/* Setting Fixed Bias */
+	vlv_punit_get(i915);
+
+	val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | CHV_BIAS_CPU_50_SOC_50;
+	vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val);
+
+	val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
+
+	vlv_punit_put(i915);
+
+	/* RPS code assumes GPLL is used */
+	WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
+
+	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
+	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
+
+	return rps_reset(rps);
+}
+
+static int vlv_rps_guar_freq(struct intel_rps *rps)
+{
+	struct drm_i915_private *i915 = rps_to_i915(rps);
+	u32 val, rp1;
+
+	val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE);
+
+	rp1 = val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK;
+	rp1 >>= FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
+
+	return rp1;
+}
+
+static int vlv_rps_max_freq(struct intel_rps *rps)
+{
+	struct drm_i915_private *i915 = rps_to_i915(rps);
+	u32 val, rp0;
+
+	val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE);
+
+	rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
+	/* Clamp to max */
+	rp0 = min_t(u32, rp0, 0xea);
+
+	return rp0;
+}
+
+static int vlv_rps_rpe_freq(struct intel_rps *rps)
+{
+	struct drm_i915_private *i915 = rps_to_i915(rps);
+	u32 val, rpe;
+
+	val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
+	rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
+	val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
+	rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
+
+	return rpe;
+}
+
+static int vlv_rps_min_freq(struct intel_rps *rps)
+{
+	struct drm_i915_private *i915 = rps_to_i915(rps);
+	u32 val;
+
+	val = vlv_punit_read(i915, PUNIT_REG_GPU_LFM) & 0xff;
+	/*
+	 * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
+	 * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
+	 * a BYT-M B0 the above register contains 0xbf. Moreover when setting
+	 * a frequency Punit will not allow values below 0xc0. Clamp it to 0xc0
+	 * to make sure it matches what Punit accepts.
+	 */
+	return max_t(u32, val, 0xc0);
+}
+
+static bool vlv_rps_enable(struct intel_rps *rps)
+{
+	struct intel_uncore *uncore = rps_to_uncore(rps);
+	struct drm_i915_private *i915 = rps_to_i915(rps);
+	u32 val;
+
+	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000);
+	intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400);
+	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000);
+	intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000);
+	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000);
+
+	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);
+
+	intel_uncore_write_fw(uncore, GEN6_RP_CONTROL,
+			      GEN6_RP_MEDIA_TURBO |
+			      GEN6_RP_MEDIA_HW_NORMAL_MODE |
+			      GEN6_RP_MEDIA_IS_GFX |
+			      GEN6_RP_ENABLE |
+			      GEN6_RP_UP_BUSY_AVG |
+			      GEN6_RP_DOWN_IDLE_CONT);
+
+	vlv_punit_get(i915);
+
+	/* Setting Fixed Bias */
+	val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | VLV_BIAS_CPU_125_SOC_875;
+	vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val);
+
+	val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
+
+	vlv_punit_put(i915);
+
+	/* RPS code assumes GPLL is used */
+	WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
+
+	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
+	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
+
+	return rps_reset(rps);
+}
+
+static unsigned long __ips_gfx_val(struct intel_ips *ips)
+{
+	struct intel_rps *rps = container_of(ips, typeof(*rps), ips);
+	struct intel_uncore *uncore = rps_to_uncore(rps);
+	unsigned long t, corr, state1, corr2, state2;
+	u32 pxvid, ext_v;
+
+	lockdep_assert_held(&mchdev_lock);
+
+	pxvid = intel_uncore_read(uncore, PXVFREQ(rps->cur_freq));
+	pxvid = (pxvid >> 24) & 0x7f;
+	ext_v = pvid_to_extvid(rps_to_i915(rps), pxvid);
+
+	state1 = ext_v;
+
+	/* Revel in the empirically derived constants */
+
+	/* Correction factor in 1/100000 units */
+	t = ips_mch_val(uncore);
+	if (t > 80)
+		corr = t * 2349 + 135940;
+	else if (t >= 50)
+		corr = t * 964 + 29317;
+	else /* < 50 */
+		corr = t * 301 + 1004;
+
+	corr = corr * 150142 * state1 / 10000 - 78642;
+	corr /= 100000;
+	corr2 = corr * ips->corr;
+
+	state2 = corr2 * state1 / 10000;
+	state2 /= 100; /* convert to mW */
+
+	__gen5_ips_update(ips);
+
+	return ips->gfx_power + state2;
+}
+
+void intel_rps_enable(struct intel_rps *rps)
+{
+	struct drm_i915_private *i915 = rps_to_i915(rps);
+	struct intel_uncore *uncore = rps_to_uncore(rps);
+
+	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+	if (IS_CHERRYVIEW(i915))
+		rps->enabled = chv_rps_enable(rps);
+	else if (IS_VALLEYVIEW(i915))
+		rps->enabled = vlv_rps_enable(rps);
+	else if (INTEL_GEN(i915) >= 9)
+		rps->enabled = gen9_rps_enable(rps);
+	else if (INTEL_GEN(i915) >= 8)
+		rps->enabled = gen8_rps_enable(rps);
+	else if (INTEL_GEN(i915) >= 6)
+		rps->enabled = gen6_rps_enable(rps);
+	else if (IS_IRONLAKE_M(i915))
+		rps->enabled = gen5_rps_enable(rps);
+	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
+	if (!rps->enabled)
+		return;
+
+	WARN_ON(rps->max_freq < rps->min_freq);
+	WARN_ON(rps->idle_freq > rps->max_freq);
+
+	WARN_ON(rps->efficient_freq < rps->min_freq);
+	WARN_ON(rps->efficient_freq > rps->max_freq);
+}
+
+static void gen6_rps_disable(struct intel_rps *rps)
+{
+	intel_uncore_write(rps_to_uncore(rps), GEN6_RP_CONTROL, 0);
+}
+
+void intel_rps_disable(struct intel_rps *rps)
+{
+	struct drm_i915_private *i915 = rps_to_i915(rps);
+
+	rps->enabled = false;
+
+	if (INTEL_GEN(i915) >= 6)
+		gen6_rps_disable(rps);
+	else if (IS_IRONLAKE_M(i915))
+		gen5_rps_disable(rps);
+}
+
+static int byt_gpu_freq(struct intel_rps *rps, int val)
+{
+	/*
+	 * N = val - 0xb7
+	 * Slow = Fast = GPLL ref * N
+	 */
+	return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * (val - 0xb7), 1000);
+}
+
+static int byt_freq_opcode(struct intel_rps *rps, int val)
+{
+	return DIV_ROUND_CLOSEST(1000 * val, rps->gpll_ref_freq) + 0xb7;
+}
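+
+/*
+ * e.g. with a (hypothetical) gpll_ref_freq of 25000 kHz, each opcode step
+ * above 0xb7 is worth 25 MHz: byt_gpu_freq(0xc0) = 25000 * 9 / 1000 = 225 MHz,
+ * and byt_freq_opcode(225) maps straight back to 0xc0.
+ */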
+
+static int chv_gpu_freq(struct intel_rps *rps, int val)
+{
+	/*
+	 * N = val / 2
+	 * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
+	 */
+	return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * val, 2 * 2 * 1000);
+}
+
+static int chv_freq_opcode(struct intel_rps *rps, int val)
+{
+	/* CHV needs even values */
+	return DIV_ROUND_CLOSEST(2 * 1000 * val, rps->gpll_ref_freq) * 2;
+}
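+
+/*
+ * e.g. with a (hypothetical) gpll_ref_freq of 80000 kHz, chv_gpu_freq(0x20)
+ * is 80000 * 32 / 4000 = 640 MHz, and chv_freq_opcode(640) rounds back to
+ * the even opcode 0x20.
+ */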
+
+int intel_gpu_freq(struct intel_rps *rps, int val)
+{
+	struct drm_i915_private *i915 = rps_to_i915(rps);
+
+	if (INTEL_GEN(i915) >= 9)
+		return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
+					 GEN9_FREQ_SCALER);
+	else if (IS_CHERRYVIEW(i915))
+		return chv_gpu_freq(rps, val);
+	else if (IS_VALLEYVIEW(i915))
+		return byt_gpu_freq(rps, val);
+	else
+		return val * GT_FREQUENCY_MULTIPLIER;
+}
+
+int intel_freq_opcode(struct intel_rps *rps, int val)
+{
+	struct drm_i915_private *i915 = rps_to_i915(rps);
+
+	if (INTEL_GEN(i915) >= 9)
+		return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
+					 GT_FREQUENCY_MULTIPLIER);
+	else if (IS_CHERRYVIEW(i915))
+		return chv_freq_opcode(rps, val);
+	else if (IS_VALLEYVIEW(i915))
+		return byt_freq_opcode(rps, val);
+	else
+		return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
+}
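+
+/*
+ * To summarise the units in play: gen6-8 encode requests in 50 MHz steps,
+ * gen9+ in 50/3 MHz (~16.66 MHz) steps, while VLV/CHV requests are GPLL
+ * opcodes derived from gpll_ref_freq as above.
+ */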
+
+static void vlv_init_gpll_ref_freq(struct intel_rps *rps)
+{
+	struct drm_i915_private *i915 = rps_to_i915(rps);
+
+	rps->gpll_ref_freq =
+		vlv_get_cck_clock(i915, "GPLL ref",
+				  CCK_GPLL_CLOCK_CONTROL,
+				  i915->czclk_freq);
+
+	DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n", rps->gpll_ref_freq);
+}
+
+static void vlv_rps_init(struct intel_rps *rps)
+{
+	struct drm_i915_private *i915 = rps_to_i915(rps);
+	u32 val;
+
+	vlv_iosf_sb_get(i915,
+			BIT(VLV_IOSF_SB_PUNIT) |
+			BIT(VLV_IOSF_SB_NC) |
+			BIT(VLV_IOSF_SB_CCK));
+
+	vlv_init_gpll_ref_freq(rps);
+
+	val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
+	switch ((val >> 6) & 3) {
+	case 0:
+	case 1:
+		i915->mem_freq = 800;
+		break;
+	case 2:
+		i915->mem_freq = 1066;
+		break;
+	case 3:
+		i915->mem_freq = 1333;
+		break;
+	}
+	DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", i915->mem_freq);
+
+	rps->max_freq = vlv_rps_max_freq(rps);
+	rps->rp0_freq = rps->max_freq;
+	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
+			 intel_gpu_freq(rps, rps->max_freq),
+			 rps->max_freq);
+
+	rps->efficient_freq = vlv_rps_rpe_freq(rps);
+	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
+			 intel_gpu_freq(rps, rps->efficient_freq),
+			 rps->efficient_freq);
+
+	rps->rp1_freq = vlv_rps_guar_freq(rps);
+	DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
+			 intel_gpu_freq(rps, rps->rp1_freq),
+			 rps->rp1_freq);
+
+	rps->min_freq = vlv_rps_min_freq(rps);
+	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
+			 intel_gpu_freq(rps, rps->min_freq),
+			 rps->min_freq);
+
+	vlv_iosf_sb_put(i915,
+			BIT(VLV_IOSF_SB_PUNIT) |
+			BIT(VLV_IOSF_SB_NC) |
+			BIT(VLV_IOSF_SB_CCK));
+}
+
+static void chv_rps_init(struct intel_rps *rps)
+{
+	struct drm_i915_private *i915 = rps_to_i915(rps);
+	u32 val;
+
+	vlv_iosf_sb_get(i915,
+			BIT(VLV_IOSF_SB_PUNIT) |
+			BIT(VLV_IOSF_SB_NC) |
+			BIT(VLV_IOSF_SB_CCK));
+
+	vlv_init_gpll_ref_freq(rps);
+
+	val = vlv_cck_read(i915, CCK_FUSE_REG);
+
+	switch ((val >> 2) & 0x7) {
+	case 3:
+		i915->mem_freq = 2000;
+		break;
+	default:
+		i915->mem_freq = 1600;
+		break;
+	}
+	DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", i915->mem_freq);
+
+	rps->max_freq = chv_rps_max_freq(rps);
+	rps->rp0_freq = rps->max_freq;
+	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
+			 intel_gpu_freq(rps, rps->max_freq),
+			 rps->max_freq);
+
+	rps->efficient_freq = chv_rps_rpe_freq(rps);
+	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
+			 intel_gpu_freq(rps, rps->efficient_freq),
+			 rps->efficient_freq);
+
+	rps->rp1_freq = chv_rps_guar_freq(rps);
+	DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
+			 intel_gpu_freq(rps, rps->rp1_freq),
+			 rps->rp1_freq);
+
+	rps->min_freq = chv_rps_min_freq(rps);
+	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
+			 intel_gpu_freq(rps, rps->min_freq),
+			 rps->min_freq);
+
+	vlv_iosf_sb_put(i915,
+			BIT(VLV_IOSF_SB_PUNIT) |
+			BIT(VLV_IOSF_SB_NC) |
+			BIT(VLV_IOSF_SB_CCK));
+
+	WARN_ONCE((rps->max_freq | rps->efficient_freq | rps->rp1_freq |
+		   rps->min_freq) & 1,
+		  "Odd GPU freq values\n");
+}
+
+static void vlv_c0_read(struct intel_uncore *uncore, struct intel_rps_ei *ei)
+{
+	ei->ktime = ktime_get_raw();
+	ei->render_c0 = intel_uncore_read(uncore, VLV_RENDER_C0_COUNT);
+	ei->media_c0 = intel_uncore_read(uncore, VLV_MEDIA_C0_COUNT);
+}
+
+static u32 vlv_wa_c0_ei(struct intel_rps *rps, u32 pm_iir)
+{
+	struct intel_uncore *uncore = rps_to_uncore(rps);
+	const struct intel_rps_ei *prev = &rps->ei;
+	struct intel_rps_ei now;
+	u32 events = 0;
+
+	if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
+		return 0;
+
+	vlv_c0_read(uncore, &now);
+
+	if (prev->ktime) {
+		u64 time, c0;
+		u32 render, media;
+
+		time = ktime_us_delta(now.ktime, prev->ktime);
+
+		time *= rps_to_i915(rps)->czclk_freq;
+
+		/* Workload can be split between render + media,
+		 * e.g. SwapBuffers being blitted in X after being rendered in
+		 * mesa. To account for this we need to combine both engines
+		 * into our activity counter.
+		 */
+		render = now.render_c0 - prev->render_c0;
+		media = now.media_c0 - prev->media_c0;
+		c0 = max(render, media);
+		c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */
+
+		if (c0 > time * rps->power.up_threshold)
+			events = GEN6_PM_RP_UP_THRESHOLD;
+		else if (c0 < time * rps->power.down_threshold)
+			events = GEN6_PM_RP_DOWN_THRESHOLD;
+	}
+
+	rps->ei = now;
+	return events;
+}
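+
+/*
+ * In other words, when the EI interrupt fires we report an UP event if the
+ * busier of the two engines spent more than up_threshold% of the last
+ * interval in C0, and a DOWN event if it dipped below down_threshold%.
+ */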
+
+static void rps_work(struct work_struct *work)
+{
+	struct intel_rps *rps = container_of(work, typeof(*rps), work);
+	struct intel_gt *gt = rps_to_gt(rps);
+	bool client_boost = false;
+	int new_freq, adj, min, max;
+	u32 pm_iir = 0;
+
+	spin_lock_irq(&gt->irq_lock);
+	pm_iir = fetch_and_zero(&rps->pm_iir);
+	client_boost = atomic_read(&rps->num_waiters);
+	spin_unlock_irq(&gt->irq_lock);
+
+	/* Make sure we didn't queue anything we're not going to process. */
+	if ((pm_iir & rps->pm_events) == 0 && !client_boost)
+		goto out;
+
+	mutex_lock(&rps->lock);
+
+	pm_iir |= vlv_wa_c0_ei(rps, pm_iir);
+
+	adj = rps->last_adj;
+	new_freq = rps->cur_freq;
+	min = rps->min_freq_softlimit;
+	max = rps->max_freq_softlimit;
+	if (client_boost)
+		max = rps->max_freq;
+	if (client_boost && new_freq < rps->boost_freq) {
+		new_freq = rps->boost_freq;
+		adj = 0;
+	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
+		if (adj > 0)
+			adj *= 2;
+		else /* CHV needs even encode values */
+			adj = IS_CHERRYVIEW(gt->i915) ? 2 : 1;
+
+		if (new_freq >= rps->max_freq_softlimit)
+			adj = 0;
+	} else if (client_boost) {
+		adj = 0;
+	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
+		if (rps->cur_freq > rps->efficient_freq)
+			new_freq = rps->efficient_freq;
+		else if (rps->cur_freq > rps->min_freq_softlimit)
+			new_freq = rps->min_freq_softlimit;
+		adj = 0;
+	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
+		if (adj < 0)
+			adj *= 2;
+		else /* CHV needs even encode values */
+			adj = IS_CHERRYVIEW(gt->i915) ? -2 : -1;
+
+		if (new_freq <= rps->min_freq_softlimit)
+			adj = 0;
+	} else { /* unknown event */
+		adj = 0;
+	}
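+
+	/*
+	 * Note the geometric ramp: sustained load doubles adj on every UP
+	 * event (1, 2, 4, ... or 2, 4, 8, ... on CHV) and mirrors that on
+	 * the way down, so only a few evaluation intervals are needed to
+	 * walk between the soft limits.
+	 */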
+
+	rps->last_adj = adj;
+
+	/*
+	 * Limit deboosting and boosting to keep ourselves at the extremes
+	 * when in the respective power modes (i.e. slowly decrease frequencies
+	 * while in the HIGH_POWER zone and slowly increase frequencies while
+	 * in the LOW_POWER zone). On idle, we will hit the timeout and drop
+	 * to the next level quickly, and conversely if busy we expect to
+	 * hit a waitboost and rapidly switch into max power.
+	 */
+	if ((adj < 0 && rps->power.mode == HIGH_POWER) ||
+	    (adj > 0 && rps->power.mode == LOW_POWER))
+		rps->last_adj = 0;
+
+	/* sysfs frequency interfaces may have snuck in while servicing the
+	 * interrupt
+	 */
+	new_freq += adj;
+	new_freq = clamp_t(int, new_freq, min, max);
+
+	if (intel_rps_set(rps, new_freq)) {
+		DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
+		rps->last_adj = 0;
+	}
+
+	mutex_unlock(&rps->lock);
+
+out:
+	spin_lock_irq(&gt->irq_lock);
+	gen6_gt_pm_unmask_irq(gt, rps->pm_events);
+	spin_unlock_irq(&gt->irq_lock);
+}
+
+void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
+{
+	struct intel_gt *gt = rps_to_gt(rps);
+	const u32 events = rps->pm_events & pm_iir;
+
+	lockdep_assert_held(&gt->irq_lock);
+
+	if (unlikely(!events))
+		return;
+
+	gen6_gt_pm_mask_irq(gt, events);
+
+	rps->pm_iir |= events;
+	schedule_work(&rps->work);
+}
+
+void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
+{
+	struct drm_i915_private *i915 = rps_to_i915(rps);
+
+	if (pm_iir & rps->pm_events) {
+		struct intel_gt *gt = rps_to_gt(rps);
+
+		spin_lock(&gt->irq_lock);
+		gen6_gt_pm_mask_irq(gt, pm_iir & rps->pm_events);
+		rps->pm_iir |= pm_iir & rps->pm_events;
+		schedule_work(&rps->work);
+		spin_unlock(&gt->irq_lock);
+	}
+
+	if (INTEL_GEN(i915) >= 8)
+		return;
+
+	if (pm_iir & PM_VEBOX_USER_INTERRUPT)
+		intel_engine_breadcrumbs_irq(i915->engine[VECS0]);
+
+	if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
+		DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
+}
+
+void gen5_rps_irq_handler(struct intel_rps *rps)
+{
+	struct intel_uncore *uncore = rps_to_uncore(rps);
+	u32 busy_up, busy_down, max_avg, min_avg;
+	u8 new_freq;
+
+	spin_lock(&mchdev_lock);
+
+	intel_uncore_write16(uncore,
+			     MEMINTRSTS,
+			     intel_uncore_read(uncore, MEMINTRSTS));
+
+	intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
+	busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG);
+	busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG);
+	max_avg = intel_uncore_read(uncore, RCBMAXAVG);
+	min_avg = intel_uncore_read(uncore, RCBMINAVG);
+
+	/* Handle RCS change request from hw */
+	new_freq = rps->cur_freq;
+	if (busy_up > max_avg)
+		new_freq++;
+	else if (busy_down < min_avg)
+		new_freq--;
+	new_freq = clamp(new_freq,
+			 rps->min_freq_softlimit,
+			 rps->max_freq_softlimit);
+
+	if (new_freq != rps->cur_freq && gen5_rps_set(rps, new_freq))
+		rps->cur_freq = new_freq;
+
+	spin_unlock(&mchdev_lock);
+}
+
+void intel_rps_init(struct intel_rps *rps)
+{
+	struct drm_i915_private *i915 = rps_to_i915(rps);
+
+	mutex_init(&rps->lock);
+	mutex_init(&rps->power.mutex);
+
+	INIT_WORK(&rps->work, rps_work);
+
+	atomic_set(&rps->num_waiters, 0);
+
+	if (IS_CHERRYVIEW(i915))
+		chv_rps_init(rps);
+	else if (IS_VALLEYVIEW(i915))
+		vlv_rps_init(rps);
+	else if (INTEL_GEN(i915) >= 6)
+		gen6_rps_init(rps);
+	else if (IS_IRONLAKE_M(i915))
+		gen5_rps_init(rps);
+
+	/* Derive initial user preferences/limits from the hardware limits */
+	rps->max_freq_softlimit = rps->max_freq;
+	rps->min_freq_softlimit = rps->min_freq;
+
+	/* After setting max-softlimit, find the overclock max freq */
+	if (IS_GEN(i915, 6) || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) {
+		u32 params = 0;
+
+		sandybridge_pcode_read(i915, GEN6_READ_OC_PARAMS,
+				       &params, NULL);
+		if (params & BIT(31)) { /* OC supported */
+			DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
+					 (rps->max_freq & 0xff) * 50,
+					 (params & 0xff) * 50);
+			rps->max_freq = params & 0xff;
+		}
+	}
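+
+	/*
+	 * e.g. a (hypothetical) params of 0x80000020 reports overclocking
+	 * support with a ceiling of 0x20 * 50 = 1600 MHz, which then
+	 * replaces the fused RP0 as max_freq.
+	 */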
+
+	/* Finally allow us to boost to max by default */
+	rps->boost_freq = rps->max_freq;
+	rps->idle_freq = rps->min_freq;
+	rps->cur_freq = rps->idle_freq;
+
+	rps->pm_intrmsk_mbz = 0;
+
+	/*
+	 * SNB, IVB and HSW can hard hang (and VLV, CHV may do so) on a
+	 * looping batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
+	 *
+	 * TODO: verify if this can be reproduced on VLV,CHV.
+	 */
+	if (INTEL_GEN(i915) <= 7)
+		rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
+
+	if (INTEL_GEN(i915) >= 8)
+		rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
+}
+
+u32 intel_get_cagf(struct intel_rps *rps, u32 rpstat)
+{
+	struct drm_i915_private *i915 = rps_to_i915(rps);
+	u32 cagf;
+
+	if (INTEL_GEN(i915) >= 9)
+		cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
+	else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
+		cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
+	else
+		cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
+
+	return cagf;
+}
+
+/* External interface for intel_ips.ko */
+
+static struct drm_i915_private __rcu *ips_mchdev;
+
+/**
+ * ips_ping_for_i915_load - notify intel_ips that i915 has loaded
+ *
+ * Tells the intel_ips driver that the i915 driver is now loaded, if
+ * IPS got loaded first.
+ *
+ * This awkward dance is so that neither module has to depend on the
+ * other in order for IPS to do the appropriate communication of
+ * GPU turbo limits to i915.
+ */
+static void
+ips_ping_for_i915_load(void)
+{
+	void (*link)(void);
+
+	link = symbol_get(ips_link_to_i915_driver);
+	if (link) {
+		link();
+		symbol_put(ips_link_to_i915_driver);
+	}
+}
+
+void intel_rps_driver_register(struct intel_rps *rps)
+{
+	struct intel_gt *gt = rps_to_gt(rps);
+
+	/*
+	 * We only register the i915 ips part with intel-ips once everything is
+	 * set up, to avoid intel-ips sneaking in and reading bogus values.
+	 */
+	rcu_assign_pointer(ips_mchdev, gt->i915);
+
+	ips_ping_for_i915_load();
+}
+
+void intel_rps_driver_unregister(struct intel_rps *rps)
+{
+	rcu_assign_pointer(ips_mchdev, NULL);
+}
+
+static struct drm_i915_private *mchdev_get(void)
+{
+	struct drm_i915_private *i915;
+
+	rcu_read_lock();
+	i915 = rcu_dereference(ips_mchdev);
+	if (i915 && !kref_get_unless_zero(&i915->drm.ref))
+		i915 = NULL;
+	rcu_read_unlock();
+
+	return i915;
+}
+
+/**
+ * i915_read_mch_val - return value for IPS use
+ *
+ * Calculate and return a value for the IPS driver to use when deciding whether
+ * we have thermal and power headroom to increase CPU or GPU power budget.
+ */
+unsigned long i915_read_mch_val(void)
+{
+	struct drm_i915_private *i915;
+	unsigned long chipset_val = 0;
+	unsigned long graphics_val = 0;
+	intel_wakeref_t wakeref;
+
+	i915 = mchdev_get();
+	if (!i915)
+		return 0;
+
+	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+		struct intel_ips *ips = &i915->gt.rps.ips;
+
+		spin_lock_irq(&mchdev_lock);
+		chipset_val = __ips_chipset_val(ips);
+		graphics_val = __ips_gfx_val(ips);
+		spin_unlock_irq(&mchdev_lock);
+	}
+
+	drm_dev_put(&i915->drm);
+	return chipset_val + graphics_val;
+}
+EXPORT_SYMBOL_GPL(i915_read_mch_val);
+
+/**
+ * i915_gpu_raise - raise GPU frequency limit
+ *
+ * Raise the limit; IPS indicates we have thermal headroom.
+ */
+bool i915_gpu_raise(void)
+{
+	struct drm_i915_private *i915;
+	struct intel_rps *rps;
+
+	i915 = mchdev_get();
+	if (!i915)
+		return false;
+
+	rps = &i915->gt.rps;
+
+	spin_lock_irq(&mchdev_lock);
+	if (rps->max_freq_softlimit < rps->max_freq)
+		rps->max_freq_softlimit++;
+	spin_unlock_irq(&mchdev_lock);
+
+	drm_dev_put(&i915->drm);
+	return true;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_raise);
+
+/**
+ * i915_gpu_lower - lower GPU frequency limit
+ *
+ * IPS indicates we're close to a thermal limit, so throttle back the GPU
+ * frequency maximum.
+ */
+bool i915_gpu_lower(void)
+{
+	struct drm_i915_private *i915;
+	struct intel_rps *rps;
+
+	i915 = mchdev_get();
+	if (!i915)
+		return false;
+
+	rps = &i915->gt.rps;
+
+	spin_lock_irq(&mchdev_lock);
+	if (rps->max_freq_softlimit > rps->min_freq)
+		rps->max_freq_softlimit--;
+	spin_unlock_irq(&mchdev_lock);
+
+	drm_dev_put(&i915->drm);
+	return true;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_lower);
+
+/**
+ * i915_gpu_busy - indicate GPU busyness to IPS
+ *
+ * Tell the IPS driver whether or not the GPU is busy.
+ */
+bool i915_gpu_busy(void)
+{
+	struct drm_i915_private *i915;
+	bool ret;
+
+	i915 = mchdev_get();
+	if (!i915)
+		return false;
+
+	ret = i915->gt.awake;
+
+	drm_dev_put(&i915->drm);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_busy);
+
+/**
+ * i915_gpu_turbo_disable - disable graphics turbo
+ *
+ * Disable graphics turbo by resetting the max frequency and setting the
+ * current frequency to the default.
+ */
+bool i915_gpu_turbo_disable(void)
+{
+	struct drm_i915_private *i915;
+	struct intel_rps *rps;
+	bool ret;
+
+	i915 = mchdev_get();
+	if (!i915)
+		return false;
+
+	rps = &i915->gt.rps;
+
+	spin_lock_irq(&mchdev_lock);
+	rps->max_freq_softlimit = rps->min_freq;
+	ret = gen5_rps_set(rps, rps->min_freq);
+	spin_unlock_irq(&mchdev_lock);
+
+	drm_dev_put(&i915->drm);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.h b/drivers/gpu/drm/i915/gt/intel_rps.h
new file mode 100644
index 000000000000..997a4b4e0207
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_rps.h
@@ -0,0 +1,37 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_RPS_H
+#define INTEL_RPS_H
+
+#include "intel_rps_types.h"
+
+struct i915_request;
+
+void intel_rps_init(struct intel_rps *rps);
+
+void intel_rps_driver_register(struct intel_rps *rps);
+void intel_rps_driver_unregister(struct intel_rps *rps);
+
+void intel_rps_enable(struct intel_rps *rps);
+void intel_rps_disable(struct intel_rps *rps);
+
+void intel_rps_park(struct intel_rps *rps);
+void intel_rps_unpark(struct intel_rps *rps);
+void intel_rps_boost(struct i915_request *rq);
+
+int intel_rps_set(struct intel_rps *rps, u8 val);
+void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive);
+
+int intel_gpu_freq(struct intel_rps *rps, int val);
+int intel_freq_opcode(struct intel_rps *rps, int val);
+u32 intel_get_cagf(struct intel_rps *rps, u32 rpstat);
+
+void gen5_rps_irq_handler(struct intel_rps *rps);
+void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir);
+void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir);
+
+#endif /* INTEL_RPS_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_rps_types.h b/drivers/gpu/drm/i915/gt/intel_rps_types.h
new file mode 100644
index 000000000000..40eb1fb651e7
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_rps_types.h
@@ -0,0 +1,93 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_RPS_TYPES_H
+#define INTEL_RPS_TYPES_H
+
+#include <linux/atomic.h>
+#include <linux/ktime.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+struct intel_ips {
+	u64 last_count1;
+	unsigned long last_time1;
+	unsigned long chipset_power;
+	u64 last_count2;
+	u64 last_time2;
+	unsigned long gfx_power;
+	u8 corr;
+
+	int c, m;
+};
+
+struct intel_rps_ei {
+	ktime_t ktime;
+	u32 render_c0;
+	u32 media_c0;
+};
+
+struct intel_rps {
+	struct mutex lock; /* protects enabling and the worker */
+
+	/*
+	 * work, interrupts_enabled and pm_iir are protected by
+	 * dev_priv->irq_lock
+	 */
+	struct work_struct work;
+	bool enabled;
+	bool active;
+	u32 pm_iir;
+
+	/* PM interrupt bits that should never be masked */
+	u32 pm_intrmsk_mbz;
+	u32 pm_events;
+
+	/* Frequencies are stored in potentially platform dependent multiples.
+	 * In other words, *_freq needs to be multiplied by X to be interesting.
+	 * Soft limits are those which are used for the dynamic reclocking done
+	 * by the driver (raise frequencies under heavy loads, and lower for
+	 * lighter loads). Hard limits are those imposed by the hardware.
+	 *
+	 * A distinction is made for overclocking, which is never enabled by
+	 * default, and is considered to be above the hard limit if it's
+	 * possible at all.
+	 */
+	u8 cur_freq;		/* Current frequency (cached, may not == HW) */
+	u8 last_freq;		/* Last SWREQ frequency */
+	u8 min_freq_softlimit;	/* Minimum frequency permitted by the driver */
+	u8 max_freq_softlimit;	/* Max frequency permitted by the driver */
+	u8 max_freq;		/* Maximum frequency, RP0 if not overclocking */
+	u8 min_freq;		/* AKA RPn. Minimum frequency */
+	u8 boost_freq;		/* Frequency to request when wait boosting */
+	u8 idle_freq;		/* Frequency to request when we are idle */
+	u8 efficient_freq;	/* AKA RPe. Pre-determined balanced frequency */
+	u8 rp1_freq;		/* "less than" RP0 power/frequency */
+	u8 rp0_freq;		/* Non-overclocked max frequency. */
+	u16 gpll_ref_freq;	/* vlv/chv GPLL reference frequency */
+
+	int last_adj;
+
+	struct {
+		struct mutex mutex;
+
+		enum { LOW_POWER, BETWEEN, HIGH_POWER } mode;
+		unsigned int interactive;
+
+		u8 up_threshold; /* Current %busy required to upclock */
+		u8 down_threshold; /* Current %busy required to downclock */
+	} power;
+
+	atomic_t num_waiters;
+	atomic_t boosts;
+
+	/* manual wa residency calculations */
+	struct intel_rps_ei ei;
+	struct intel_ips ips;
+};
+
+#endif /* INTEL_RPS_TYPES_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 83f2c197375f..19029c08455d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -23,6 +23,8 @@
  */
 
 #include "gt/intel_gt.h"
+#include "gt/intel_gt_irq.h"
+#include "gt/intel_gt_pm_irq.h"
 #include "intel_guc.h"
 #include "intel_guc_ads.h"
 #include "intel_guc_submission.h"
@@ -75,6 +77,91 @@ void intel_guc_init_send_regs(struct intel_guc *guc)
 	guc->send_regs.fw_domains = fw_domains;
 }
 
+static void gen9_reset_guc_interrupts(struct intel_guc *guc)
+{
+	struct intel_gt *gt = guc_to_gt(guc);
+
+	assert_rpm_wakelock_held(&gt->i915->runtime_pm);
+
+	spin_lock_irq(&gt->irq_lock);
+	gen6_gt_pm_reset_iir(gt, gt->pm_guc_events);
+	spin_unlock_irq(&gt->irq_lock);
+}
+
+static void gen9_enable_guc_interrupts(struct intel_guc *guc)
+{
+	struct intel_gt *gt = guc_to_gt(guc);
+
+	assert_rpm_wakelock_held(&gt->i915->runtime_pm);
+
+	spin_lock_irq(&gt->irq_lock);
+	if (!guc->interrupts.enabled) {
+		WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) &
+			     gt->pm_guc_events);
+		guc->interrupts.enabled = true;
+		gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
+	}
+	spin_unlock_irq(&gt->irq_lock);
+}
+
+static void gen9_disable_guc_interrupts(struct intel_guc *guc)
+{
+	struct intel_gt *gt = guc_to_gt(guc);
+
+	assert_rpm_wakelock_held(&gt->i915->runtime_pm);
+
+	spin_lock_irq(&gt->irq_lock);
+	guc->interrupts.enabled = false;
+
+	gen6_gt_pm_disable_irq(gt, gt->pm_guc_events);
+
+	spin_unlock_irq(&gt->irq_lock);
+	intel_synchronize_irq(gt->i915);
+
+	gen9_reset_guc_interrupts(guc);
+}
+
+static void gen11_reset_guc_interrupts(struct intel_guc *guc)
+{
+	struct intel_gt *gt = guc_to_gt(guc);
+
+	spin_lock_irq(&gt->irq_lock);
+	gen11_gt_reset_one_iir(gt, 0, GEN11_GUC);
+	spin_unlock_irq(&gt->irq_lock);
+}
+
+static void gen11_enable_guc_interrupts(struct intel_guc *guc)
+{
+	struct intel_gt *gt = guc_to_gt(guc);
+
+	spin_lock_irq(&gt->irq_lock);
+	if (!guc->interrupts.enabled) {
+		u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);
+
+		WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC));
+		intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, events);
+		intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~events);
+		guc->interrupts.enabled = true;
+	}
+	spin_unlock_irq(&gt->irq_lock);
+}
+
+static void gen11_disable_guc_interrupts(struct intel_guc *guc)
+{
+	struct intel_gt *gt = guc_to_gt(guc);
+
+	spin_lock_irq(&gt->irq_lock);
+	guc->interrupts.enabled = false;
+
+	intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0);
+	intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
+
+	spin_unlock_irq(&gt->irq_lock);
+	intel_synchronize_irq(gt->i915);
+
+	gen11_reset_guc_interrupts(guc);
+}
+
 void intel_guc_init_early(struct intel_guc *guc)
 {
 	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index a0f2a01365bc..b8fa81137dea 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1004,7 +1004,7 @@ void intel_guc_submission_fini(struct intel_guc *guc)
 
 static void guc_interrupts_capture(struct intel_gt *gt)
 {
-	struct intel_rps *rps = &gt->i915->gt_pm.rps;
+	struct intel_rps *rps = &gt->rps;
 	struct intel_uncore *uncore = gt->uncore;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
@@ -1050,7 +1050,7 @@ static void guc_interrupts_capture(struct intel_gt *gt)
 
 static void guc_interrupts_release(struct intel_gt *gt)
 {
-	struct intel_rps *rps = &gt->i915->gt_pm.rps;
+	struct intel_rps *rps = &gt->rps;
 	struct intel_uncore *uncore = gt->uncore;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 8126dffe7800..d45ca2d965d4 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -40,6 +40,8 @@
 
 #include "gem/i915_gem_context.h"
 #include "gt/intel_reset.h"
+#include "gt/intel_rc6.h"
+#include "gt/intel_rps.h"
 #include "gt/uc/intel_guc_submission.h"
 
 #include "i915_debugfs.h"
@@ -767,7 +769,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	struct intel_uncore *uncore = &dev_priv->uncore;
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct intel_rps *rps = &dev_priv->gt.rps;
 	intel_wakeref_t wakeref;
 	int ret = 0;
 
@@ -803,23 +805,23 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
 
 		seq_printf(m, "actual GPU freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
+			   intel_gpu_freq(rps, (freq_sts >> 8) & 0xff));
 
 		seq_printf(m, "current GPU freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, rps->cur_freq));
+			   intel_gpu_freq(rps, rps->cur_freq));
 
 		seq_printf(m, "max GPU freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, rps->max_freq));
+			   intel_gpu_freq(rps, rps->max_freq));
 
 		seq_printf(m, "min GPU freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, rps->min_freq));
+			   intel_gpu_freq(rps, rps->min_freq));
 
 		seq_printf(m, "idle GPU freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, rps->idle_freq));
+			   intel_gpu_freq(rps, rps->idle_freq));
 
 		seq_printf(m,
 			   "efficient (RPe) frequency: %d MHz\n",
-			   intel_gpu_freq(dev_priv, rps->efficient_freq));
+			   intel_gpu_freq(rps, rps->efficient_freq));
 	} else if (INTEL_GEN(dev_priv) >= 6) {
 		u32 rp_state_limits;
 		u32 gt_perf_status;
@@ -853,7 +855,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 			else
 				reqf >>= 25;
 		}
-		reqf = intel_gpu_freq(dev_priv, reqf);
+		reqf = intel_gpu_freq(rps, reqf);
 
 		rpmodectl = I915_READ(GEN6_RP_CONTROL);
 		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
@@ -866,8 +868,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
 		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
 		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
-		cagf = intel_gpu_freq(dev_priv,
-				      intel_get_cagf(dev_priv, rpstat));
+		cagf = intel_gpu_freq(rps, intel_get_cagf(rps, rpstat));
 
 		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
 
@@ -944,37 +945,37 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 		max_freq *= (IS_GEN9_BC(dev_priv) ||
 			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
 		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
-			   intel_gpu_freq(dev_priv, max_freq));
+			   intel_gpu_freq(rps, max_freq));
 
 		max_freq = (rp_state_cap & 0xff00) >> 8;
 		max_freq *= (IS_GEN9_BC(dev_priv) ||
 			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
 		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
-			   intel_gpu_freq(dev_priv, max_freq));
+			   intel_gpu_freq(rps, max_freq));
 
 		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
 			    rp_state_cap >> 0) & 0xff;
 		max_freq *= (IS_GEN9_BC(dev_priv) ||
 			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
 		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
-			   intel_gpu_freq(dev_priv, max_freq));
+			   intel_gpu_freq(rps, max_freq));
 		seq_printf(m, "Max overclocked frequency: %dMHz\n",
-			   intel_gpu_freq(dev_priv, rps->max_freq));
+			   intel_gpu_freq(rps, rps->max_freq));
 
 		seq_printf(m, "Current freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, rps->cur_freq));
+			   intel_gpu_freq(rps, rps->cur_freq));
 		seq_printf(m, "Actual freq: %d MHz\n", cagf);
 		seq_printf(m, "Idle freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, rps->idle_freq));
+			   intel_gpu_freq(rps, rps->idle_freq));
 		seq_printf(m, "Min freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, rps->min_freq));
+			   intel_gpu_freq(rps, rps->min_freq));
 		seq_printf(m, "Boost freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, rps->boost_freq));
+			   intel_gpu_freq(rps, rps->boost_freq));
 		seq_printf(m, "Max freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, rps->max_freq));
+			   intel_gpu_freq(rps, rps->max_freq));
 		seq_printf(m,
 			   "efficient (RPe) frequency: %d MHz\n",
-			   intel_gpu_freq(dev_priv, rps->efficient_freq));
+			   intel_gpu_freq(rps, rps->efficient_freq));
 	} else {
 		seq_puts(m, "no P-state info available\n");
 	}
@@ -1152,11 +1153,13 @@ static void print_rc6_res(struct seq_file *m,
 			  const char *title,
 			  const i915_reg_t reg)
 {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_i915_private *i915 = node_to_i915(m->private);
+	intel_wakeref_t wakeref;
 
-	seq_printf(m, "%s %u (%llu us)\n",
-		   title, I915_READ(reg),
-		   intel_rc6_residency_us(dev_priv, reg));
+	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+		seq_printf(m, "%s %u (%llu us)\n", title,
+			   intel_uncore_read(&i915->uncore, reg),
+			   intel_rc6_residency_us(&i915->gt.rc6, reg));
 }
 
 static int vlv_drpc_info(struct seq_file *m)
@@ -1431,34 +1434,10 @@ static int i915_sr_status(struct seq_file *m, void *unused)
 	return 0;
 }
 
-static int i915_emon_status(struct seq_file *m, void *unused)
-{
-	struct drm_i915_private *i915 = node_to_i915(m->private);
-	intel_wakeref_t wakeref;
-
-	if (!IS_GEN(i915, 5))
-		return -ENODEV;
-
-	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
-		unsigned long temp, chipset, gfx;
-
-		temp = i915_mch_val(i915);
-		chipset = i915_chipset_val(i915);
-		gfx = i915_gfx_val(i915);
-
-		seq_printf(m, "GMCH temp: %ld\n", temp);
-		seq_printf(m, "Chipset power: %ld\n", chipset);
-		seq_printf(m, "GFX power: %ld\n", gfx);
-		seq_printf(m, "Total power: %ld\n", chipset + gfx);
-	}
-
-	return 0;
-}
-
 static int i915_ring_freq_table(struct seq_file *m, void *unused)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct intel_rps *rps = &dev_priv->gt.rps;
 	unsigned int max_gpu_freq, min_gpu_freq;
 	intel_wakeref_t wakeref;
 	int gpu_freq, ia_freq;
@@ -1483,10 +1462,11 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
 				       &ia_freq, NULL);
 		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
-			   intel_gpu_freq(dev_priv, (gpu_freq *
-						     (IS_GEN9_BC(dev_priv) ||
-						      INTEL_GEN(dev_priv) >= 10 ?
-						      GEN9_FREQ_SCALER : 1))),
+			   intel_gpu_freq(rps,
+					  (gpu_freq *
+					   (IS_GEN9_BC(dev_priv) ||
+					    INTEL_GEN(dev_priv) >= 10 ?
+					    GEN9_FREQ_SCALER : 1))),
 			   ((ia_freq >> 0) & 0xff) * 100,
 			   ((ia_freq >> 8) & 0xff) * 100);
 	}
@@ -1723,7 +1703,7 @@ static const char *rps_power_to_str(unsigned int power)
 static int i915_rps_boost_info(struct seq_file *m, void *data)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct intel_rps *rps = &dev_priv->gt.rps;
 	u32 act_freq = rps->cur_freq;
 	intel_wakeref_t wakeref;
 
@@ -1735,7 +1715,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
 			vlv_punit_put(dev_priv);
 			act_freq = (act_freq >> 8) & 0xff;
 		} else {
-			act_freq = intel_get_cagf(dev_priv,
+			act_freq = intel_get_cagf(rps,
 						  I915_READ(GEN6_RPSTAT1));
 		}
 	}
@@ -1746,17 +1726,17 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
 		   atomic_read(&rps->num_waiters));
 	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
 	seq_printf(m, "Frequency requested %d, actual %d\n",
-		   intel_gpu_freq(dev_priv, rps->cur_freq),
-		   intel_gpu_freq(dev_priv, act_freq));
+		   intel_gpu_freq(rps, rps->cur_freq),
+		   intel_gpu_freq(rps, act_freq));
 	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
-		   intel_gpu_freq(dev_priv, rps->min_freq),
-		   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
-		   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
-		   intel_gpu_freq(dev_priv, rps->max_freq));
+		   intel_gpu_freq(rps, rps->min_freq),
+		   intel_gpu_freq(rps, rps->min_freq_softlimit),
+		   intel_gpu_freq(rps, rps->max_freq_softlimit),
+		   intel_gpu_freq(rps, rps->max_freq));
 	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
-		   intel_gpu_freq(dev_priv, rps->idle_freq),
-		   intel_gpu_freq(dev_priv, rps->efficient_freq),
-		   intel_gpu_freq(dev_priv, rps->boost_freq));
+		   intel_gpu_freq(rps, rps->idle_freq),
+		   intel_gpu_freq(rps, rps->efficient_freq),
+		   intel_gpu_freq(rps, rps->boost_freq));
 
 	seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
 
@@ -4300,7 +4280,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_frequency_info", i915_frequency_info, 0},
 	{"i915_hangcheck_info", i915_hangcheck_info, 0},
 	{"i915_drpc_info", i915_drpc_info, 0},
-	{"i915_emon_status", i915_emon_status, 0},
 	{"i915_ring_freq_table", i915_ring_freq_table, 0},
 	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
 	{"i915_fbc_status", i915_fbc_status, 0},
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 0244421241fd..ab9c480c7709 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1641,9 +1641,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
 	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
 			   PM_QOS_DEFAULT_VALUE);
 
-	/* BIOS often leaves RC6 enabled, but disable it for hw init */
-	intel_sanitize_gt_powersave(dev_priv);
-
 	intel_gt_init_workarounds(dev_priv);
 
 	/* On the 945G/GM, the chipset reports the MSI capability on the
@@ -1749,8 +1746,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
 		acpi_video_register();
 	}
 
-	if (IS_GEN(dev_priv, 5))
-		intel_gpu_ips_init(dev_priv);
+	intel_gt_driver_register(&dev_priv->gt);
 
 	intel_audio_init(dev_priv);
 
@@ -1793,7 +1789,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
 	 */
 	drm_kms_helper_poll_fini(&dev_priv->drm);
 
-	intel_gpu_ips_teardown();
+	intel_gt_driver_unregister(&dev_priv->gt);
 	acpi_video_unregister();
 	intel_opregion_unregister(dev_priv);
 
@@ -1935,10 +1931,8 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 out_cleanup_hw:
 	i915_driver_hw_remove(dev_priv);
+	intel_gt_driver_release(&dev_priv->gt);
 	i915_ggtt_driver_release(dev_priv);
-
-	/* Paranoia: make sure we have disabled everything before we exit. */
-	intel_sanitize_gt_powersave(dev_priv);
 out_cleanup_mmio:
 	i915_driver_mmio_release(dev_priv);
 out_runtime_pm_put:
@@ -2010,8 +2004,7 @@ static void i915_driver_release(struct drm_device *dev)
 
 	i915_ggtt_driver_release(dev_priv);
 
-	/* Paranoia: make sure we have disabled everything before we exit. */
-	intel_sanitize_gt_powersave(dev_priv);
+	intel_gt_driver_release(&dev_priv->gt);
 
 	i915_driver_mmio_release(dev_priv);
 
@@ -2244,7 +2237,7 @@ static int i915_drm_resume(struct drm_device *dev)
 	int ret;
 
 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
-	intel_sanitize_gt_powersave(dev_priv);
+	intel_gt_pm_disable(&dev_priv->gt);
 
 	i915_gem_sanitize(dev_priv);
 
@@ -2377,7 +2370,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
 		hsw_disable_pc8(dev_priv);
 	}
 
-	intel_sanitize_gt_powersave(dev_priv);
+	intel_gt_pm_disable(&dev_priv->gt);
 
 	intel_power_domains_resume(dev_priv);
 
@@ -2921,9 +2914,6 @@ static int intel_runtime_suspend(struct device *kdev)
 	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
 	int ret;
 
-	if (WARN_ON_ONCE(!(dev_priv->gt_pm.rc6.enabled && HAS_RC6(dev_priv))))
-		return -ENODEV;
-
 	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
 		return -ENODEV;
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index fbaaaa7c12ad..6f611699b3be 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -634,106 +634,6 @@ struct vlv_s0ix_state {
 	u32 clock_gate_dis2;
 };
 
-struct intel_rps_ei {
-	ktime_t ktime;
-	u32 render_c0;
-	u32 media_c0;
-};
-
-struct intel_rps {
-	struct mutex lock; /* protects enabling and the worker */
-
-	/*
-	 * work, interrupts_enabled and pm_iir are protected by
-	 * dev_priv->irq_lock
-	 */
-	struct work_struct work;
-	bool interrupts_enabled;
-	u32 pm_iir;
-
-	/* PM interrupt bits that should never be masked */
-	u32 pm_intrmsk_mbz;
-
-	/* Frequencies are stored in potentially platform dependent multiples.
-	 * In other words, *_freq needs to be multiplied by X to be interesting.
-	 * Soft limits are those which are used for the dynamic reclocking done
-	 * by the driver (raise frequencies under heavy loads, and lower for
-	 * lighter loads). Hard limits are those imposed by the hardware.
-	 *
-	 * A distinction is made for overclocking, which is never enabled by
-	 * default, and is considered to be above the hard limit if it's
-	 * possible at all.
-	 */
-	u8 cur_freq;		/* Current frequency (cached, may not == HW) */
-	u8 min_freq_softlimit;	/* Minimum frequency permitted by the driver */
-	u8 max_freq_softlimit;	/* Max frequency permitted by the driver */
-	u8 max_freq;		/* Maximum frequency, RP0 if not overclocking */
-	u8 min_freq;		/* AKA RPn. Minimum frequency */
-	u8 boost_freq;		/* Frequency to request when wait boosting */
-	u8 idle_freq;		/* Frequency to request when we are idle */
-	u8 efficient_freq;	/* AKA RPe. Pre-determined balanced frequency */
-	u8 rp1_freq;		/* "less than" RP0 power/freqency */
-	u8 rp0_freq;		/* Non-overclocked max frequency. */
-	u16 gpll_ref_freq;	/* vlv/chv GPLL reference frequency */
-
-	int last_adj;
-
-	struct {
-		struct mutex mutex;
-
-		enum { LOW_POWER, BETWEEN, HIGH_POWER } mode;
-		unsigned int interactive;
-
-		u8 up_threshold; /* Current %busy required to uplock */
-		u8 down_threshold; /* Current %busy required to downclock */
-	} power;
-
-	bool enabled;
-	atomic_t num_waiters;
-	atomic_t boosts;
-
-	/* manual wa residency calculations */
-	struct intel_rps_ei ei;
-};
-
-struct intel_rc6 {
-	bool enabled;
-	u64 prev_hw_residency[4];
-	u64 cur_residency[4];
-};
-
-struct intel_llc_pstate {
-	bool enabled;
-};
-
-struct intel_gen6_power_mgmt {
-	struct intel_rps rps;
-	struct intel_rc6 rc6;
-	struct intel_llc_pstate llc_pstate;
-};
-
-/* defined intel_pm.c */
-extern spinlock_t mchdev_lock;
-
-struct intel_ilk_power_mgmt {
-	u8 cur_delay;
-	u8 min_delay;
-	u8 max_delay;
-	u8 fmax;
-	u8 fstart;
-
-	u64 last_count1;
-	unsigned long last_time1;
-	unsigned long chipset_power;
-	u64 last_count2;
-	u64 last_time2;
-	unsigned long gfx_power;
-	u8 corr;
-
-	int c_m;
-	int r_t;
-};
-
 #define MAX_L3_SLICES 2
 struct intel_l3_parity {
 	u32 *remap_info[MAX_L3_SLICES];
@@ -1399,8 +1299,6 @@ struct drm_i915_private {
 		u32 irq_mask;
 		u32 de_irq_mask[I915_MAX_PIPES];
 	};
-	u32 gt_irq_mask;
-	u32 pm_rps_events;
 	u32 pipestat_irq_mask[I915_MAX_PIPES];
 
 	struct i915_hotplug hotplug;
@@ -1533,21 +1431,12 @@ struct drm_i915_private {
 	 */
 	u32 edram_size_mb;
 
-	/* gen6+ GT PM state */
-	struct intel_gen6_power_mgmt gt_pm;
-
-	/* ilk-only ips/rps state. Everything in here is protected by the global
-	 * mchdev_lock in intel_pm.c */
-	struct intel_ilk_power_mgmt ips;
-
 	struct i915_power_domains power_domains;
 
 	struct i915_psr psr;
 
 	struct i915_gpu_error gpu_error;
 
-	struct drm_i915_gem_object *vlv_pctx;
-
 	/* list of fbdev register on this device */
 	struct intel_fbdev *fbdev;
 	struct work_struct fbdev_suspend_work;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6ca700189a86..944c80ed726b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -50,6 +50,7 @@
 #include "gt/intel_gt_pm.h"
 #include "gt/intel_mocs.h"
 #include "gt/intel_reset.h"
+#include "gt/intel_rps.h"
 #include "gt/intel_workarounds.h"
 
 #include "i915_drv.h"
@@ -936,7 +937,7 @@ wait_for_timelines(struct drm_i915_private *i915,
 		 * stalls, so allow the gpu to boost to maximum clocks.
 		 */
 		if (wait & I915_WAIT_FOR_IDLE_BOOST)
-			gen6_rps_boost(rq);
+			intel_rps_boost(rq);
 
 		timeout = i915_request_wait(rq, wait, timeout);
 		i915_request_put(rq);
@@ -1390,17 +1391,6 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
 	goto out_ctx;
 }
 
-static int
-i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
-{
-	return intel_gt_init_scratch(&i915->gt, size);
-}
-
-static void i915_gem_fini_scratch(struct drm_i915_private *i915)
-{
-	intel_gt_fini_scratch(&i915->gt);
-}
-
 static int intel_engines_verify_workarounds(struct drm_i915_private *i915)
 {
 	struct intel_engine_cs *engine;
@@ -1456,12 +1446,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 		goto err_unlock;
 	}
 
-	ret = i915_gem_init_scratch(dev_priv,
-				    IS_GEN(dev_priv, 2) ? SZ_256K : PAGE_SIZE);
-	if (ret) {
-		GEM_BUG_ON(ret == -EIO);
-		goto err_ggtt;
-	}
+	intel_gt_init(&dev_priv->gt);
 
 	ret = intel_engines_setup(dev_priv);
 	if (ret) {
@@ -1481,8 +1466,6 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 		goto err_context;
 	}
 
-	intel_init_gt_powersave(dev_priv);
-
 	ret = intel_uc_init(&dev_priv->gt.uc);
 	if (ret)
 		goto err_pm;
@@ -1551,16 +1534,13 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 err_uc_init:
 	intel_uc_fini(&dev_priv->gt.uc);
 err_pm:
-	if (ret != -EIO) {
-		intel_cleanup_gt_powersave(dev_priv);
+	if (ret != -EIO)
 		intel_engines_cleanup(dev_priv);
-	}
 err_context:
 	if (ret != -EIO)
 		i915_gem_fini_contexts(dev_priv);
 err_scratch:
-	i915_gem_fini_scratch(dev_priv);
-err_ggtt:
+	intel_gt_driver_release(&dev_priv->gt);
 err_unlock:
 	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -1602,12 +1582,10 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 
 void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
 {
-	GEM_BUG_ON(dev_priv->gt.awake);
-
 	intel_wakeref_auto_fini(&dev_priv->ggtt.userfault_wakeref);
 
 	i915_gem_suspend_late(dev_priv);
-	intel_disable_gt_powersave(dev_priv);
+	intel_gt_driver_remove(&dev_priv->gt);
 
 	/* Flush any outstanding unpin_work. */
 	i915_gem_drain_workqueue(dev_priv);
@@ -1624,14 +1602,12 @@ void i915_gem_driver_release(struct drm_i915_private *dev_priv)
 {
 	mutex_lock(&dev_priv->drm.struct_mutex);
 	intel_engines_cleanup(dev_priv);
+	intel_gt_driver_release(&dev_priv->gt);
 	i915_gem_fini_contexts(dev_priv);
-	i915_gem_fini_scratch(dev_priv);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 
 	intel_wa_list_free(&dev_priv->gt_wa_list);
 
-	intel_cleanup_gt_powersave(dev_priv);
-
 	intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
 	i915_gem_cleanup_userptr(dev_priv);
 	intel_timelines_fini(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index af1bb653579a..68abdb86ef87 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -43,6 +43,8 @@
 #include "display/intel_psr.h"
 
 #include "gt/intel_gt.h"
+#include "gt/intel_gt_irq.h"
+#include "gt/intel_rps.h"
 
 #include "i915_drv.h"
 #include "i915_irq.h"
@@ -150,8 +152,8 @@ static const u32 hpd_mcc[HPD_NUM_PINS] = {
 	[HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP
 };
 
-static void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
-			   i915_reg_t iir, i915_reg_t ier)
+void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
+		    i915_reg_t iir, i915_reg_t ier)
 {
 	intel_uncore_write(uncore, imr, 0xffffffff);
 	intel_uncore_posting_read(uncore, imr);
@@ -165,7 +167,7 @@ static void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
 	intel_uncore_posting_read(uncore, iir);
 }
 
-static void gen2_irq_reset(struct intel_uncore *uncore)
+void gen2_irq_reset(struct intel_uncore *uncore)
 {
 	intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
 	intel_uncore_posting_read16(uncore, GEN2_IMR);
@@ -179,19 +181,6 @@ static void gen2_irq_reset(struct intel_uncore *uncore)
 	intel_uncore_posting_read16(uncore, GEN2_IIR);
 }
 
-#define GEN8_IRQ_RESET_NDX(uncore, type, which) \
-({ \
-	unsigned int which_ = which; \
-	gen3_irq_reset((uncore), GEN8_##type##_IMR(which_), \
-		       GEN8_##type##_IIR(which_), GEN8_##type##_IER(which_)); \
-})
-
-#define GEN3_IRQ_RESET(uncore, type) \
-	gen3_irq_reset((uncore), type##IMR, type##IIR, type##IER)
-
-#define GEN2_IRQ_RESET(uncore) \
-	gen2_irq_reset(uncore)
-
 /*
  * We should clear IMR at preinstall/uninstall, and just check at postinstall.
  */
@@ -225,10 +214,10 @@ static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
 	intel_uncore_posting_read16(uncore, GEN2_IIR);
 }
 
-static void gen3_irq_init(struct intel_uncore *uncore,
-			  i915_reg_t imr, u32 imr_val,
-			  i915_reg_t ier, u32 ier_val,
-			  i915_reg_t iir)
+void gen3_irq_init(struct intel_uncore *uncore,
+		   i915_reg_t imr, u32 imr_val,
+		   i915_reg_t ier, u32 ier_val,
+		   i915_reg_t iir)
 {
 	gen3_assert_iir_is_zero(uncore, iir);
 
@@ -237,8 +226,8 @@ static void gen3_irq_init(struct intel_uncore *uncore,
 	intel_uncore_posting_read(uncore, imr);
 }
 
-static void gen2_irq_init(struct intel_uncore *uncore,
-			  u32 imr_val, u32 ier_val)
+void gen2_irq_init(struct intel_uncore *uncore,
+		   u32 imr_val, u32 ier_val)
 {
 	gen2_assert_iir_is_zero(uncore);
 
@@ -265,9 +254,6 @@ static void gen2_irq_init(struct intel_uncore *uncore,
 #define GEN2_IRQ_INIT(uncore, imr_val, ier_val) \
 	gen2_irq_init((uncore), imr_val, ier_val)
 
-static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
-static void guc_irq_handler(struct intel_guc *guc, u16 guc_iir);
-
 /* For display hotplug interrupt */
 static inline void
 i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
@@ -306,41 +292,6 @@ void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
 	spin_unlock_irq(&dev_priv->irq_lock);
 }
 
-static u32
-gen11_gt_engine_identity(struct intel_gt *gt,
-			 const unsigned int bank, const unsigned int bit);
-
-static bool gen11_reset_one_iir(struct intel_gt *gt,
-				const unsigned int bank,
-				const unsigned int bit)
-{
-	void __iomem * const regs = gt->uncore->regs;
-	u32 dw;
-
-	lockdep_assert_held(&gt->i915->irq_lock);
-
-	dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
-	if (dw & BIT(bit)) {
-		/*
-		 * According to the BSpec, DW_IIR bits cannot be cleared without
-		 * first servicing the Selector & Shared IIR registers.
-		 */
-		gen11_gt_engine_identity(gt, bank, bit);
-
-		/*
-		 * We locked GT INT DW by reading it. If we want to (try
-		 * to) recover from this succesfully, we need to clear
-		 * our bit, otherwise we are locking the register for
-		 * everybody.
-		 */
-		raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));
-
-		return true;
-	}
-
-	return false;
-}
-
 /**
  * ilk_update_display_irq - update DEIMR
  * @dev_priv: driver private
@@ -371,326 +322,6 @@ void ilk_update_display_irq(struct drm_i915_private *dev_priv,
 	}
 }
 
-/**
- * ilk_update_gt_irq - update GTIMR
- * @dev_priv: driver private
- * @interrupt_mask: mask of interrupt bits to update
- * @enabled_irq_mask: mask of interrupt bits to enable
- */
-static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
-			      u32 interrupt_mask,
-			      u32 enabled_irq_mask)
-{
-	lockdep_assert_held(&dev_priv->irq_lock);
-
-	WARN_ON(enabled_irq_mask & ~interrupt_mask);
-
-	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
-		return;
-
-	dev_priv->gt_irq_mask &= ~interrupt_mask;
-	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
-	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-}
-
-void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
-{
-	ilk_update_gt_irq(dev_priv, mask, mask);
-	intel_uncore_posting_read_fw(&dev_priv->uncore, GTIMR);
-}
-
-void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
-{
-	ilk_update_gt_irq(dev_priv, mask, 0);
-}
-
-static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
-{
-	WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11);
-
-	return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
-}
-
-static void write_pm_imr(struct intel_gt *gt)
-{
-	struct drm_i915_private *i915 = gt->i915;
-	struct intel_uncore *uncore = gt->uncore;
-	u32 mask = gt->pm_imr;
-	i915_reg_t reg;
-
-	if (INTEL_GEN(i915) >= 11) {
-		reg = GEN11_GPM_WGBOXPERF_INTR_MASK;
-		/* pm is in upper half */
-		mask = mask << 16;
-	} else if (INTEL_GEN(i915) >= 8) {
-		reg = GEN8_GT_IMR(2);
-	} else {
-		reg = GEN6_PMIMR;
-	}
-
-	intel_uncore_write(uncore, reg, mask);
-	intel_uncore_posting_read(uncore, reg);
-}
-
-static void write_pm_ier(struct intel_gt *gt)
-{
-	struct drm_i915_private *i915 = gt->i915;
-	struct intel_uncore *uncore = gt->uncore;
-	u32 mask = gt->pm_ier;
-	i915_reg_t reg;
-
-	if (INTEL_GEN(i915) >= 11) {
-		reg = GEN11_GPM_WGBOXPERF_INTR_ENABLE;
-		/* pm is in upper half */
-		mask = mask << 16;
-	} else if (INTEL_GEN(i915) >= 8) {
-		reg = GEN8_GT_IER(2);
-	} else {
-		reg = GEN6_PMIER;
-	}
-
-	intel_uncore_write(uncore, reg, mask);
-}
-
-/**
- * snb_update_pm_irq - update GEN6_PMIMR
- * @gt: gt for the interrupts
- * @interrupt_mask: mask of interrupt bits to update
- * @enabled_irq_mask: mask of interrupt bits to enable
- */
-static void snb_update_pm_irq(struct intel_gt *gt,
-			      u32 interrupt_mask,
-			      u32 enabled_irq_mask)
-{
-	u32 new_val;
-
-	WARN_ON(enabled_irq_mask & ~interrupt_mask);
-
-	lockdep_assert_held(&gt->i915->irq_lock);
-
-	new_val = gt->pm_imr;
-	new_val &= ~interrupt_mask;
-	new_val |= (~enabled_irq_mask & interrupt_mask);
-
-	if (new_val != gt->pm_imr) {
-		gt->pm_imr = new_val;
-		write_pm_imr(gt);
-	}
-}
-
-void gen6_unmask_pm_irq(struct intel_gt *gt, u32 mask)
-{
-	if (WARN_ON(!intel_irqs_enabled(gt->i915)))
-		return;
-
-	snb_update_pm_irq(gt, mask, mask);
-}
-
-static void __gen6_mask_pm_irq(struct intel_gt *gt, u32 mask)
-{
-	snb_update_pm_irq(gt, mask, 0);
-}
-
-void gen6_mask_pm_irq(struct intel_gt *gt, u32 mask)
-{
-	if (WARN_ON(!intel_irqs_enabled(gt->i915)))
-		return;
-
-	__gen6_mask_pm_irq(gt, mask);
-}
-
-static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
-{
-	i915_reg_t reg = gen6_pm_iir(dev_priv);
-
-	lockdep_assert_held(&dev_priv->irq_lock);
-
-	I915_WRITE(reg, reset_mask);
-	I915_WRITE(reg, reset_mask);
-	POSTING_READ(reg);
-}
-
-static void gen6_enable_pm_irq(struct intel_gt *gt, u32 enable_mask)
-{
-	lockdep_assert_held(&gt->i915->irq_lock);
-
-	gt->pm_ier |= enable_mask;
-	write_pm_ier(gt);
-	gen6_unmask_pm_irq(gt, enable_mask);
-	/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
-}
-
-static void gen6_disable_pm_irq(struct intel_gt *gt, u32 disable_mask)
-{
-	lockdep_assert_held(&gt->i915->irq_lock);
-
-	gt->pm_ier &= ~disable_mask;
-	__gen6_mask_pm_irq(gt, disable_mask);
-	write_pm_ier(gt);
-	/* though a barrier is missing here, we don't really need one */
-}
-
-void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
-{
-	spin_lock_irq(&dev_priv->irq_lock);
-
-	while (gen11_reset_one_iir(&dev_priv->gt, 0, GEN11_GTPM))
-		;
-
-	dev_priv->gt_pm.rps.pm_iir = 0;
-
-	spin_unlock_irq(&dev_priv->irq_lock);
-}
-
-void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
-{
-	spin_lock_irq(&dev_priv->irq_lock);
-	gen6_reset_pm_iir(dev_priv, GEN6_PM_RPS_EVENTS);
-	dev_priv->gt_pm.rps.pm_iir = 0;
-	spin_unlock_irq(&dev_priv->irq_lock);
-}
-
-void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
-{
-	struct intel_gt *gt = &dev_priv->gt;
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
-	if (READ_ONCE(rps->interrupts_enabled))
-		return;
-
-	spin_lock_irq(&dev_priv->irq_lock);
-	WARN_ON_ONCE(rps->pm_iir);
-
-	if (INTEL_GEN(dev_priv) >= 11)
-		WARN_ON_ONCE(gen11_reset_one_iir(gt, 0, GEN11_GTPM));
-	else
-		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
-
-	rps->interrupts_enabled = true;
-	gen6_enable_pm_irq(gt, dev_priv->pm_rps_events);
-
-	spin_unlock_irq(&dev_priv->irq_lock);
-}
-
-void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
-{
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
-	if (!READ_ONCE(rps->interrupts_enabled))
-		return;
-
-	spin_lock_irq(&dev_priv->irq_lock);
-	rps->interrupts_enabled = false;
-
-	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
-
-	gen6_disable_pm_irq(&dev_priv->gt, GEN6_PM_RPS_EVENTS);
-
-	spin_unlock_irq(&dev_priv->irq_lock);
-	intel_synchronize_irq(dev_priv);
-
-	/* Now that we will not be generating any more work, flush any
-	 * outstanding tasks. As we are called on the RPS idle path,
-	 * we will reset the GPU to minimum frequencies, so the current
-	 * state of the worker can be discarded.
-	 */
-	cancel_work_sync(&rps->work);
-	if (INTEL_GEN(dev_priv) >= 11)
-		gen11_reset_rps_interrupts(dev_priv);
-	else
-		gen6_reset_rps_interrupts(dev_priv);
-}
-
-void gen9_reset_guc_interrupts(struct intel_guc *guc)
-{
-	struct intel_gt *gt = guc_to_gt(guc);
-	struct drm_i915_private *i915 = gt->i915;
-
-	assert_rpm_wakelock_held(&i915->runtime_pm);
-
-	spin_lock_irq(&i915->irq_lock);
-	gen6_reset_pm_iir(i915, gt->pm_guc_events);
-	spin_unlock_irq(&i915->irq_lock);
-}
-
-void gen9_enable_guc_interrupts(struct intel_guc *guc)
-{
-	struct intel_gt *gt = guc_to_gt(guc);
-	struct drm_i915_private *i915 = gt->i915;
-
-	assert_rpm_wakelock_held(&i915->runtime_pm);
-
-	spin_lock_irq(&i915->irq_lock);
-	if (!guc->interrupts.enabled) {
-		WARN_ON_ONCE(intel_uncore_read(gt->uncore, gen6_pm_iir(i915)) &
-			     gt->pm_guc_events);
-		guc->interrupts.enabled = true;
-		gen6_enable_pm_irq(gt, gt->pm_guc_events);
-	}
-	spin_unlock_irq(&i915->irq_lock);
-}
-
-void gen9_disable_guc_interrupts(struct intel_guc *guc)
-{
-	struct intel_gt *gt = guc_to_gt(guc);
-	struct drm_i915_private *i915 = gt->i915;
-
-	assert_rpm_wakelock_held(&i915->runtime_pm);
-
-	spin_lock_irq(&i915->irq_lock);
-	guc->interrupts.enabled = false;
-
-	gen6_disable_pm_irq(gt, gt->pm_guc_events);
-
-	spin_unlock_irq(&i915->irq_lock);
-	intel_synchronize_irq(i915);
-
-	gen9_reset_guc_interrupts(guc);
-}
-
-void gen11_reset_guc_interrupts(struct intel_guc *guc)
-{
-	struct intel_gt *gt = guc_to_gt(guc);
-	struct drm_i915_private *i915 = gt->i915;
-
-	spin_lock_irq(&i915->irq_lock);
-	gen11_reset_one_iir(gt, 0, GEN11_GUC);
-	spin_unlock_irq(&i915->irq_lock);
-}
-
-void gen11_enable_guc_interrupts(struct intel_guc *guc)
-{
-	struct intel_gt *gt = guc_to_gt(guc);
-
-	spin_lock_irq(&gt->i915->irq_lock);
-	if (!guc->interrupts.enabled) {
-		u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);
-
-		WARN_ON_ONCE(gen11_reset_one_iir(gt, 0, GEN11_GUC));
-		intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, events);
-		intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~events);
-		guc->interrupts.enabled = true;
-	}
-	spin_unlock_irq(&gt->i915->irq_lock);
-}
-
-void gen11_disable_guc_interrupts(struct intel_guc *guc)
-{
-	struct intel_gt *gt = guc_to_gt(guc);
-	struct drm_i915_private *i915 = gt->i915;
-
-	spin_lock_irq(&i915->irq_lock);
-	guc->interrupts.enabled = false;
-
-	intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0);
-	intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
-
-	spin_unlock_irq(&i915->irq_lock);
-	intel_synchronize_irq(i915);
-
-	gen11_reset_guc_interrupts(guc);
-}
-
 /**
  * bdw_update_port_irq - update DE port interrupt
  * @dev_priv: driver private
@@ -1262,198 +893,6 @@ int intel_get_crtc_scanline(struct intel_crtc *crtc)
 	return position;
 }
 
-static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
-{
-	struct intel_uncore *uncore = &dev_priv->uncore;
-	u32 busy_up, busy_down, max_avg, min_avg;
-	u8 new_delay;
-
-	spin_lock(&mchdev_lock);
-
-	intel_uncore_write16(uncore,
-			     MEMINTRSTS,
-			     intel_uncore_read(uncore, MEMINTRSTS));
-
-	new_delay = dev_priv->ips.cur_delay;
-
-	intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
-	busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG);
-	busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG);
-	max_avg = intel_uncore_read(uncore, RCBMAXAVG);
-	min_avg = intel_uncore_read(uncore, RCBMINAVG);
-
-	/* Handle RCS change request from hw */
-	if (busy_up > max_avg) {
-		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
-			new_delay = dev_priv->ips.cur_delay - 1;
-		if (new_delay < dev_priv->ips.max_delay)
-			new_delay = dev_priv->ips.max_delay;
-	} else if (busy_down < min_avg) {
-		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
-			new_delay = dev_priv->ips.cur_delay + 1;
-		if (new_delay > dev_priv->ips.min_delay)
-			new_delay = dev_priv->ips.min_delay;
-	}
-
-	if (ironlake_set_drps(dev_priv, new_delay))
-		dev_priv->ips.cur_delay = new_delay;
-
-	spin_unlock(&mchdev_lock);
-
-	return;
-}
-
-static void vlv_c0_read(struct drm_i915_private *dev_priv,
-			struct intel_rps_ei *ei)
-{
-	ei->ktime = ktime_get_raw();
-	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
-	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
-}
-
-void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
-{
-	memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei));
-}
-
-static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
-{
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-	const struct intel_rps_ei *prev = &rps->ei;
-	struct intel_rps_ei now;
-	u32 events = 0;
-
-	if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
-		return 0;
-
-	vlv_c0_read(dev_priv, &now);
-
-	if (prev->ktime) {
-		u64 time, c0;
-		u32 render, media;
-
-		time = ktime_us_delta(now.ktime, prev->ktime);
-
-		time *= dev_priv->czclk_freq;
-
-		/* Workload can be split between render + media,
-		 * e.g. SwapBuffers being blitted in X after being rendered in
-		 * mesa. To account for this we need to combine both engines
-		 * into our activity counter.
-		 */
-		render = now.render_c0 - prev->render_c0;
-		media = now.media_c0 - prev->media_c0;
-		c0 = max(render, media);
-		c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */
-
-		if (c0 > time * rps->power.up_threshold)
-			events = GEN6_PM_RP_UP_THRESHOLD;
-		else if (c0 < time * rps->power.down_threshold)
-			events = GEN6_PM_RP_DOWN_THRESHOLD;
-	}
-
-	rps->ei = now;
-	return events;
-}
-
-static void gen6_pm_rps_work(struct work_struct *work)
-{
-	struct drm_i915_private *dev_priv =
-		container_of(work, struct drm_i915_private, gt_pm.rps.work);
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-	bool client_boost = false;
-	int new_delay, adj, min, max;
-	u32 pm_iir = 0;
-
-	spin_lock_irq(&dev_priv->irq_lock);
-	if (rps->interrupts_enabled) {
-		pm_iir = fetch_and_zero(&rps->pm_iir);
-		client_boost = atomic_read(&rps->num_waiters);
-	}
-	spin_unlock_irq(&dev_priv->irq_lock);
-
-	/* Make sure we didn't queue anything we're not going to process. */
-	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
-	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
-		goto out;
-
-	mutex_lock(&rps->lock);
-
-	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
-
-	adj = rps->last_adj;
-	new_delay = rps->cur_freq;
-	min = rps->min_freq_softlimit;
-	max = rps->max_freq_softlimit;
-	if (client_boost)
-		max = rps->max_freq;
-	if (client_boost && new_delay < rps->boost_freq) {
-		new_delay = rps->boost_freq;
-		adj = 0;
-	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
-		if (adj > 0)
-			adj *= 2;
-		else /* CHV needs even encode values */
-			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
-
-		if (new_delay >= rps->max_freq_softlimit)
-			adj = 0;
-	} else if (client_boost) {
-		adj = 0;
-	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
-		if (rps->cur_freq > rps->efficient_freq)
-			new_delay = rps->efficient_freq;
-		else if (rps->cur_freq > rps->min_freq_softlimit)
-			new_delay = rps->min_freq_softlimit;
-		adj = 0;
-	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
-		if (adj < 0)
-			adj *= 2;
-		else /* CHV needs even encode values */
-			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
-
-		if (new_delay <= rps->min_freq_softlimit)
-			adj = 0;
-	} else { /* unknown event */
-		adj = 0;
-	}
-
-	rps->last_adj = adj;
-
-	/*
-	 * Limit deboosting and boosting to keep ourselves at the extremes
-	 * when in the respective power modes (i.e. slowly decrease frequencies
-	 * while in the HIGH_POWER zone and slowly increase frequencies while
-	 * in the LOW_POWER zone). On idle, we will hit the timeout and drop
-	 * to the next level quickly, and conversely if busy we expect to
-	 * hit a waitboost and rapidly switch into max power.
-	 */
-	if ((adj < 0 && rps->power.mode == HIGH_POWER) ||
-	    (adj > 0 && rps->power.mode == LOW_POWER))
-		rps->last_adj = 0;
-
-	/* sysfs frequency interfaces may have snuck in while servicing the
-	 * interrupt
-	 */
-	new_delay += adj;
-	new_delay = clamp_t(int, new_delay, min, max);
-
-	if (intel_set_rps(dev_priv, new_delay)) {
-		DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
-		rps->last_adj = 0;
-	}
-
-	mutex_unlock(&rps->lock);
-
-out:
-	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
-	spin_lock_irq(&dev_priv->irq_lock);
-	if (rps->interrupts_enabled)
-		gen6_unmask_pm_irq(&dev_priv->gt, dev_priv->pm_rps_events);
-	spin_unlock_irq(&dev_priv->irq_lock);
-}
-
-
 /**
  * ivybridge_parity_work - Workqueue called when a parity error interrupt
  * occurred.
@@ -1467,6 +906,7 @@ static void ivybridge_parity_work(struct work_struct *work)
 {
 	struct drm_i915_private *dev_priv =
 		container_of(work, typeof(*dev_priv), l3_parity.error_work);
+	struct intel_gt *gt = &dev_priv->gt;
 	u32 error_status, row, bank, subbank;
 	char *parity_event[6];
 	u32 misccpctl;
@@ -1528,144 +968,13 @@ static void ivybridge_parity_work(struct work_struct *work)
 
 out:
 	WARN_ON(dev_priv->l3_parity.which_slice);
-	spin_lock_irq(&dev_priv->irq_lock);
-	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
-	spin_unlock_irq(&dev_priv->irq_lock);
+	spin_lock_irq(&gt->irq_lock);
+	gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
+	spin_unlock_irq(&gt->irq_lock);
 
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 }
 
-static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
-					       u32 iir)
-{
-	if (!HAS_L3_DPF(dev_priv))
-		return;
-
-	spin_lock(&dev_priv->irq_lock);
-	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
-	spin_unlock(&dev_priv->irq_lock);
-
-	iir &= GT_PARITY_ERROR(dev_priv);
-	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
-		dev_priv->l3_parity.which_slice |= 1 << 1;
-
-	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
-		dev_priv->l3_parity.which_slice |= 1 << 0;
-
-	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
-}
-
-static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
-			       u32 gt_iir)
-{
-	if (gt_iir & GT_RENDER_USER_INTERRUPT)
-		intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
-	if (gt_iir & ILK_BSD_USER_INTERRUPT)
-		intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
-}
-
-static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
-			       u32 gt_iir)
-{
-	if (gt_iir & GT_RENDER_USER_INTERRUPT)
-		intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
-	if (gt_iir & GT_BSD_USER_INTERRUPT)
-		intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
-	if (gt_iir & GT_BLT_USER_INTERRUPT)
-		intel_engine_breadcrumbs_irq(dev_priv->engine[BCS0]);
-
-	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
-		      GT_BSD_CS_ERROR_INTERRUPT |
-		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
-		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
-
-	if (gt_iir & GT_PARITY_ERROR(dev_priv))
-		ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
-}
-
-static void
-gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
-{
-	bool tasklet = false;
-
-	if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
-		tasklet = true;
-
-	if (iir & GT_RENDER_USER_INTERRUPT) {
-		intel_engine_breadcrumbs_irq(engine);
-		tasklet |= intel_engine_needs_breadcrumb_tasklet(engine);
-	}
-
-	if (tasklet)
-		tasklet_hi_schedule(&engine->execlists.tasklet);
-}
-
-static void gen8_gt_irq_ack(struct drm_i915_private *i915,
-			    u32 master_ctl, u32 gt_iir[4])
-{
-	void __iomem * const regs = i915->uncore.regs;
-
-#define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
-		      GEN8_GT_BCS_IRQ | \
-		      GEN8_GT_VCS0_IRQ | \
-		      GEN8_GT_VCS1_IRQ | \
-		      GEN8_GT_VECS_IRQ | \
-		      GEN8_GT_PM_IRQ | \
-		      GEN8_GT_GUC_IRQ)
-
-	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
-		gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0));
-		if (likely(gt_iir[0]))
-			raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
-	}
-
-	if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
-		gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
-		if (likely(gt_iir[1]))
-			raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
-	}
-
-	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
-		gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2));
-		if (likely(gt_iir[2]))
-			raw_reg_write(regs, GEN8_GT_IIR(2), gt_iir[2]);
-	}
-
-	if (master_ctl & GEN8_GT_VECS_IRQ) {
-		gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3));
-		if (likely(gt_iir[3]))
-			raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]);
-	}
-}
-
-static void gen8_gt_irq_handler(struct drm_i915_private *i915,
-				u32 master_ctl, u32 gt_iir[4])
-{
-	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
-		gen8_cs_irq_handler(i915->engine[RCS0],
-				    gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
-		gen8_cs_irq_handler(i915->engine[BCS0],
-				    gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
-	}
-
-	if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
-		gen8_cs_irq_handler(i915->engine[VCS0],
-				    gt_iir[1] >> GEN8_VCS0_IRQ_SHIFT);
-		gen8_cs_irq_handler(i915->engine[VCS1],
-				    gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT);
-	}
-
-	if (master_ctl & GEN8_GT_VECS_IRQ) {
-		gen8_cs_irq_handler(i915->engine[VECS0],
-				    gt_iir[3] >> GEN8_VECS_IRQ_SHIFT);
-	}
-
-	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
-		gen6_rps_irq_handler(i915, gt_iir[2]);
-		guc_irq_handler(&i915->gt.uc.guc, gt_iir[2] >> 16);
-	}
-}
-
 static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
 {
 	switch (pin) {
@@ -1913,60 +1222,6 @@ static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
 				     res1, res2);
 }
 
-/* The RPS events need forcewake, so we add them to a work queue and mask their
- * IMR bits until the work is done. Other interrupts can be processed without
- * the work queue. */
-static void gen11_rps_irq_handler(struct intel_gt *gt, u32 pm_iir)
-{
-	struct drm_i915_private *i915 = gt->i915;
-	struct intel_rps *rps = &i915->gt_pm.rps;
-	const u32 events = i915->pm_rps_events & pm_iir;
-
-	lockdep_assert_held(&i915->irq_lock);
-
-	if (unlikely(!events))
-		return;
-
-	gen6_mask_pm_irq(gt, events);
-
-	if (!rps->interrupts_enabled)
-		return;
-
-	rps->pm_iir |= events;
-	schedule_work(&rps->work);
-}
-
-static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
-{
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
-	if (pm_iir & dev_priv->pm_rps_events) {
-		spin_lock(&dev_priv->irq_lock);
-		gen6_mask_pm_irq(&dev_priv->gt,
-				 pm_iir & dev_priv->pm_rps_events);
-		if (rps->interrupts_enabled) {
-			rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
-			schedule_work(&rps->work);
-		}
-		spin_unlock(&dev_priv->irq_lock);
-	}
-
-	if (INTEL_GEN(dev_priv) >= 8)
-		return;
-
-	if (pm_iir & PM_VEBOX_USER_INTERRUPT)
-		intel_engine_breadcrumbs_irq(dev_priv->engine[VECS0]);
-
-	if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
-		DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
-}
-
-static void guc_irq_handler(struct intel_guc *guc, u16 iir)
-{
-	if (iir & GUC_INTR_GUC2HOST)
-		intel_guc_to_host_event_handler(guc);
-}
-
 static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
 {
 	enum pipe pipe;
@@ -2274,9 +1529,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 		I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
 
 		if (gt_iir)
-			snb_gt_irq_handler(dev_priv, gt_iir);
+			gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
 		if (pm_iir)
-			gen6_rps_irq_handler(dev_priv, pm_iir);
+			gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir);
 
 		if (hotplug_status)
 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
@@ -2332,7 +1587,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
 		ier = I915_READ(VLV_IER);
 		I915_WRITE(VLV_IER, 0);
 
-		gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
+		gen8_gt_irq_ack(&dev_priv->gt, master_ctl, gt_iir);
 
 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
@@ -2356,7 +1611,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
 		I915_WRITE(VLV_IER, ier);
 		I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
 
-		gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
+		gen8_gt_irq_handler(&dev_priv->gt, master_ctl, gt_iir);
 
 		if (hotplug_status)
 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
@@ -2656,7 +1911,7 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
 	}
 
 	if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
-		ironlake_rps_change_irq_handler(dev_priv);
+		gen5_rps_irq_handler(&dev_priv->gt.rps);
 }
 
 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
@@ -2741,9 +1996,9 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 		I915_WRITE(GTIIR, gt_iir);
 		ret = IRQ_HANDLED;
 		if (INTEL_GEN(dev_priv) >= 6)
-			snb_gt_irq_handler(dev_priv, gt_iir);
+			gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
 		else
-			ilk_gt_irq_handler(dev_priv, gt_iir);
+			gen5_gt_irq_handler(&dev_priv->gt, gt_iir);
 	}
 
 	de_iir = I915_READ(DEIIR);
@@ -2761,7 +2016,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 		if (pm_iir) {
 			I915_WRITE(GEN6_PMIIR, pm_iir);
 			ret = IRQ_HANDLED;
-			gen6_rps_irq_handler(dev_priv, pm_iir);
+			gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir);
 		}
 	}
 
@@ -3035,7 +2290,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
 	}
 
 	/* Find, clear, then process each source of interrupt */
-	gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
+	gen8_gt_irq_ack(&dev_priv->gt, master_ctl, gt_iir);
 
 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
 	if (master_ctl & ~GEN8_GT_IRQS) {
@@ -3046,134 +2301,11 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
 
 	gen8_master_intr_enable(regs);
 
-	gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
+	gen8_gt_irq_handler(&dev_priv->gt, master_ctl, gt_iir);
 
 	return IRQ_HANDLED;
 }
 
-static u32
-gen11_gt_engine_identity(struct intel_gt *gt,
-			 const unsigned int bank, const unsigned int bit)
-{
-	void __iomem * const regs = gt->uncore->regs;
-	u32 timeout_ts;
-	u32 ident;
-
-	lockdep_assert_held(&gt->i915->irq_lock);
-
-	raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));
-
-	/*
-	 * NB: Specs do not specify how long to spin wait,
-	 * so we do ~100us as an educated guess.
-	 */
-	timeout_ts = (local_clock() >> 10) + 100;
-	do {
-		ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
-	} while (!(ident & GEN11_INTR_DATA_VALID) &&
-		 !time_after32(local_clock() >> 10, timeout_ts));
-
-	if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
-		DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
-			  bank, bit, ident);
-		return 0;
-	}
-
-	raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
-		      GEN11_INTR_DATA_VALID);
-
-	return ident;
-}
-
-static void
-gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
-			const u16 iir)
-{
-	if (instance == OTHER_GUC_INSTANCE)
-		return guc_irq_handler(&gt->uc.guc, iir);
-
-	if (instance == OTHER_GTPM_INSTANCE)
-		return gen11_rps_irq_handler(gt, iir);
-
-	WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
-		  instance, iir);
-}
-
-static void
-gen11_engine_irq_handler(struct intel_gt *gt, const u8 class,
-			 const u8 instance, const u16 iir)
-{
-	struct intel_engine_cs *engine;
-
-	if (instance <= MAX_ENGINE_INSTANCE)
-		engine = gt->engine_class[class][instance];
-	else
-		engine = NULL;
-
-	if (likely(engine))
-		return gen8_cs_irq_handler(engine, iir);
-
-	WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
-		  class, instance);
-}
-
-static void
-gen11_gt_identity_handler(struct intel_gt *gt, const u32 identity)
-{
-	const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
-	const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
-	const u16 intr = GEN11_INTR_ENGINE_INTR(identity);
-
-	if (unlikely(!intr))
-		return;
-
-	if (class <= COPY_ENGINE_CLASS)
-		return gen11_engine_irq_handler(gt, class, instance, intr);
-
-	if (class == OTHER_CLASS)
-		return gen11_other_irq_handler(gt, instance, intr);
-
-	WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
-		  class, instance, intr);
-}
-
-static void
-gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank)
-{
-	void __iomem * const regs = gt->uncore->regs;
-	unsigned long intr_dw;
-	unsigned int bit;
-
-	lockdep_assert_held(&gt->i915->irq_lock);
-
-	intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
-
-	for_each_set_bit(bit, &intr_dw, 32) {
-		const u32 ident = gen11_gt_engine_identity(gt, bank, bit);
-
-		gen11_gt_identity_handler(gt, ident);
-	}
-
-	/* Clear must be after shared has been served for engine */
-	raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
-}
-
-static void
-gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl)
-{
-	struct drm_i915_private *i915 = gt->i915;
-	unsigned int bank;
-
-	spin_lock(&i915->irq_lock);
-
-	for (bank = 0; bank < 2; bank++) {
-		if (master_ctl & GEN11_GT_DW_IRQ(bank))
-			gen11_gt_bank_handler(gt, bank);
-	}
-
-	spin_unlock(&i915->irq_lock);
-}
-
 static u32
 gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl)
 {
@@ -3482,15 +2614,6 @@ static void ibx_irq_pre_postinstall(struct drm_i915_private *dev_priv)
 	POSTING_READ(SDEIER);
 }
 
-static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
-{
-	struct intel_uncore *uncore = &dev_priv->uncore;
-
-	GEN3_IRQ_RESET(uncore, GT);
-	if (INTEL_GEN(dev_priv) >= 6)
-		GEN3_IRQ_RESET(uncore, GEN6_PM);
-}
-
 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
 {
 	struct intel_uncore *uncore = &dev_priv->uncore;
@@ -3555,7 +2678,7 @@ static void ironlake_irq_reset(struct drm_i915_private *dev_priv)
 		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
 	}
 
-	gen5_gt_irq_reset(dev_priv);
+	gen5_gt_irq_reset(&dev_priv->gt);
 
 	ibx_irq_reset(dev_priv);
 }
@@ -3565,7 +2688,7 @@ static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
 	I915_WRITE(VLV_MASTER_IER, 0);
 	POSTING_READ(VLV_MASTER_IER);
 
-	gen5_gt_irq_reset(dev_priv);
+	gen5_gt_irq_reset(&dev_priv->gt);
 
 	spin_lock_irq(&dev_priv->irq_lock);
 	if (dev_priv->display_irqs_enabled)
@@ -3573,16 +2696,6 @@ static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
 	spin_unlock_irq(&dev_priv->irq_lock);
 }
 
-static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
-{
-	struct intel_uncore *uncore = &dev_priv->uncore;
-
-	GEN8_IRQ_RESET_NDX(uncore, GT, 0);
-	GEN8_IRQ_RESET_NDX(uncore, GT, 1);
-	GEN8_IRQ_RESET_NDX(uncore, GT, 2);
-	GEN8_IRQ_RESET_NDX(uncore, GT, 3);
-}
-
 static void gen8_irq_reset(struct drm_i915_private *dev_priv)
 {
 	struct intel_uncore *uncore = &dev_priv->uncore;
@@ -3590,7 +2703,7 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv)
 
 	gen8_master_intr_disable(dev_priv->uncore.regs);
 
-	gen8_gt_irq_reset(dev_priv);
+	gen8_gt_irq_reset(&dev_priv->gt);
 
 	intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
 	intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
@@ -3608,27 +2721,6 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv)
 		ibx_irq_reset(dev_priv);
 }
 
-static void gen11_gt_irq_reset(struct intel_gt *gt)
-{
-	struct intel_uncore *uncore = gt->uncore;
-
-	/* Disable RCS, BCS, VCS and VECS class engines. */
-	intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0);
-	intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE,	  0);
-
-	/* Restore masks irqs on RCS, BCS, VCS and VECS engines. */
-	intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK,	~0);
-	intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK,	~0);
-	intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK,	~0);
-	intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK,	~0);
-	intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK,	~0);
-
-	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
-	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);
-	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
-	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK,  ~0);
-}
-
 static void gen11_irq_reset(struct drm_i915_private *dev_priv)
 {
 	struct intel_uncore *uncore = &dev_priv->uncore;
@@ -3710,7 +2802,7 @@ static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
 	I915_WRITE(GEN8_MASTER_IRQ, 0);
 	POSTING_READ(GEN8_MASTER_IRQ);
 
-	gen8_gt_irq_reset(dev_priv);
+	gen8_gt_irq_reset(&dev_priv->gt);
 
 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
 
@@ -3993,44 +3085,6 @@ static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
 		spt_hpd_detection_setup(dev_priv);
 }
 
-static void gen5_gt_irq_postinstall(struct drm_i915_private *dev_priv)
-{
-	struct intel_uncore *uncore = &dev_priv->uncore;
-	u32 pm_irqs, gt_irqs;
-
-	pm_irqs = gt_irqs = 0;
-
-	dev_priv->gt_irq_mask = ~0;
-	if (HAS_L3_DPF(dev_priv)) {
-		/* L3 parity interrupt is always unmasked. */
-		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv);
-		gt_irqs |= GT_PARITY_ERROR(dev_priv);
-	}
-
-	gt_irqs |= GT_RENDER_USER_INTERRUPT;
-	if (IS_GEN(dev_priv, 5)) {
-		gt_irqs |= ILK_BSD_USER_INTERRUPT;
-	} else {
-		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
-	}
-
-	GEN3_IRQ_INIT(uncore, GT, dev_priv->gt_irq_mask, gt_irqs);
-
-	if (INTEL_GEN(dev_priv) >= 6) {
-		/*
-		 * RPS interrupts will get enabled/disabled on demand when RPS
-		 * itself is enabled/disabled.
-		 */
-		if (HAS_ENGINE(dev_priv, VECS0)) {
-			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
-			dev_priv->gt.pm_ier |= PM_VEBOX_USER_INTERRUPT;
-		}
-
-		dev_priv->gt.pm_imr = 0xffffffff;
-		GEN3_IRQ_INIT(uncore, GEN6_PM, dev_priv->gt.pm_imr, pm_irqs);
-	}
-}
-
 static void ironlake_irq_postinstall(struct drm_i915_private *dev_priv)
 {
 	struct intel_uncore *uncore = &dev_priv->uncore;
@@ -4064,7 +3118,7 @@ static void ironlake_irq_postinstall(struct drm_i915_private *dev_priv)
 	GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
 		      display_mask | extra_mask);
 
-	gen5_gt_irq_postinstall(dev_priv);
+	gen5_gt_irq_postinstall(&dev_priv->gt);
 
 	ilk_hpd_detection_setup(dev_priv);
 
@@ -4113,7 +3167,7 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
 
 static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
 {
-	gen5_gt_irq_postinstall(dev_priv);
+	gen5_gt_irq_postinstall(&dev_priv->gt);
 
 	spin_lock_irq(&dev_priv->irq_lock);
 	if (dev_priv->display_irqs_enabled)
@@ -4124,41 +3178,6 @@ static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
 	POSTING_READ(VLV_MASTER_IER);
 }
 
-static void gen8_gt_irq_postinstall(struct drm_i915_private *i915)
-{
-	struct intel_gt *gt = &i915->gt;
-	struct intel_uncore *uncore = gt->uncore;
-
-	/* These are interrupts we'll toggle with the ring mask register */
-	u32 gt_interrupts[] = {
-		(GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
-		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
-		 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
-		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT),
-
-		(GT_RENDER_USER_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
-		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
-		 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
-		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT),
-
-		0,
-
-		(GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
-		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)
-	};
-
-	gt->pm_ier = 0x0;
-	gt->pm_imr = ~gt->pm_ier;
-	GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
-	GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
-	/*
-	 * RPS interrupts will get enabled/disabled on demand when RPS itself
-	 * is enabled/disabled. Same will be the case for GuC interrupts.
-	 */
-	GEN8_IRQ_INIT_NDX(uncore, GT, 2, gt->pm_imr, gt->pm_ier);
-	GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
-}
-
 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
 {
 	struct intel_uncore *uncore = &dev_priv->uncore;
@@ -4234,7 +3253,7 @@ static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
 	if (HAS_PCH_SPLIT(dev_priv))
 		ibx_irq_pre_postinstall(dev_priv);
 
-	gen8_gt_irq_postinstall(dev_priv);
+	gen8_gt_irq_postinstall(&dev_priv->gt);
 	gen8_de_irq_postinstall(dev_priv);
 
 	if (HAS_PCH_SPLIT(dev_priv))
@@ -4243,40 +3262,6 @@ static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
 	gen8_master_intr_enable(dev_priv->uncore.regs);
 }
 
-static void gen11_gt_irq_postinstall(struct intel_gt *gt)
-{
-	const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT;
-	struct intel_uncore *uncore = gt->uncore;
-	const u32 dmask = irqs << 16 | irqs;
-	const u32 smask = irqs << 16;
-
-	BUILD_BUG_ON(irqs & 0xffff0000);
-
-	/* Enable RCS, BCS, VCS and VECS class interrupts. */
-	intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask);
-	intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask);
-
-	/* Unmask irqs on RCS, BCS, VCS and VECS engines. */
-	intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~smask);
-	intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~smask);
-	intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~dmask);
-	intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~dmask);
-	intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~dmask);
-
-	/*
-	 * RPS interrupts will get enabled/disabled on demand when RPS itself
-	 * is enabled/disabled.
-	 */
-	gt->pm_ier = 0x0;
-	gt->pm_imr = ~gt->pm_ier;
-	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
-	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);
-
-	/* Same thing for GuC interrupts */
-	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
-	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK,  ~0);
-}
-
 static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
 {
 	u32 mask = SDE_GMBUS_ICP;
@@ -4312,7 +3297,7 @@ static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
 
 static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
 {
-	gen8_gt_irq_postinstall(dev_priv);
+	gen8_gt_irq_postinstall(&dev_priv->gt);
 
 	spin_lock_irq(&dev_priv->irq_lock);
 	if (dev_priv->display_irqs_enabled)
@@ -4748,7 +3733,6 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
 void intel_irq_init(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = &dev_priv->drm;
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
 	int i;
 
 	if (IS_I945GM(dev_priv))
@@ -4756,8 +3740,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
 
 	intel_hpd_init_work(dev_priv);
 
-	INIT_WORK(&rps->work, gen6_pm_rps_work);
-
 	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
 	for (i = 0; i < MAX_L3_SLICES; ++i)
 		dev_priv->l3_parity.remap_info[i] = NULL;
@@ -4766,33 +3748,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
 	if (HAS_GUC_SCHED(dev_priv) && INTEL_GEN(dev_priv) < 11)
 		dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16;
 
-	/* Let's track the enabled rps events */
-	if (IS_VALLEYVIEW(dev_priv))
-		/* WaGsvRC0ResidencyMethod:vlv */
-		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
-	else
-		dev_priv->pm_rps_events = (GEN6_PM_RP_UP_THRESHOLD |
-					   GEN6_PM_RP_DOWN_THRESHOLD |
-					   GEN6_PM_RP_DOWN_TIMEOUT);
-
-	/* We share the register with other engine */
-	if (INTEL_GEN(dev_priv) > 9)
-		GEM_WARN_ON(dev_priv->pm_rps_events & 0xffff0000);
-
-	rps->pm_intrmsk_mbz = 0;
-
-	/*
-	 * SNB,IVB,HSW can while VLV,CHV may hard hang on looping batchbuffer
-	 * if GEN6_PM_UP_EI_EXPIRED is masked.
-	 *
-	 * TODO: verify if this can be reproduced on VLV,CHV.
-	 */
-	if (INTEL_GEN(dev_priv) <= 7)
-		rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
-
-	if (INTEL_GEN(dev_priv) >= 8)
-		rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
-
 	dev->vblank_disable_immediate = true;
 
 	/* Most platforms treat the display irq block as an always-on
diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h
index 8918809cd805..8368c5568b84 100644
--- a/drivers/gpu/drm/i915/i915_irq.h
+++ b/drivers/gpu/drm/i915/i915_irq.h
@@ -76,22 +76,6 @@ ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, u32 bits)
 	ibx_display_interrupt_update(dev_priv, bits, 0);
 }
 
-void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask);
-void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask);
-void gen6_mask_pm_irq(struct intel_gt *gt, u32 mask);
-void gen6_unmask_pm_irq(struct intel_gt *gt, u32 mask);
-void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv);
-void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv);
-void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv);
-void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv);
-void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
-
-static inline u32 gen6_sanitize_rps_pm_mask(const struct drm_i915_private *i915,
-					    u32 mask)
-{
-	return mask & ~i915->gt_pm.rps.pm_intrmsk_mbz;
-}
-
 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv);
 static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
@@ -113,12 +97,6 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
 				     u8 pipe_mask);
 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
 				     u8 pipe_mask);
-void gen9_reset_guc_interrupts(struct intel_guc *guc);
-void gen9_enable_guc_interrupts(struct intel_guc *guc);
-void gen9_disable_guc_interrupts(struct intel_guc *guc);
-void gen11_reset_guc_interrupts(struct intel_guc *guc);
-void gen11_enable_guc_interrupts(struct intel_guc *guc);
-void gen11_disable_guc_interrupts(struct intel_guc *guc);
 
 bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
 			      bool in_vblank_irq, int *vpos, int *hpos,
@@ -139,4 +117,46 @@ void i965_disable_vblank(struct drm_crtc *crtc);
 void ilk_disable_vblank(struct drm_crtc *crtc);
 void bdw_disable_vblank(struct drm_crtc *crtc);
 
+void gen2_irq_reset(struct intel_uncore *uncore);
+void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
+		    i915_reg_t iir, i915_reg_t ier);
+
+void gen2_irq_init(struct intel_uncore *uncore,
+		   u32 imr_val, u32 ier_val);
+void gen3_irq_init(struct intel_uncore *uncore,
+		   i915_reg_t imr, u32 imr_val,
+		   i915_reg_t ier, u32 ier_val,
+		   i915_reg_t iir);
+
+#define GEN8_IRQ_RESET_NDX(uncore, type, which) \
+({ \
+	unsigned int which_ = which; \
+	gen3_irq_reset((uncore), GEN8_##type##_IMR(which_), \
+		       GEN8_##type##_IIR(which_), GEN8_##type##_IER(which_)); \
+})
+
+#define GEN3_IRQ_RESET(uncore, type) \
+	gen3_irq_reset((uncore), type##IMR, type##IIR, type##IER)
+
+#define GEN2_IRQ_RESET(uncore) \
+	gen2_irq_reset(uncore)
+
+#define GEN8_IRQ_INIT_NDX(uncore, type, which, imr_val, ier_val) \
+({ \
+	unsigned int which_ = which; \
+	gen3_irq_init((uncore), \
+		      GEN8_##type##_IMR(which_), imr_val, \
+		      GEN8_##type##_IER(which_), ier_val, \
+		      GEN8_##type##_IIR(which_)); \
+})
+
+#define GEN3_IRQ_INIT(uncore, type, imr_val, ier_val) \
+	gen3_irq_init((uncore), \
+		      type##IMR, imr_val, \
+		      type##IER, ier_val, \
+		      type##IIR)
+
+#define GEN2_IRQ_INIT(uncore, imr_val, ier_val) \
+	gen2_irq_init((uncore), imr_val, ier_val)
+
 #endif /* __I915_IRQ_H__ */
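
For reference, the helpers collected in this header are thin token-pasting
wrappers around gen2_irq_*()/gen3_irq_*(); a purely mechanical sketch of how
two of the call sites touched by this patch expand (the ({ ... }) wrapper that
evaluates "which" only once is omitted for brevity):

	/* GEN3_IRQ_RESET(uncore, GT) expands to: */
	gen3_irq_reset(uncore, GTIMR, GTIIR, GTIER);

	/* GEN8_IRQ_INIT_NDX(uncore, GT, 2, gt->pm_imr, gt->pm_ier) expands to: */
	gen3_irq_init(uncore,
		      GEN8_GT_IMR(2), gt->pm_imr,
		      GEN8_GT_IER(2), gt->pm_ier,
		      GEN8_GT_IIR(2));
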
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 81fbb262807d..ae0b84851c50 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -9,6 +9,8 @@
 
 #include "gt/intel_engine.h"
 #include "gt/intel_engine_user.h"
+#include "gt/intel_rc6.h"
+#include "gt/intel_rps.h"
 
 #include "i915_drv.h"
 #include "i915_pmu.h"
@@ -218,32 +220,34 @@ add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul)
 static void
 frequency_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
 {
+	struct intel_gt *gt = &dev_priv->gt;
+	struct intel_rps *rps = &gt->rps;
+
 	if (dev_priv->pmu.enable &
 	    config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
 		u32 val;
 
-		val = dev_priv->gt_pm.rps.cur_freq;
-		if (dev_priv->gt.awake) {
+		val = rps->cur_freq;
+		if (gt->awake) {
 			intel_wakeref_t wakeref;
 
 			with_intel_runtime_pm_if_in_use(&dev_priv->runtime_pm,
 							wakeref) {
 				val = intel_uncore_read_notrace(&dev_priv->uncore,
 								GEN6_RPSTAT1);
-				val = intel_get_cagf(dev_priv, val);
+				val = intel_get_cagf(rps, val);
 			}
 		}
 
 		add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
-				intel_gpu_freq(dev_priv, val),
+				intel_gpu_freq(rps, val),
 				period_ns / 1000);
 	}
 
 	if (dev_priv->pmu.enable &
 	    config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
 		add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_REQ],
-				intel_gpu_freq(dev_priv,
-					       dev_priv->gt_pm.rps.cur_freq),
+				intel_gpu_freq(rps, rps->cur_freq),
 				period_ns / 1000);
 	}
 }
@@ -426,16 +430,16 @@ static u64 __get_rc6(struct drm_i915_private *i915)
 {
 	u64 val;
 
-	val = intel_rc6_residency_ns(i915,
+	val = intel_rc6_residency_ns(&i915->gt.rc6,
 				     IS_VALLEYVIEW(i915) ?
 				     VLV_GT_RENDER_RC6 :
 				     GEN6_GT_GFX_RC6);
 
 	if (HAS_RC6p(i915))
-		val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6p);
+		val += intel_rc6_residency_ns(&i915->gt.rc6, GEN6_GT_GFX_RC6p);
 
 	if (HAS_RC6pp(i915))
-		val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6pp);
+		val += intel_rc6_residency_ns(&i915->gt.rc6, GEN6_GT_GFX_RC6pp);
 
 	return val;
 }
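
A short aside on the units in frequency_sample() above: each tick accumulates
the current frequency in MHz multiplied by the tick length scaled to
microseconds, so that a later read-out can divide by the total sampling time
to recover an average frequency (the read side lives outside this hunk, so
that last part is an assumption; the numbers below are invented purely for
illustration):

	/* one 10ms tick observed at 300 MHz contributes: */
	add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
			300 /* MHz */, 10000000 /* period_ns */ / 1000);
	/* i.e. 300 * 10000 = 3000000 MHz*us added to the running sample */
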
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 92313a59563c..e367bf5f9288 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -31,6 +31,7 @@
 
 #include "gem/i915_gem_context.h"
 #include "gt/intel_context.h"
+#include "gt/intel_rps.h"
 
 #include "i915_active.h"
 #include "i915_drv.h"
@@ -269,8 +270,8 @@ static bool i915_request_retire(struct i915_request *rq)
 	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
 		i915_request_cancel_breadcrumb(rq);
 	if (i915_request_has_waitboost(rq)) {
-		GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters));
-		atomic_dec(&rq->i915->gt_pm.rps.num_waiters);
+		GEM_BUG_ON(!atomic_read(&rq->engine->gt->rps.num_waiters));
+		atomic_dec(&rq->engine->gt->rps.num_waiters);
 	}
 	if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) {
 		set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
@@ -1414,7 +1415,7 @@ long i915_request_wait(struct i915_request *rq,
 	 */
 	if (flags & I915_WAIT_PRIORITY) {
 		if (!i915_request_started(rq) && INTEL_GEN(rq->i915) >= 6)
-			gen6_rps_boost(rq);
+			intel_rps_boost(rq);
 		i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
 	}
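
The two i915_request.c hunks above are a pair: the boost requested on the wait
path is accounted in the same gt->rps.num_waiters that the retire path
decrements. A condensed sketch of that lifecycle (illustrative only; the body
of intel_rps_boost() is not shown in these hunks, and that it bumps
num_waiters is inferred from the GEM_BUG_ON in the retire hunk):

	/* i915_request_wait(): boost a request that has not started yet */
	if (!i915_request_started(rq) && INTEL_GEN(rq->i915) >= 6)
		intel_rps_boost(rq);	/* assumed to increment gt->rps.num_waiters */

	/* i915_request_retire(): drop the accounting once the request is done */
	if (i915_request_has_waitboost(rq)) {
		GEM_BUG_ON(!atomic_read(&rq->engine->gt->rps.num_waiters));
		atomic_dec(&rq->engine->gt->rps.num_waiters);
	}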
 
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 6836e689f025..cf8a40d8ea09 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -30,6 +30,9 @@
 #include <linux/stat.h>
 #include <linux/sysfs.h>
 
+#include "gt/intel_rc6.h"
+#include "gt/intel_rps.h"
+
 #include "i915_drv.h"
 #include "intel_drv.h"
 #include "intel_pm.h"
@@ -49,7 +52,7 @@ static u32 calc_residency(struct drm_i915_private *dev_priv,
 	u64 res = 0;
 
 	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
-		res = intel_rc6_residency_us(dev_priv, reg);
+		res = intel_rc6_residency_us(&dev_priv->gt.rc6, reg);
 
 	return DIV_ROUND_CLOSEST_ULL(res, 1000);
 }
@@ -261,6 +264,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
 				    struct device_attribute *attr, char *buf)
 {
 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	struct intel_rps *rps = &dev_priv->gt.rps;
 	intel_wakeref_t wakeref;
 	u32 freq;
 
@@ -273,31 +277,31 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
 
 		freq = (freq >> 8) & 0xff;
 	} else {
-		freq = intel_get_cagf(dev_priv, I915_READ(GEN6_RPSTAT1));
+		freq = intel_get_cagf(rps, I915_READ(GEN6_RPSTAT1));
 	}
 
 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", intel_gpu_freq(dev_priv, freq));
+	return snprintf(buf, PAGE_SIZE, "%d\n", intel_gpu_freq(rps, freq));
 }
 
 static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
 				    struct device_attribute *attr, char *buf)
 {
 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	struct intel_rps *rps = &dev_priv->gt.rps;
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
-			intel_gpu_freq(dev_priv,
-				       dev_priv->gt_pm.rps.cur_freq));
+			intel_gpu_freq(rps, rps->cur_freq));
 }
 
 static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	struct intel_rps *rps = &dev_priv->gt.rps;
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
-			intel_gpu_freq(dev_priv,
-				       dev_priv->gt_pm.rps.boost_freq));
+			intel_gpu_freq(rps, rps->boost_freq));
 }
 
 static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
@@ -305,7 +309,7 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
 				       const char *buf, size_t count)
 {
 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct intel_rps *rps = &dev_priv->gt.rps;
 	bool boost = false;
 	ssize_t ret;
 	u32 val;
@@ -315,7 +319,7 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
 		return ret;
 
 	/* Validate against (static) hardware limits */
-	val = intel_freq_opcode(dev_priv, val);
+	val = intel_freq_opcode(rps, val);
 	if (val < rps->min_freq || val > rps->max_freq)
 		return -EINVAL;
 
@@ -335,19 +339,19 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
 				     struct device_attribute *attr, char *buf)
 {
 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	struct intel_rps *rps = &dev_priv->gt.rps;
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
-			intel_gpu_freq(dev_priv,
-				       dev_priv->gt_pm.rps.efficient_freq));
+			intel_gpu_freq(rps, rps->efficient_freq));
 }
 
 static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	struct intel_rps *rps = &dev_priv->gt.rps;
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
-			intel_gpu_freq(dev_priv,
-				       dev_priv->gt_pm.rps.max_freq_softlimit));
+			intel_gpu_freq(rps, rps->max_freq_softlimit));
 }
 
 static ssize_t gt_max_freq_mhz_store(struct device *kdev,
@@ -355,19 +359,17 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 				     const char *buf, size_t count)
 {
 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-	intel_wakeref_t wakeref;
-	u32 val;
+	struct intel_rps *rps = &dev_priv->gt.rps;
 	ssize_t ret;
+	u32 val;
 
 	ret = kstrtou32(buf, 0, &val);
 	if (ret)
 		return ret;
 
-	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
 	mutex_lock(&rps->lock);
 
-	val = intel_freq_opcode(dev_priv, val);
+	val = intel_freq_opcode(rps, val);
 	if (val < rps->min_freq ||
 	    val > rps->max_freq ||
 	    val < rps->min_freq_softlimit) {
@@ -377,7 +379,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 
 	if (val > rps->rp0_freq)
 		DRM_DEBUG("User requested overclocking to %d\n",
-			  intel_gpu_freq(dev_priv, val));
+			  intel_gpu_freq(rps, val));
 
 	rps->max_freq_softlimit = val;
 
@@ -385,14 +387,15 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 		      rps->min_freq_softlimit,
 		      rps->max_freq_softlimit);
 
-	/* We still need *_set_rps to process the new max_delay and
+	/*
+	 * We still need *_set_rps to process the new max_delay and
 	 * update the interrupt limits and PMINTRMSK even though
-	 * frequency request may be unchanged. */
-	ret = intel_set_rps(dev_priv, val);
+	 * frequency request may be unchanged.
+	 */
+	intel_rps_set(rps, val);
 
 unlock:
 	mutex_unlock(&rps->lock);
-	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
 
 	return ret ?: count;
 }
@@ -400,10 +403,10 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	struct intel_rps *rps = &dev_priv->gt.rps;
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
-			intel_gpu_freq(dev_priv,
-				       dev_priv->gt_pm.rps.min_freq_softlimit));
+			intel_gpu_freq(rps, rps->min_freq_softlimit));
 }
 
 static ssize_t gt_min_freq_mhz_store(struct device *kdev,
@@ -411,19 +414,17 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 				     const char *buf, size_t count)
 {
 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-	intel_wakeref_t wakeref;
-	u32 val;
+	struct intel_rps *rps = &dev_priv->gt.rps;
 	ssize_t ret;
+	u32 val;
 
 	ret = kstrtou32(buf, 0, &val);
 	if (ret)
 		return ret;
 
-	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
 	mutex_lock(&rps->lock);
 
-	val = intel_freq_opcode(dev_priv, val);
+	val = intel_freq_opcode(rps, val);
 	if (val < rps->min_freq ||
 	    val > rps->max_freq ||
 	    val > rps->max_freq_softlimit) {
@@ -437,14 +438,15 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 		      rps->min_freq_softlimit,
 		      rps->max_freq_softlimit);
 
-	/* We still need *_set_rps to process the new min_delay and
+	/*
+	 * We still need *_set_rps to process the new min_delay and
 	 * update the interrupt limits and PMINTRMSK even though
-	 * frequency request may be unchanged. */
-	ret = intel_set_rps(dev_priv, val);
+	 * frequency request may be unchanged.
+	 */
+	intel_rps_set(rps, val);
 
 unlock:
 	mutex_unlock(&rps->lock);
-	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
 
 	return ret ?: count;
 }
@@ -466,15 +468,15 @@ static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
 static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct intel_rps *rps = &dev_priv->gt.rps;
 	u32 val;
 
 	if (attr == &dev_attr_gt_RP0_freq_mhz)
-		val = intel_gpu_freq(dev_priv, rps->rp0_freq);
+		val = intel_gpu_freq(rps, rps->rp0_freq);
 	else if (attr == &dev_attr_gt_RP1_freq_mhz)
-		val = intel_gpu_freq(dev_priv, rps->rp1_freq);
+		val = intel_gpu_freq(rps, rps->rp1_freq);
 	else if (attr == &dev_attr_gt_RPn_freq_mhz)
-		val = intel_gpu_freq(dev_priv, rps->min_freq);
+		val = intel_gpu_freq(rps, rps->min_freq);
 	else
 		BUG();
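
Both frequency store paths above now share the same shape and no longer take
an explicit runtime-PM wakeref; the working assumption is that intel_rps_set(),
whose body is not shown in these hunks, only touches the hardware while the GT
is already awake. A condensed sketch of the resulting max-freq store flow,
with names as in the hunks (the overclocking debug message is omitted):

	ret = kstrtou32(buf, 0, &val);		/* parse MHz from sysfs */
	if (ret)
		return ret;

	mutex_lock(&rps->lock);

	val = intel_freq_opcode(rps, val);	/* MHz -> internal freq units */
	if (val < rps->min_freq ||
	    val > rps->max_freq ||
	    val < rps->min_freq_softlimit) {
		ret = -EINVAL;
		goto unlock;
	}

	rps->max_freq_softlimit = val;
	intel_rps_set(rps, val);		/* reapplies limits and PMINTRMSK */

unlock:
	mutex_unlock(&rps->lock);
	return ret ?: count;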
 
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 22472f2bd31b..7ea7c50793ac 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -37,32 +37,13 @@
 #include "display/intel_fbc.h"
 #include "display/intel_sprite.h"
 
+#include "gt/intel_rps.h"
+
 #include "i915_drv.h"
 #include "i915_irq.h"
 #include "intel_drv.h"
 #include "intel_pm.h"
 #include "intel_sideband.h"
-#include "../../../platform/x86/intel_ips.h"
-
-/**
- * DOC: RC6
- *
- * RC6 is a special power stage which allows the GPU to enter a very
- * low-voltage mode when idle, using down to 0V while at this stage.  This
- * stage is entered automatically when the GPU is idle when RC6 support is
- * enabled, and as soon as new workload arises GPU wakes up automatically as well.
- *
- * There are different RC6 modes available in Intel GPU, which differentiate
- * among each other with the latency required to enter and leave RC6 and
- * voltage consumed by the GPU in different states.
- *
- * The combination of the following flags define which states GPU is allowed
- * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and
- * RC6pp is deepest RC6. Their support by hardware varies according to the
- * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one
- * which brings the most power savings; deeper states save more power, but
- * require higher latency to switch to and wake up.
- */
 
 static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
 {
@@ -215,7 +196,6 @@ static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv)
 		break;
 	}
 
-	dev_priv->ips.r_t = dev_priv->mem_freq;
 
 	switch (csipll & 0x3ff) {
 	case 0x00c:
@@ -246,13 +226,6 @@ static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv)
 		break;
 	}
 
-	if (dev_priv->fsb_freq == 3200) {
-		dev_priv->ips.c_m = 0;
-	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
-		dev_priv->ips.c_m = 1;
-	} else {
-		dev_priv->ips.c_m = 2;
-	}
 }
 
 static const struct cxsr_latency cxsr_latency_table[] = {
@@ -6388,2632 +6361,260 @@ void intel_init_ipc(struct drm_i915_private *dev_priv)
 	intel_enable_ipc(dev_priv);
 }
 
-/*
- * Lock protecting IPS related data structures
- */
-DEFINE_SPINLOCK(mchdev_lock);
+static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
+{
+	/*
+	 * On Ibex Peak and Cougar Point, we need to disable clock
+	 * gating for the panel power sequencer or it will fail to
+	 * start up when no ports are active.
+	 */
+	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+}
 
-bool ironlake_set_drps(struct drm_i915_private *i915, u8 val)
+static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
 {
-	struct intel_uncore *uncore = &i915->uncore;
-	u16 rgvswctl;
+	enum pipe pipe;
 
-	lockdep_assert_held(&mchdev_lock);
+	for_each_pipe(dev_priv, pipe) {
+		I915_WRITE(DSPCNTR(pipe),
+			   I915_READ(DSPCNTR(pipe)) |
+			   DISPPLANE_TRICKLE_FEED_DISABLE);
 
-	rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
-	if (rgvswctl & MEMCTL_CMD_STS) {
-		DRM_DEBUG("gpu busy, RCS change rejected\n");
-		return false; /* still busy with another command */
+		I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
+		POSTING_READ(DSPSURF(pipe));
 	}
-
-	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
-		(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
-	intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
-	intel_uncore_posting_read16(uncore, MEMSWCTL);
-
-	rgvswctl |= MEMCTL_CMD_STS;
-	intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
-
-	return true;
 }
 
-static void ironlake_enable_drps(struct drm_i915_private *dev_priv)
+static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
 {
-	struct intel_uncore *uncore = &dev_priv->uncore;
-	u32 rgvmodectl;
-	u8 fmax, fmin, fstart, vstart;
-
-	spin_lock_irq(&mchdev_lock);
-
-	rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
-
-	/* Enable temp reporting */
-	intel_uncore_write16(uncore, PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
-	intel_uncore_write16(uncore, TSC1, I915_READ(TSC1) | TSE);
-
-	/* 100ms RC evaluation intervals */
-	intel_uncore_write(uncore, RCUPEI, 100000);
-	intel_uncore_write(uncore, RCDNEI, 100000);
-
-	/* Set max/min thresholds to 90ms and 80ms respectively */
-	intel_uncore_write(uncore, RCBMAXAVG, 90000);
-	intel_uncore_write(uncore, RCBMINAVG, 80000);
-
-	intel_uncore_write(uncore, MEMIHYST, 1);
-
-	/* Set up min, max, and cur for interrupt handling */
-	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
-	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
-	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
-		MEMMODE_FSTART_SHIFT;
-
-	vstart = (intel_uncore_read(uncore, PXVFREQ(fstart)) &
-		  PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;
-
-	dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
-	dev_priv->ips.fstart = fstart;
+	u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
 
-	dev_priv->ips.max_delay = fstart;
-	dev_priv->ips.min_delay = fmin;
-	dev_priv->ips.cur_delay = fstart;
+	/*
+	 * Required for FBC
+	 * WaFbcDisableDpfcClockGating:ilk
+	 */
+	dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
+		   ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
+		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
 
-	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
-			 fmax, fmin, fstart);
+	I915_WRITE(PCH_3DCGDIS0,
+		   MARIUNIT_CLOCK_GATE_DISABLE |
+		   SVSMUNIT_CLOCK_GATE_DISABLE);
+	I915_WRITE(PCH_3DCGDIS1,
+		   VFMUNIT_CLOCK_GATE_DISABLE);
 
-	intel_uncore_write(uncore,
-			   MEMINTREN,
-			   MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
+	/*
+	 * According to the spec the following bits should be set in
+	 * order to enable memory self-refresh
+	 * The bit 22/21 of 0x42004
+	 * The bit 5 of 0x42020
+	 * The bit 15 of 0x45000
+	 */
+	I915_WRITE(ILK_DISPLAY_CHICKEN2,
+		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
+		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
+	dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
+	I915_WRITE(DISP_ARB_CTL,
+		   (I915_READ(DISP_ARB_CTL) |
+		    DISP_FBC_WM_DIS));
 
 	/*
-	 * Interrupts will be enabled in ironlake_irq_postinstall
+	 * Based on the document from hardware guys the following bits
+	 * should be set unconditionally in order to enable FBC.
+	 * The bit 22 of 0x42000
+	 * The bit 22 of 0x42004
+	 * The bit 7,8,9 of 0x42020.
 	 */
+	if (IS_IRONLAKE_M(dev_priv)) {
+		/* WaFbcAsynchFlipDisableFbcQueue:ilk */
+		I915_WRITE(ILK_DISPLAY_CHICKEN1,
+			   I915_READ(ILK_DISPLAY_CHICKEN1) |
+			   ILK_FBCQ_DIS);
+		I915_WRITE(ILK_DISPLAY_CHICKEN2,
+			   I915_READ(ILK_DISPLAY_CHICKEN2) |
+			   ILK_DPARB_GATE);
+	}
 
-	intel_uncore_write(uncore, VIDSTART, vstart);
-	intel_uncore_posting_read(uncore, VIDSTART);
+	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
 
-	rgvmodectl |= MEMMODE_SWMODE_EN;
-	intel_uncore_write(uncore, MEMMODECTL, rgvmodectl);
+	I915_WRITE(ILK_DISPLAY_CHICKEN2,
+		   I915_READ(ILK_DISPLAY_CHICKEN2) |
+		   ILK_ELPIN_409_SELECT);
+	I915_WRITE(_3D_CHICKEN2,
+		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
+		   _3D_CHICKEN2_WM_READ_PIPELINED);
 
-	if (wait_for_atomic((intel_uncore_read(uncore, MEMSWCTL) &
-			     MEMCTL_CMD_STS) == 0, 10))
-		DRM_ERROR("stuck trying to change perf mode\n");
-	mdelay(1);
+	/* WaDisableRenderCachePipelinedFlush:ilk */
+	I915_WRITE(CACHE_MODE_0,
+		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
 
-	ironlake_set_drps(dev_priv, fstart);
+	/* WaDisable_RenderCache_OperationalFlush:ilk */
+	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
 
-	dev_priv->ips.last_count1 =
-		intel_uncore_read(uncore, DMIEC) +
-		intel_uncore_read(uncore, DDREC) +
-		intel_uncore_read(uncore, CSIEC);
-	dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
-	dev_priv->ips.last_count2 = intel_uncore_read(uncore, GFXEC);
-	dev_priv->ips.last_time2 = ktime_get_raw_ns();
+	g4x_disable_trickle_feed(dev_priv);
 
-	spin_unlock_irq(&mchdev_lock);
+	ibx_init_clock_gating(dev_priv);
 }
 
-static void ironlake_disable_drps(struct drm_i915_private *i915)
+static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
 {
-	struct intel_uncore *uncore = &i915->uncore;
-	u16 rgvswctl;
-
-	spin_lock_irq(&mchdev_lock);
-
-	rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
-
-	/* Ack interrupts, disable EFC interrupt */
-	intel_uncore_write(uncore,
-			   MEMINTREN,
-			   intel_uncore_read(uncore, MEMINTREN) &
-			   ~MEMINT_EVAL_CHG_EN);
-	intel_uncore_write(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
-	intel_uncore_write(uncore,
-			   DEIER,
-			   intel_uncore_read(uncore, DEIER) & ~DE_PCU_EVENT);
-	intel_uncore_write(uncore, DEIIR, DE_PCU_EVENT);
-	intel_uncore_write(uncore,
-			   DEIMR,
-			   intel_uncore_read(uncore, DEIMR) | DE_PCU_EVENT);
-
-	/* Go back to the starting frequency */
-	ironlake_set_drps(i915, i915->ips.fstart);
-	mdelay(1);
-	rgvswctl |= MEMCTL_CMD_STS;
-	intel_uncore_write(uncore, MEMSWCTL, rgvswctl);
-	mdelay(1);
+	int pipe;
+	u32 val;
 
-	spin_unlock_irq(&mchdev_lock);
+	/*
+	 * On Ibex Peak and Cougar Point, we need to disable clock
+	 * gating for the panel power sequencer or it will fail to
+	 * start up when no ports are active.
+	 */
+	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
+		   PCH_DPLUNIT_CLOCK_GATE_DISABLE |
+		   PCH_CPUNIT_CLOCK_GATE_DISABLE);
+	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
+		   DPLS_EDP_PPS_FIX_DIS);
+	/* The below fixes the weird display corruption, a few pixels shifted
+	 * downward, on (only) LVDS of some HP laptops with IVY.
+	 */
+	for_each_pipe(dev_priv, pipe) {
+		val = I915_READ(TRANS_CHICKEN2(pipe));
+		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
+		val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
+		if (dev_priv->vbt.fdi_rx_polarity_inverted)
+			val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
+		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
+		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
+		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
+		I915_WRITE(TRANS_CHICKEN2(pipe), val);
+	}
+	/* WADP0ClockGatingDisable */
+	for_each_pipe(dev_priv, pipe) {
+		I915_WRITE(TRANS_CHICKEN1(pipe),
+			   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
+	}
 }
 
-/* There's a funny hw issue where the hw returns all 0 when reading from
- * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
- * ourselves, instead of doing a rmw cycle (which might result in us clearing
- * all limits and the gpu stuck at whatever frequency it is at atm).
- */
-static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
+static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
 {
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-	u32 limits;
-
-	/* Only set the down limit when we've reached the lowest level to avoid
-	 * getting more interrupts, otherwise leave this clear. This prevents a
-	 * race in the hw when coming out of rc6: There's a tiny window where
-	 * the hw runs at the minimal clock before selecting the desired
-	 * frequency, if the down threshold expires in that window we will not
-	 * receive a down interrupt. */
-	if (INTEL_GEN(dev_priv) >= 9) {
-		limits = (rps->max_freq_softlimit) << 23;
-		if (val <= rps->min_freq_softlimit)
-			limits |= (rps->min_freq_softlimit) << 14;
-	} else {
-		limits = rps->max_freq_softlimit << 24;
-		if (val <= rps->min_freq_softlimit)
-			limits |= rps->min_freq_softlimit << 16;
-	}
+	u32 tmp;
 
-	return limits;
+	tmp = I915_READ(MCH_SSKPD);
+	if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
+		DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
+			      tmp);
 }
 
-static void rps_set_power(struct drm_i915_private *dev_priv, int new_power)
+static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
 {
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-	u32 threshold_up = 0, threshold_down = 0; /* in % */
-	u32 ei_up = 0, ei_down = 0;
-
-	lockdep_assert_held(&rps->power.mutex);
+	u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
 
-	if (new_power == rps->power.mode)
-		return;
+	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
 
-	/* Note the units here are not exactly 1us, but 1280ns. */
-	switch (new_power) {
-	case LOW_POWER:
-		/* Upclock if more than 95% busy over 16ms */
-		ei_up = 16000;
-		threshold_up = 95;
+	I915_WRITE(ILK_DISPLAY_CHICKEN2,
+		   I915_READ(ILK_DISPLAY_CHICKEN2) |
+		   ILK_ELPIN_409_SELECT);
 
-		/* Downclock if less than 85% busy over 32ms */
-		ei_down = 32000;
-		threshold_down = 85;
-		break;
+	/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
+	I915_WRITE(_3D_CHICKEN,
+		   _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
 
-	case BETWEEN:
-		/* Upclock if more than 90% busy over 13ms */
-		ei_up = 13000;
-		threshold_up = 90;
+	/* WaDisable_RenderCache_OperationalFlush:snb */
+	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
 
-		/* Downclock if less than 75% busy over 32ms */
-		ei_down = 32000;
-		threshold_down = 75;
-		break;
+	/*
+	 * BSpec recommends 8x4 when MSAA is used,
+	 * however in practice 16x4 seems fastest.
+	 *
+	 * Note that PS/WM thread counts depend on the WIZ hashing
+	 * disable bit, which we don't touch here, but it's good
+	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+	 */
+	I915_WRITE(GEN6_GT_MODE,
+		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
 
-	case HIGH_POWER:
-		/* Upclock if more than 85% busy over 10ms */
-		ei_up = 10000;
-		threshold_up = 85;
+	I915_WRITE(CACHE_MODE_0,
+		   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
 
-		/* Downclock if less than 60% busy over 32ms */
-		ei_down = 32000;
-		threshold_down = 60;
-		break;
-	}
+	I915_WRITE(GEN6_UCGCTL1,
+		   I915_READ(GEN6_UCGCTL1) |
+		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
+		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);
 
-	/* When byt can survive without system hang with dynamic
-	 * sw freq adjustments, this restriction can be lifted.
+	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
+	 * gating disable must be set.  Failure to set it results in
+	 * flickering pixels due to Z write ordering failures after
+	 * some amount of runtime in the Mesa "fire" demo, and Unigine
+	 * Sanctuary and Tropics, and apparently anything else with
+	 * alpha test or pixel discard.
+	 *
+	 * According to the spec, bit 11 (RCCUNIT) must also be set,
+	 * but we didn't debug actual testcases to find it out.
+	 *
+	 * WaDisableRCCUnitClockGating:snb
+	 * WaDisableRCPBUnitClockGating:snb
 	 */
-	if (IS_VALLEYVIEW(dev_priv))
-		goto skip_hw_write;
-
-	I915_WRITE(GEN6_RP_UP_EI,
-		   GT_INTERVAL_FROM_US(dev_priv, ei_up));
-	I915_WRITE(GEN6_RP_UP_THRESHOLD,
-		   GT_INTERVAL_FROM_US(dev_priv,
-				       ei_up * threshold_up / 100));
-
-	I915_WRITE(GEN6_RP_DOWN_EI,
-		   GT_INTERVAL_FROM_US(dev_priv, ei_down));
-	I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
-		   GT_INTERVAL_FROM_US(dev_priv,
-				       ei_down * threshold_down / 100));
-
-	I915_WRITE(GEN6_RP_CONTROL,
-		   (INTEL_GEN(dev_priv) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
-		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
-		   GEN6_RP_MEDIA_IS_GFX |
-		   GEN6_RP_ENABLE |
-		   GEN6_RP_UP_BUSY_AVG |
-		   GEN6_RP_DOWN_IDLE_AVG);
-
-skip_hw_write:
-	rps->power.mode = new_power;
-	rps->power.up_threshold = threshold_up;
-	rps->power.down_threshold = threshold_down;
-}
-
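/*
 * Illustration only (the sketch_* helpers below are invented for this
 * note): the EI and threshold registers programmed above count ~1.28us
 * hardware units, and each threshold is simply the busy fraction of its
 * evaluation window.  E.g. LOW_POWER upclocks once more than
 * 16000us * 95 / 100 = 15200us of a 16ms window was busy;
 * GT_INTERVAL_FROM_US() performs the per-platform us-to-interval scaling.
 */
static unsigned int sketch_busy_threshold_us(unsigned int ei_us,
					     unsigned int threshold_pct)
{
	return ei_us * threshold_pct / 100;
}

static unsigned int sketch_us_to_nominal_intervals(unsigned int us)
{
	return us * 1000 / 1280;	/* nominal 1280ns per interval */
}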
-static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
-{
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-	int new_power;
-
-	new_power = rps->power.mode;
-	switch (rps->power.mode) {
-	case LOW_POWER:
-		if (val > rps->efficient_freq + 1 &&
-		    val > rps->cur_freq)
-			new_power = BETWEEN;
-		break;
+	I915_WRITE(GEN6_UCGCTL2,
+		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
+		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
 
-	case BETWEEN:
-		if (val <= rps->efficient_freq &&
-		    val < rps->cur_freq)
-			new_power = LOW_POWER;
-		else if (val >= rps->rp0_freq &&
-			 val > rps->cur_freq)
-			new_power = HIGH_POWER;
-		break;
+	/* WaStripsFansDisableFastClipPerformanceFix:snb */
+	I915_WRITE(_3D_CHICKEN3,
+		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));
 
-	case HIGH_POWER:
-		if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 &&
-		    val < rps->cur_freq)
-			new_power = BETWEEN;
-		break;
-	}
-	/* Max/min bins are special */
-	if (val <= rps->min_freq_softlimit)
-		new_power = LOW_POWER;
-	if (val >= rps->max_freq_softlimit)
-		new_power = HIGH_POWER;
+	/*
+	 * Bspec says:
+	 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
+	 * 3DSTATE_SF number of SF output attributes is more than 16."
+	 */
+	I915_WRITE(_3D_CHICKEN3,
+		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));
 
-	mutex_lock(&rps->power.mutex);
-	if (rps->power.interactive)
-		new_power = HIGH_POWER;
-	rps_set_power(dev_priv, new_power);
-	mutex_unlock(&rps->power.mutex);
-}
+	/*
+	 * According to the spec the following bits should be
+	 * set in order to enable memory self-refresh and fbc:
+	 * The bit21 and bit22 of 0x42000
+	 * The bit21 and bit22 of 0x42004
+	 * The bit5 and bit7 of 0x42020
+	 * The bit14 of 0x70180
+	 * The bit14 of 0x71180
+	 *
+	 * WaFbcAsynchFlipDisableFbcQueue:snb
+	 */
+	I915_WRITE(ILK_DISPLAY_CHICKEN1,
+		   I915_READ(ILK_DISPLAY_CHICKEN1) |
+		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
+	I915_WRITE(ILK_DISPLAY_CHICKEN2,
+		   I915_READ(ILK_DISPLAY_CHICKEN2) |
+		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
+	I915_WRITE(ILK_DSPCLK_GATE_D,
+		   I915_READ(ILK_DSPCLK_GATE_D) |
+		   ILK_DPARBUNIT_CLOCK_GATE_ENABLE  |
+		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
 
-void intel_rps_mark_interactive(struct drm_i915_private *i915, bool interactive)
-{
-	struct intel_rps *rps = &i915->gt_pm.rps;
+	g4x_disable_trickle_feed(dev_priv);
 
-	if (INTEL_GEN(i915) < 6)
-		return;
+	cpt_init_clock_gating(dev_priv);
 
-	mutex_lock(&rps->power.mutex);
-	if (interactive) {
-		if (!rps->power.interactive++ && READ_ONCE(i915->gt.awake))
-			rps_set_power(i915, HIGH_POWER);
-	} else {
-		GEM_BUG_ON(!rps->power.interactive);
-		rps->power.interactive--;
-	}
-	mutex_unlock(&rps->power.mutex);
+	gen6_check_mch_setup(dev_priv);
 }
 
-static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
+static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
 {
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-	u32 mask = 0;
-
-	/* We use UP_EI_EXPIRED interupts for both up/down in manual mode */
-	if (val > rps->min_freq_softlimit)
-		mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
-	if (val < rps->max_freq_softlimit)
-		mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
+	u32 reg = I915_READ(GEN7_FF_THREAD_MODE);
 
-	mask &= dev_priv->pm_rps_events;
+	/*
+	 * WaVSThreadDispatchOverride:ivb,vlv
+	 *
+	 * This actually overrides the dispatch
+	 * mode for all thread types.
+	 */
+	reg &= ~GEN7_FF_SCHED_MASK;
+	reg |= GEN7_FF_TS_SCHED_HW;
+	reg |= GEN7_FF_VS_SCHED_HW;
+	reg |= GEN7_FF_DS_SCHED_HW;
 
-	return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
+	I915_WRITE(GEN7_FF_THREAD_MODE, reg);
 }
 
-/* gen6_set_rps is called to update the frequency request, but should also be
- * called when the range (min_delay and max_delay) is modified so that we can
- * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
-static int gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
-{
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
-	/* min/max delay may still have been modified so be sure to
-	 * write the limits value.
-	 */
-	if (val != rps->cur_freq) {
-		gen6_set_rps_thresholds(dev_priv, val);
-
-		if (INTEL_GEN(dev_priv) >= 9)
-			I915_WRITE(GEN6_RPNSWREQ,
-				   GEN9_FREQUENCY(val));
-		else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-			I915_WRITE(GEN6_RPNSWREQ,
-				   HSW_FREQUENCY(val));
-		else
-			I915_WRITE(GEN6_RPNSWREQ,
-				   GEN6_FREQUENCY(val) |
-				   GEN6_OFFSET(0) |
-				   GEN6_AGGRESSIVE_TURBO);
-	}
-
-	/* Make sure we continue to get interrupts
-	 * until we hit the minimum or maximum frequencies.
-	 */
-	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
-	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
-
-	rps->cur_freq = val;
-	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
-
-	return 0;
-}
-
-static int valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
-{
-	int err;
-
-	if (WARN_ONCE(IS_CHERRYVIEW(dev_priv) && (val & 1),
-		      "Odd GPU freq value\n"))
-		val &= ~1;
-
-	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
-
-	if (val != dev_priv->gt_pm.rps.cur_freq) {
-		vlv_punit_get(dev_priv);
-		err = vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
-		vlv_punit_put(dev_priv);
-		if (err)
-			return err;
-
-		gen6_set_rps_thresholds(dev_priv, val);
-	}
-
-	dev_priv->gt_pm.rps.cur_freq = val;
-	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
-
-	return 0;
-}
-
-/* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down
- *
- * * If Gfx is Idle, then
- * 1. Forcewake Media well.
- * 2. Request idle freq.
- * 3. Release Forcewake of Media well.
-*/
-static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
-{
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-	u32 val = rps->idle_freq;
-	int err;
-
-	if (rps->cur_freq <= val)
-		return;
-
-	/* The punit delays the write of the frequency and voltage until it
-	 * determines the GPU is awake. During normal usage we don't want to
-	 * waste power changing the frequency if the GPU is sleeping (rc6).
-	 * However, the GPU and driver is now idle and we do not want to delay
-	 * switching to minimum voltage (reducing power whilst idle) as we do
-	 * not expect to be woken in the near future and so must flush the
-	 * change by waking the device.
-	 *
-	 * We choose to take the media powerwell (either would do to trick the
-	 * punit into committing the voltage change) as that takes a lot less
-	 * power than the render powerwell.
-	 */
-	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_MEDIA);
-	err = valleyview_set_rps(dev_priv, val);
-	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_MEDIA);
-
-	if (err)
-		DRM_ERROR("Failed to set RPS for idle\n");
-}
-
-void gen6_rps_busy(struct drm_i915_private *dev_priv)
-{
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
-	mutex_lock(&rps->lock);
-	if (rps->enabled) {
-		u8 freq;
-
-		if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
-			gen6_rps_reset_ei(dev_priv);
-		I915_WRITE(GEN6_PMINTRMSK,
-			   gen6_rps_pm_mask(dev_priv, rps->cur_freq));
-
-		gen6_enable_rps_interrupts(dev_priv);
-
-		/* Use the user's desired frequency as a guide, but for better
-		 * performance, jump directly to RPe as our starting frequency.
-		 */
-		freq = max(rps->cur_freq,
-			   rps->efficient_freq);
-
-		if (intel_set_rps(dev_priv,
-				  clamp(freq,
-					rps->min_freq_softlimit,
-					rps->max_freq_softlimit)))
-			DRM_DEBUG_DRIVER("Failed to set idle frequency\n");
-	}
-	mutex_unlock(&rps->lock);
-}
-
-void gen6_rps_idle(struct drm_i915_private *dev_priv)
-{
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
-	/* Flush our bottom-half so that it does not race with us
-	 * setting the idle frequency and so that it is bounded by
-	 * our rpm wakeref. And then disable the interrupts to stop any
-	 * futher RPS reclocking whilst we are asleep.
-	 */
-	gen6_disable_rps_interrupts(dev_priv);
-
-	mutex_lock(&rps->lock);
-	if (rps->enabled) {
-		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-			vlv_set_rps_idle(dev_priv);
-		else
-			gen6_set_rps(dev_priv, rps->idle_freq);
-		rps->last_adj = 0;
-		I915_WRITE(GEN6_PMINTRMSK,
-			   gen6_sanitize_rps_pm_mask(dev_priv, ~0));
-	}
-	mutex_unlock(&rps->lock);
-}
-
-void gen6_rps_boost(struct i915_request *rq)
-{
-	struct intel_rps *rps = &rq->i915->gt_pm.rps;
-	unsigned long flags;
-	bool boost;
-
-	/* This is intentionally racy! We peek at the state here, then
-	 * validate inside the RPS worker.
-	 */
-	if (!rps->enabled)
-		return;
-
-	if (i915_request_signaled(rq))
-		return;
-
-	/* Serializes with i915_request_retire() */
-	boost = false;
-	spin_lock_irqsave(&rq->lock, flags);
-	if (!i915_request_has_waitboost(rq) &&
-	    !dma_fence_is_signaled_locked(&rq->fence)) {
-		boost = !atomic_fetch_inc(&rps->num_waiters);
-		rq->flags |= I915_REQUEST_WAITBOOST;
-	}
-	spin_unlock_irqrestore(&rq->lock, flags);
-	if (!boost)
-		return;
-
-	if (READ_ONCE(rps->cur_freq) < rps->boost_freq)
-		schedule_work(&rps->work);
-
-	atomic_inc(&rps->boosts);
-}
-
-int intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
-{
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-	int err;
-
-	lockdep_assert_held(&rps->lock);
-	GEM_BUG_ON(val > rps->max_freq);
-	GEM_BUG_ON(val < rps->min_freq);
-
-	if (!rps->enabled) {
-		rps->cur_freq = val;
-		return 0;
-	}
-
-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-		err = valleyview_set_rps(dev_priv, val);
-	else
-		err = gen6_set_rps(dev_priv, val);
-
-	return err;
-}
-
-static void gen9_disable_rc6(struct drm_i915_private *dev_priv)
-{
-	I915_WRITE(GEN6_RC_CONTROL, 0);
-	I915_WRITE(GEN9_PG_ENABLE, 0);
-}
-
-static void gen9_disable_rps(struct drm_i915_private *dev_priv)
-{
-	I915_WRITE(GEN6_RP_CONTROL, 0);
-}
-
-static void gen6_disable_rc6(struct drm_i915_private *dev_priv)
-{
-	I915_WRITE(GEN6_RC_CONTROL, 0);
-}
-
-static void gen6_disable_rps(struct drm_i915_private *dev_priv)
-{
-	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
-	I915_WRITE(GEN6_RP_CONTROL, 0);
-}
-
-static void cherryview_disable_rc6(struct drm_i915_private *dev_priv)
-{
-	I915_WRITE(GEN6_RC_CONTROL, 0);
-}
-
-static void cherryview_disable_rps(struct drm_i915_private *dev_priv)
-{
-	I915_WRITE(GEN6_RP_CONTROL, 0);
-}
-
-static void valleyview_disable_rc6(struct drm_i915_private *dev_priv)
-{
-	/* We're doing forcewake before Disabling RC6,
-	 * This what the BIOS expects when going into suspend */
-	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
-	I915_WRITE(GEN6_RC_CONTROL, 0);
-
-	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
-{
-	I915_WRITE(GEN6_RP_CONTROL, 0);
-}
-
-static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
-{
-	bool enable_rc6 = true;
-	unsigned long rc6_ctx_base;
-	u32 rc_ctl;
-	int rc_sw_target;
-
-	rc_ctl = I915_READ(GEN6_RC_CONTROL);
-	rc_sw_target = (I915_READ(GEN6_RC_STATE) & RC_SW_TARGET_STATE_MASK) >>
-		       RC_SW_TARGET_STATE_SHIFT;
-	DRM_DEBUG_DRIVER("BIOS enabled RC states: "
-			 "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
-			 onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
-			 onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
-			 rc_sw_target);
-
-	if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
-		DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n");
-		enable_rc6 = false;
-	}
-
-	/*
-	 * The exact context size is not known for BXT, so assume a page size
-	 * for this check.
-	 */
-	rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
-	if (!((rc6_ctx_base >= dev_priv->dsm_reserved.start) &&
-	      (rc6_ctx_base + PAGE_SIZE < dev_priv->dsm_reserved.end))) {
-		DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
-		enable_rc6 = false;
-	}
-
-	if (!(((I915_READ(PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1) &&
-	      ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) &&
-	      ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) &&
-	      ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) {
-		DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n");
-		enable_rc6 = false;
-	}
-
-	if (!I915_READ(GEN8_PUSHBUS_CONTROL) ||
-	    !I915_READ(GEN8_PUSHBUS_ENABLE) ||
-	    !I915_READ(GEN8_PUSHBUS_SHIFT)) {
-		DRM_DEBUG_DRIVER("Pushbus not setup properly.\n");
-		enable_rc6 = false;
-	}
-
-	if (!I915_READ(GEN6_GFXPAUSE)) {
-		DRM_DEBUG_DRIVER("GFX pause not setup properly.\n");
-		enable_rc6 = false;
-	}
-
-	if (!I915_READ(GEN8_MISC_CTRL0)) {
-		DRM_DEBUG_DRIVER("GPM control not setup properly.\n");
-		enable_rc6 = false;
-	}
-
-	return enable_rc6;
-}
-
-static bool sanitize_rc6(struct drm_i915_private *i915)
-{
-	struct intel_device_info *info = mkwrite_device_info(i915);
-
-	/* Powersaving is controlled by the host when inside a VM */
-	if (intel_vgpu_active(i915)) {
-		info->has_rc6 = 0;
-		info->has_rps = false;
-	}
-
-	if (info->has_rc6 &&
-	    IS_GEN9_LP(i915) && !bxt_check_bios_rc6_setup(i915)) {
-		DRM_INFO("RC6 disabled by BIOS\n");
-		info->has_rc6 = 0;
-	}
-
-	/*
-	 * We assume that we do not have any deep rc6 levels if we don't have
-	 * have the previous rc6 level supported, i.e. we use HAS_RC6()
-	 * as the initial coarse check for rc6 in general, moving on to
-	 * progressively finer/deeper levels.
-	 */
-	if (!info->has_rc6 && info->has_rc6p)
-		info->has_rc6p = 0;
-
-	return info->has_rc6;
-}
-
-static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
-{
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
-	/* All of these values are in units of 50MHz */
-
-	/* static values from HW: RP0 > RP1 > RPn (min_freq) */
-	if (IS_GEN9_LP(dev_priv)) {
-		u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
-		rps->rp0_freq = (rp_state_cap >> 16) & 0xff;
-		rps->rp1_freq = (rp_state_cap >>  8) & 0xff;
-		rps->min_freq = (rp_state_cap >>  0) & 0xff;
-	} else {
-		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-		rps->rp0_freq = (rp_state_cap >>  0) & 0xff;
-		rps->rp1_freq = (rp_state_cap >>  8) & 0xff;
-		rps->min_freq = (rp_state_cap >> 16) & 0xff;
-	}
-	/* hw_max = RP0 until we check for overclocking */
-	rps->max_freq = rps->rp0_freq;
-
-	rps->efficient_freq = rps->rp1_freq;
-	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
-	    IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
-		u32 ddcc_status = 0;
-
-		if (sandybridge_pcode_read(dev_priv,
-					   HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
-					   &ddcc_status, NULL) == 0)
-			rps->efficient_freq =
-				clamp_t(u8,
-					((ddcc_status >> 8) & 0xff),
-					rps->min_freq,
-					rps->max_freq);
-	}
-
-	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
-		/* Store the frequency values in 16.66 MHZ units, which is
-		 * the natural hardware unit for SKL
-		 */
-		rps->rp0_freq *= GEN9_FREQ_SCALER;
-		rps->rp1_freq *= GEN9_FREQ_SCALER;
-		rps->min_freq *= GEN9_FREQ_SCALER;
-		rps->max_freq *= GEN9_FREQ_SCALER;
-		rps->efficient_freq *= GEN9_FREQ_SCALER;
-	}
-}
-
-static void reset_rps(struct drm_i915_private *dev_priv,
-		      int (*set)(struct drm_i915_private *, u8))
-{
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-	u8 freq = rps->cur_freq;
-
-	/* force a reset */
-	rps->power.mode = -1;
-	rps->cur_freq = -1;
-
-	if (set(dev_priv, freq))
-		DRM_ERROR("Failed to reset RPS to initial values\n");
-}
-
-/* See the Gen9_GT_PM_Programming_Guide doc for the below */
-static void gen9_enable_rps(struct drm_i915_private *dev_priv)
-{
-	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
-	/* Program defaults and thresholds for RPS */
-	if (IS_GEN(dev_priv, 9))
-		I915_WRITE(GEN6_RC_VIDEO_FREQ,
-			GEN9_FREQUENCY(dev_priv->gt_pm.rps.rp1_freq));
-
-	/* 1 second timeout*/
-	I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
-		GT_INTERVAL_FROM_US(dev_priv, 1000000));
-
-	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa);
-
-	/* Leaning on the below call to gen6_set_rps to program/setup the
-	 * Up/Down EI & threshold registers, as well as the RP_CONTROL,
-	 * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
-	reset_rps(dev_priv, gen6_set_rps);
-
-	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void gen11_enable_rc6(struct drm_i915_private *dev_priv)
-{
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-
-	/* 1a: Software RC state - RC0 */
-	I915_WRITE(GEN6_RC_STATE, 0);
-
-	/*
-	 * 1b: Get forcewake during program sequence. Although the driver
-	 * hasn't enabled a state yet where we need forcewake, BIOS may have.
-	 */
-	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
-	/* 2a: Disable RC states. */
-	I915_WRITE(GEN6_RC_CONTROL, 0);
-
-	/* 2b: Program RC6 thresholds.*/
-	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
-	I915_WRITE(GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
-
-	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
-	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
-	for_each_engine(engine, dev_priv, id)
-		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
-
-	if (HAS_GUC(dev_priv))
-		I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
-
-	I915_WRITE(GEN6_RC_SLEEP, 0);
-
-	I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
-
-	/*
-	 * 2c: Program Coarse Power Gating Policies.
-	 *
-	 * Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we
-	 * use instead is a more conservative estimate for the maximum time
-	 * it takes us to service a CS interrupt and submit a new ELSP - that
-	 * is the time which the GPU is idle waiting for the CPU to select the
-	 * next request to execute. If the idle hysteresis is less than that
-	 * interrupt service latency, the hardware will automatically gate
-	 * the power well and we will then incur the wake up cost on top of
-	 * the service latency. A similar guide from plane_state is that we
-	 * do not want the enable hysteresis to less than the wakeup latency.
-	 *
-	 * igt/gem_exec_nop/sequential provides a rough estimate for the
-	 * service latency, and puts it around 10us for Broadwell (and other
-	 * big core) and around 40us for Broxton (and other low power cores).
-	 * [Note that for legacy ringbuffer submission, this is less than 1us!]
-	 * However, the wakeup latency on Broxton is closer to 100us. To be
-	 * conservative, we have to factor in a context switch on top (due
-	 * to ksoftirqd).
-	 */
-	I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250);
-	I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 250);
-
-	/* 3a: Enable RC6 */
-	I915_WRITE(GEN6_RC_CONTROL,
-		   GEN6_RC_CTL_HW_ENABLE |
-		   GEN6_RC_CTL_RC6_ENABLE |
-		   GEN6_RC_CTL_EI_MODE(1));
-
-	/* 3b: Enable Coarse Power Gating only when RC6 is enabled. */
-	I915_WRITE(GEN9_PG_ENABLE,
-		   GEN9_RENDER_PG_ENABLE |
-		   GEN9_MEDIA_PG_ENABLE |
-		   GEN11_MEDIA_SAMPLER_PG_ENABLE);
-
-	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
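/*
 * Illustration only: the 250 written to the PG idle hysteresis registers
 * above is in ~1280ns units, i.e. 250 * 1280ns = 320us of idleness before
 * the hardware may gate the power well, comfortably above the 10-100us
 * interrupt-service/wakeup latencies the comment cites.
 */
enum { SKETCH_PG_IDLE_HYSTERESIS_NS = 250 * 1280 };	/* 320000ns = 320us */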
-static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
-{
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	u32 rc6_mode;
-
-	/* 1a: Software RC state - RC0 */
-	I915_WRITE(GEN6_RC_STATE, 0);
-
-	/* 1b: Get forcewake during program sequence. Although the driver
-	 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
-	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
-	/* 2a: Disable RC states. */
-	I915_WRITE(GEN6_RC_CONTROL, 0);
-
-	/* 2b: Program RC6 thresholds.*/
-	if (INTEL_GEN(dev_priv) >= 10) {
-		I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
-		I915_WRITE(GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
-	} else if (IS_SKYLAKE(dev_priv)) {
-		/*
-		 * WaRsDoubleRc6WrlWithCoarsePowerGating:skl Doubling WRL only
-		 * when CPG is enabled
-		 */
-		I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
-	} else {
-		I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
-	}
-
-	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
-	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
-	for_each_engine(engine, dev_priv, id)
-		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
-
-	if (HAS_GUC(dev_priv))
-		I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
-
-	I915_WRITE(GEN6_RC_SLEEP, 0);
-
-	/*
-	 * 2c: Program Coarse Power Gating Policies.
-	 *
-	 * Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we
-	 * use instead is a more conservative estimate for the maximum time
-	 * it takes us to service a CS interrupt and submit a new ELSP - that
-	 * is the time which the GPU is idle waiting for the CPU to select the
-	 * next request to execute. If the idle hysteresis is less than that
-	 * interrupt service latency, the hardware will automatically gate
-	 * the power well and we will then incur the wake up cost on top of
-	 * the service latency. A similar guide from plane_state is that we
-	 * do not want the enable hysteresis to less than the wakeup latency.
-	 *
-	 * igt/gem_exec_nop/sequential provides a rough estimate for the
-	 * service latency, and puts it around 10us for Broadwell (and other
-	 * big core) and around 40us for Broxton (and other low power cores).
-	 * [Note that for legacy ringbuffer submission, this is less than 1us!]
-	 * However, the wakeup latency on Broxton is closer to 100us. To be
-	 * conservative, we have to factor in a context switch on top (due
-	 * to ksoftirqd).
-	 */
-	I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250);
-	I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 250);
-
-	/* 3a: Enable RC6 */
-	I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
-
-	/* WaRsUseTimeoutMode:cnl (pre-prod) */
-	if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_C0))
-		rc6_mode = GEN7_RC_CTL_TO_MODE;
-	else
-		rc6_mode = GEN6_RC_CTL_EI_MODE(1);
-
-	I915_WRITE(GEN6_RC_CONTROL,
-		   GEN6_RC_CTL_HW_ENABLE |
-		   GEN6_RC_CTL_RC6_ENABLE |
-		   rc6_mode);
-
-	/*
-	 * 3b: Enable Coarse Power Gating only when RC6 is enabled.
-	 * WaRsDisableCoarsePowerGating:skl,cnl - Render/Media PG need to be disabled with RC6.
-	 */
-	if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
-		I915_WRITE(GEN9_PG_ENABLE, 0);
-	else
-		I915_WRITE(GEN9_PG_ENABLE,
-			   GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE);
-
-	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void gen8_enable_rc6(struct drm_i915_private *dev_priv)
-{
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-
-	/* 1a: Software RC state - RC0 */
-	I915_WRITE(GEN6_RC_STATE, 0);
-
-	/* 1b: Get forcewake during program sequence. Although the driver
-	 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
-	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
-	/* 2a: Disable RC states. */
-	I915_WRITE(GEN6_RC_CONTROL, 0);
-
-	/* 2b: Program RC6 thresholds.*/
-	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
-	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
-	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
-	for_each_engine(engine, dev_priv, id)
-		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
-	I915_WRITE(GEN6_RC_SLEEP, 0);
-	I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
-
-	/* 3: Enable RC6 */
-
-	I915_WRITE(GEN6_RC_CONTROL,
-		   GEN6_RC_CTL_HW_ENABLE |
-		   GEN7_RC_CTL_TO_MODE |
-		   GEN6_RC_CTL_RC6_ENABLE);
-
-	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void gen8_enable_rps(struct drm_i915_private *dev_priv)
-{
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
-	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
-	/* 1 Program defaults and thresholds for RPS*/
-	I915_WRITE(GEN6_RPNSWREQ,
-		   HSW_FREQUENCY(rps->rp1_freq));
-	I915_WRITE(GEN6_RC_VIDEO_FREQ,
-		   HSW_FREQUENCY(rps->rp1_freq));
-	/* NB: Docs say 1s, and 1000000 - which aren't equivalent */
-	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
-
-	/* Docs recommend 900MHz, and 300 MHz respectively */
-	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-		   rps->max_freq_softlimit << 24 |
-		   rps->min_freq_softlimit << 16);
-
-	I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
-	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
-	I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
-	I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
-
-	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
-
-	/* 2: Enable RPS */
-	I915_WRITE(GEN6_RP_CONTROL,
-		   GEN6_RP_MEDIA_TURBO |
-		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
-		   GEN6_RP_MEDIA_IS_GFX |
-		   GEN6_RP_ENABLE |
-		   GEN6_RP_UP_BUSY_AVG |
-		   GEN6_RP_DOWN_IDLE_AVG);
-
-	reset_rps(dev_priv, gen6_set_rps);
-
-	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
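/*
 * Illustration only: the GEN6_RP_* values above count ~1.28us units, so
 * the "1 second timeout" is 100000000 / 128 = 781250 units (781250 *
 * 1.28us = 1s), the up threshold of 7600000 / 128 = 59375 units is 76ms
 * of busy time within the 66000-unit (84.48ms) up EI, i.e. ~90%, and the
 * down threshold of 31300000 / 128 ~= 244531 units is 313ms within the
 * 448ms down EI, i.e. ~70%.
 */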
-static void gen6_enable_rc6(struct drm_i915_private *dev_priv)
-{
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	u32 rc6vids, rc6_mask;
-	u32 gtfifodbg;
-	int ret;
-
-	I915_WRITE(GEN6_RC_STATE, 0);
-
-	/* Clear the DBG now so we don't confuse earlier errors */
-	gtfifodbg = I915_READ(GTFIFODBG);
-	if (gtfifodbg) {
-		DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
-		I915_WRITE(GTFIFODBG, gtfifodbg);
-	}
-
-	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
-	/* disable the counters and set deterministic thresholds */
-	I915_WRITE(GEN6_RC_CONTROL, 0);
-
-	I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
-	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
-	I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
-	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
-	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
-
-	for_each_engine(engine, dev_priv, id)
-		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
-
-	I915_WRITE(GEN6_RC_SLEEP, 0);
-	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
-	if (IS_IVYBRIDGE(dev_priv))
-		I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
-	else
-		I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
-	I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
-	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
-
-	/* We don't use those on Haswell */
-	rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
-	if (HAS_RC6p(dev_priv))
-		rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
-	if (HAS_RC6pp(dev_priv))
-		rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
-	I915_WRITE(GEN6_RC_CONTROL,
-		   rc6_mask |
-		   GEN6_RC_CTL_EI_MODE(1) |
-		   GEN6_RC_CTL_HW_ENABLE);
-
-	rc6vids = 0;
-	ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
-				     &rc6vids, NULL);
-	if (IS_GEN(dev_priv, 6) && ret) {
-		DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
-	} else if (IS_GEN(dev_priv, 6) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
-		DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
-			  GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
-		rc6vids &= 0xffff00;
-		rc6vids |= GEN6_ENCODE_RC6_VID(450);
-		ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
-		if (ret)
-			DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
-	}
-
-	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void gen6_enable_rps(struct drm_i915_private *dev_priv)
-{
-	/* Here begins a magic sequence of register writes to enable
-	 * auto-downclocking.
-	 *
-	 * Perhaps there might be some value in exposing these to
-	 * userspace...
-	 */
-	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
-	/* Power down if completely idle for over 50ms */
-	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
-	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
-
-	reset_rps(dev_priv, gen6_set_rps);
-
-	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
-{
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-	const int min_freq = 15;
-	const int scaling_factor = 180;
-	unsigned int gpu_freq;
-	unsigned int max_ia_freq, min_ring_freq;
-	unsigned int max_gpu_freq, min_gpu_freq;
-	struct cpufreq_policy *policy;
-
-	lockdep_assert_held(&rps->lock);
-
-	if (rps->max_freq <= rps->min_freq)
-		return;
-
-	policy = cpufreq_cpu_get(0);
-	if (policy) {
-		max_ia_freq = policy->cpuinfo.max_freq;
-		cpufreq_cpu_put(policy);
-	} else {
-		/*
-		 * Default to measured freq if none found, PCU will ensure we
-		 * don't go over
-		 */
-		max_ia_freq = tsc_khz;
-	}
-
-	/* Convert from kHz to MHz */
-	max_ia_freq /= 1000;
-
-	min_ring_freq = I915_READ(DCLK) & 0xf;
-	/* convert DDR frequency from units of 266.6MHz to bandwidth */
-	min_ring_freq = mult_frac(min_ring_freq, 8, 3);
-
-	min_gpu_freq = rps->min_freq;
-	max_gpu_freq = rps->max_freq;
-	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
-		/* Convert GT frequency to 50 HZ units */
-		min_gpu_freq /= GEN9_FREQ_SCALER;
-		max_gpu_freq /= GEN9_FREQ_SCALER;
-	}
-
-	/*
-	 * For each potential GPU frequency, load a ring frequency we'd like
-	 * to use for memory access.  We do this by specifying the IA frequency
-	 * the PCU should use as a reference to determine the ring frequency.
-	 */
-	for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
-		const int diff = max_gpu_freq - gpu_freq;
-		unsigned int ia_freq = 0, ring_freq = 0;
-
-		if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
-			/*
-			 * ring_freq = 2 * GT. ring_freq is in 100MHz units
-			 * No floor required for ring frequency on SKL.
-			 */
-			ring_freq = gpu_freq;
-		} else if (INTEL_GEN(dev_priv) >= 8) {
-			/* max(2 * GT, DDR). NB: GT is 50MHz units */
-			ring_freq = max(min_ring_freq, gpu_freq);
-		} else if (IS_HASWELL(dev_priv)) {
-			ring_freq = mult_frac(gpu_freq, 5, 4);
-			ring_freq = max(min_ring_freq, ring_freq);
-			/* leave ia_freq as the default, chosen by cpufreq */
-		} else {
-			/* On older processors, there is no separate ring
-			 * clock domain, so in order to boost the bandwidth
-			 * of the ring, we need to upclock the CPU (ia_freq).
-			 *
-			 * For GPU frequencies less than 750MHz,
-			 * just use the lowest ring freq.
-			 */
-			if (gpu_freq < min_freq)
-				ia_freq = 800;
-			else
-				ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
-			ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
-		}
-
-		sandybridge_pcode_write(dev_priv,
-					GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
-					ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
-					ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
-					gpu_freq);
-	}
-}
-
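/*
 * Illustration only (the sketch_* helpers are invented for this note):
 * per row of the table built above, Haswell asks for a ring ratio of 5/4
 * of the GT ratio, floored at the DDR-derived minimum, while pre-gen8
 * parts instead raise the IA frequency request as the GT ratio drops,
 * using the scaling_factor of 180 and reporting the result in 100MHz
 * units.
 */
static unsigned int sketch_hsw_ring_freq(unsigned int gpu_freq,
					 unsigned int min_ring_freq)
{
	unsigned int ring_freq = gpu_freq * 5 / 4;

	return ring_freq < min_ring_freq ? min_ring_freq : ring_freq;
}

static unsigned int sketch_legacy_ia_freq(unsigned int max_ia_freq_mhz,
					  unsigned int max_gpu_freq,
					  unsigned int gpu_freq)
{
	unsigned int ia_freq_mhz;

	if (gpu_freq < 15)	/* below min_freq, just use the floor */
		ia_freq_mhz = 800;
	else
		ia_freq_mhz = max_ia_freq_mhz -
			      (max_gpu_freq - gpu_freq) * 180 / 2;

	return (ia_freq_mhz + 50) / 100;	/* round to 100MHz units */
}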
-static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
-{
-	u32 val, rp0;
-
-	val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
-
-	switch (RUNTIME_INFO(dev_priv)->sseu.eu_total) {
-	case 8:
-		/* (2 * 4) config */
-		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
-		break;
-	case 12:
-		/* (2 * 6) config */
-		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT);
-		break;
-	case 16:
-		/* (2 * 8) config */
-	default:
-		/* Setting (2 * 8) Min RP0 for any other combination */
-		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT);
-		break;
-	}
-
-	rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);
-
-	return rp0;
-}
-
-static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
-{
-	u32 val, rpe;
-
-	val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
-	rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
-
-	return rpe;
-}
-
-static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
-{
-	u32 val, rp1;
-
-	val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
-	rp1 = (val & FB_GFX_FREQ_FUSE_MASK);
-
-	return rp1;
-}
-
-static u32 cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
-{
-	u32 val, rpn;
-
-	val = vlv_punit_read(dev_priv, FB_GFX_FMIN_AT_VMIN_FUSE);
-	rpn = ((val >> FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT) &
-		       FB_GFX_FREQ_FUSE_MASK);
-
-	return rpn;
-}
-
-static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
-{
-	u32 val, rp1;
-
-	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
-
-	rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
-
-	return rp1;
-}
-
-static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
-{
-	u32 val, rp0;
-
-	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
-
-	rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
-	/* Clamp to max */
-	rp0 = min_t(u32, rp0, 0xea);
-
-	return rp0;
-}
-
-static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
-{
-	u32 val, rpe;
-
-	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
-	rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
-	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
-	rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
-
-	return rpe;
-}
-
-static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
-{
-	u32 val;
-
-	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
-	/*
-	 * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
-	 * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
-	 * a BYT-M B0 the above register contains 0xbf. Moreover when setting
-	 * a frequency Punit will not allow values below 0xc0. Clamp it 0xc0
-	 * to make sure it matches what Punit accepts.
-	 */
-	return max_t(u32, val, 0xc0);
-}
-
-/* Check that the pctx buffer wasn't move under us. */
-static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
-{
-	unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
-
-	WARN_ON(pctx_addr != dev_priv->dsm.start +
-			     dev_priv->vlv_pctx->stolen->start);
-}
-
-
-/* Check that the pcbr address is not empty. */
-static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
-{
-	unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
-
-	WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
-}
-
-static void cherryview_setup_pctx(struct drm_i915_private *dev_priv)
-{
-	resource_size_t pctx_paddr, paddr;
-	resource_size_t pctx_size = 32*1024;
-	u32 pcbr;
-
-	pcbr = I915_READ(VLV_PCBR);
-	if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
-		DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
-		paddr = dev_priv->dsm.end + 1 - pctx_size;
-		GEM_BUG_ON(paddr > U32_MAX);
-
-		pctx_paddr = (paddr & (~4095));
-		I915_WRITE(VLV_PCBR, pctx_paddr);
-	}
-
-	DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
-}
-
-static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
-{
-	struct drm_i915_gem_object *pctx;
-	resource_size_t pctx_paddr;
-	resource_size_t pctx_size = 24*1024;
-	u32 pcbr;
-
-	pcbr = I915_READ(VLV_PCBR);
-	if (pcbr) {
-		/* BIOS set it up already, grab the pre-alloc'd space */
-		resource_size_t pcbr_offset;
-
-		pcbr_offset = (pcbr & (~4095)) - dev_priv->dsm.start;
-		pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv,
-								      pcbr_offset,
-								      I915_GTT_OFFSET_NONE,
-								      pctx_size);
-		goto out;
-	}
-
-	DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
-
-	/*
-	 * From the Gunit register HAS:
-	 * The Gfx driver is expected to program this register and ensure
-	 * proper allocation within Gfx stolen memory.  For example, this
-	 * register should be programmed such than the PCBR range does not
-	 * overlap with other ranges, such as the frame buffer, protected
-	 * memory, or any other relevant ranges.
-	 */
-	pctx = i915_gem_object_create_stolen(dev_priv, pctx_size);
-	if (!pctx) {
-		DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
-		goto out;
-	}
-
-	GEM_BUG_ON(range_overflows_t(u64,
-				     dev_priv->dsm.start,
-				     pctx->stolen->start,
-				     U32_MAX));
-	pctx_paddr = dev_priv->dsm.start + pctx->stolen->start;
-	I915_WRITE(VLV_PCBR, pctx_paddr);
-
-out:
-	DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
-	dev_priv->vlv_pctx = pctx;
-}
-
-static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
-{
-	struct drm_i915_gem_object *pctx;
-
-	pctx = fetch_and_zero(&dev_priv->vlv_pctx);
-	if (pctx)
-		i915_gem_object_put(pctx);
-}
-
-static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
-{
-	dev_priv->gt_pm.rps.gpll_ref_freq =
-		vlv_get_cck_clock(dev_priv, "GPLL ref",
-				  CCK_GPLL_CLOCK_CONTROL,
-				  dev_priv->czclk_freq);
-
-	DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n",
-			 dev_priv->gt_pm.rps.gpll_ref_freq);
-}
-
-static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
-{
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-	u32 val;
-
-	valleyview_setup_pctx(dev_priv);
-
-	vlv_iosf_sb_get(dev_priv,
-			BIT(VLV_IOSF_SB_PUNIT) |
-			BIT(VLV_IOSF_SB_NC) |
-			BIT(VLV_IOSF_SB_CCK));
-
-	vlv_init_gpll_ref_freq(dev_priv);
-
-	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
-	switch ((val >> 6) & 3) {
-	case 0:
-	case 1:
-		dev_priv->mem_freq = 800;
-		break;
-	case 2:
-		dev_priv->mem_freq = 1066;
-		break;
-	case 3:
-		dev_priv->mem_freq = 1333;
-		break;
-	}
-	DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
-
-	rps->max_freq = valleyview_rps_max_freq(dev_priv);
-	rps->rp0_freq = rps->max_freq;
-	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
-			 intel_gpu_freq(dev_priv, rps->max_freq),
-			 rps->max_freq);
-
-	rps->efficient_freq = valleyview_rps_rpe_freq(dev_priv);
-	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
-			 intel_gpu_freq(dev_priv, rps->efficient_freq),
-			 rps->efficient_freq);
-
-	rps->rp1_freq = valleyview_rps_guar_freq(dev_priv);
-	DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
-			 intel_gpu_freq(dev_priv, rps->rp1_freq),
-			 rps->rp1_freq);
-
-	rps->min_freq = valleyview_rps_min_freq(dev_priv);
-	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
-			 intel_gpu_freq(dev_priv, rps->min_freq),
-			 rps->min_freq);
-
-	vlv_iosf_sb_put(dev_priv,
-			BIT(VLV_IOSF_SB_PUNIT) |
-			BIT(VLV_IOSF_SB_NC) |
-			BIT(VLV_IOSF_SB_CCK));
-}
-
-static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
-{
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-	u32 val;
-
-	cherryview_setup_pctx(dev_priv);
-
-	vlv_iosf_sb_get(dev_priv,
-			BIT(VLV_IOSF_SB_PUNIT) |
-			BIT(VLV_IOSF_SB_NC) |
-			BIT(VLV_IOSF_SB_CCK));
-
-	vlv_init_gpll_ref_freq(dev_priv);
-
-	val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
-
-	switch ((val >> 2) & 0x7) {
-	case 3:
-		dev_priv->mem_freq = 2000;
-		break;
-	default:
-		dev_priv->mem_freq = 1600;
-		break;
-	}
-	DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
-
-	rps->max_freq = cherryview_rps_max_freq(dev_priv);
-	rps->rp0_freq = rps->max_freq;
-	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
-			 intel_gpu_freq(dev_priv, rps->max_freq),
-			 rps->max_freq);
-
-	rps->efficient_freq = cherryview_rps_rpe_freq(dev_priv);
-	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
-			 intel_gpu_freq(dev_priv, rps->efficient_freq),
-			 rps->efficient_freq);
-
-	rps->rp1_freq = cherryview_rps_guar_freq(dev_priv);
-	DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
-			 intel_gpu_freq(dev_priv, rps->rp1_freq),
-			 rps->rp1_freq);
-
-	rps->min_freq = cherryview_rps_min_freq(dev_priv);
-	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
-			 intel_gpu_freq(dev_priv, rps->min_freq),
-			 rps->min_freq);
-
-	vlv_iosf_sb_put(dev_priv,
-			BIT(VLV_IOSF_SB_PUNIT) |
-			BIT(VLV_IOSF_SB_NC) |
-			BIT(VLV_IOSF_SB_CCK));
-
-	WARN_ONCE((rps->max_freq | rps->efficient_freq | rps->rp1_freq |
-		   rps->min_freq) & 1,
-		  "Odd GPU freq values\n");
-}
-
-static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
-{
-	valleyview_cleanup_pctx(dev_priv);
-}
-
-static void cherryview_enable_rc6(struct drm_i915_private *dev_priv)
-{
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	u32 gtfifodbg, rc6_mode, pcbr;
-
-	gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV |
-					     GT_FIFO_FREE_ENTRIES_CHV);
-	if (gtfifodbg) {
-		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
-				 gtfifodbg);
-		I915_WRITE(GTFIFODBG, gtfifodbg);
-	}
-
-	cherryview_check_pctx(dev_priv);
-
-	/* 1a & 1b: Get forcewake during program sequence. Although the driver
-	 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
-	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
-	/*  Disable RC states. */
-	I915_WRITE(GEN6_RC_CONTROL, 0);
-
-	/* 2a: Program RC6 thresholds.*/
-	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
-	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
-	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
-
-	for_each_engine(engine, dev_priv, id)
-		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
-	I915_WRITE(GEN6_RC_SLEEP, 0);
-
-	/* TO threshold set to 500 us ( 0x186 * 1.28 us) */
-	I915_WRITE(GEN6_RC6_THRESHOLD, 0x186);
-
-	/* Allows RC6 residency counter to work */
-	I915_WRITE(VLV_COUNTER_CONTROL,
-		   _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
-				      VLV_MEDIA_RC6_COUNT_EN |
-				      VLV_RENDER_RC6_COUNT_EN));
-
-	/* For now we assume BIOS is allocating and populating the PCBR  */
-	pcbr = I915_READ(VLV_PCBR);
-
-	/* 3: Enable RC6 */
-	rc6_mode = 0;
-	if (pcbr >> VLV_PCBR_ADDR_SHIFT)
-		rc6_mode = GEN7_RC_CTL_TO_MODE;
-	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
-
-	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
-{
-	u32 val;
-
-	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
-	/* 1: Program defaults and thresholds for RPS*/
-	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
-	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
-	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
-	I915_WRITE(GEN6_RP_UP_EI, 66000);
-	I915_WRITE(GEN6_RP_DOWN_EI, 350000);
-
-	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
-
-	/* 2: Enable RPS */
-	I915_WRITE(GEN6_RP_CONTROL,
-		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
-		   GEN6_RP_MEDIA_IS_GFX |
-		   GEN6_RP_ENABLE |
-		   GEN6_RP_UP_BUSY_AVG |
-		   GEN6_RP_DOWN_IDLE_AVG);
-
-	/* Setting Fixed Bias */
-	vlv_punit_get(dev_priv);
-
-	val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | CHV_BIAS_CPU_50_SOC_50;
-	vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
-
-	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
-
-	vlv_punit_put(dev_priv);
-
-	/* RPS code assumes GPLL is used */
-	WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
-
-	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
-	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
-
-	reset_rps(dev_priv, valleyview_set_rps);
-
-	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void valleyview_enable_rc6(struct drm_i915_private *dev_priv)
-{
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	u32 gtfifodbg;
-
-	valleyview_check_pctx(dev_priv);
-
-	gtfifodbg = I915_READ(GTFIFODBG);
-	if (gtfifodbg) {
-		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
-				 gtfifodbg);
-		I915_WRITE(GTFIFODBG, gtfifodbg);
-	}
-
-	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
-	/*  Disable RC states. */
-	I915_WRITE(GEN6_RC_CONTROL, 0);
-
-	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
-	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
-	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
-
-	for_each_engine(engine, dev_priv, id)
-		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
-
-	I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
-
-	/* Allows RC6 residency counter to work */
-	I915_WRITE(VLV_COUNTER_CONTROL,
-		   _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
-				      VLV_MEDIA_RC0_COUNT_EN |
-				      VLV_RENDER_RC0_COUNT_EN |
-				      VLV_MEDIA_RC6_COUNT_EN |
-				      VLV_RENDER_RC6_COUNT_EN));
-
-	I915_WRITE(GEN6_RC_CONTROL,
-		   GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL);
-
-	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
-{
-	u32 val;
-
-	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
-	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
-	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
-	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
-	I915_WRITE(GEN6_RP_UP_EI, 66000);
-	I915_WRITE(GEN6_RP_DOWN_EI, 350000);
-
-	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
-
-	I915_WRITE(GEN6_RP_CONTROL,
-		   GEN6_RP_MEDIA_TURBO |
-		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
-		   GEN6_RP_MEDIA_IS_GFX |
-		   GEN6_RP_ENABLE |
-		   GEN6_RP_UP_BUSY_AVG |
-		   GEN6_RP_DOWN_IDLE_CONT);
-
-	vlv_punit_get(dev_priv);
-
-	/* Setting Fixed Bias */
-	val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | VLV_BIAS_CPU_125_SOC_875;
-	vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
-
-	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
-
-	vlv_punit_put(dev_priv);
-
-	/* RPS code assumes GPLL is used */
-	WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
-
-	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
-	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
-
-	reset_rps(dev_priv, valleyview_set_rps);
-
-	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static unsigned long intel_pxfreq(u32 vidfreq)
-{
-	unsigned long freq;
-	int div = (vidfreq & 0x3f0000) >> 16;
-	int post = (vidfreq & 0x3000) >> 12;
-	int pre = (vidfreq & 0x7);
-
-	if (!pre)
-		return 0;
-
-	freq = ((div * 133333) / ((1<<post) * pre));
-
-	return freq;
-}
-
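/*
 * Illustration only: intel_pxfreq() above decodes a PXVFREQ-style word as
 * divider * 133333 / ((1 << post) * pre), i.e. a pre/post-scaled multiple
 * of the 133.33MHz reference.  With made-up field values div = 12,
 * post = 0, pre = 1 that is 12 * 133333 = 1599996, roughly twelve times
 * the reference (~1.6GHz).
 */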
-static const struct cparams {
-	u16 i;
-	u16 t;
-	u16 m;
-	u16 c;
-} cparams[] = {
-	{ 1, 1333, 301, 28664 },
-	{ 1, 1066, 294, 24460 },
-	{ 1, 800, 294, 25192 },
-	{ 0, 1333, 276, 27605 },
-	{ 0, 1066, 276, 27605 },
-	{ 0, 800, 231, 23784 },
-};
-
-static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
-{
-	u64 total_count, diff, ret;
-	u32 count1, count2, count3, m = 0, c = 0;
-	unsigned long now = jiffies_to_msecs(jiffies), diff1;
-	int i;
-
-	lockdep_assert_held(&mchdev_lock);
-
-	diff1 = now - dev_priv->ips.last_time1;
-
-	/* Prevent division-by-zero if we are asking too fast.
-	 * Also, we don't get interesting results if we are polling
-	 * faster than once in 10ms, so just return the saved value
-	 * in such cases.
-	 */
-	if (diff1 <= 10)
-		return dev_priv->ips.chipset_power;
-
-	count1 = I915_READ(DMIEC);
-	count2 = I915_READ(DDREC);
-	count3 = I915_READ(CSIEC);
-
-	total_count = count1 + count2 + count3;
-
-	/* FIXME: handle per-counter overflow */
-	if (total_count < dev_priv->ips.last_count1) {
-		diff = ~0UL - dev_priv->ips.last_count1;
-		diff += total_count;
-	} else {
-		diff = total_count - dev_priv->ips.last_count1;
-	}
-
-	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
-		if (cparams[i].i == dev_priv->ips.c_m &&
-		    cparams[i].t == dev_priv->ips.r_t) {
-			m = cparams[i].m;
-			c = cparams[i].c;
-			break;
-		}
-	}
-
-	diff = div_u64(diff, diff1);
-	ret = ((m * diff) + c);
-	ret = div_u64(ret, 10);
-
-	dev_priv->ips.last_count1 = total_count;
-	dev_priv->ips.last_time1 = now;
-
-	dev_priv->ips.chipset_power = ret;
-
-	return ret;
-}
-
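/*
 * Illustration only: the removed __i915_chipset_val() derives chipset
 * power from the growth of the DMIEC + DDREC + CSIEC energy counters over
 * the elapsed milliseconds, scaled by the (m, c) pair in cparams[] that
 * matches the current c_m/r_t configuration, roughly
 * (m * counts_per_msec + c) / 10.
 */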
-unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
-{
-	intel_wakeref_t wakeref;
-	unsigned long val = 0;
-
-	if (!IS_GEN(dev_priv, 5))
-		return 0;
-
-	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
-		spin_lock_irq(&mchdev_lock);
-		val = __i915_chipset_val(dev_priv);
-		spin_unlock_irq(&mchdev_lock);
-	}
-
-	return val;
-}
-
-unsigned long i915_mch_val(struct drm_i915_private *i915)
-{
-	unsigned long m, x, b;
-	u32 tsfs;
-
-	tsfs = intel_uncore_read(&i915->uncore, TSFS);
-
-	m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
-	x = intel_uncore_read8(&i915->uncore, TR1);
-
-	b = tsfs & TSFS_INTR_MASK;
-
-	return ((m * x) / 127) - b;
-}
-
-static int _pxvid_to_vd(u8 pxvid)
-{
-	if (pxvid == 0)
-		return 0;
-
-	if (pxvid >= 8 && pxvid < 31)
-		pxvid = 31;
-
-	return (pxvid + 2) * 125;
-}
-
-static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
-{
-	const int vd = _pxvid_to_vd(pxvid);
-	const int vm = vd - 1125;
-
-	if (INTEL_INFO(dev_priv)->is_mobile)
-		return vm > 0 ? vm : 0;
-
-	return vd;
-}
-
-static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
-{
-	u64 now, diff, diffms;
-	u32 count;
-
-	lockdep_assert_held(&mchdev_lock);
-
-	now = ktime_get_raw_ns();
-	diffms = now - dev_priv->ips.last_time2;
-	do_div(diffms, NSEC_PER_MSEC);
-
-	/* Don't divide by 0 */
-	if (!diffms)
-		return;
-
-	count = I915_READ(GFXEC);
-
-	if (count < dev_priv->ips.last_count2) {
-		diff = ~0UL - dev_priv->ips.last_count2;
-		diff += count;
-	} else {
-		diff = count - dev_priv->ips.last_count2;
-	}
-
-	dev_priv->ips.last_count2 = count;
-	dev_priv->ips.last_time2 = now;
-
-	/* More magic constants... */
-	diff = diff * 1181;
-	diff = div_u64(diff, diffms * 10);
-	dev_priv->ips.gfx_power = diff;
-}
-
-void i915_update_gfx_val(struct drm_i915_private *dev_priv)
-{
-	intel_wakeref_t wakeref;
-
-	if (!IS_GEN(dev_priv, 5))
-		return;
-
-	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
-		spin_lock_irq(&mchdev_lock);
-		__i915_update_gfx_val(dev_priv);
-		spin_unlock_irq(&mchdev_lock);
-	}
-}
-
-static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
-{
-	unsigned long t, corr, state1, corr2, state2;
-	u32 pxvid, ext_v;
-
-	lockdep_assert_held(&mchdev_lock);
-
-	pxvid = I915_READ(PXVFREQ(dev_priv->gt_pm.rps.cur_freq));
-	pxvid = (pxvid >> 24) & 0x7f;
-	ext_v = pvid_to_extvid(dev_priv, pxvid);
-
-	state1 = ext_v;
-
-	t = i915_mch_val(dev_priv);
-
-	/* Revel in the empirically derived constants */
-
-	/* Correction factor in 1/100000 units */
-	if (t > 80)
-		corr = ((t * 2349) + 135940);
-	else if (t >= 50)
-		corr = ((t * 964) + 29317);
-	else /* < 50 */
-		corr = ((t * 301) + 1004);
-
-	corr = corr * ((150142 * state1) / 10000 - 78642);
-	corr /= 100000;
-	corr2 = (corr * dev_priv->ips.corr);
-
-	state2 = (corr2 * state1) / 10000;
-	state2 /= 100; /* convert to mW */
-
-	__i915_update_gfx_val(dev_priv);
-
-	return dev_priv->ips.gfx_power + state2;
-}
-
-unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
-{
-	intel_wakeref_t wakeref;
-	unsigned long val = 0;
-
-	if (!IS_GEN(dev_priv, 5))
-		return 0;
-
-	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
-		spin_lock_irq(&mchdev_lock);
-		val = __i915_gfx_val(dev_priv);
-		spin_unlock_irq(&mchdev_lock);
-	}
-
-	return val;
-}
-
-static struct drm_i915_private __rcu *i915_mch_dev;
-
-static struct drm_i915_private *mchdev_get(void)
-{
-	struct drm_i915_private *i915;
-
-	rcu_read_lock();
-	i915 = rcu_dereference(i915_mch_dev);
-	if (!kref_get_unless_zero(&i915->drm.ref))
-		i915 = NULL;
-	rcu_read_unlock();
-
-	return i915;
-}
-
-/**
- * i915_read_mch_val - return value for IPS use
- *
- * Calculate and return a value for the IPS driver to use when deciding whether
- * we have thermal and power headroom to increase CPU or GPU power budget.
- */
-unsigned long i915_read_mch_val(void)
-{
-	struct drm_i915_private *i915;
-	unsigned long chipset_val = 0;
-	unsigned long graphics_val = 0;
-	intel_wakeref_t wakeref;
-
-	i915 = mchdev_get();
-	if (!i915)
-		return 0;
-
-	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
-		spin_lock_irq(&mchdev_lock);
-		chipset_val = __i915_chipset_val(i915);
-		graphics_val = __i915_gfx_val(i915);
-		spin_unlock_irq(&mchdev_lock);
-	}
-
-	drm_dev_put(&i915->drm);
-	return chipset_val + graphics_val;
-}
-EXPORT_SYMBOL_GPL(i915_read_mch_val);
-
-/**
- * i915_gpu_raise - raise GPU frequency limit
- *
- * Raise the limit; IPS indicates we have thermal headroom.
- */
-bool i915_gpu_raise(void)
-{
-	struct drm_i915_private *i915;
-
-	i915 = mchdev_get();
-	if (!i915)
-		return false;
-
-	spin_lock_irq(&mchdev_lock);
-	if (i915->ips.max_delay > i915->ips.fmax)
-		i915->ips.max_delay--;
-	spin_unlock_irq(&mchdev_lock);
-
-	drm_dev_put(&i915->drm);
-	return true;
-}
-EXPORT_SYMBOL_GPL(i915_gpu_raise);
-
-/**
- * i915_gpu_lower - lower GPU frequency limit
- *
- * IPS indicates we're close to a thermal limit, so throttle back the GPU
- * frequency maximum.
- */
-bool i915_gpu_lower(void)
-{
-	struct drm_i915_private *i915;
-
-	i915 = mchdev_get();
-	if (!i915)
-		return false;
-
-	spin_lock_irq(&mchdev_lock);
-	if (i915->ips.max_delay < i915->ips.min_delay)
-		i915->ips.max_delay++;
-	spin_unlock_irq(&mchdev_lock);
-
-	drm_dev_put(&i915->drm);
-	return true;
-}
-EXPORT_SYMBOL_GPL(i915_gpu_lower);
-
-/**
- * i915_gpu_busy - indicate GPU business to IPS
- *
- * Tell the IPS driver whether or not the GPU is busy.
- */
-bool i915_gpu_busy(void)
-{
-	struct drm_i915_private *i915;
-	bool ret;
-
-	i915 = mchdev_get();
-	if (!i915)
-		return false;
-
-	ret = i915->gt.awake;
-
-	drm_dev_put(&i915->drm);
-	return ret;
-}
-EXPORT_SYMBOL_GPL(i915_gpu_busy);
-
-/**
- * i915_gpu_turbo_disable - disable graphics turbo
- *
- * Disable graphics turbo by resetting the max frequency and setting the
- * current frequency to the default.
- */
-bool i915_gpu_turbo_disable(void)
-{
-	struct drm_i915_private *i915;
-	bool ret;
-
-	i915 = mchdev_get();
-	if (!i915)
-		return false;
-
-	spin_lock_irq(&mchdev_lock);
-	i915->ips.max_delay = i915->ips.fstart;
-	ret = ironlake_set_drps(i915, i915->ips.fstart);
-	spin_unlock_irq(&mchdev_lock);
-
-	drm_dev_put(&i915->drm);
-	return ret;
-}
-EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
-
-/**
- * Tells the intel_ips driver that the i915 driver is now loaded, if
- * IPS got loaded first.
- *
- * This awkward dance is so that neither module has to depend on the
- * other in order for IPS to do the appropriate communication of
- * GPU turbo limits to i915.
- */
-static void
-ips_ping_for_i915_load(void)
-{
-	void (*link)(void);
-
-	link = symbol_get(ips_link_to_i915_driver);
-	if (link) {
-		link();
-		symbol_put(ips_link_to_i915_driver);
-	}
-}
-
-void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
-{
-	/* We only register the i915 ips part with intel-ips once everything is
-	 * set up, to avoid intel-ips sneaking in and reading bogus values. */
-	rcu_assign_pointer(i915_mch_dev, dev_priv);
-
-	ips_ping_for_i915_load();
-}
-
-void intel_gpu_ips_teardown(void)
-{
-	rcu_assign_pointer(i915_mch_dev, NULL);
-}
-
-static void intel_init_emon(struct drm_i915_private *dev_priv)
-{
-	u32 lcfuse;
-	u8 pxw[16];
-	int i;
-
-	/* Disable to program */
-	I915_WRITE(ECR, 0);
-	POSTING_READ(ECR);
-
-	/* Program energy weights for various events */
-	I915_WRITE(SDEW, 0x15040d00);
-	I915_WRITE(CSIEW0, 0x007f0000);
-	I915_WRITE(CSIEW1, 0x1e220004);
-	I915_WRITE(CSIEW2, 0x04000004);
-
-	for (i = 0; i < 5; i++)
-		I915_WRITE(PEW(i), 0);
-	for (i = 0; i < 3; i++)
-		I915_WRITE(DEW(i), 0);
-
-	/* Program P-state weights to account for frequency power adjustment */
-	for (i = 0; i < 16; i++) {
-		u32 pxvidfreq = I915_READ(PXVFREQ(i));
-		unsigned long freq = intel_pxfreq(pxvidfreq);
-		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
-			PXVFREQ_PX_SHIFT;
-		unsigned long val;
-
-		val = vid * vid;
-		val *= (freq / 1000);
-		val *= 255;
-		val /= (127*127*900);
-		if (val > 0xff)
-			DRM_ERROR("bad pxval: %ld\n", val);
-		pxw[i] = val;
-	}
-	/* Render standby states get 0 weight */
-	pxw[14] = 0;
-	pxw[15] = 0;
-
-	for (i = 0; i < 4; i++) {
-		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
-			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
-		I915_WRITE(PXW(i), val);
-	}
-
-	/* Adjust magic regs to magic values (more experimental results) */
-	I915_WRITE(OGW0, 0);
-	I915_WRITE(OGW1, 0);
-	I915_WRITE(EG0, 0x00007f00);
-	I915_WRITE(EG1, 0x0000000e);
-	I915_WRITE(EG2, 0x000e0000);
-	I915_WRITE(EG3, 0x68000300);
-	I915_WRITE(EG4, 0x42000000);
-	I915_WRITE(EG5, 0x00140031);
-	I915_WRITE(EG6, 0);
-	I915_WRITE(EG7, 0);
-
-	for (i = 0; i < 8; i++)
-		I915_WRITE(PXWL(i), 0);
-
-	/* Enable PMON + select events */
-	I915_WRITE(ECR, 0x80000019);
-
-	lcfuse = I915_READ(LCFUSE02);
-
-	dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
-}
-
-void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
-{
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
-	/*
-	 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
-	 * requirement.
-	 */
-	if (!sanitize_rc6(dev_priv)) {
-		DRM_INFO("RC6 disabled, disabling runtime PM support\n");
-		pm_runtime_get(&dev_priv->drm.pdev->dev);
-	}
-
-	/* Initialize RPS limits (for userspace) */
-	if (IS_CHERRYVIEW(dev_priv))
-		cherryview_init_gt_powersave(dev_priv);
-	else if (IS_VALLEYVIEW(dev_priv))
-		valleyview_init_gt_powersave(dev_priv);
-	else if (INTEL_GEN(dev_priv) >= 6)
-		gen6_init_rps_frequencies(dev_priv);
-
-	/* Derive initial user preferences/limits from the hardware limits */
-	rps->max_freq_softlimit = rps->max_freq;
-	rps->min_freq_softlimit = rps->min_freq;
-
-	/* After setting max-softlimit, find the overclock max freq */
-	if (IS_GEN(dev_priv, 6) ||
-	    IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
-		u32 params = 0;
-
-		sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS,
-				       &params, NULL);
-		if (params & BIT(31)) { /* OC supported */
-			DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
-					 (rps->max_freq & 0xff) * 50,
-					 (params & 0xff) * 50);
-			rps->max_freq = params & 0xff;
-		}
-	}
-
-	/* Finally allow us to boost to max by default */
-	rps->boost_freq = rps->max_freq;
-	rps->idle_freq = rps->min_freq;
-	rps->cur_freq = rps->idle_freq;
-}
-
-void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
-{
-	if (IS_VALLEYVIEW(dev_priv))
-		valleyview_cleanup_gt_powersave(dev_priv);
-
-	if (!HAS_RC6(dev_priv))
-		pm_runtime_put(&dev_priv->drm.pdev->dev);
-}
-
-void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
-{
-	dev_priv->gt_pm.rps.enabled = true; /* force RPS disabling */
-	dev_priv->gt_pm.rc6.enabled = true; /* force RC6 disabling */
-	intel_disable_gt_powersave(dev_priv);
-
-	if (INTEL_GEN(dev_priv) >= 11)
-		gen11_reset_rps_interrupts(dev_priv);
-	else if (INTEL_GEN(dev_priv) >= 6)
-		gen6_reset_rps_interrupts(dev_priv);
-}
-
-static inline void intel_disable_llc_pstate(struct drm_i915_private *i915)
-{
-	lockdep_assert_held(&i915->gt_pm.rps.lock);
-
-	if (!i915->gt_pm.llc_pstate.enabled)
-		return;
-
-	/* Currently there is no HW configuration to be done to disable. */
-
-	i915->gt_pm.llc_pstate.enabled = false;
-}
-
-static void intel_disable_rc6(struct drm_i915_private *dev_priv)
-{
-	lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
-
-	if (!dev_priv->gt_pm.rc6.enabled)
-		return;
-
-	if (INTEL_GEN(dev_priv) >= 9)
-		gen9_disable_rc6(dev_priv);
-	else if (IS_CHERRYVIEW(dev_priv))
-		cherryview_disable_rc6(dev_priv);
-	else if (IS_VALLEYVIEW(dev_priv))
-		valleyview_disable_rc6(dev_priv);
-	else if (INTEL_GEN(dev_priv) >= 6)
-		gen6_disable_rc6(dev_priv);
-
-	dev_priv->gt_pm.rc6.enabled = false;
-}
-
-static void intel_disable_rps(struct drm_i915_private *dev_priv)
-{
-	lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
-
-	if (!dev_priv->gt_pm.rps.enabled)
-		return;
-
-	if (INTEL_GEN(dev_priv) >= 9)
-		gen9_disable_rps(dev_priv);
-	else if (IS_CHERRYVIEW(dev_priv))
-		cherryview_disable_rps(dev_priv);
-	else if (IS_VALLEYVIEW(dev_priv))
-		valleyview_disable_rps(dev_priv);
-	else if (INTEL_GEN(dev_priv) >= 6)
-		gen6_disable_rps(dev_priv);
-	else if (IS_IRONLAKE_M(dev_priv))
-		ironlake_disable_drps(dev_priv);
-
-	dev_priv->gt_pm.rps.enabled = false;
-}
-
-void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
-{
-	mutex_lock(&dev_priv->gt_pm.rps.lock);
-
-	intel_disable_rc6(dev_priv);
-	intel_disable_rps(dev_priv);
-	if (HAS_LLC(dev_priv))
-		intel_disable_llc_pstate(dev_priv);
-
-	mutex_unlock(&dev_priv->gt_pm.rps.lock);
-}
-
-static inline void intel_enable_llc_pstate(struct drm_i915_private *i915)
-{
-	lockdep_assert_held(&i915->gt_pm.rps.lock);
-
-	if (i915->gt_pm.llc_pstate.enabled)
-		return;
-
-	gen6_update_ring_freq(i915);
-
-	i915->gt_pm.llc_pstate.enabled = true;
-}
-
-static void intel_enable_rc6(struct drm_i915_private *dev_priv)
-{
-	lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
-
-	if (dev_priv->gt_pm.rc6.enabled)
-		return;
-
-	if (IS_CHERRYVIEW(dev_priv))
-		cherryview_enable_rc6(dev_priv);
-	else if (IS_VALLEYVIEW(dev_priv))
-		valleyview_enable_rc6(dev_priv);
-	else if (INTEL_GEN(dev_priv) >= 11)
-		gen11_enable_rc6(dev_priv);
-	else if (INTEL_GEN(dev_priv) >= 9)
-		gen9_enable_rc6(dev_priv);
-	else if (IS_BROADWELL(dev_priv))
-		gen8_enable_rc6(dev_priv);
-	else if (INTEL_GEN(dev_priv) >= 6)
-		gen6_enable_rc6(dev_priv);
-
-	dev_priv->gt_pm.rc6.enabled = true;
-}
-
-static void intel_enable_rps(struct drm_i915_private *dev_priv)
-{
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
-	lockdep_assert_held(&rps->lock);
-
-	if (rps->enabled)
-		return;
-
-	if (IS_CHERRYVIEW(dev_priv)) {
-		cherryview_enable_rps(dev_priv);
-	} else if (IS_VALLEYVIEW(dev_priv)) {
-		valleyview_enable_rps(dev_priv);
-	} else if (INTEL_GEN(dev_priv) >= 9) {
-		gen9_enable_rps(dev_priv);
-	} else if (IS_BROADWELL(dev_priv)) {
-		gen8_enable_rps(dev_priv);
-	} else if (INTEL_GEN(dev_priv) >= 6) {
-		gen6_enable_rps(dev_priv);
-	} else if (IS_IRONLAKE_M(dev_priv)) {
-		ironlake_enable_drps(dev_priv);
-		intel_init_emon(dev_priv);
-	}
-
-	WARN_ON(rps->max_freq < rps->min_freq);
-	WARN_ON(rps->idle_freq > rps->max_freq);
-
-	WARN_ON(rps->efficient_freq < rps->min_freq);
-	WARN_ON(rps->efficient_freq > rps->max_freq);
-
-	rps->enabled = true;
-}
-
-void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
-{
-	/* Powersaving is controlled by the host when inside a VM */
-	if (intel_vgpu_active(dev_priv))
-		return;
-
-	mutex_lock(&dev_priv->gt_pm.rps.lock);
-
-	if (HAS_RC6(dev_priv))
-		intel_enable_rc6(dev_priv);
-	if (HAS_RPS(dev_priv))
-		intel_enable_rps(dev_priv);
-	if (HAS_LLC(dev_priv))
-		intel_enable_llc_pstate(dev_priv);
-
-	mutex_unlock(&dev_priv->gt_pm.rps.lock);
-}
-
-static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
-{
-	/*
-	 * On Ibex Peak and Cougar Point, we need to disable clock
-	 * gating for the panel power sequencer or it will fail to
-	 * start up when no ports are active.
-	 */
-	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
-}
-
-static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
-{
-	enum pipe pipe;
-
-	for_each_pipe(dev_priv, pipe) {
-		I915_WRITE(DSPCNTR(pipe),
-			   I915_READ(DSPCNTR(pipe)) |
-			   DISPPLANE_TRICKLE_FEED_DISABLE);
-
-		I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
-		POSTING_READ(DSPSURF(pipe));
-	}
-}
-
-static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
-{
-	u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
-
-	/*
-	 * Required for FBC
-	 * WaFbcDisableDpfcClockGating:ilk
-	 */
-	dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
-		   ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
-		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
-
-	I915_WRITE(PCH_3DCGDIS0,
-		   MARIUNIT_CLOCK_GATE_DISABLE |
-		   SVSMUNIT_CLOCK_GATE_DISABLE);
-	I915_WRITE(PCH_3DCGDIS1,
-		   VFMUNIT_CLOCK_GATE_DISABLE);
-
-	/*
-	 * According to the spec the following bits should be set in
-	 * order to enable memory self-refresh
-	 * The bit 22/21 of 0x42004
-	 * The bit 5 of 0x42020
-	 * The bit 15 of 0x45000
-	 */
-	I915_WRITE(ILK_DISPLAY_CHICKEN2,
-		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
-		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
-	dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
-	I915_WRITE(DISP_ARB_CTL,
-		   (I915_READ(DISP_ARB_CTL) |
-		    DISP_FBC_WM_DIS));
-
-	/*
-	 * Based on the document from hardware guys the following bits
-	 * should be set unconditionally in order to enable FBC.
-	 * The bit 22 of 0x42000
-	 * The bit 22 of 0x42004
-	 * The bit 7,8,9 of 0x42020.
-	 */
-	if (IS_IRONLAKE_M(dev_priv)) {
-		/* WaFbcAsynchFlipDisableFbcQueue:ilk */
-		I915_WRITE(ILK_DISPLAY_CHICKEN1,
-			   I915_READ(ILK_DISPLAY_CHICKEN1) |
-			   ILK_FBCQ_DIS);
-		I915_WRITE(ILK_DISPLAY_CHICKEN2,
-			   I915_READ(ILK_DISPLAY_CHICKEN2) |
-			   ILK_DPARB_GATE);
-	}
-
-	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
-
-	I915_WRITE(ILK_DISPLAY_CHICKEN2,
-		   I915_READ(ILK_DISPLAY_CHICKEN2) |
-		   ILK_ELPIN_409_SELECT);
-	I915_WRITE(_3D_CHICKEN2,
-		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
-		   _3D_CHICKEN2_WM_READ_PIPELINED);
-
-	/* WaDisableRenderCachePipelinedFlush:ilk */
-	I915_WRITE(CACHE_MODE_0,
-		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
-
-	/* WaDisable_RenderCache_OperationalFlush:ilk */
-	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
-
-	g4x_disable_trickle_feed(dev_priv);
-
-	ibx_init_clock_gating(dev_priv);
-}
-
-static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
-{
-	int pipe;
-	u32 val;
-
-	/*
-	 * On Ibex Peak and Cougar Point, we need to disable clock
-	 * gating for the panel power sequencer or it will fail to
-	 * start up when no ports are active.
-	 */
-	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
-		   PCH_DPLUNIT_CLOCK_GATE_DISABLE |
-		   PCH_CPUNIT_CLOCK_GATE_DISABLE);
-	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
-		   DPLS_EDP_PPS_FIX_DIS);
-	/* The below fixes the weird display corruption, a few pixels shifted
-	 * downward, on (only) LVDS of some HP laptops with IVY.
-	 */
-	for_each_pipe(dev_priv, pipe) {
-		val = I915_READ(TRANS_CHICKEN2(pipe));
-		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
-		val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
-		if (dev_priv->vbt.fdi_rx_polarity_inverted)
-			val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
-		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
-		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
-		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
-		I915_WRITE(TRANS_CHICKEN2(pipe), val);
-	}
-	/* WADP0ClockGatingDisable */
-	for_each_pipe(dev_priv, pipe) {
-		I915_WRITE(TRANS_CHICKEN1(pipe),
-			   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
-	}
-}
-
-static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
-{
-	u32 tmp;
-
-	tmp = I915_READ(MCH_SSKPD);
-	if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
-		DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
-			      tmp);
-}
-
-static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
-{
-	u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
-
-	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
-
-	I915_WRITE(ILK_DISPLAY_CHICKEN2,
-		   I915_READ(ILK_DISPLAY_CHICKEN2) |
-		   ILK_ELPIN_409_SELECT);
-
-	/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
-	I915_WRITE(_3D_CHICKEN,
-		   _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
-
-	/* WaDisable_RenderCache_OperationalFlush:snb */
-	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
-
-	/*
-	 * BSpec recommends 8x4 when MSAA is used,
-	 * however in practice 16x4 seems fastest.
-	 *
-	 * Note that PS/WM thread counts depend on the WIZ hashing
-	 * disable bit, which we don't touch here, but it's good
-	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
-	 */
-	I915_WRITE(GEN6_GT_MODE,
-		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
-
-	I915_WRITE(CACHE_MODE_0,
-		   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
-
-	I915_WRITE(GEN6_UCGCTL1,
-		   I915_READ(GEN6_UCGCTL1) |
-		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
-		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);
-
-	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
-	 * gating disable must be set.  Failure to set it results in
-	 * flickering pixels due to Z write ordering failures after
-	 * some amount of runtime in the Mesa "fire" demo, and Unigine
-	 * Sanctuary and Tropics, and apparently anything else with
-	 * alpha test or pixel discard.
-	 *
-	 * According to the spec, bit 11 (RCCUNIT) must also be set,
-	 * but we didn't debug actual testcases to find it out.
-	 *
-	 * WaDisableRCCUnitClockGating:snb
-	 * WaDisableRCPBUnitClockGating:snb
-	 */
-	I915_WRITE(GEN6_UCGCTL2,
-		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
-		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
-
-	/* WaStripsFansDisableFastClipPerformanceFix:snb */
-	I915_WRITE(_3D_CHICKEN3,
-		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));
-
-	/*
-	 * Bspec says:
-	 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
-	 * 3DSTATE_SF number of SF output attributes is more than 16."
-	 */
-	I915_WRITE(_3D_CHICKEN3,
-		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));
-
-	/*
-	 * According to the spec the following bits should be
-	 * set in order to enable memory self-refresh and fbc:
-	 * The bit21 and bit22 of 0x42000
-	 * The bit21 and bit22 of 0x42004
-	 * The bit5 and bit7 of 0x42020
-	 * The bit14 of 0x70180
-	 * The bit14 of 0x71180
-	 *
-	 * WaFbcAsynchFlipDisableFbcQueue:snb
-	 */
-	I915_WRITE(ILK_DISPLAY_CHICKEN1,
-		   I915_READ(ILK_DISPLAY_CHICKEN1) |
-		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
-	I915_WRITE(ILK_DISPLAY_CHICKEN2,
-		   I915_READ(ILK_DISPLAY_CHICKEN2) |
-		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
-	I915_WRITE(ILK_DSPCLK_GATE_D,
-		   I915_READ(ILK_DSPCLK_GATE_D) |
-		   ILK_DPARBUNIT_CLOCK_GATE_ENABLE  |
-		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
-
-	g4x_disable_trickle_feed(dev_priv);
-
-	cpt_init_clock_gating(dev_priv);
-
-	gen6_check_mch_setup(dev_priv);
-}
-
-static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
-{
-	u32 reg = I915_READ(GEN7_FF_THREAD_MODE);
-
-	/*
-	 * WaVSThreadDispatchOverride:ivb,vlv
-	 *
-	 * This actually overrides the dispatch
-	 * mode for all thread types.
-	 */
-	reg &= ~GEN7_FF_SCHED_MASK;
-	reg |= GEN7_FF_TS_SCHED_HW;
-	reg |= GEN7_FF_VS_SCHED_HW;
-	reg |= GEN7_FF_DS_SCHED_HW;
-
-	I915_WRITE(GEN7_FF_THREAD_MODE, reg);
-}
-
-static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
+static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
 {
 	/*
 	 * TODO: this bit should only be enabled when really needed, then
@@ -9733,217 +7334,8 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
 	}
 }
 
-static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
-{
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
-	/*
-	 * N = val - 0xb7
-	 * Slow = Fast = GPLL ref * N
-	 */
-	return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * (val - 0xb7), 1000);
-}
-
-static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
-{
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
-	return DIV_ROUND_CLOSEST(1000 * val, rps->gpll_ref_freq) + 0xb7;
-}
-
-static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
-{
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
-	/*
-	 * N = val / 2
-	 * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
-	 */
-	return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * val, 2 * 2 * 1000);
-}
-
-static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
-{
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
-	/* CHV needs even values */
-	return DIV_ROUND_CLOSEST(2 * 1000 * val, rps->gpll_ref_freq) * 2;
-}
-
-int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
-{
-	if (INTEL_GEN(dev_priv) >= 9)
-		return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
-					 GEN9_FREQ_SCALER);
-	else if (IS_CHERRYVIEW(dev_priv))
-		return chv_gpu_freq(dev_priv, val);
-	else if (IS_VALLEYVIEW(dev_priv))
-		return byt_gpu_freq(dev_priv, val);
-	else
-		return val * GT_FREQUENCY_MULTIPLIER;
-}
-
-int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
-{
-	if (INTEL_GEN(dev_priv) >= 9)
-		return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
-					 GT_FREQUENCY_MULTIPLIER);
-	else if (IS_CHERRYVIEW(dev_priv))
-		return chv_freq_opcode(dev_priv, val);
-	else if (IS_VALLEYVIEW(dev_priv))
-		return byt_freq_opcode(dev_priv, val);
-	else
-		return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
-}
-
 void intel_pm_setup(struct drm_i915_private *dev_priv)
 {
-	mutex_init(&dev_priv->gt_pm.rps.lock);
-	mutex_init(&dev_priv->gt_pm.rps.power.mutex);
-
-	atomic_set(&dev_priv->gt_pm.rps.num_waiters, 0);
-
 	dev_priv->runtime_pm.suspended = false;
 	atomic_set(&dev_priv->runtime_pm.wakeref_count, 0);
 }
-
-static u64 vlv_residency_raw(struct drm_i915_private *dev_priv,
-			     const i915_reg_t reg)
-{
-	u32 lower, upper, tmp;
-	int loop = 2;
-
-	/*
-	 * The registers accessed do not need forcewake. We borrow the
-	 * uncore lock to prevent concurrent access to the range register.
-	 */
-	lockdep_assert_held(&dev_priv->uncore.lock);
-
-	/*
-	 * vlv and chv residency counters are 40 bits in width.
-	 * With a control bit, we can choose between upper or lower
-	 * 32bit window into this counter.
-	 *
-	 * Although we always use the counter in high-range mode elsewhere,
-	 * userspace may attempt to read the value before rc6 is initialised,
-	 * before we have set the default VLV_COUNTER_CONTROL value. So always
-	 * set the high bit to be safe.
-	 */
-	I915_WRITE_FW(VLV_COUNTER_CONTROL,
-		      _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
-	upper = I915_READ_FW(reg);
-	do {
-		tmp = upper;
-
-		I915_WRITE_FW(VLV_COUNTER_CONTROL,
-			      _MASKED_BIT_DISABLE(VLV_COUNT_RANGE_HIGH));
-		lower = I915_READ_FW(reg);
-
-		I915_WRITE_FW(VLV_COUNTER_CONTROL,
-			      _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
-		upper = I915_READ_FW(reg);
-	} while (upper != tmp && --loop);
-
-	/*
-	 * Everywhere else we always use VLV_COUNTER_CONTROL with the
-	 * VLV_COUNT_RANGE_HIGH bit set - so it is safe to leave it set
-	 * now.
-	 */
-
-	return lower | (u64)upper << 8;
-}
-
-u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
-			   const i915_reg_t reg)
-{
-	struct intel_uncore *uncore = &dev_priv->uncore;
-	u64 time_hw, prev_hw, overflow_hw;
-	unsigned int fw_domains;
-	unsigned long flags;
-	unsigned int i;
-	u32 mul, div;
-
-	if (!HAS_RC6(dev_priv))
-		return 0;
-
-	/*
-	 * Store previous hw counter values for counter wrap-around handling.
-	 *
-	 * There are only four interesting registers and they live next to each
-	 * other so we can use the relative address, compared to the smallest
-	 * one as the index into driver storage.
-	 */
-	i = (i915_mmio_reg_offset(reg) -
-	     i915_mmio_reg_offset(GEN6_GT_GFX_RC6_LOCKED)) / sizeof(u32);
-	if (WARN_ON_ONCE(i >= ARRAY_SIZE(dev_priv->gt_pm.rc6.cur_residency)))
-		return 0;
-
-	fw_domains = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
-
-	spin_lock_irqsave(&uncore->lock, flags);
-	intel_uncore_forcewake_get__locked(uncore, fw_domains);
-
-	/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-		mul = 1000000;
-		div = dev_priv->czclk_freq;
-		overflow_hw = BIT_ULL(40);
-		time_hw = vlv_residency_raw(dev_priv, reg);
-	} else {
-		/* 833.33ns units on Gen9LP, 1.28us elsewhere. */
-		if (IS_GEN9_LP(dev_priv)) {
-			mul = 10000;
-			div = 12;
-		} else {
-			mul = 1280;
-			div = 1;
-		}
-
-		overflow_hw = BIT_ULL(32);
-		time_hw = intel_uncore_read_fw(uncore, reg);
-	}
-
-	/*
-	 * Counter wrap handling.
-	 *
-	 * This relies on a sufficient frequency of queries, otherwise the
-	 * counters can still wrap.
-	 */
-	prev_hw = dev_priv->gt_pm.rc6.prev_hw_residency[i];
-	dev_priv->gt_pm.rc6.prev_hw_residency[i] = time_hw;
-
-	/* RC6 delta from last sample. */
-	if (time_hw >= prev_hw)
-		time_hw -= prev_hw;
-	else
-		time_hw += overflow_hw - prev_hw;
-
-	/* Add delta to RC6 extended raw driver copy. */
-	time_hw += dev_priv->gt_pm.rc6.cur_residency[i];
-	dev_priv->gt_pm.rc6.cur_residency[i] = time_hw;
-
-	intel_uncore_forcewake_put__locked(uncore, fw_domains);
-	spin_unlock_irqrestore(&uncore->lock, flags);
-
-	return mul_u64_u32_div(time_hw, mul, div);
-}
-
-u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
-			   i915_reg_t reg)
-{
-	return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(dev_priv, reg), 1000);
-}
-
-u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat)
-{
-	u32 cagf;
-
-	if (INTEL_GEN(dev_priv) >= 9)
-		cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
-	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-		cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
-	else
-		cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
-
-	return  cagf;
-}
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index e3573e1e16e3..b56e6285d1c3 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -29,16 +29,6 @@ void intel_update_watermarks(struct intel_crtc *crtc);
 void intel_init_pm(struct drm_i915_private *dev_priv);
 void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv);
 void intel_pm_setup(struct drm_i915_private *dev_priv);
-void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
-void intel_gpu_ips_teardown(void);
-void intel_init_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
-void gen6_rps_busy(struct drm_i915_private *dev_priv);
-void gen6_rps_idle(struct drm_i915_private *dev_priv);
-void gen6_rps_boost(struct i915_request *rq);
 void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv);
 void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv);
 void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv);
@@ -70,21 +60,6 @@ int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
 void intel_init_ipc(struct drm_i915_private *dev_priv);
 void intel_enable_ipc(struct drm_i915_private *dev_priv);
 
-int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
-int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
-u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv, i915_reg_t reg);
-u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv, i915_reg_t reg);
-
-u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat1);
-
-unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
-unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
-unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
-void i915_update_gfx_val(struct drm_i915_private *dev_priv);
-
-bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
-int intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
-void intel_rps_mark_interactive(struct drm_i915_private *i915, bool interactive);
 bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable);
 
 #endif /* __INTEL_PM_H__ */
-- 
2.22.0
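An aside for readers skimming the removal above: the RC6 residency helpers leaving intel_pm.c all reduce to the same bookkeeping, extending a narrow, wrapping hardware counter into a monotonically growing 64-bit value. The following is a hedged, stand-alone restatement of that wrap handling; the helper name and signature are made up for illustration and are not the driver's API.

	#include <stdint.h>

	/*
	 * Extend a hardware counter that is only "overflow" wide (BIT_ULL(40)
	 * on vlv/chv, BIT_ULL(32) elsewhere) into a monotonic 64-bit
	 * accumulator, mirroring the delta/accumulate steps of
	 * intel_rc6_residency_ns() above.
	 */
	static uint64_t extend_residency(uint64_t raw, uint64_t overflow,
					 uint64_t *prev_raw, uint64_t *accum)
	{
		uint64_t delta;

		if (raw >= *prev_raw)
			delta = raw - *prev_raw;		/* no wrap since last sample */
		else
			delta = raw + overflow - *prev_raw;	/* wrapped exactly once */

		*prev_raw = raw;
		*accum += delta;

		return *accum;
	}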

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 33+ messages in thread

* Re: [PATCH 05/24] drm/i915/selftests: Ignore self-preemption suppression under gvt
  2019-07-15  8:09 ` [PATCH 05/24] drm/i915/selftests: Ignore self-preemption suppression under gvt Chris Wilson
@ 2019-07-15  9:11   ` Zhenyu Wang
  0 siblings, 0 replies; 33+ messages in thread
From: Zhenyu Wang @ 2019-07-15  9:11 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx



On 2019.07.15 09:09:27 +0100, Chris Wilson wrote:
> GVT forces single port submission of individual requests. We do not
> enjoy the context amalgamation that the test depends upon for its
> setup (where port 0 has a large number of requests with a priority
> change somewhere in the middle). Under the single request submission
> of gvt, it is quite possible for the preemption event to occur while
> another context is active, and so there is a real need to act upon
> that preemption.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> Cc: Zhenyu Wang <zhenyuw@linux.intel.com>
> ---
>  drivers/gpu/drm/i915/gt/selftest_lrc.c | 3 +++
>  1 file changed, 3 insertions(+)
> 
> diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
> index bde0164551b1..071e21c0ac15 100644
> --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
> +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
> @@ -854,6 +854,9 @@ static int live_suppress_self_preempt(void *arg)
>  	if (USES_GUC_SUBMISSION(i915))
>  		return 0; /* presume black blox */
>  
> +	if (intel_vgpu_active(i915))
> +		return 0; /* GVT forces single port & request submission */
> +
>  	mutex_lock(&i915->drm.struct_mutex);
>  	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
>  

Acked-by: Zhenyu Wang <zhenyuw@linux.intel.com>

-- 
Open Source Technology Center, Intel ltd.

$gpg --keyserver wwwkeys.pgp.net --recv-keys 4D781827


_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 33+ messages in thread

* ✗ Fi.CI.CHECKPATCH: warning for series starting with [01/24] drm/i915: Hide unshrinkable context objects from the shrinker
  2019-07-15  8:09 [PATCH 01/24] drm/i915: Hide unshrinkable context objects from the shrinker Chris Wilson
                   ` (22 preceding siblings ...)
  2019-07-15  8:09 ` [PATCH 24/24] drm/i915/gt: Extract GT runtime power management from intel_pm.c Chris Wilson
@ 2019-07-15 10:11 ` Patchwork
  2019-07-15 10:22 ` ✗ Fi.CI.SPARSE: " Patchwork
                   ` (2 subsequent siblings)
  26 siblings, 0 replies; 33+ messages in thread
From: Patchwork @ 2019-07-15 10:11 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [01/24] drm/i915: Hide unshrinkable context objects from the shrinker
URL   : https://patchwork.freedesktop.org/series/63704/
State : warning

== Summary ==

$ dim checkpatch origin/drm-tip
27b7aa232c3a drm/i915: Hide unshrinkable context objects from the shrinker
1da771964799 drm/i915/gt: Move the [class][inst] lookup for engines onto the GT
-:196: WARNING:FILE_PATH_CHANGES: added, moved or deleted file(s), does MAINTAINERS need updating?
#196: 
new file mode 100644

-:201: WARNING:SPDX_LICENSE_TAG: Missing or malformed SPDX-License-Identifier tag in line 1
#201: FILE: drivers/gpu/drm/i915/gt/intel_engine_user.c:1:
+/*

-:202: WARNING:SPDX_LICENSE_TAG: Misplaced SPDX-License-Identifier tag - use line 1 instead
#202: FILE: drivers/gpu/drm/i915/gt/intel_engine_user.c:2:
+ * SPDX-License-Identifier: MIT

-:272: WARNING:SPDX_LICENSE_TAG: Missing or malformed SPDX-License-Identifier tag in line 1
#272: FILE: drivers/gpu/drm/i915/gt/intel_engine_user.h:1:
+/*

-:273: WARNING:SPDX_LICENSE_TAG: Misplaced SPDX-License-Identifier tag - use line 1 instead
#273: FILE: drivers/gpu/drm/i915/gt/intel_engine_user.h:2:
+ * SPDX-License-Identifier: MIT

total: 0 errors, 5 warnings, 0 checks, 335 lines checked
d8793a63f716 drm/i915/gtt: Recursive ppgtt alloc for gen8
0531b2530a95 drm/i915/gtt: Tidy up ppgtt insertion for gen8
-:235: WARNING:LONG_LINE: line over 100 characters
#235: FILE: drivers/gpu/drm/i915/i915_gem_gtt.c:1331:
+							    i915_pd_entry(pml4, gen8_pd_index(idx, 3)),

total: 0 errors, 1 warnings, 0 checks, 334 lines checked
a30ca32dab48 drm/i915: Lock the engine while dumping the active request
62bdc57ea3ba drm/i915: Rely on spinlock protection for GPU error capture
0a6f23b937dd drm/i915/oa: Reconfigure contexts on the fly
bf2f0758edec drm/i915: Add to timeline requires the timeline mutex
5e0cd64beff0 drm/i915: Teach execbuffer to take the engine wakeref not GT
a4b30a0defce drm/i915/gt: Track timeline activeness in enter/exit
5b81a90414d4 drm/i915/gt: Convert timeline tracking to spinlock
d531da740339 drm/i915/gt: Guard timeline pinning with its own mutex
eb12270767ec drm/i915: Protect request retirement with timeline->mutex
d8457aa7dc9c drm/i915: Replace struct_mutex for batch pool serialisation
-:305: WARNING:FILE_PATH_CHANGES: added, moved or deleted file(s), does MAINTAINERS need updating?
#305: 
new file mode 100644

-:310: WARNING:SPDX_LICENSE_TAG: Missing or malformed SPDX-License-Identifier tag in line 1
#310: FILE: drivers/gpu/drm/i915/gt/intel_engine_pool.c:1:
+/*

-:311: WARNING:SPDX_LICENSE_TAG: Misplaced SPDX-License-Identifier tag - use line 1 instead
#311: FILE: drivers/gpu/drm/i915/gt/intel_engine_pool.c:2:
+ * SPDX-License-Identifier: MIT

-:493: WARNING:SPDX_LICENSE_TAG: Missing or malformed SPDX-License-Identifier tag in line 1
#493: FILE: drivers/gpu/drm/i915/gt/intel_engine_pool.h:1:
+/*

-:494: WARNING:SPDX_LICENSE_TAG: Misplaced SPDX-License-Identifier tag - use line 1 instead
#494: FILE: drivers/gpu/drm/i915/gt/intel_engine_pool.h:2:
+ * SPDX-License-Identifier: MIT

-:533: WARNING:SPDX_LICENSE_TAG: Missing or malformed SPDX-License-Identifier tag in line 1
#533: FILE: drivers/gpu/drm/i915/gt/intel_engine_pool_types.h:1:
+/*

-:534: WARNING:SPDX_LICENSE_TAG: Misplaced SPDX-License-Identifier tag - use line 1 instead
#534: FILE: drivers/gpu/drm/i915/gt/intel_engine_pool_types.h:2:
+ * SPDX-License-Identifier: MIT

-:550: CHECK:UNCOMMENTED_DEFINITION: spinlock_t definition without comment
#550: FILE: drivers/gpu/drm/i915/gt/intel_engine_pool_types.h:18:
+	spinlock_t lock;

total: 0 errors, 7 warnings, 1 checks, 606 lines checked
7ceaac40a247 drm/i915/gt: Mark context->active_count as protected by timeline->mutex
febf50f4193b drm/i915: Forgo last_fence active request tracking
5397db1e54c5 drm/i915/overlay: Switch to using i915_active tracking
ae607b7efb4f drm/i915: Extract intel_frontbuffer active tracking
987bde495314 drm/i915: Markup expected timeline locks for i915_active
-:281: CHECK:UNCOMMENTED_DEFINITION: struct mutex definition without comment
#281: FILE: drivers/gpu/drm/i915/i915_active_types.h:28:
+	struct mutex *lock;

total: 0 errors, 0 warnings, 1 checks, 233 lines checked
82fde2f441e1 drm/i915: Remove logical HW ID
29b620e651d8 drm/i915: Move context management under GEM
-:451: CHECK:UNCOMMENTED_DEFINITION: struct mutex definition without comment
#451: FILE: drivers/gpu/drm/i915/i915_drv.h:1828:
+			struct mutex mutex;

total: 0 errors, 0 warnings, 1 checks, 568 lines checked
ed9fdf22bf8e drm/i915: Make debugfs/per_file_stats scale better
-:94: WARNING:LONG_LINE: line over 100 characters
#94: FILE: drivers/gpu/drm/i915/i915_debugfs.c:296:
+		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu unbound, %llu closed)\n", \

total: 0 errors, 1 warnings, 0 checks, 85 lines checked
20cfb02ec660 drm/i915/gt: Extract GT runtime power management from intel_pm.c
-:254: WARNING:FILE_PATH_CHANGES: added, moved or deleted file(s), does MAINTAINERS need updating?
#254: 
new file mode 100644

-:259: WARNING:SPDX_LICENSE_TAG: Missing or malformed SPDX-License-Identifier tag in line 1
#259: FILE: drivers/gpu/drm/i915/gt/intel_gt_irq.c:1:
+/*

-:260: WARNING:SPDX_LICENSE_TAG: Misplaced SPDX-License-Identifier tag - use line 1 instead
#260: FILE: drivers/gpu/drm/i915/gt/intel_gt_irq.c:2:
+ * SPDX-License-Identifier: MIT

-:721: WARNING:SPDX_LICENSE_TAG: Missing or malformed SPDX-License-Identifier tag in line 1
#721: FILE: drivers/gpu/drm/i915/gt/intel_gt_irq.h:1:
+/*

-:722: WARNING:SPDX_LICENSE_TAG: Misplaced SPDX-License-Identifier tag - use line 1 instead
#722: FILE: drivers/gpu/drm/i915/gt/intel_gt_irq.h:2:
+ * SPDX-License-Identifier: MIT

-:872: WARNING:SPDX_LICENSE_TAG: Missing or malformed SPDX-License-Identifier tag in line 1
#872: FILE: drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c:1:
+/*

-:873: WARNING:SPDX_LICENSE_TAG: Misplaced SPDX-License-Identifier tag - use line 1 instead
#873: FILE: drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c:2:
+ * SPDX-License-Identifier: MIT

-:987: WARNING:SPDX_LICENSE_TAG: Missing or malformed SPDX-License-Identifier tag in line 1
#987: FILE: drivers/gpu/drm/i915/gt/intel_gt_pm_irq.h:1:
+/*

-:988: WARNING:SPDX_LICENSE_TAG: Misplaced SPDX-License-Identifier tag - use line 1 instead
#988: FILE: drivers/gpu/drm/i915/gt/intel_gt_pm_irq.h:2:
+ * SPDX-License-Identifier: MIT

-:1039: CHECK:UNCOMMENTED_DEFINITION: spinlock_t definition without comment
#1039: FILE: drivers/gpu/drm/i915/gt/intel_gt_types.h:81:
+	spinlock_t irq_lock;

-:1052: WARNING:SPDX_LICENSE_TAG: Missing or malformed SPDX-License-Identifier tag in line 1
#1052: FILE: drivers/gpu/drm/i915/gt/intel_llc.c:1:
+/*

-:1053: WARNING:SPDX_LICENSE_TAG: Misplaced SPDX-License-Identifier tag - use line 1 instead
#1053: FILE: drivers/gpu/drm/i915/gt/intel_llc.c:2:
+ * SPDX-License-Identifier: MIT

-:1178: WARNING:SPDX_LICENSE_TAG: Missing or malformed SPDX-License-Identifier tag in line 1
#1178: FILE: drivers/gpu/drm/i915/gt/intel_llc.h:1:
+/*

-:1179: WARNING:SPDX_LICENSE_TAG: Misplaced SPDX-License-Identifier tag - use line 1 instead
#1179: FILE: drivers/gpu/drm/i915/gt/intel_llc.h:2:
+ * SPDX-License-Identifier: MIT

-:1199: WARNING:SPDX_LICENSE_TAG: Missing or malformed SPDX-License-Identifier tag in line 1
#1199: FILE: drivers/gpu/drm/i915/gt/intel_llc_types.h:1:
+/*

-:1200: WARNING:SPDX_LICENSE_TAG: Misplaced SPDX-License-Identifier tag - use line 1 instead
#1200: FILE: drivers/gpu/drm/i915/gt/intel_llc_types.h:2:
+ * SPDX-License-Identifier: MIT

-:1218: WARNING:SPDX_LICENSE_TAG: Missing or malformed SPDX-License-Identifier tag in line 1
#1218: FILE: drivers/gpu/drm/i915/gt/intel_rc6.c:1:
+/*

-:1219: WARNING:SPDX_LICENSE_TAG: Misplaced SPDX-License-Identifier tag - use line 1 instead
#1219: FILE: drivers/gpu/drm/i915/gt/intel_rc6.c:2:
+ * SPDX-License-Identifier: MIT

-:1889: WARNING:SPDX_LICENSE_TAG: Missing or malformed SPDX-License-Identifier tag in line 1
#1889: FILE: drivers/gpu/drm/i915/gt/intel_rc6.h:1:
+/*

-:1890: WARNING:SPDX_LICENSE_TAG: Misplaced SPDX-License-Identifier tag - use line 1 instead
#1890: FILE: drivers/gpu/drm/i915/gt/intel_rc6.h:2:
+ * SPDX-License-Identifier: MIT

-:1919: WARNING:SPDX_LICENSE_TAG: Missing or malformed SPDX-License-Identifier tag in line 1
#1919: FILE: drivers/gpu/drm/i915/gt/intel_rc6_types.h:1:
+/*

-:1920: WARNING:SPDX_LICENSE_TAG: Misplaced SPDX-License-Identifier tag - use line 1 instead
#1920: FILE: drivers/gpu/drm/i915/gt/intel_rc6_types.h:2:
+ * SPDX-License-Identifier: MIT

-:2014: WARNING:SPDX_LICENSE_TAG: Missing or malformed SPDX-License-Identifier tag in line 1
#2014: FILE: drivers/gpu/drm/i915/gt/intel_rps.c:1:
+/*

-:2015: WARNING:SPDX_LICENSE_TAG: Misplaced SPDX-License-Identifier tag - use line 1 instead
#2015: FILE: drivers/gpu/drm/i915/gt/intel_rps.c:2:
+ * SPDX-License-Identifier: MIT

-:2059: WARNING:LONG_LINE: line over 100 characters
#2059: FILE: drivers/gpu/drm/i915/gt/intel_rps.c:46:
+		mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;

-:3005: ERROR:TRAILING_WHITESPACE: trailing whitespace
#3005: FILE: drivers/gpu/drm/i915/gt/intel_rps.c:992:
+       $

-:3005: WARNING:LEADING_SPACE: please, no spaces at the start of a line
#3005: FILE: drivers/gpu/drm/i915/gt/intel_rps.c:992:
+       $

-:3890: WARNING:SPDX_LICENSE_TAG: Missing or malformed SPDX-License-Identifier tag in line 1
#3890: FILE: drivers/gpu/drm/i915/gt/intel_rps.h:1:
+/*

-:3891: WARNING:SPDX_LICENSE_TAG: Misplaced SPDX-License-Identifier tag - use line 1 instead
#3891: FILE: drivers/gpu/drm/i915/gt/intel_rps.h:2:
+ * SPDX-License-Identifier: MIT

-:3933: WARNING:SPDX_LICENSE_TAG: Missing or malformed SPDX-License-Identifier tag in line 1
#3933: FILE: drivers/gpu/drm/i915/gt/intel_rps_types.h:1:
+/*

-:3934: WARNING:SPDX_LICENSE_TAG: Misplaced SPDX-License-Identifier tag - use line 1 instead
#3934: FILE: drivers/gpu/drm/i915/gt/intel_rps_types.h:2:
+ * SPDX-License-Identifier: MIT

-:4008: CHECK:UNCOMMENTED_DEFINITION: struct mutex definition without comment
#4008: FILE: drivers/gpu/drm/i915/gt/intel_rps_types.h:76:
+		struct mutex mutex;

total: 1 errors, 29 warnings, 2 checks, 9248 lines checked
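
Most of the warnings above come down to two patterns: the SPDX-License-Identifier tag not sitting on the very first line of the new files, and new locks declared without a comment saying what they protect. As a rough, hypothetical sketch of what checkpatch wants for a new .c file (a header would use the /* SPDX-License-Identifier: MIT */ form, still on line 1); none of this is taken from the series itself:

	// SPDX-License-Identifier: MIT
	/*
	 * Copyright © 2019 Intel Corporation
	 */

	#include <linux/list.h>
	#include <linux/spinlock.h>

	/* Hypothetical container, for illustration only. */
	struct example_cache {
		spinlock_t lock;		/* protects free_list */
		struct list_head free_list;
	};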

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 33+ messages in thread

* ✗ Fi.CI.SPARSE: warning for series starting with [01/24] drm/i915: Hide unshrinkable context objects from the shrinker
  2019-07-15  8:09 [PATCH 01/24] drm/i915: Hide unshrinkable context objects from the shrinker Chris Wilson
                   ` (23 preceding siblings ...)
  2019-07-15 10:11 ` ✗ Fi.CI.CHECKPATCH: warning for series starting with [01/24] drm/i915: Hide unshrinkable context objects from the shrinker Patchwork
@ 2019-07-15 10:22 ` Patchwork
  2019-07-15 11:00 ` ✓ Fi.CI.BAT: success " Patchwork
  2019-07-15 13:36 ` ✗ Fi.CI.IGT: failure " Patchwork
  26 siblings, 0 replies; 33+ messages in thread
From: Patchwork @ 2019-07-15 10:22 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [01/24] drm/i915: Hide unshrinkable context objects from the shrinker
URL   : https://patchwork.freedesktop.org/series/63704/
State : warning

== Summary ==

$ dim sparse origin/drm-tip
Sparse version: v0.5.2
Commit: drm/i915: Hide unshrinkable context objects from the shrinker
Okay!

Commit: drm/i915/gt: Move the [class][inst] lookup for engines onto the GT
+./include/uapi/linux/perf_event.h:147:56: warning: cast truncates bits from constant value (8000000000000000 becomes 0)

Commit: drm/i915/gtt: Recursive ppgtt alloc for gen8
-O:drivers/gpu/drm/i915/i915_gem_gtt.c:1046:9: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/i915_gem_gtt.c:1046:9: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/i915_gem_gtt.c:1095:9: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/i915_gem_gtt.c:1095:9: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/i915_gem_gtt.c:1159:9: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/i915_gem_gtt.c:1159:9: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/i915_gem_gtt.c:1504:9: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/i915_gem_gtt.c:1504:9: warning: expression using sizeof(void)
-drivers/gpu/drm/i915/i915_gem_gtt.c:354:14: warning: expression using sizeof(void)
-drivers/gpu/drm/i915/i915_gem_gtt.c:354:14: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/i915_gem_gtt.c:354:14: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/i915_gem_gtt.c:354:14: warning: expression using sizeof(void)

Commit: drm/i915/gtt: Tidy up ppgtt insertion for gen8
Okay!

Commit: drm/i915: Lock the engine while dumping the active request
Okay!

Commit: drm/i915: Rely on spinlock protection for GPU error capture
-O:drivers/gpu/drm/i915/i915_gpu_error.c:1007:21: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/i915_gpu_error.c:1007:21: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/i915_gpu_error.c:1021:21: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/i915_gpu_error.c:1021:21: warning: expression using sizeof(void)
-./include/linux/slab.h:666:13: error: not a function <noident>
-./include/linux/slab.h:666:13: error: not a function <noident>

Commit: drm/i915/oa: Reconfigure contexts on the fly
Okay!

Commit: drm/i915: Add to timeline requires the timeline mutex
Okay!

Commit: drm/i915: Teach execbuffer to take the engine wakeref not GT
Okay!

Commit: drm/i915/gt: Track timeline activeness in enter/exit
Okay!

Commit: drm/i915/gt: Convert timeline tracking to spinlock
Okay!

Commit: drm/i915/gt: Guard timeline pinning with its own mutex
Okay!

Commit: drm/i915: Protect request retirement with timeline->mutex
Okay!

Commit: drm/i915: Replace struct_mutex for batch pool serialisation
+./include/uapi/linux/perf_event.h:147:56: warning: cast truncates bits from constant value (8000000000000000 becomes 0)

Commit: drm/i915/gt: Mark context->active_count as protected by timeline->mutex
Okay!

Commit: drm/i915: Forgo last_fence active request tracking
Okay!

Commit: drm/i915/overlay: Switch to using i915_active tracking
Okay!

Commit: drm/i915: Extract intel_frontbuffer active tracking
Okay!

Commit: drm/i915: Markup expected timeline locks for i915_active
Okay!

Commit: drm/i915: Remove logical HW ID
Okay!

Commit: drm/i915: Move context management under GEM
-O:drivers/gpu/drm/i915/i915_sysfs.c:176:17: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/i915_sysfs.c:176:17: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/i915_sysfs.c:175:17: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/i915_sysfs.c:175:17: warning: expression using sizeof(void)

Commit: drm/i915: Make debugfs/per_file_stats scale better
Okay!

Commit: drm/i915/gt: Extract GT runtime power management from intel_pm.c
+drivers/gpu/drm/i915/gt/intel_llc.c:83:37: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_llc.c:83:37: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_llc.c:86:37: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_llc.c:86:37: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:1060:15: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:1091:16: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:1433:22: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:1433:22: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:1522:20: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:1522:20: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:1522:20: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:1522:20: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:1522:20: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:1522:20: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:1522:20: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:1522:20: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:1522:20: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:1522:20: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:1522:20: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:1522:20: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:1522:20: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:1522:20: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:1601:20: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:1601:20: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:1601:20: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:1601:20: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:1601:20: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:1601:20: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:1601:20: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:1601:20: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:1601:20: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:1601:20: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:1601:20: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:1601:20: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:1601:20: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:1601:20: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:255:24: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:721:16: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:721:16: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:722:16: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:722:16: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:722:16: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:722:16: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:722:16: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:722:16: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:722:16: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:722:16: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:722:16: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:722:16: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:722:16: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:722:16: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:722:16: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:722:16: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:857:33: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:857:33: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:857:33: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:857:33: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:857:33: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:857:33: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:857:33: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:857:33: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:857:33: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:857:33: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:857:33: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:857:33: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:857:33: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/gt/intel_rps.c:857:33: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/i915_irq.c:1346:22: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/i915_irq.c:1346:22: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/i915_irq.c:1439:21: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/i915_irq.c:1439:21: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/i915_irq.c:1439:21: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/i915_irq.c:1439:21: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/i915_irq.c:1439:21: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/i915_irq.c:1439:21: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/i915_irq.c:1439:21: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/i915_irq.c:1439:21: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/i915_irq.c:1439:21: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/i915_irq.c:1439:21: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/i915_irq.c:1439:21: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/i915_irq.c:1439:21: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/i915_irq.c:1439:21: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/i915_irq.c:1439:21: warning: expression using sizeof(void)
+./include/uapi/linux/perf_event.h:147:56: warning: cast truncates bits from constant value (8000000000000000 becomes 0)
+./include/uapi/linux/perf_event.h:147:56: warning: cast truncates bits from constant value (8000000000000000 becomes 0)
+./include/uapi/linux/perf_event.h:147:56: warning: cast truncates bits from constant value (8000000000000000 becomes 0)
+./include/uapi/linux/perf_event.h:147:56: warning: cast truncates bits from constant value (8000000000000000 becomes 0)
+./include/uapi/linux/perf_event.h:147:56: warning: cast truncates bits from constant value (8000000000000000 becomes 0)
+./include/uapi/linux/perf_event.h:147:56: warning: cast truncates bits from constant value (8000000000000000 becomes 0)

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 33+ messages in thread

* ✓ Fi.CI.BAT: success for series starting with [01/24] drm/i915: Hide unshrinkable context objects from the shrinker
  2019-07-15  8:09 [PATCH 01/24] drm/i915: Hide unshrinkable context objects from the shrinker Chris Wilson
                   ` (24 preceding siblings ...)
  2019-07-15 10:22 ` ✗ Fi.CI.SPARSE: " Patchwork
@ 2019-07-15 11:00 ` Patchwork
  2019-07-15 13:36 ` ✗ Fi.CI.IGT: failure " Patchwork
  26 siblings, 0 replies; 33+ messages in thread
From: Patchwork @ 2019-07-15 11:00 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [01/24] drm/i915: Hide unshrinkable context objects from the shrinker
URL   : https://patchwork.freedesktop.org/series/63704/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_6483 -> Patchwork_13659
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/

Known issues
------------

  Here are the changes found in Patchwork_13659 that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@gem_ctx_create@basic-files:
    - fi-bsw-n3050:       [PASS][1] -> [INCOMPLETE][2] ([fdo#105876])
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6483/fi-bsw-n3050/igt@gem_ctx_create@basic-files.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/fi-bsw-n3050/igt@gem_ctx_create@basic-files.html
    - fi-icl-guc:         [PASS][3] -> [INCOMPLETE][4] ([fdo#107713] / [fdo#109100])
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6483/fi-icl-guc/igt@gem_ctx_create@basic-files.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/fi-icl-guc/igt@gem_ctx_create@basic-files.html

  * igt@kms_frontbuffer_tracking@basic:
    - fi-icl-dsi:         [PASS][5] -> [FAIL][6] ([fdo#103167])
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6483/fi-icl-dsi/igt@kms_frontbuffer_tracking@basic.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/fi-icl-dsi/igt@kms_frontbuffer_tracking@basic.html

  * igt@prime_vgem@basic-fence-flip:
    - fi-ilk-650:         [PASS][7] -> [DMESG-WARN][8] ([fdo#106387])
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6483/fi-ilk-650/igt@prime_vgem@basic-fence-flip.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/fi-ilk-650/igt@prime_vgem@basic-fence-flip.html

  
#### Possible fixes ####

  * igt@i915_hangman@error-state-basic:
    - fi-bsw-kefka:       [SKIP][9] ([fdo#109271]) -> [PASS][10]
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6483/fi-bsw-kefka/igt@i915_hangman@error-state-basic.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/fi-bsw-kefka/igt@i915_hangman@error-state-basic.html

  * igt@i915_selftest@live_execlists:
    - fi-skl-gvtdvm:      [DMESG-FAIL][11] ([fdo#111108]) -> [PASS][12]
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6483/fi-skl-gvtdvm/igt@i915_selftest@live_execlists.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/fi-skl-gvtdvm/igt@i915_selftest@live_execlists.html

  
  [fdo#103167]: https://bugs.freedesktop.org/show_bug.cgi?id=103167
  [fdo#105876]: https://bugs.freedesktop.org/show_bug.cgi?id=105876
  [fdo#106387]: https://bugs.freedesktop.org/show_bug.cgi?id=106387
  [fdo#107713]: https://bugs.freedesktop.org/show_bug.cgi?id=107713
  [fdo#109100]: https://bugs.freedesktop.org/show_bug.cgi?id=109100
  [fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
  [fdo#111108]: https://bugs.freedesktop.org/show_bug.cgi?id=111108


Participating hosts (49 -> 46)
------------------------------

  Additional (4): fi-icl-u4 fi-icl-u3 fi-icl-u2 fi-apl-guc 
  Missing    (7): fi-ilk-m540 fi-hsw-4200u fi-byt-squawks fi-bsw-cyan fi-icl-y fi-byt-clapper fi-bdw-samus 


Build changes
-------------

  * Linux: CI_DRM_6483 -> Patchwork_13659

  CI_DRM_6483: 4aea794468d67b8bd2a6d1fc07e2772007f98540 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5098: 41ff022b62b45a5b84504daa3537fa1b295b97c9 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_13659: 20cfb02ec6600ffcf88bc5f7f54d60fee4bf5167 @ git://anongit.freedesktop.org/gfx-ci/linux


== Linux commits ==

20cfb02ec660 drm/i915/gt: Extract GT runtime power management from intel_pm.c
ed9fdf22bf8e drm/i915: Make debugfs/per_file_stats scale better
29b620e651d8 drm/i915: Move context management under GEM
82fde2f441e1 drm/i915: Remove logical HW ID
987bde495314 drm/i915: Markup expected timeline locks for i915_active
ae607b7efb4f drm/i915: Extract intel_frontbuffer active tracking
5397db1e54c5 drm/i915/overlay: Switch to using i915_active tracking
febf50f4193b drm/i915: Forgo last_fence active request tracking
7ceaac40a247 drm/i915/gt: Mark context->active_count as protected by timeline->mutex
d8457aa7dc9c drm/i915: Replace struct_mutex for batch pool serialisation
eb12270767ec drm/i915: Protect request retirement with timeline->mutex
d531da740339 drm/i915/gt: Guard timeline pinning with its own mutex
5b81a90414d4 drm/i915/gt: Convert timeline tracking to spinlock
a4b30a0defce drm/i915/gt: Track timeline activeness in enter/exit
5e0cd64beff0 drm/i915: Teach execbuffer to take the engine wakeref not GT
bf2f0758edec drm/i915: Add to timeline requires the timeline mutex
0a6f23b937dd drm/i915/oa: Reconfigure contexts on the fly
62bdc57ea3ba drm/i915: Rely on spinlock protection for GPU error capture
a30ca32dab48 drm/i915: Lock the engine while dumping the active request
0531b2530a95 drm/i915/gtt: Tidy up ppgtt insertion for gen8
d8793a63f716 drm/i915/gtt: Recursive ppgtt alloc for gen8
1da771964799 drm/i915/gt: Move the [class][inst] lookup for engines onto the GT
27b7aa232c3a drm/i915: Hide unshrinkable context objects from the shrinker

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 33+ messages in thread

* ✗ Fi.CI.IGT: failure for series starting with [01/24] drm/i915: Hide unshrinkable context objects from the shrinker
  2019-07-15  8:09 [PATCH 01/24] drm/i915: Hide unshrinkable context objects from the shrinker Chris Wilson
                   ` (25 preceding siblings ...)
  2019-07-15 11:00 ` ✓ Fi.CI.BAT: success " Patchwork
@ 2019-07-15 13:36 ` Patchwork
  26 siblings, 0 replies; 33+ messages in thread
From: Patchwork @ 2019-07-15 13:36 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [01/24] drm/i915: Hide unshrinkable context objects from the shrinker
URL   : https://patchwork.freedesktop.org/series/63704/
State : failure

== Summary ==

CI Bug Log - changes from CI_DRM_6483_full -> Patchwork_13659_full
====================================================

Summary
-------

  **FAILURE**

  Serious unknown changes coming with Patchwork_13659_full absolutely need to be
  verified manually.
  
  If you think the reported changes have nothing to do with the changes
  introduced in Patchwork_13659_full, please notify your bug team to allow them
  to document this new failure mode, which will reduce false positives in CI.

  

Possible new issues
-------------------

  Here are the unknown changes that may have been introduced in Patchwork_13659_full:

### IGT changes ###

#### Possible regressions ####

  * igt@debugfs_test@emon_crash:
    - shard-iclb:         [PASS][1] -> [SKIP][2]
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6483/shard-iclb2/igt@debugfs_test@emon_crash.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/shard-iclb3/igt@debugfs_test@emon_crash.html

  * igt@gem_exec_parallel@bcs0-fds:
    - shard-hsw:          [PASS][3] -> [FAIL][4]
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6483/shard-hsw6/igt@gem_exec_parallel@bcs0-fds.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/shard-hsw5/igt@gem_exec_parallel@bcs0-fds.html

  * igt@gem_exec_parallel@contexts:
    - shard-hsw:          [PASS][5] -> [TIMEOUT][6]
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6483/shard-hsw6/igt@gem_exec_parallel@contexts.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/shard-hsw4/igt@gem_exec_parallel@contexts.html

  * igt@i915_pm_rps@min-max-config-idle:
    - shard-kbl:          [PASS][7] -> [FAIL][8]
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6483/shard-kbl3/igt@i915_pm_rps@min-max-config-idle.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/shard-kbl6/igt@i915_pm_rps@min-max-config-idle.html
    - shard-apl:          [PASS][9] -> [FAIL][10]
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6483/shard-apl4/igt@i915_pm_rps@min-max-config-idle.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/shard-apl2/igt@i915_pm_rps@min-max-config-idle.html
    - shard-glk:          [PASS][11] -> [FAIL][12]
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6483/shard-glk1/igt@i915_pm_rps@min-max-config-idle.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/shard-glk2/igt@i915_pm_rps@min-max-config-idle.html
    - shard-iclb:         [PASS][13] -> [FAIL][14]
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6483/shard-iclb7/igt@i915_pm_rps@min-max-config-idle.html
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/shard-iclb1/igt@i915_pm_rps@min-max-config-idle.html
    - shard-snb:          [PASS][15] -> [FAIL][16]
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6483/shard-snb7/igt@i915_pm_rps@min-max-config-idle.html
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/shard-snb1/igt@i915_pm_rps@min-max-config-idle.html

  * igt@i915_selftest@mock_timelines:
    - shard-skl:          [PASS][17] -> [INCOMPLETE][18]
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6483/shard-skl2/igt@i915_selftest@mock_timelines.html
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/shard-skl1/igt@i915_selftest@mock_timelines.html

  * igt@runner@aborted:
    - shard-hsw:          NOTRUN -> [FAIL][19]
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/shard-hsw7/igt@runner@aborted.html
    - shard-kbl:          NOTRUN -> [FAIL][20]
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/shard-kbl6/igt@runner@aborted.html
    - shard-apl:          NOTRUN -> [FAIL][21]
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/shard-apl4/igt@runner@aborted.html
    - shard-snb:          NOTRUN -> [FAIL][22]
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/shard-snb5/igt@runner@aborted.html

  
Known issues
------------

  Here are the changes found in Patchwork_13659_full that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@debugfs_test@emon_crash:
    - shard-skl:          [PASS][23] -> [SKIP][24] ([fdo#109271])
   [23]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6483/shard-skl1/igt@debugfs_test@emon_crash.html
   [24]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/shard-skl5/igt@debugfs_test@emon_crash.html
    - shard-apl:          [PASS][25] -> [SKIP][26] ([fdo#109271])
   [25]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6483/shard-apl3/igt@debugfs_test@emon_crash.html
   [26]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/shard-apl5/igt@debugfs_test@emon_crash.html
    - shard-snb:          [PASS][27] -> [SKIP][28] ([fdo#109271])
   [27]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6483/shard-snb6/igt@debugfs_test@emon_crash.html
   [28]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/shard-snb2/igt@debugfs_test@emon_crash.html
    - shard-kbl:          [PASS][29] -> [SKIP][30] ([fdo#109271])
   [29]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6483/shard-kbl6/igt@debugfs_test@emon_crash.html
   [30]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/shard-kbl1/igt@debugfs_test@emon_crash.html
    - shard-hsw:          [PASS][31] -> [SKIP][32] ([fdo#109271]) +1 similar issue
   [31]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6483/shard-hsw6/igt@debugfs_test@emon_crash.html
   [32]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/shard-hsw5/igt@debugfs_test@emon_crash.html
    - shard-glk:          [PASS][33] -> [SKIP][34] ([fdo#109271])
   [33]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6483/shard-glk6/igt@debugfs_test@emon_crash.html
   [34]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/shard-glk6/igt@debugfs_test@emon_crash.html

  * igt@gem_exec_balancer@smoke:
    - shard-iclb:         [PASS][35] -> [SKIP][36] ([fdo#110854])
   [35]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6483/shard-iclb2/igt@gem_exec_balancer@smoke.html
   [36]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/shard-iclb6/igt@gem_exec_balancer@smoke.html

  * igt@gem_exec_schedule@deep-bsd:
    - shard-apl:          [PASS][37] -> [INCOMPLETE][38] ([fdo#103927]) +1 similar issue
   [37]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6483/shard-apl8/igt@gem_exec_schedule@deep-bsd.html
   [38]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/shard-apl4/igt@gem_exec_schedule@deep-bsd.html

  * igt@i915_pm_rps@min-max-config-idle:
    - shard-hsw:          [PASS][39] -> [FAIL][40] ([fdo#102250])
   [39]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6483/shard-hsw2/igt@i915_pm_rps@min-max-config-idle.html
   [40]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/shard-hsw8/igt@i915_pm_rps@min-max-config-idle.html
    - shard-skl:          [PASS][41] -> [FAIL][42] ([fdo#102250])
   [41]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6483/shard-skl10/igt@i915_pm_rps@min-max-config-idle.html
   [42]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/shard-skl7/igt@i915_pm_rps@min-max-config-idle.html

  * igt@i915_selftest@mock_timelines:
    - shard-glk:          [PASS][43] -> [INCOMPLETE][44] ([fdo#103359] / [k.org#198133])
   [43]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6483/shard-glk4/igt@i915_selftest@mock_timelines.html
   [44]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/shard-glk1/igt@i915_selftest@mock_timelines.html
    - shard-hsw:          [PASS][45] -> [INCOMPLETE][46] ([fdo#103540])
   [45]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6483/shard-hsw2/igt@i915_selftest@mock_timelines.html
   [46]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/shard-hsw7/igt@i915_selftest@mock_timelines.html
    - shard-kbl:          [PASS][47] -> [INCOMPLETE][48] ([fdo#103665])
   [47]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6483/shard-kbl1/igt@i915_selftest@mock_timelines.html
   [48]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/shard-kbl6/igt@i915_selftest@mock_timelines.html
    - shard-iclb:         [PASS][49] -> [INCOMPLETE][50] ([fdo#107713]) +1 similar issue
   [49]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6483/shard-iclb2/igt@i915_selftest@mock_timelines.html
   [50]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/shard-iclb5/igt@i915_selftest@mock_timelines.html
    - shard-snb:          [PASS][51] -> [INCOMPLETE][52] ([fdo#105411])
   [51]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6483/shard-snb6/igt@i915_selftest@mock_timelines.html
   [52]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/shard-snb5/igt@i915_selftest@mock_timelines.html

  * igt@kms_draw_crc@draw-method-rgb565-mmap-wc-untiled:
    - shard-skl:          [PASS][53] -> [FAIL][54] ([fdo#103184] / [fdo#103232])
   [53]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6483/shard-skl8/igt@kms_draw_crc@draw-method-rgb565-mmap-wc-untiled.html
   [54]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/shard-skl7/igt@kms_draw_crc@draw-method-rgb565-mmap-wc-untiled.html

  * igt@kms_flip@flip-vs-expired-vblank:
    - shard-skl:          [PASS][55] -> [FAIL][56] ([fdo#105363])
   [55]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6483/shard-skl3/igt@kms_flip@flip-vs-expired-vblank.html
   [56]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/shard-skl10/igt@kms_flip@flip-vs-expired-vblank.html

  * igt@kms_flip@flip-vs-suspend-interruptible:
    - shard-apl:          [PASS][57] -> [DMESG-WARN][58] ([fdo#108566]) +3 similar issues
   [57]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6483/shard-apl7/igt@kms_flip@flip-vs-suspend-interruptible.html
   [58]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/shard-apl8/igt@kms_flip@flip-vs-suspend-interruptible.html

  * igt@kms_frontbuffer_tracking@basic:
    - shard-iclb:         [PASS][59] -> [FAIL][60] ([fdo#103167])
   [59]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6483/shard-iclb4/igt@kms_frontbuffer_tracking@basic.html
   [60]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/shard-iclb8/igt@kms_frontbuffer_tracking@basic.html

  * igt@kms_psr2_su@page_flip:
    - shard-iclb:         [PASS][61] -> [SKIP][62] ([fdo#109642] / [fdo#111068])
   [61]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6483/shard-iclb2/igt@kms_psr2_su@page_flip.html
   [62]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/shard-iclb4/igt@kms_psr2_su@page_flip.html

  * igt@kms_setmode@basic:
    - shard-kbl:          [PASS][63] -> [FAIL][64] ([fdo#99912])
   [63]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6483/shard-kbl2/igt@kms_setmode@basic.html
   [64]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/shard-kbl7/igt@kms_setmode@basic.html

  
#### Possible fixes ####

  * igt@gem_workarounds@suspend-resume:
    - shard-apl:          [DMESG-WARN][65] ([fdo#108566]) -> [PASS][66] +2 similar issues
   [65]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6483/shard-apl4/igt@gem_workarounds@suspend-resume.html
   [66]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/shard-apl2/igt@gem_workarounds@suspend-resume.html

  * igt@kms_flip@flip-vs-expired-vblank:
    - shard-glk:          [FAIL][67] ([fdo#105363]) -> [PASS][68]
   [67]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6483/shard-glk8/igt@kms_flip@flip-vs-expired-vblank.html
   [68]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/shard-glk9/igt@kms_flip@flip-vs-expired-vblank.html

  * igt@kms_flip@flip-vs-expired-vblank-interruptible:
    - shard-skl:          [FAIL][69] ([fdo#105363]) -> [PASS][70]
   [69]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6483/shard-skl8/igt@kms_flip@flip-vs-expired-vblank-interruptible.html
   [70]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/shard-skl9/igt@kms_flip@flip-vs-expired-vblank-interruptible.html

  * igt@kms_frontbuffer_tracking@fbc-1p-offscren-pri-indfb-draw-mmap-wc:
    - shard-skl:          [FAIL][71] ([fdo#108040]) -> [PASS][72]
   [71]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6483/shard-skl10/igt@kms_frontbuffer_tracking@fbc-1p-offscren-pri-indfb-draw-mmap-wc.html
   [72]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/shard-skl9/igt@kms_frontbuffer_tracking@fbc-1p-offscren-pri-indfb-draw-mmap-wc.html

  * igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-shrfb-plflip-blt:
    - shard-iclb:         [FAIL][73] ([fdo#103167]) -> [PASS][74] +1 similar issue
   [73]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6483/shard-iclb1/igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-shrfb-plflip-blt.html
   [74]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/shard-iclb8/igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-shrfb-plflip-blt.html

  * igt@kms_frontbuffer_tracking@fbcpsr-suspend:
    - shard-skl:          [INCOMPLETE][75] ([fdo#104108] / [fdo#106978]) -> [PASS][76]
   [75]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6483/shard-skl6/igt@kms_frontbuffer_tracking@fbcpsr-suspend.html
   [76]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/shard-skl9/igt@kms_frontbuffer_tracking@fbcpsr-suspend.html

  * igt@kms_plane_alpha_blend@pipe-c-coverage-7efc:
    - shard-skl:          [FAIL][77] ([fdo#108145] / [fdo#110403]) -> [PASS][78] +1 similar issue
   [77]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6483/shard-skl3/igt@kms_plane_alpha_blend@pipe-c-coverage-7efc.html
   [78]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/shard-skl10/igt@kms_plane_alpha_blend@pipe-c-coverage-7efc.html

  * igt@kms_psr@psr2_cursor_mmap_cpu:
    - shard-iclb:         [SKIP][79] ([fdo#109441]) -> [PASS][80] +2 similar issues
   [79]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6483/shard-iclb1/igt@kms_psr@psr2_cursor_mmap_cpu.html
   [80]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/shard-iclb2/igt@kms_psr@psr2_cursor_mmap_cpu.html

  * igt@kms_vblank@pipe-a-ts-continuation-dpms-suspend:
    - shard-snb:          [DMESG-WARN][81] ([fdo#102365]) -> [PASS][82]
   [81]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6483/shard-snb7/igt@kms_vblank@pipe-a-ts-continuation-dpms-suspend.html
   [82]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/shard-snb1/igt@kms_vblank@pipe-a-ts-continuation-dpms-suspend.html

  
  [fdo#102250]: https://bugs.freedesktop.org/show_bug.cgi?id=102250
  [fdo#102365]: https://bugs.freedesktop.org/show_bug.cgi?id=102365
  [fdo#103167]: https://bugs.freedesktop.org/show_bug.cgi?id=103167
  [fdo#103184]: https://bugs.freedesktop.org/show_bug.cgi?id=103184
  [fdo#103232]: https://bugs.freedesktop.org/show_bug.cgi?id=103232
  [fdo#103359]: https://bugs.freedesktop.org/show_bug.cgi?id=103359
  [fdo#103540]: https://bugs.freedesktop.org/show_bug.cgi?id=103540
  [fdo#103665]: https://bugs.freedesktop.org/show_bug.cgi?id=103665
  [fdo#103927]: https://bugs.freedesktop.org/show_bug.cgi?id=103927
  [fdo#104108]: https://bugs.freedesktop.org/show_bug.cgi?id=104108
  [fdo#105363]: https://bugs.freedesktop.org/show_bug.cgi?id=105363
  [fdo#105411]: https://bugs.freedesktop.org/show_bug.cgi?id=105411
  [fdo#106978]: https://bugs.freedesktop.org/show_bug.cgi?id=106978
  [fdo#107713]: https://bugs.freedesktop.org/show_bug.cgi?id=107713
  [fdo#108040]: https://bugs.freedesktop.org/show_bug.cgi?id=108040
  [fdo#108145]: https://bugs.freedesktop.org/show_bug.cgi?id=108145
  [fdo#108566]: https://bugs.freedesktop.org/show_bug.cgi?id=108566
  [fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
  [fdo#109441]: https://bugs.freedesktop.org/show_bug.cgi?id=109441
  [fdo#109642]: https://bugs.freedesktop.org/show_bug.cgi?id=109642
  [fdo#110403]: https://bugs.freedesktop.org/show_bug.cgi?id=110403
  [fdo#110854]: https://bugs.freedesktop.org/show_bug.cgi?id=110854
  [fdo#111068]: https://bugs.freedesktop.org/show_bug.cgi?id=111068
  [fdo#99912]: https://bugs.freedesktop.org/show_bug.cgi?id=99912
  [k.org#198133]: https://bugzilla.kernel.org/show_bug.cgi?id=198133


Participating hosts (10 -> 10)
------------------------------

  No changes in participating hosts


Build changes
-------------

  * Linux: CI_DRM_6483 -> Patchwork_13659

  CI_DRM_6483: 4aea794468d67b8bd2a6d1fc07e2772007f98540 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5098: 41ff022b62b45a5b84504daa3537fa1b295b97c9 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_13659: 20cfb02ec6600ffcf88bc5f7f54d60fee4bf5167 @ git://anongit.freedesktop.org/gfx-ci/linux
  piglit_4509: fdc5a4ca11124ab8413c7988896eec4c97336694 @ git://anongit.freedesktop.org/piglit

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13659/
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [PATCH 06/24] drm/i915: Lock the engine while dumping the active request
  2019-07-15  8:09 ` [PATCH 06/24] drm/i915: Lock the engine while dumping the active request Chris Wilson
@ 2019-07-16 10:26   ` Tvrtko Ursulin
  0 siblings, 0 replies; 33+ messages in thread
From: Tvrtko Ursulin @ 2019-07-16 10:26 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx; +Cc: Alex Shumsky


On 15/07/2019 09:09, Chris Wilson wrote:
> We cannot let the request be retired and freed while we are trying to
> dump it during error capture. It is not sufficient just to grab a
> reference to the request, as during retirement we may free the ring
> which we are also dumping. So take the engine lock to prevent retiring
> and freeing of the request.
> 
> Reported-by: Alex Shumsky <alexthreed@gmail.com>
> Fixes: 83c317832eb1 ("drm/i915: Dump the ringbuffer of the active request for debugging")
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
> Cc: Alex Shumsky <alexthreed@gmail.com>
> ---
>   drivers/gpu/drm/i915/gt/intel_engine_cs.c | 11 ++++-------
>   drivers/gpu/drm/i915/i915_gpu_error.c     |  6 ++++--
>   2 files changed, 8 insertions(+), 9 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
> index 75099500d1ed..5f8909546d36 100644
> --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
> +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
> @@ -1472,6 +1472,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
>   	struct i915_gpu_error * const error = &engine->i915->gpu_error;
>   	struct i915_request *rq;
>   	intel_wakeref_t wakeref;
> +	unsigned long flags;
>   
>   	if (header) {
>   		va_list ap;
> @@ -1491,10 +1492,9 @@ void intel_engine_dump(struct intel_engine_cs *engine,
>   		   i915_reset_engine_count(error, engine),
>   		   i915_reset_count(error));
>   
> -	rcu_read_lock();
> -
>   	drm_printf(m, "\tRequests:\n");
>   
> +	spin_lock_irqsave(&engine->active.lock, flags);
>   	rq = intel_engine_find_active_request(engine);
>   	if (rq) {
>   		print_request(m, rq, "\t\tactive ");
> @@ -1514,8 +1514,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
>   
>   		print_request_ring(m, rq);
>   	}
> -
> -	rcu_read_unlock();
> +	spin_unlock_irqrestore(&engine->active.lock, flags);
>   
>   	wakeref = intel_runtime_pm_get_if_in_use(&engine->i915->runtime_pm);
>   	if (wakeref) {
> @@ -1654,7 +1653,6 @@ struct i915_request *
>   intel_engine_find_active_request(struct intel_engine_cs *engine)
>   {
>   	struct i915_request *request, *active = NULL;
> -	unsigned long flags;
>   
>   	/*
>   	 * We are called by the error capture, reset and to dump engine
> @@ -1667,7 +1665,7 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
>   	 * At all other times, we must assume the GPU is still running, but
>   	 * we only care about the snapshot of this moment.
>   	 */
> -	spin_lock_irqsave(&engine->active.lock, flags);
> +	lockdep_assert_held(&engine->active.lock);
>   	list_for_each_entry(request, &engine->active.requests, sched.link) {
>   		if (i915_request_completed(request))
>   			continue;
> @@ -1682,7 +1680,6 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
>   		active = request;
>   		break;
>   	}
> -	spin_unlock_irqrestore(&engine->active.lock, flags);
>   
>   	return active;
>   }
> diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
> index 78e388fa059c..c5b89bf4d616 100644
> --- a/drivers/gpu/drm/i915/i915_gpu_error.c
> +++ b/drivers/gpu/drm/i915/i915_gpu_error.c
> @@ -1411,6 +1411,7 @@ static void gem_record_rings(struct i915_gpu_state *error)
>   		struct intel_engine_cs *engine = i915->engine[i];
>   		struct drm_i915_error_engine *ee = &error->engine[i];
>   		struct i915_request *request;
> +		unsigned long flags;
>   
>   		ee->engine_id = -1;
>   
> @@ -1422,10 +1423,11 @@ static void gem_record_rings(struct i915_gpu_state *error)
>   		error_record_engine_registers(error, engine, ee);
>   		error_record_engine_execlists(engine, ee);
>   
> +		spin_lock_irqsave(&engine->active.lock, flags);
>   		request = intel_engine_find_active_request(engine);
>   		if (request) {
>   			struct i915_gem_context *ctx = request->gem_context;
> -			struct intel_ring *ring;
> +			struct intel_ring *ring = request->ring;
>   
>   			ee->vm = ctx->vm ?: &engine->gt->ggtt->vm;
>   
> @@ -1455,7 +1457,6 @@ static void gem_record_rings(struct i915_gpu_state *error)
>   			ee->rq_post = request->postfix;
>   			ee->rq_tail = request->tail;
>   
> -			ring = request->ring;
>   			ee->cpu_ring_head = ring->head;
>   			ee->cpu_ring_tail = ring->tail;
>   			ee->ringbuffer =
> @@ -1463,6 +1464,7 @@ static void gem_record_rings(struct i915_gpu_state *error)
>   
>   			engine_record_requests(engine, request, ee);
>   		}
> +		spin_unlock_irqrestore(&engine->active.lock, flags);
>   
>   		ee->hws_page =
>   			i915_error_object_create(i915,
> 

I missed on the initial read the fact that the underlying allocations are
already atomic.

Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Regards,

Tvrtko
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 33+ messages in thread

* Re: [PATCH 07/24] drm/i915: Rely on spinlock protection for GPU error capture
  2019-07-15  8:09 ` [PATCH 07/24] drm/i915: Rely on spinlock protection for GPU error capture Chris Wilson
@ 2019-07-16 10:48   ` Tvrtko Ursulin
  2019-07-16 11:17     ` Chris Wilson
  0 siblings, 1 reply; 33+ messages in thread
From: Tvrtko Ursulin @ 2019-07-16 10:48 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


On 15/07/2019 09:09, Chris Wilson wrote:
> Trust that we now have adequate protection over the low level structures
> via the engine->active.lock to allow ourselves to capture the GPU error
> state without the heavy hammer of stop_machine(). Sadly this does mean
> that we have to forgo some of the lesser used information (not derived
> from the active state) that is not controlled by the active locks.
> 
> A useful side-effect is that this allows us to restore error capturing
> for Braswell and Broxton.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>   drivers/gpu/drm/i915/i915_gem_gtt.c   |   5 -
>   drivers/gpu/drm/i915/i915_gpu_error.c | 503 +++++++++++---------------
>   drivers/gpu/drm/i915/i915_gpu_error.h |  17 -
>   3 files changed, 207 insertions(+), 318 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index 220aba5a94d2..5c9c7cae4817 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -2966,11 +2966,6 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
>   		ggtt->vm.insert_page    = bxt_vtd_ggtt_insert_page__BKL;
>   		if (ggtt->vm.clear_range != nop_clear_range)
>   			ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
> -
> -		/* Prevent recursively calling stop_machine() and deadlocks. */
> -		dev_info(dev_priv->drm.dev,
> -			 "Disabling error capture for VT-d workaround\n");
> -		i915_disable_error_state(dev_priv, -ENODEV);
>   	}
>   
>   	ggtt->invalidate = gen6_ggtt_invalidate;
> diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
> index c5b89bf4d616..026ee1f5536d 100644
> --- a/drivers/gpu/drm/i915/i915_gpu_error.c
> +++ b/drivers/gpu/drm/i915/i915_gpu_error.c
> @@ -30,7 +30,6 @@
>   #include <linux/ascii85.h>
>   #include <linux/nmi.h>
>   #include <linux/scatterlist.h>
> -#include <linux/stop_machine.h>
>   #include <linux/utsname.h>
>   #include <linux/zlib.h>
>   
> @@ -46,6 +45,9 @@
>   #include "i915_scatterlist.h"
>   #include "intel_csr.h"
>   
> +#define ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
> +#define ATOMIC_MAYFAIL (GFP_ATOMIC | __GFP_NOWARN)
> +
>   static inline const struct intel_engine_cs *
>   engine_lookup(const struct drm_i915_private *i915, unsigned int id)
>   {
> @@ -67,26 +69,6 @@ engine_name(const struct drm_i915_private *i915, unsigned int id)
>   	return __engine_name(engine_lookup(i915, id));
>   }
>   
> -static const char *tiling_flag(int tiling)
> -{
> -	switch (tiling) {
> -	default:
> -	case I915_TILING_NONE: return "";
> -	case I915_TILING_X: return " X";
> -	case I915_TILING_Y: return " Y";
> -	}
> -}
> -
> -static const char *dirty_flag(int dirty)
> -{
> -	return dirty ? " dirty" : "";
> -}
> -
> -static const char *purgeable_flag(int purgeable)
> -{
> -	return purgeable ? " purgeable" : "";
> -}
> -
>   static void __sg_set_buf(struct scatterlist *sg,
>   			 void *addr, unsigned int len, loff_t it)
>   {
> @@ -114,7 +96,7 @@ static bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len)
>   	if (e->cur == e->end) {
>   		struct scatterlist *sgl;
>   
> -		sgl = (typeof(sgl))__get_free_page(GFP_KERNEL);
> +		sgl = (typeof(sgl))__get_free_page(ALLOW_FAIL);
>   		if (!sgl) {
>   			e->err = -ENOMEM;
>   			return false;
> @@ -134,7 +116,7 @@ static bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len)
>   	}
>   
>   	e->size = ALIGN(len + 1, SZ_64K);
> -	e->buf = kmalloc(e->size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
> +	e->buf = kmalloc(e->size, ALLOW_FAIL);
>   	if (!e->buf) {
>   		e->size = PAGE_ALIGN(len + 1);
>   		e->buf = kmalloc(e->size, GFP_KERNEL);
> @@ -211,47 +193,127 @@ i915_error_printer(struct drm_i915_error_state_buf *e)
>   	return p;
>   }
>   
> +/* single threaded page allocator with a reserved stash for emergencies */
> +struct pool {
> +	void *stash[31];

Why 31?

> +	unsigned int count;
> +};
> +
> +static void *__alloc_page(gfp_t gfp)
> +{
> +	return (void *)__get_free_page(gfp);
> +}
> +
> +static void pool_fini(struct pool *p)
> +{
> +	while (p->count--)
> +		free_page((unsigned long)p->stash[p->count]);
> +}
> +
> +static int pool_refill(struct pool *p, gfp_t gfp)
> +{
> +	while (p->count < ARRAY_SIZE(p->stash)) {
> +		p->stash[p->count] = __alloc_page(gfp);
> +		if (!p->stash[p->count])
> +			return -ENOMEM;
> +
> +		p->count++;
> +	}
> +
> +	return 0;
> +}
> +
> +static int pool_init(struct pool *p, gfp_t gfp)
> +{
> +	int err;
> +
> +	p->count = 0;
> +
> +	err = pool_refill(p, gfp);
> +	if (err)
> +		pool_fini(p);
> +
> +	return err;
> +}
> +
> +static void *pool_alloc(struct pool *p, gfp_t gfp)
> +{
> +	void *ptr;
> +
> +	ptr = __alloc_page(gfp);
> +	if (ptr)
> +		return ptr;
> +
> +	if (p->count)
> +		return p->stash[--p->count];
> +
> +	return NULL;
> +}
> +
> +static void pool_free(struct pool *p, void *page)
> +{
> +	if (p->count < ARRAY_SIZE(p->stash)) {
> +		p->stash[p->count++] = page;
> +		return;
> +	}
> +
> +	free_page((unsigned long)page);
> +}
> +
>   #ifdef CONFIG_DRM_I915_COMPRESS_ERROR
>   
>   struct compress {
> +	struct pool pool;
>   	struct z_stream_s zstream;
>   	void *tmp;
>   };
>   
>   static bool compress_init(struct compress *c)
>   {
> -	struct z_stream_s *zstream = memset(&c->zstream, 0, sizeof(c->zstream));
> +	struct z_stream_s *zstream = &c->zstream;
>   
> -	zstream->workspace =
> -		kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
> -			GFP_ATOMIC | __GFP_NOWARN);
> -	if (!zstream->workspace)
> +	if (pool_init(&c->pool, ALLOW_FAIL))
>   		return false;
>   
> -	if (zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) != Z_OK) {
> -		kfree(zstream->workspace);
> +	zstream->workspace =
> +		kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
> +			ALLOW_FAIL);
> +	if (!zstream->workspace) {
> +		pool_fini(&c->pool);
>   		return false;
>   	}
>   
>   	c->tmp = NULL;
>   	if (i915_has_memcpy_from_wc())
> -		c->tmp = (void *)__get_free_page(GFP_ATOMIC | __GFP_NOWARN);
> +		c->tmp = pool_alloc(&c->pool, ALLOW_FAIL);

So 31 stashed pages will be enough to not need atomic allocations, or I 
missed a point?

What determines 31 is enough?

>   
>   	return true;
>   }
>   
> -static void *compress_next_page(struct drm_i915_error_object *dst)
> +static bool compress_start(struct compress *c)
> +{
> +	struct z_stream_s *zstream = &c->zstream;
> +	void *workspace = zstream->workspace;
> +
> +	memset(zstream, 0, sizeof(*zstream));
> +	zstream->workspace = workspace;
> +
> +	return zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) == Z_OK;
> +}
> +
> +static void *compress_next_page(struct compress *c,
> +				struct drm_i915_error_object *dst)
>   {
> -	unsigned long page;
> +	void *page;
>   
>   	if (dst->page_count >= dst->num_pages)
>   		return ERR_PTR(-ENOSPC);
>   
> -	page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
> +	page = pool_alloc(&c->pool, ATOMIC_MAYFAIL);
>   	if (!page)
>   		return ERR_PTR(-ENOMEM);
>   
> -	return dst->pages[dst->page_count++] = (void *)page;
> +	return dst->pages[dst->page_count++] = page;
>   }
>   
>   static int compress_page(struct compress *c,
> @@ -267,7 +329,7 @@ static int compress_page(struct compress *c,
>   
>   	do {
>   		if (zstream->avail_out == 0) {
> -			zstream->next_out = compress_next_page(dst);
> +			zstream->next_out = compress_next_page(c, dst);
>   			if (IS_ERR(zstream->next_out))
>   				return PTR_ERR(zstream->next_out);
>   
> @@ -295,7 +357,7 @@ static int compress_flush(struct compress *c,
>   	do {
>   		switch (zlib_deflate(zstream, Z_FINISH)) {
>   		case Z_OK: /* more space requested */
> -			zstream->next_out = compress_next_page(dst);
> +			zstream->next_out = compress_next_page(c, dst);
>   			if (IS_ERR(zstream->next_out))
>   				return PTR_ERR(zstream->next_out);
>   
> @@ -316,15 +378,17 @@ static int compress_flush(struct compress *c,
>   	return 0;
>   }
>   
> -static void compress_fini(struct compress *c,
> -			  struct drm_i915_error_object *dst)
> +static void compress_finish(struct compress *c)
>   {
> -	struct z_stream_s *zstream = &c->zstream;
> +	zlib_deflateEnd(&c->zstream);
> +}
>   
> -	zlib_deflateEnd(zstream);
> -	kfree(zstream->workspace);
> +static void compress_fini(struct compress *c)
> +{
> +	kfree(c->zstream.workspace);
>   	if (c->tmp)
> -		free_page((unsigned long)c->tmp);
> +		pool_free(&c->pool, c->tmp);
> +	pool_fini(&c->pool);
>   }
>   
>   static void err_compression_marker(struct drm_i915_error_state_buf *m)
> @@ -335,9 +399,18 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m)
>   #else
>   
>   struct compress {
> +	struct pool pool;
>   };
>   
>   static bool compress_init(struct compress *c)
> +{
> +	if (pool_init(&c->pool, ALLOW_FAIL))
> +		return false;
> +
> +	return true;

return pool_init() == 0 ?

> +}
> +
> +static bool compress_start(struct compress *c)
>   {
>   	return true;
>   }
> @@ -346,14 +419,12 @@ static int compress_page(struct compress *c,
>   			 void *src,
>   			 struct drm_i915_error_object *dst)
>   {
> -	unsigned long page;
>   	void *ptr;
>   
> -	page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
> -	if (!page)
> +	ptr = pool_alloc(&c->pool, ATOMIC_MAYFAIL);
> +	if (!ptr)
>   		return -ENOMEM;
>   
> -	ptr = (void *)page;
>   	if (!i915_memcpy_from_wc(ptr, src, PAGE_SIZE))
>   		memcpy(ptr, src, PAGE_SIZE);
>   	dst->pages[dst->page_count++] = ptr;
> @@ -367,11 +438,15 @@ static int compress_flush(struct compress *c,
>   	return 0;
>   }
>   
> -static void compress_fini(struct compress *c,
> -			  struct drm_i915_error_object *dst)
> +static void compress_finish(struct compress *c)
>   {
>   }
>   
> +static void compress_fini(struct compress *c)
> +{
> +	pool_fini(&c->pool);
> +}
> +
>   static void err_compression_marker(struct drm_i915_error_state_buf *m)
>   {
>   	err_puts(m, "~");
> @@ -379,36 +454,6 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m)
>   
>   #endif
>   
> -static void print_error_buffers(struct drm_i915_error_state_buf *m,
> -				const char *name,
> -				struct drm_i915_error_buffer *err,
> -				int count)
> -{
> -	err_printf(m, "%s [%d]:\n", name, count);
> -
> -	while (count--) {
> -		err_printf(m, "    %08x_%08x %8u %02x %02x",
> -			   upper_32_bits(err->gtt_offset),
> -			   lower_32_bits(err->gtt_offset),
> -			   err->size,
> -			   err->read_domains,
> -			   err->write_domain);
> -		err_puts(m, tiling_flag(err->tiling));
> -		err_puts(m, dirty_flag(err->dirty));
> -		err_puts(m, purgeable_flag(err->purgeable));
> -		err_puts(m, err->userptr ? " userptr" : "");
> -		err_puts(m, i915_cache_level_str(m->i915, err->cache_level));
> -
> -		if (err->name)
> -			err_printf(m, " (name: %d)", err->name);
> -		if (err->fence_reg != I915_FENCE_REG_NONE)
> -			err_printf(m, " (fence: %d)", err->fence_reg);
> -
> -		err_puts(m, "\n");
> -		err++;
> -	}
> -}

So no active and pinned bos any more.

Can you put in the commit message what data is gone and what remains? And
some notes on how that affects the usefulness of the error state.

Regards,

Tvrtko

> -
>   static void error_print_instdone(struct drm_i915_error_state_buf *m,
>   				 const struct drm_i915_error_engine *ee)
>   {
> @@ -734,33 +779,6 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
>   			error_print_engine(m, &error->engine[i], error->epoch);
>   	}
>   
> -	for (i = 0; i < ARRAY_SIZE(error->active_vm); i++) {
> -		char buf[128];
> -		int len, first = 1;
> -
> -		if (!error->active_vm[i])
> -			break;
> -
> -		len = scnprintf(buf, sizeof(buf), "Active (");
> -		for (j = 0; j < ARRAY_SIZE(error->engine); j++) {
> -			if (error->engine[j].vm != error->active_vm[i])
> -				continue;
> -
> -			len += scnprintf(buf + len, sizeof(buf), "%s%s",
> -					 first ? "" : ", ",
> -					 m->i915->engine[j]->name);
> -			first = 0;
> -		}
> -		scnprintf(buf + len, sizeof(buf), ")");
> -		print_error_buffers(m, buf,
> -				    error->active_bo[i],
> -				    error->active_bo_count[i]);
> -	}
> -
> -	print_error_buffers(m, "Pinned (global)",
> -			    error->pinned_bo,
> -			    error->pinned_bo_count);
> -
>   	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
>   		const struct drm_i915_error_engine *ee = &error->engine[i];
>   
> @@ -974,10 +992,6 @@ void __i915_gpu_state_free(struct kref *error_ref)
>   		kfree(ee->requests);
>   	}
>   
> -	for (i = 0; i < ARRAY_SIZE(error->active_bo); i++)
> -		kfree(error->active_bo[i]);
> -	kfree(error->pinned_bo);
> -
>   	kfree(error->overlay);
>   	kfree(error->display);
>   
> @@ -990,12 +1004,12 @@ void __i915_gpu_state_free(struct kref *error_ref)
>   
>   static struct drm_i915_error_object *
>   i915_error_object_create(struct drm_i915_private *i915,
> -			 struct i915_vma *vma)
> +			 struct i915_vma *vma,
> +			 struct compress *compress)
>   {
>   	struct i915_ggtt *ggtt = &i915->ggtt;
>   	const u64 slot = ggtt->error_capture.start;
>   	struct drm_i915_error_object *dst;
> -	struct compress compress;
>   	unsigned long num_pages;
>   	struct sgt_iter iter;
>   	dma_addr_t dma;
> @@ -1006,22 +1020,21 @@ i915_error_object_create(struct drm_i915_private *i915,
>   
>   	num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT;
>   	num_pages = DIV_ROUND_UP(10 * num_pages, 8); /* worstcase zlib growth */
> -	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *),
> -		      GFP_ATOMIC | __GFP_NOWARN);
> +	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), ATOMIC_MAYFAIL);
>   	if (!dst)
>   		return NULL;
>   
> +	if (!compress_start(compress)) {
> +		kfree(dst);
> +		return NULL;
> +	}
> +
>   	dst->gtt_offset = vma->node.start;
>   	dst->gtt_size = vma->node.size;
>   	dst->num_pages = num_pages;
>   	dst->page_count = 0;
>   	dst->unused = 0;
>   
> -	if (!compress_init(&compress)) {
> -		kfree(dst);
> -		return NULL;
> -	}
> -
>   	ret = -EINVAL;
>   	for_each_sgt_dma(dma, iter, vma->pages) {
>   		void __iomem *s;
> @@ -1029,69 +1042,23 @@ i915_error_object_create(struct drm_i915_private *i915,
>   		ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);
>   
>   		s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
> -		ret = compress_page(&compress, (void  __force *)s, dst);
> +		ret = compress_page(compress, (void  __force *)s, dst);
>   		io_mapping_unmap_atomic(s);
>   		if (ret)
>   			break;
>   	}
>   
> -	if (ret || compress_flush(&compress, dst)) {
> +	if (ret || compress_flush(compress, dst)) {
>   		while (dst->page_count--)
> -			free_page((unsigned long)dst->pages[dst->page_count]);
> +			pool_free(&compress->pool, dst->pages[dst->page_count]);
>   		kfree(dst);
>   		dst = NULL;
>   	}
> +	compress_finish(compress);
>   
> -	compress_fini(&compress, dst);
>   	return dst;
>   }
>   
> -static void capture_bo(struct drm_i915_error_buffer *err,
> -		       struct i915_vma *vma)
> -{
> -	struct drm_i915_gem_object *obj = vma->obj;
> -
> -	err->size = obj->base.size;
> -	err->name = obj->base.name;
> -
> -	err->gtt_offset = vma->node.start;
> -	err->read_domains = obj->read_domains;
> -	err->write_domain = obj->write_domain;
> -	err->fence_reg = vma->fence ? vma->fence->id : -1;
> -	err->tiling = i915_gem_object_get_tiling(obj);
> -	err->dirty = obj->mm.dirty;
> -	err->purgeable = obj->mm.madv != I915_MADV_WILLNEED;
> -	err->userptr = obj->userptr.mm != NULL;
> -	err->cache_level = obj->cache_level;
> -}
> -
> -static u32 capture_error_bo(struct drm_i915_error_buffer *err,
> -			    int count, struct list_head *head,
> -			    unsigned int flags)
> -#define ACTIVE_ONLY BIT(0)
> -#define PINNED_ONLY BIT(1)
> -{
> -	struct i915_vma *vma;
> -	int i = 0;
> -
> -	list_for_each_entry(vma, head, vm_link) {
> -		if (!vma->obj)
> -			continue;
> -
> -		if (flags & ACTIVE_ONLY && !i915_vma_is_active(vma))
> -			continue;
> -
> -		if (flags & PINNED_ONLY && !i915_vma_is_pinned(vma))
> -			continue;
> -
> -		capture_bo(err++, vma);
> -		if (++i == count)
> -			break;
> -	}
> -
> -	return i;
> -}
> -
>   /*
>    * Generate a semi-unique error code. The code is not meant to have meaning, The
>    * code's only purpose is to try to prevent false duplicated bug reports by
> @@ -1281,7 +1248,7 @@ static void engine_record_requests(struct intel_engine_cs *engine,
>   	if (!count)
>   		return;
>   
> -	ee->requests = kcalloc(count, sizeof(*ee->requests), GFP_ATOMIC);
> +	ee->requests = kcalloc(count, sizeof(*ee->requests), ATOMIC_MAYFAIL);
>   	if (!ee->requests)
>   		return;
>   
> @@ -1349,8 +1316,10 @@ static void record_context(struct drm_i915_error_context *e,
>   	e->active = atomic_read(&ctx->active_count);
>   }
>   
> -static void request_record_user_bo(struct i915_request *request,
> -				   struct drm_i915_error_engine *ee)
> +static void
> +request_record_user_bo(struct i915_request *request,
> +		       struct drm_i915_error_engine *ee,
> +		       struct compress *compress)
>   {
>   	struct i915_capture_list *c;
>   	struct drm_i915_error_object **bo;
> @@ -1362,18 +1331,20 @@ static void request_record_user_bo(struct i915_request *request,
>   	if (!max)
>   		return;
>   
> -	bo = kmalloc_array(max, sizeof(*bo), GFP_ATOMIC);
> +	bo = kmalloc_array(max, sizeof(*bo), ATOMIC_MAYFAIL);
>   	if (!bo) {
>   		/* If we can't capture everything, try to capture something. */
>   		max = min_t(long, max, PAGE_SIZE / sizeof(*bo));
> -		bo = kmalloc_array(max, sizeof(*bo), GFP_ATOMIC);
> +		bo = kmalloc_array(max, sizeof(*bo), ATOMIC_MAYFAIL);
>   	}
>   	if (!bo)
>   		return;
>   
>   	count = 0;
>   	for (c = request->capture_list; c; c = c->next) {
> -		bo[count] = i915_error_object_create(request->i915, c->vma);
> +		bo[count] = i915_error_object_create(request->i915,
> +						     c->vma,
> +						     compress);
>   		if (!bo[count])
>   			break;
>   		if (++count == max)
> @@ -1386,7 +1357,8 @@ static void request_record_user_bo(struct i915_request *request,
>   
>   static struct drm_i915_error_object *
>   capture_object(struct drm_i915_private *dev_priv,
> -	       struct drm_i915_gem_object *obj)
> +	       struct drm_i915_gem_object *obj,
> +	       struct compress *compress)
>   {
>   	if (obj && i915_gem_object_has_pages(obj)) {
>   		struct i915_vma fake = {
> @@ -1396,13 +1368,14 @@ capture_object(struct drm_i915_private *dev_priv,
>   			.obj = obj,
>   		};
>   
> -		return i915_error_object_create(dev_priv, &fake);
> +		return i915_error_object_create(dev_priv, &fake, compress);
>   	} else {
>   		return NULL;
>   	}
>   }
>   
> -static void gem_record_rings(struct i915_gpu_state *error)
> +static void
> +gem_record_rings(struct i915_gpu_state *error, struct compress *compress)
>   {
>   	struct drm_i915_private *i915 = error->i915;
>   	int i;
> @@ -1420,6 +1393,9 @@ static void gem_record_rings(struct i915_gpu_state *error)
>   
>   		ee->engine_id = i;
>   
> +		/* Refill our page pool before entering atomic section */
> +		pool_refill(&compress->pool, ALLOW_FAIL);
> +
>   		error_record_engine_registers(error, engine, ee);
>   		error_record_engine_execlists(engine, ee);
>   
> @@ -1429,8 +1405,6 @@ static void gem_record_rings(struct i915_gpu_state *error)
>   			struct i915_gem_context *ctx = request->gem_context;
>   			struct intel_ring *ring = request->ring;
>   
> -			ee->vm = ctx->vm ?: &engine->gt->ggtt->vm;
> -
>   			record_context(&ee->context, ctx);
>   
>   			/* We need to copy these to an anonymous buffer
> @@ -1438,17 +1412,21 @@ static void gem_record_rings(struct i915_gpu_state *error)
>   			 * by userspace.
>   			 */
>   			ee->batchbuffer =
> -				i915_error_object_create(i915, request->batch);
> +				i915_error_object_create(i915,
> +							 request->batch,
> +							 compress);
>   
>   			if (HAS_BROKEN_CS_TLB(i915))
>   				ee->wa_batchbuffer =
>   				  i915_error_object_create(i915,
> -							   engine->gt->scratch);
> -			request_record_user_bo(request, ee);
> +							   engine->gt->scratch,
> +							   compress);
> +			request_record_user_bo(request, ee, compress);
>   
>   			ee->ctx =
>   				i915_error_object_create(i915,
> -							 request->hw_context->state);
> +							 request->hw_context->state,
> +							 compress);
>   
>   			error->simulated |=
>   				i915_gem_context_no_error_capture(ctx);
> @@ -1460,7 +1438,9 @@ static void gem_record_rings(struct i915_gpu_state *error)
>   			ee->cpu_ring_head = ring->head;
>   			ee->cpu_ring_tail = ring->tail;
>   			ee->ringbuffer =
> -				i915_error_object_create(i915, ring->vma);
> +				i915_error_object_create(i915,
> +							 ring->vma,
> +							 compress);
>   
>   			engine_record_requests(engine, request, ee);
>   		}
> @@ -1468,89 +1448,21 @@ static void gem_record_rings(struct i915_gpu_state *error)
>   
>   		ee->hws_page =
>   			i915_error_object_create(i915,
> -						 engine->status_page.vma);
> -
> -		ee->wa_ctx = i915_error_object_create(i915, engine->wa_ctx.vma);
> -
> -		ee->default_state = capture_object(i915, engine->default_state);
> -	}
> -}
> -
> -static void gem_capture_vm(struct i915_gpu_state *error,
> -			   struct i915_address_space *vm,
> -			   int idx)
> -{
> -	struct drm_i915_error_buffer *active_bo;
> -	struct i915_vma *vma;
> -	int count;
> -
> -	count = 0;
> -	list_for_each_entry(vma, &vm->bound_list, vm_link)
> -		if (i915_vma_is_active(vma))
> -			count++;
> -
> -	active_bo = NULL;
> -	if (count)
> -		active_bo = kcalloc(count, sizeof(*active_bo), GFP_ATOMIC);
> -	if (active_bo)
> -		count = capture_error_bo(active_bo,
> -					 count, &vm->bound_list,
> -					 ACTIVE_ONLY);
> -	else
> -		count = 0;
> +						 engine->status_page.vma,
> +						 compress);
>   
> -	error->active_vm[idx] = vm;
> -	error->active_bo[idx] = active_bo;
> -	error->active_bo_count[idx] = count;
> -}
> -
> -static void capture_active_buffers(struct i915_gpu_state *error)
> -{
> -	int cnt = 0, i, j;
> -
> -	BUILD_BUG_ON(ARRAY_SIZE(error->engine) > ARRAY_SIZE(error->active_bo));
> -	BUILD_BUG_ON(ARRAY_SIZE(error->active_bo) != ARRAY_SIZE(error->active_vm));
> -	BUILD_BUG_ON(ARRAY_SIZE(error->active_bo) != ARRAY_SIZE(error->active_bo_count));
> -
> -	/* Scan each engine looking for unique active contexts/vm */
> -	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
> -		struct drm_i915_error_engine *ee = &error->engine[i];
> -		bool found;
> -
> -		if (!ee->vm)
> -			continue;
> +		ee->wa_ctx =
> +			i915_error_object_create(i915,
> +						 engine->wa_ctx.vma,
> +						 compress);
>   
> -		found = false;
> -		for (j = 0; j < i && !found; j++)
> -			found = error->engine[j].vm == ee->vm;
> -		if (!found)
> -			gem_capture_vm(error, ee->vm, cnt++);
> +		ee->default_state =
> +			capture_object(i915, engine->default_state, compress);
>   	}
>   }
>   
> -static void capture_pinned_buffers(struct i915_gpu_state *error)
> -{
> -	struct i915_address_space *vm = &error->i915->ggtt.vm;
> -	struct drm_i915_error_buffer *bo;
> -	struct i915_vma *vma;
> -	int count;
> -
> -	count = 0;
> -	list_for_each_entry(vma, &vm->bound_list, vm_link)
> -		count++;
> -
> -	bo = NULL;
> -	if (count)
> -		bo = kcalloc(count, sizeof(*bo), GFP_ATOMIC);
> -	if (!bo)
> -		return;
> -
> -	error->pinned_bo_count =
> -		capture_error_bo(bo, count, &vm->bound_list, PINNED_ONLY);
> -	error->pinned_bo = bo;
> -}
> -
> -static void capture_uc_state(struct i915_gpu_state *error)
> +static void
> +capture_uc_state(struct i915_gpu_state *error, struct compress *compress)
>   {
>   	struct drm_i915_private *i915 = error->i915;
>   	struct i915_error_uc *error_uc = &error->uc;
> @@ -1567,9 +1479,11 @@ static void capture_uc_state(struct i915_gpu_state *error)
>   	 * As modparams are generally accesible from the userspace make
>   	 * explicit copies of the firmware paths.
>   	 */
> -	error_uc->guc_fw.path = kstrdup(uc->guc.fw.path, GFP_ATOMIC);
> -	error_uc->huc_fw.path = kstrdup(uc->huc.fw.path, GFP_ATOMIC);
> -	error_uc->guc_log = i915_error_object_create(i915, uc->guc.log.vma);
> +	error_uc->guc_fw.path = kstrdup(uc->guc.fw.path, ALLOW_FAIL);
> +	error_uc->huc_fw.path = kstrdup(uc->huc.fw.path, ALLOW_FAIL);
> +	error_uc->guc_log = i915_error_object_create(i915,
> +						     uc->guc.log.vma,
> +						     compress);
>   }
>   
>   /* Capture all registers which don't fit into another category. */
> @@ -1753,56 +1667,53 @@ static void capture_finish(struct i915_gpu_state *error)
>   	ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
>   }
>   
> -static int capture(void *data)
> -{
> -	struct i915_gpu_state *error = data;
> -
> -	error->time = ktime_get_real();
> -	error->boottime = ktime_get_boottime();
> -	error->uptime = ktime_sub(ktime_get(),
> -				  error->i915->gt.last_init_time);
> -	error->capture = jiffies;
> -
> -	capture_params(error);
> -	capture_gen_state(error);
> -	capture_uc_state(error);
> -	capture_reg_state(error);
> -	gem_record_fences(error);
> -	gem_record_rings(error);
> -	capture_active_buffers(error);
> -	capture_pinned_buffers(error);
> -
> -	error->overlay = intel_overlay_capture_error_state(error->i915);
> -	error->display = intel_display_capture_error_state(error->i915);
> -
> -	error->epoch = capture_find_epoch(error);
> -
> -	capture_finish(error);
> -	return 0;
> -}
> -
>   #define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))
>   
>   struct i915_gpu_state *
>   i915_capture_gpu_state(struct drm_i915_private *i915)
>   {
>   	struct i915_gpu_state *error;
> +	struct compress compress;
>   
>   	/* Check if GPU capture has been disabled */
>   	error = READ_ONCE(i915->gpu_error.first_error);
>   	if (IS_ERR(error))
>   		return error;
>   
> -	error = kzalloc(sizeof(*error), GFP_ATOMIC);
> +	error = kzalloc(sizeof(*error), ALLOW_FAIL);
>   	if (!error) {
>   		i915_disable_error_state(i915, -ENOMEM);
>   		return ERR_PTR(-ENOMEM);
>   	}
>   
> +	if (!compress_init(&compress)) {
> +		kfree(error);
> +		i915_disable_error_state(i915, -ENOMEM);
> +		return ERR_PTR(-ENOMEM);
> +	}
> +
>   	kref_init(&error->ref);
>   	error->i915 = i915;
>   
> -	stop_machine(capture, error, NULL);
> +	error->time = ktime_get_real();
> +	error->boottime = ktime_get_boottime();
> +	error->uptime = ktime_sub(ktime_get(), i915->gt.last_init_time);
> +	error->capture = jiffies;
> +
> +	capture_params(error);
> +	capture_gen_state(error);
> +	capture_uc_state(error, &compress);
> +	capture_reg_state(error);
> +	gem_record_fences(error);
> +	gem_record_rings(error, &compress);
> +
> +	error->overlay = intel_overlay_capture_error_state(i915);
> +	error->display = intel_display_capture_error_state(i915);
> +
> +	error->epoch = capture_find_epoch(error);
> +
> +	capture_finish(error);
> +	compress_fini(&compress);
>   
>   	return error;
>   }
> diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
> index 85f06bc5da05..a24c35107d16 100644
> --- a/drivers/gpu/drm/i915/i915_gpu_error.h
> +++ b/drivers/gpu/drm/i915/i915_gpu_error.h
> @@ -85,7 +85,6 @@ struct i915_gpu_state {
>   		/* Software tracked state */
>   		bool idle;
>   		unsigned long hangcheck_timestamp;
> -		struct i915_address_space *vm;
>   		int num_requests;
>   		u32 reset_count;
>   
> @@ -161,22 +160,6 @@ struct i915_gpu_state {
>   		} vm_info;
>   	} engine[I915_NUM_ENGINES];
>   
> -	struct drm_i915_error_buffer {
> -		u32 size;
> -		u32 name;
> -		u64 gtt_offset;
> -		u32 read_domains;
> -		u32 write_domain;
> -		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
> -		u32 tiling:2;
> -		u32 dirty:1;
> -		u32 purgeable:1;
> -		u32 userptr:1;
> -		u32 cache_level:3;
> -	} *active_bo[I915_NUM_ENGINES], *pinned_bo;
> -	u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count;
> -	struct i915_address_space *active_vm[I915_NUM_ENGINES];
> -
>   	struct scatterlist *sgl, *fit;
>   };
>   
> 
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx


* Re: [PATCH 07/24] drm/i915: Rely on spinlock protection for GPU error capture
  2019-07-16 10:48   ` Tvrtko Ursulin
@ 2019-07-16 11:17     ` Chris Wilson
  0 siblings, 0 replies; 33+ messages in thread
From: Chris Wilson @ 2019-07-16 11:17 UTC (permalink / raw)
  To: Tvrtko Ursulin, intel-gfx

Quoting Tvrtko Ursulin (2019-07-16 11:48:28)
> 
> On 15/07/2019 09:09, Chris Wilson wrote:
> > +/* single threaded page allocator with a reserved stash for emergencies */
> > +struct pool {
> > +     void *stash[31];
> 
> Why 31?

Random power-of-two (31 pointers plus the count pads struct pool out to
256 bytes on 64-bit). I considered just using struct pagevec.
> 
> > +     unsigned int count;
> > +};

> >   static bool compress_init(struct compress *c)
> >   {
> > -     struct z_stream_s *zstream = memset(&c->zstream, 0, sizeof(c->zstream));
> > +     struct z_stream_s *zstream = &c->zstream;
> >   
> > -     zstream->workspace =
> > -             kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
> > -                     GFP_ATOMIC | __GFP_NOWARN);
> > -     if (!zstream->workspace)
> > +     if (pool_init(&c->pool, ALLOW_FAIL))
> >               return false;
> >   
> > -     if (zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) != Z_OK) {
> > -             kfree(zstream->workspace);
> > +     zstream->workspace =
> > +             kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
> > +                     ALLOW_FAIL);
> > +     if (!zstream->workspace) {
> > +             pool_fini(&c->pool);
> >               return false;
> >       }
> >   
> >       c->tmp = NULL;
> >       if (i915_has_memcpy_from_wc())
> > -             c->tmp = (void *)__get_free_page(GFP_ATOMIC | __GFP_NOWARN);
> > +             c->tmp = pool_alloc(&c->pool, ALLOW_FAIL);
> 
> So 31 stashed pages will be enough to not need atomic allocations, or have
> I missed a point?

No. It's just a backup in case we run out of atomic allocations. It's
modeled on mempool, except that mempool doesn't allow you to refill in
between atomic sections and instead relies on earlier allocations being
freed back into it. Since we continually allocate and keep the pages
essentially forever, mempool doesn't help.
 
> What determines 31 is enough?

It's just a backup, so we have a bit more leeway than we do today.
Currently there is no non-atomic phase in which we can wait for
allocations (the whole capture runs atomically under stop_machine), so
with the refillable reserve we should fare much better and see fewer
blank error states.

Most compressed error states are less than 128k, so I plucked 31 pages
(~124KiB of reserve) as being sufficiently large to capture a single
compressed buffer in case we run out of atomic allocations.
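
For illustration, roughly what such a stash-backed allocator looks like
(a simplified sketch, not lifted from the patch: only struct pool and the
pool_init()/pool_alloc()/pool_fini() call sites are quoted above, and
pool_refill() is an assumed helper name):

#include <linux/gfp.h>
#include <linux/kernel.h>

struct pool {
	void *stash[31];	/* emergency reserve, ~124KiB of pages */
	unsigned int count;
};

/* Top up the reserve while we are still allowed to sleep. */
static int pool_refill(struct pool *p, gfp_t gfp)
{
	while (p->count < ARRAY_SIZE(p->stash)) {
		void *ptr = (void *)__get_free_page(gfp);

		if (!ptr)
			return -ENOMEM;

		p->stash[p->count++] = ptr;
	}

	return 0;
}

/* Try a fresh page first; if that fails inside an atomic section, dip
 * into the stash instead of giving up on the capture. */
static void *pool_alloc(struct pool *p, gfp_t gfp)
{
	void *ptr = (void *)__get_free_page(gfp);

	if (!ptr && p->count)
		ptr = p->stash[--p->count];

	return ptr;
}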

> > -static void print_error_buffers(struct drm_i915_error_state_buf *m,
> > -                             const char *name,
> > -                             struct drm_i915_error_buffer *err,
> > -                             int count)
> > -{
> > -     err_printf(m, "%s [%d]:\n", name, count);
> > -
> > -     while (count--) {
> > -             err_printf(m, "    %08x_%08x %8u %02x %02x",
> > -                        upper_32_bits(err->gtt_offset),
> > -                        lower_32_bits(err->gtt_offset),
> > -                        err->size,
> > -                        err->read_domains,
> > -                        err->write_domain);
> > -             err_puts(m, tiling_flag(err->tiling));
> > -             err_puts(m, dirty_flag(err->dirty));
> > -             err_puts(m, purgeable_flag(err->purgeable));
> > -             err_puts(m, err->userptr ? " userptr" : "");
> > -             err_puts(m, i915_cache_level_str(m->i915, err->cache_level));
> > -
> > -             if (err->name)
> > -                     err_printf(m, " (name: %d)", err->name);
> > -             if (err->fence_reg != I915_FENCE_REG_NONE)
> > -                     err_printf(m, " (fence: %d)", err->fence_reg);
> > -
> > -             err_puts(m, "\n");
> > -             err++;
> > -     }
> > -}
> 
> So no active and pinned bos any more.
> 
> Can you put in the commit message what data is gone and what remains? And
> some notes on how that affects the usefulness of the error state.

My purpose for including them was for cross-checking relocations in the
batch. It's been a long time since I've had to do that, and now mesa
likes to tag everything important with EXEC_CAPTURE so the buffers are
included in their entirety.
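
As an aside, an illustrative userspace snippet of the EXEC_CAPTURE usage
mentioned above (EXEC_OBJECT_CAPTURE is the uapi flag from i915_drm.h;
the helper name and the rest of the execbuf setup are hypothetical and
elided):

#include <string.h>
#include <i915_drm.h>	/* via `pkg-config --cflags libdrm`; path may vary */

static void mark_for_capture(struct drm_i915_gem_exec_object2 *obj,
			     __u32 handle)
{
	memset(obj, 0, sizeof(*obj));
	obj->handle = handle;
	obj->flags = EXEC_OBJECT_CAPTURE; /* include this BO in full in the error state */
}
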
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx


* Re: [PATCH 21/24] drm/i915: Remove logical HW ID
  2019-07-15  8:09 ` [PATCH 21/24] drm/i915: Remove logical HW ID Chris Wilson
@ 2019-07-19 20:56   ` Kumar Valsan, Prathap
  0 siblings, 0 replies; 33+ messages in thread
From: Kumar Valsan, Prathap @ 2019-07-19 20:56 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

On Mon, Jul 15, 2019 at 09:09:43AM +0100, Chris Wilson wrote:
> We only need to keep a unique tag for the active lifetime of the
> context, and for as long as we need to identify that context. The HW
> uses the tag to determine if it should use a lite-restore (why not the
> LRCA?) and passes the tag back for various status identifiers. The only
> status we need to track is for OA, so when using perf, we assign the
> specific context a unique tag.
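
To make the scheme above concrete, a small standalone sketch of the tag
assignment it describes (the names are illustrative stand-ins; the real
logic lives in the execlists_schedule_in() hunk further down and writes
the tag into the context descriptor bits):

#define NUM_HW_TAG 256	/* as in the patch: tags wrap after 256 */

struct engine { unsigned int next_tag; };    /* stands in for engine->hw_tag */
struct context { unsigned int pinned_tag; }; /* stands in for ce->hw_tag (perf/OA only) */

/* A context only needs a stable tag while perf/OA is watching it;
 * everything else shares a small rolling pool of per-engine tags. */
static unsigned int assign_tag(struct engine *e, struct context *ce)
{
	if (ce->pinned_tag)
		return ce->pinned_tag;

	return e->next_tag++ % NUM_HW_TAG;
}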

The device page fault handler needs the logical HW id: it is used to
identify the gem context and, from that, to look up the address space.

Prathap
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>  drivers/gpu/drm/i915/gem/i915_gem_context.c   | 151 ------------------
>  drivers/gpu/drm/i915/gem/i915_gem_context.h   |  15 --
>  .../gpu/drm/i915/gem/i915_gem_context_types.h |  18 ---
>  .../drm/i915/gem/selftests/i915_gem_context.c |  13 +-
>  .../gpu/drm/i915/gem/selftests/mock_context.c |   8 -
>  drivers/gpu/drm/i915/gt/intel_context_types.h |   1 +
>  drivers/gpu/drm/i915/gt/intel_engine_types.h  |   5 +-
>  drivers/gpu/drm/i915/gt/intel_lrc.c           |  28 ++--
>  drivers/gpu/drm/i915/gvt/kvmgt.c              |  17 --
>  drivers/gpu/drm/i915/i915_debugfs.c           |   3 -
>  drivers/gpu/drm/i915/i915_gpu_error.c         |   7 +-
>  drivers/gpu/drm/i915/i915_gpu_error.h         |   1 -
>  drivers/gpu/drm/i915/i915_perf.c              |  28 ++--
>  drivers/gpu/drm/i915/i915_trace.h             |  38 ++---
>  .../gpu/drm/i915/selftests/i915_gem_evict.c   |   4 +-
>  drivers/gpu/drm/i915/selftests/i915_vma.c     |   2 +-
>  16 files changed, 51 insertions(+), 288 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
> index e9f5e19265b9..2669e038661e 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
> @@ -167,95 +167,6 @@ lookup_user_engine(struct i915_gem_context *ctx,
>  	return i915_gem_context_get_engine(ctx, idx);
>  }
>  
> -static inline int new_hw_id(struct drm_i915_private *i915, gfp_t gfp)
> -{
> -	unsigned int max;
> -
> -	lockdep_assert_held(&i915->contexts.mutex);
> -
> -	if (INTEL_GEN(i915) >= 11)
> -		max = GEN11_MAX_CONTEXT_HW_ID;
> -	else if (USES_GUC_SUBMISSION(i915))
> -		/*
> -		 * When using GuC in proxy submission, GuC consumes the
> -		 * highest bit in the context id to indicate proxy submission.
> -		 */
> -		max = MAX_GUC_CONTEXT_HW_ID;
> -	else
> -		max = MAX_CONTEXT_HW_ID;
> -
> -	return ida_simple_get(&i915->contexts.hw_ida, 0, max, gfp);
> -}
> -
> -static int steal_hw_id(struct drm_i915_private *i915)
> -{
> -	struct i915_gem_context *ctx, *cn;
> -	LIST_HEAD(pinned);
> -	int id = -ENOSPC;
> -
> -	lockdep_assert_held(&i915->contexts.mutex);
> -
> -	list_for_each_entry_safe(ctx, cn,
> -				 &i915->contexts.hw_id_list, hw_id_link) {
> -		if (atomic_read(&ctx->hw_id_pin_count)) {
> -			list_move_tail(&ctx->hw_id_link, &pinned);
> -			continue;
> -		}
> -
> -		GEM_BUG_ON(!ctx->hw_id); /* perma-pinned kernel context */
> -		list_del_init(&ctx->hw_id_link);
> -		id = ctx->hw_id;
> -		break;
> -	}
> -
> -	/*
> -	 * Remember how far we got up on the last repossesion scan, so the
> -	 * list is kept in a "least recently scanned" order.
> -	 */
> -	list_splice_tail(&pinned, &i915->contexts.hw_id_list);
> -	return id;
> -}
> -
> -static int assign_hw_id(struct drm_i915_private *i915, unsigned int *out)
> -{
> -	int ret;
> -
> -	lockdep_assert_held(&i915->contexts.mutex);
> -
> -	/*
> -	 * We prefer to steal/stall ourselves and our users over that of the
> -	 * entire system. That may be a little unfair to our users, and
> -	 * even hurt high priority clients. The choice is whether to oomkill
> -	 * something else, or steal a context id.
> -	 */
> -	ret = new_hw_id(i915, GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
> -	if (unlikely(ret < 0)) {
> -		ret = steal_hw_id(i915);
> -		if (ret < 0) /* once again for the correct errno code */
> -			ret = new_hw_id(i915, GFP_KERNEL);
> -		if (ret < 0)
> -			return ret;
> -	}
> -
> -	*out = ret;
> -	return 0;
> -}
> -
> -static void release_hw_id(struct i915_gem_context *ctx)
> -{
> -	struct drm_i915_private *i915 = ctx->i915;
> -
> -	if (list_empty(&ctx->hw_id_link))
> -		return;
> -
> -	mutex_lock(&i915->contexts.mutex);
> -	if (!list_empty(&ctx->hw_id_link)) {
> -		ida_simple_remove(&i915->contexts.hw_ida, ctx->hw_id);
> -		list_del_init(&ctx->hw_id_link);
> -	}
> -	mutex_unlock(&i915->contexts.mutex);
> -}
> -
>  static void __free_engines(struct i915_gem_engines *e, unsigned int count)
>  {
>  	while (count--) {
> @@ -309,7 +220,6 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
>  	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
>  	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
>  
> -	release_hw_id(ctx);
>  	if (ctx->vm)
>  		i915_vm_put(ctx->vm);
>  
> @@ -381,12 +291,6 @@ static void context_close(struct i915_gem_context *ctx)
>  	i915_gem_context_set_closed(ctx);
>  	ctx->file_priv = ERR_PTR(-EBADF);
>  
> -	/*
> -	 * This context will never again be assinged to HW, so we can
> -	 * reuse its ID for the next context.
> -	 */
> -	release_hw_id(ctx);
> -
>  	/*
>  	 * The LUT uses the VMA as a backpointer to unref the object,
>  	 * so we need to clear the LUT before we close all the VMA (inside
> @@ -449,7 +353,6 @@ __create_context(struct drm_i915_private *i915)
>  	RCU_INIT_POINTER(ctx->engines, e);
>  
>  	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
> -	INIT_LIST_HEAD(&ctx->hw_id_link);
>  
>  	/* NB: Mark all slices as needing a remap so that when the context first
>  	 * loads it will restore whatever remap state already exists. If there
> @@ -572,13 +475,6 @@ i915_gem_context_create_gvt(struct drm_device *dev)
>  	if (IS_ERR(ctx))
>  		goto out;
>  
> -	ret = i915_gem_context_pin_hw_id(ctx);
> -	if (ret) {
> -		context_close(ctx);
> -		ctx = ERR_PTR(ret);
> -		goto out;
> -	}
> -
>  	ctx->file_priv = ERR_PTR(-EBADF);
>  	i915_gem_context_set_closed(ctx); /* not user accessible */
>  	i915_gem_context_clear_bannable(ctx);
> @@ -609,18 +505,11 @@ struct i915_gem_context *
>  i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
>  {
>  	struct i915_gem_context *ctx;
> -	int err;
>  
>  	ctx = i915_gem_create_context(i915, 0);
>  	if (IS_ERR(ctx))
>  		return ctx;
>  
> -	err = i915_gem_context_pin_hw_id(ctx);
> -	if (err) {
> -		destroy_kernel_context(&ctx);
> -		return ERR_PTR(err);
> -	}
> -
>  	i915_gem_context_clear_bannable(ctx);
>  	ctx->sched.priority = I915_USER_PRIORITY(prio);
>  	ctx->ring_size = PAGE_SIZE;
> @@ -660,15 +549,6 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
>  		DRM_ERROR("Failed to create default global context\n");
>  		return PTR_ERR(ctx);
>  	}
> -	/*
> -	 * For easy recognisablity, we want the kernel context to be 0 and then
> -	 * all user contexts will have non-zero hw_id. Kernel contexts are
> -	 * permanently pinned, so that we never suffer a stall and can
> -	 * use them from any allocation context (e.g. for evicting other
> -	 * contexts and from inside the shrinker).
> -	 */
> -	GEM_BUG_ON(ctx->hw_id);
> -	GEM_BUG_ON(!atomic_read(&ctx->hw_id_pin_count));
>  	dev_priv->kernel_context = ctx;
>  
>  	DRM_DEBUG_DRIVER("%s context support initialized\n",
> @@ -682,10 +562,6 @@ void i915_gem_contexts_fini(struct drm_i915_private *i915)
>  	lockdep_assert_held(&i915->drm.struct_mutex);
>  
>  	destroy_kernel_context(&i915->kernel_context);
> -
> -	/* Must free all deferred contexts (via flush_workqueue) first */
> -	GEM_BUG_ON(!list_empty(&i915->contexts.hw_id_list));
> -	ida_destroy(&i915->contexts.hw_ida);
>  }
>  
>  static int context_idr_cleanup(int id, void *p, void *data)
> @@ -2366,33 +2242,6 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
>  	return ret;
>  }
>  
> -int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx)
> -{
> -	struct drm_i915_private *i915 = ctx->i915;
> -	int err = 0;
> -
> -	mutex_lock(&i915->contexts.mutex);
> -
> -	GEM_BUG_ON(i915_gem_context_is_closed(ctx));
> -
> -	if (list_empty(&ctx->hw_id_link)) {
> -		GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count));
> -
> -		err = assign_hw_id(i915, &ctx->hw_id);
> -		if (err)
> -			goto out_unlock;
> -
> -		list_add_tail(&ctx->hw_id_link, &i915->contexts.hw_id_list);
> -	}
> -
> -	GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == ~0u);
> -	atomic_inc(&ctx->hw_id_pin_count);
> -
> -out_unlock:
> -	mutex_unlock(&i915->contexts.mutex);
> -	return err;
> -}
> -
>  /* GEM context-engines iterator: for_each_gem_engine() */
>  struct intel_context *
>  i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
> index 106e2ccf7a4c..6fb3ad7e03fc 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
> @@ -112,21 +112,6 @@ i915_gem_context_clear_user_engines(struct i915_gem_context *ctx)
>  	clear_bit(CONTEXT_USER_ENGINES, &ctx->flags);
>  }
>  
> -int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx);
> -static inline int i915_gem_context_pin_hw_id(struct i915_gem_context *ctx)
> -{
> -	if (atomic_inc_not_zero(&ctx->hw_id_pin_count))
> -		return 0;
> -
> -	return __i915_gem_context_pin_hw_id(ctx);
> -}
> -
> -static inline void i915_gem_context_unpin_hw_id(struct i915_gem_context *ctx)
> -{
> -	GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == 0u);
> -	atomic_dec(&ctx->hw_id_pin_count);
> -}
> -
>  static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
>  {
>  	return !ctx->file_priv;
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
> index 0ee61482ef94..80cd3ab355e0 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
> @@ -147,24 +147,6 @@ struct i915_gem_context {
>  #define CONTEXT_FORCE_SINGLE_SUBMISSION	2
>  #define CONTEXT_USER_ENGINES		3
>  
> -	/**
> -	 * @hw_id: - unique identifier for the context
> -	 *
> -	 * The hardware needs to uniquely identify the context for a few
> -	 * functions like fault reporting, PASID, scheduling. The
> -	 * &drm_i915_private.context_hw_ida is used to assign a unqiue
> -	 * id for the lifetime of the context.
> -	 *
> -	 * @hw_id_pin_count: - number of times this context had been pinned
> -	 * for use (should be, at most, once per engine).
> -	 *
> -	 * @hw_id_link: - all contexts with an assigned id are tracked
> -	 * for possible repossession.
> -	 */
> -	unsigned int hw_id;
> -	atomic_t hw_id_pin_count;
> -	struct list_head hw_id_link;
> -
>  	struct mutex mutex;
>  
>  	struct i915_sched_attr sched;
> diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
> index db7856f0f31e..a7b6da29f3df 100644
> --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
> @@ -524,9 +524,9 @@ static int igt_ctx_exec(void *arg)
>  
>  			err = gpu_fill(obj, ctx, engine, dw);
>  			if (err) {
> -				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
> +				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
>  				       ndwords, dw, max_dwords(obj),
> -				       engine->name, ctx->hw_id,
> +				       engine->name,
>  				       yesno(!!ctx->vm), err);
>  				goto out_unlock;
>  			}
> @@ -643,9 +643,9 @@ static int igt_shared_ctx_exec(void *arg)
>  
>  			err = gpu_fill(obj, ctx, engine, dw);
>  			if (err) {
> -				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
> +				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
>  				       ndwords, dw, max_dwords(obj),
> -				       engine->name, ctx->hw_id,
> +				       engine->name,
>  				       yesno(!!ctx->vm), err);
>  				kernel_context_close(ctx);
>  				goto out_test;
> @@ -1219,10 +1219,9 @@ static int igt_ctx_readonly(void *arg)
>  
>  			err = gpu_fill(obj, ctx, engine, dw);
>  			if (err) {
> -				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
> +				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
>  				       ndwords, dw, max_dwords(obj),
> -				       engine->name, ctx->hw_id,
> -				       yesno(!!ctx->vm), err);
> +				       engine->name, yesno(!!ctx->vm), err);
>  				goto out_unlock;
>  			}
>  
> diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
> index be8974ccff24..0104f16b1327 100644
> --- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c
> +++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
> @@ -13,7 +13,6 @@ mock_context(struct drm_i915_private *i915,
>  {
>  	struct i915_gem_context *ctx;
>  	struct i915_gem_engines *e;
> -	int ret;
>  
>  	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
>  	if (!ctx)
> @@ -30,13 +29,8 @@ mock_context(struct drm_i915_private *i915,
>  	RCU_INIT_POINTER(ctx->engines, e);
>  
>  	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
> -	INIT_LIST_HEAD(&ctx->hw_id_link);
>  	mutex_init(&ctx->mutex);
>  
> -	ret = i915_gem_context_pin_hw_id(ctx);
> -	if (ret < 0)
> -		goto err_engines;
> -
>  	if (name) {
>  		struct i915_ppgtt *ppgtt;
>  
> @@ -54,8 +48,6 @@ mock_context(struct drm_i915_private *i915,
>  
>  	return ctx;
>  
> -err_engines:
> -	free_engines(rcu_access_pointer(ctx->engines));
>  err_free:
>  	kfree(ctx);
>  	return NULL;
> diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
> index c00419e38a77..36394d4061e4 100644
> --- a/drivers/gpu/drm/i915/gt/intel_context_types.h
> +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
> @@ -52,6 +52,7 @@ struct intel_context {
>  
>  	u32 *lrc_reg_state;
>  	u64 lrc_desc;
> +	u32 hw_tag;
>  
>  	unsigned int active_count; /* protected by timeline->mutex */
>  
> diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
> index 1da8d2f34c56..121abe5b08e7 100644
> --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
> +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
> @@ -270,10 +270,13 @@ struct intel_engine_cs {
>  
>  	u8 class;
>  	u8 instance;
> +
> +	u32 uabi_capabilities;
>  	u32 context_size;
>  	u32 mmio_base;
>  
> -	u32 uabi_capabilities;
> +	unsigned int hw_tag;
> +#define NUM_HW_TAG (256)
>  
>  	struct rb_node uabi_node;
>  
> diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
> index 18d9ddc5cd58..ae15e0dd98ac 100644
> --- a/drivers/gpu/drm/i915/gt/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
> @@ -412,9 +412,6 @@ lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
>  	struct i915_gem_context *ctx = ce->gem_context;
>  	u64 desc;
>  
> -	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (BIT(GEN8_CTX_ID_WIDTH)));
> -	BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > (BIT(GEN11_SW_CTX_ID_WIDTH)));
> -
>  	desc = ctx->desc_template;				/* bits  0-11 */
>  	GEM_BUG_ON(desc & GENMASK_ULL(63, 12));
>  
> @@ -428,20 +425,11 @@ lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
>  	 * anything below.
>  	 */
>  	if (INTEL_GEN(engine->i915) >= 11) {
> -		GEM_BUG_ON(ctx->hw_id >= BIT(GEN11_SW_CTX_ID_WIDTH));
> -		desc |= (u64)ctx->hw_id << GEN11_SW_CTX_ID_SHIFT;
> -								/* bits 37-47 */
> -
>  		desc |= (u64)engine->instance << GEN11_ENGINE_INSTANCE_SHIFT;
>  								/* bits 48-53 */
>  
> -		/* TODO: decide what to do with SW counter (bits 55-60) */
> -
>  		desc |= (u64)engine->class << GEN11_ENGINE_CLASS_SHIFT;
>  								/* bits 61-63 */
> -	} else {
> -		GEM_BUG_ON(ctx->hw_id >= BIT(GEN8_CTX_ID_WIDTH));
> -		desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT;	/* bits 32-52 */
>  	}
>  
>  	return desc;
> @@ -537,6 +525,15 @@ execlists_schedule_in(struct i915_request *rq, int idx)
>  		intel_context_get(ce);
>  		ce->inflight = rq->engine;
>  
> +		if (ce->hw_tag) {
> +			ce->lrc_desc |= (u64)ce->hw_tag << 32;
> +		} else {
> +			ce->lrc_desc &= ~GENMASK_ULL(47, 37);
> +			ce->lrc_desc |=
> +				(u64)(rq->engine->hw_tag++ % NUM_HW_TAG) <<
> +				GEN11_SW_CTX_ID_SHIFT;
> +		}
> +
>  		execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
>  		intel_engine_context_in(ce->inflight);
>  	}
> @@ -1551,7 +1548,6 @@ static void execlists_context_destroy(struct kref *kref)
>  
>  static void execlists_context_unpin(struct intel_context *ce)
>  {
> -	i915_gem_context_unpin_hw_id(ce->gem_context);
>  	i915_gem_object_unpin_map(ce->state->obj);
>  	intel_ring_reset(ce->ring, ce->ring->tail);
>  }
> @@ -1606,18 +1602,12 @@ __execlists_context_pin(struct intel_context *ce,
>  		goto unpin_active;
>  	}
>  
> -	ret = i915_gem_context_pin_hw_id(ce->gem_context);
> -	if (ret)
> -		goto unpin_map;
> -
>  	ce->lrc_desc = lrc_descriptor(ce, engine);
>  	ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
>  	__execlists_update_reg_state(ce, engine);
>  
>  	return 0;
>  
> -unpin_map:
> -	i915_gem_object_unpin_map(ce->state->obj);
>  unpin_active:
>  	intel_context_active_release(ce);
>  err:
> diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
> index 144301b778df..941d0f08b047 100644
> --- a/drivers/gpu/drm/i915/gvt/kvmgt.c
> +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
> @@ -1566,27 +1566,10 @@ vgpu_id_show(struct device *dev, struct device_attribute *attr,
>  	return sprintf(buf, "\n");
>  }
>  
> -static ssize_t
> -hw_id_show(struct device *dev, struct device_attribute *attr,
> -	   char *buf)
> -{
> -	struct mdev_device *mdev = mdev_from_dev(dev);
> -
> -	if (mdev) {
> -		struct intel_vgpu *vgpu = (struct intel_vgpu *)
> -			mdev_get_drvdata(mdev);
> -		return sprintf(buf, "%u\n",
> -			       vgpu->submission.shadow[0]->gem_context->hw_id);
> -	}
> -	return sprintf(buf, "\n");
> -}
> -
>  static DEVICE_ATTR_RO(vgpu_id);
> -static DEVICE_ATTR_RO(hw_id);
>  
>  static struct attribute *intel_vgpu_attrs[] = {
>  	&dev_attr_vgpu_id.attr,
> -	&dev_attr_hw_id.attr,
>  	NULL
>  };
>  
> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> index b6b80963f992..b7ec9f3cdc2c 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -1581,9 +1581,6 @@ static int i915_context_status(struct seq_file *m, void *unused)
>  		struct intel_context *ce;
>  
>  		seq_puts(m, "HW context ");
> -		if (!list_empty(&ctx->hw_id_link))
> -			seq_printf(m, "%x [pin %u]", ctx->hw_id,
> -				   atomic_read(&ctx->hw_id_pin_count));
>  		if (ctx->pid) {
>  			struct task_struct *task;
>  
> diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
> index 026ee1f5536d..75671faf263d 100644
> --- a/drivers/gpu/drm/i915/i915_gpu_error.c
> +++ b/drivers/gpu/drm/i915/i915_gpu_error.c
> @@ -506,9 +506,9 @@ static void error_print_context(struct drm_i915_error_state_buf *m,
>  				const char *header,
>  				const struct drm_i915_error_context *ctx)
>  {
> -	err_printf(m, "%s%s[%d] hw_id %d, prio %d, guilty %d active %d\n",
> -		   header, ctx->comm, ctx->pid, ctx->hw_id,
> -		   ctx->sched_attr.priority, ctx->guilty, ctx->active);
> +	err_printf(m, "%s%s[%d] prio %d, guilty %d active %d\n",
> +		   header, ctx->comm, ctx->pid, ctx->sched_attr.priority,
> +		   ctx->guilty, ctx->active);
>  }
>  
>  static void error_print_engine(struct drm_i915_error_state_buf *m,
> @@ -1310,7 +1310,6 @@ static void record_context(struct drm_i915_error_context *e,
>  		rcu_read_unlock();
>  	}
>  
> -	e->hw_id = ctx->hw_id;
>  	e->sched_attr = ctx->sched;
>  	e->guilty = atomic_read(&ctx->guilty_count);
>  	e->active = atomic_read(&ctx->active_count);
> diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
> index a24c35107d16..1d951eae7e2e 100644
> --- a/drivers/gpu/drm/i915/i915_gpu_error.h
> +++ b/drivers/gpu/drm/i915/i915_gpu_error.h
> @@ -117,7 +117,6 @@ struct i915_gpu_state {
>  		struct drm_i915_error_context {
>  			char comm[TASK_COMM_LEN];
>  			pid_t pid;
> -			u32 hw_id;
>  			int active;
>  			int guilty;
>  			struct i915_sched_attr sched_attr;
> diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
> index 58a71efc25db..df02e0fe0701 100644
> --- a/drivers/gpu/drm/i915/i915_perf.c
> +++ b/drivers/gpu/drm/i915/i915_perf.c
> @@ -1293,19 +1293,14 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
>  			i915->perf.oa.specific_ctx_id_mask =
>  				(1U << GEN8_CTX_ID_WIDTH) - 1;
>  			i915->perf.oa.specific_ctx_id =
> -				upper_32_bits(ce->lrc_desc);
> -			i915->perf.oa.specific_ctx_id &=
>  				i915->perf.oa.specific_ctx_id_mask;
>  		}
>  		break;
>  
>  	case 11: {
>  		i915->perf.oa.specific_ctx_id_mask =
> -			((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32) |
> -			((1U << GEN11_ENGINE_INSTANCE_WIDTH) - 1) << (GEN11_ENGINE_INSTANCE_SHIFT - 32) |
> -			((1 << GEN11_ENGINE_CLASS_WIDTH) - 1) << (GEN11_ENGINE_CLASS_SHIFT - 32);
> -		i915->perf.oa.specific_ctx_id = upper_32_bits(ce->lrc_desc);
> -		i915->perf.oa.specific_ctx_id &=
> +			((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
> +		i915->perf.oa.specific_ctx_id =
>  			i915->perf.oa.specific_ctx_id_mask;
>  		break;
>  	}
> @@ -1314,6 +1309,8 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
>  		MISSING_CASE(INTEL_GEN(i915));
>  	}
>  
> +	ce->hw_tag = i915->perf.oa.specific_ctx_id_mask;
> +
>  	DRM_DEBUG_DRIVER("filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
>  			 i915->perf.oa.specific_ctx_id,
>  			 i915->perf.oa.specific_ctx_id_mask);
> @@ -1330,18 +1327,19 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
>   */
>  static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
>  {
> -	struct drm_i915_private *dev_priv = stream->dev_priv;
> +	struct drm_i915_private *i915 = stream->dev_priv;
>  	struct intel_context *ce;
>  
> -	dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
> -	dev_priv->perf.oa.specific_ctx_id_mask = 0;
> -
> -	ce = fetch_and_zero(&dev_priv->perf.oa.pinned_ctx);
> -	if (ce) {
> -		mutex_lock(&dev_priv->drm.struct_mutex);
> +	ce = fetch_and_zero(&i915->perf.oa.pinned_ctx);
> +	if (ce)  {
> +		mutex_lock(&i915->drm.struct_mutex);
> +		ce->hw_tag = 0;
>  		intel_context_unpin(ce);
> -		mutex_unlock(&dev_priv->drm.struct_mutex);
> +		mutex_unlock(&i915->drm.struct_mutex);
>  	}
> +
> +	i915->perf.oa.specific_ctx_id = INVALID_CTX_ID;
> +	i915->perf.oa.specific_ctx_id_mask = 0;
>  }
>  
>  static void
> diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
> index da18b8d6b80c..fbf6dd0eb5c9 100644
> --- a/drivers/gpu/drm/i915/i915_trace.h
> +++ b/drivers/gpu/drm/i915/i915_trace.h
> @@ -665,7 +665,6 @@ TRACE_EVENT(i915_request_queue,
>  
>  	    TP_STRUCT__entry(
>  			     __field(u32, dev)
> -			     __field(u32, hw_id)
>  			     __field(u64, ctx)
>  			     __field(u16, class)
>  			     __field(u16, instance)
> @@ -675,7 +674,6 @@ TRACE_EVENT(i915_request_queue,
>  
>  	    TP_fast_assign(
>  			   __entry->dev = rq->i915->drm.primary->index;
> -			   __entry->hw_id = rq->gem_context->hw_id;
>  			   __entry->class = rq->engine->uabi_class;
>  			   __entry->instance = rq->engine->instance;
>  			   __entry->ctx = rq->fence.context;
> @@ -683,10 +681,9 @@ TRACE_EVENT(i915_request_queue,
>  			   __entry->flags = flags;
>  			   ),
>  
> -	    TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, flags=0x%x",
> +	    TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, flags=0x%x",
>  		      __entry->dev, __entry->class, __entry->instance,
> -		      __entry->hw_id, __entry->ctx, __entry->seqno,
> -		      __entry->flags)
> +		      __entry->ctx, __entry->seqno, __entry->flags)
>  );
>  
>  DECLARE_EVENT_CLASS(i915_request,
> @@ -695,7 +692,6 @@ DECLARE_EVENT_CLASS(i915_request,
>  
>  	    TP_STRUCT__entry(
>  			     __field(u32, dev)
> -			     __field(u32, hw_id)
>  			     __field(u64, ctx)
>  			     __field(u16, class)
>  			     __field(u16, instance)
> @@ -704,16 +700,15 @@ DECLARE_EVENT_CLASS(i915_request,
>  
>  	    TP_fast_assign(
>  			   __entry->dev = rq->i915->drm.primary->index;
> -			   __entry->hw_id = rq->gem_context->hw_id;
>  			   __entry->class = rq->engine->uabi_class;
>  			   __entry->instance = rq->engine->instance;
>  			   __entry->ctx = rq->fence.context;
>  			   __entry->seqno = rq->fence.seqno;
>  			   ),
>  
> -	    TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u",
> +	    TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u",
>  		      __entry->dev, __entry->class, __entry->instance,
> -		      __entry->hw_id, __entry->ctx, __entry->seqno)
> +		      __entry->ctx, __entry->seqno)
>  );
>  
>  DEFINE_EVENT(i915_request, i915_request_add,
> @@ -738,7 +733,6 @@ TRACE_EVENT(i915_request_in,
>  
>  	    TP_STRUCT__entry(
>  			     __field(u32, dev)
> -			     __field(u32, hw_id)
>  			     __field(u64, ctx)
>  			     __field(u16, class)
>  			     __field(u16, instance)
> @@ -749,7 +743,6 @@ TRACE_EVENT(i915_request_in,
>  
>  	    TP_fast_assign(
>  			   __entry->dev = rq->i915->drm.primary->index;
> -			   __entry->hw_id = rq->gem_context->hw_id;
>  			   __entry->class = rq->engine->uabi_class;
>  			   __entry->instance = rq->engine->instance;
>  			   __entry->ctx = rq->fence.context;
> @@ -758,9 +751,9 @@ TRACE_EVENT(i915_request_in,
>  			   __entry->port = port;
>  			   ),
>  
> -	    TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, prio=%u, port=%u",
> +	    TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, prio=%u, port=%u",
>  		      __entry->dev, __entry->class, __entry->instance,
> -		      __entry->hw_id, __entry->ctx, __entry->seqno,
> +		      __entry->ctx, __entry->seqno,
>  		      __entry->prio, __entry->port)
>  );
>  
> @@ -770,7 +763,6 @@ TRACE_EVENT(i915_request_out,
>  
>  	    TP_STRUCT__entry(
>  			     __field(u32, dev)
> -			     __field(u32, hw_id)
>  			     __field(u64, ctx)
>  			     __field(u16, class)
>  			     __field(u16, instance)
> @@ -780,7 +772,6 @@ TRACE_EVENT(i915_request_out,
>  
>  	    TP_fast_assign(
>  			   __entry->dev = rq->i915->drm.primary->index;
> -			   __entry->hw_id = rq->gem_context->hw_id;
>  			   __entry->class = rq->engine->uabi_class;
>  			   __entry->instance = rq->engine->instance;
>  			   __entry->ctx = rq->fence.context;
> @@ -788,10 +779,9 @@ TRACE_EVENT(i915_request_out,
>  			   __entry->completed = i915_request_completed(rq);
>  			   ),
>  
> -		    TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, completed?=%u",
> +		    TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, completed?=%u",
>  			      __entry->dev, __entry->class, __entry->instance,
> -			      __entry->hw_id, __entry->ctx, __entry->seqno,
> -			      __entry->completed)
> +			      __entry->ctx, __entry->seqno, __entry->completed)
>  );
>  
>  #else
> @@ -829,7 +819,6 @@ TRACE_EVENT(i915_request_wait_begin,
>  
>  	    TP_STRUCT__entry(
>  			     __field(u32, dev)
> -			     __field(u32, hw_id)
>  			     __field(u64, ctx)
>  			     __field(u16, class)
>  			     __field(u16, instance)
> @@ -845,7 +834,6 @@ TRACE_EVENT(i915_request_wait_begin,
>  	     */
>  	    TP_fast_assign(
>  			   __entry->dev = rq->i915->drm.primary->index;
> -			   __entry->hw_id = rq->gem_context->hw_id;
>  			   __entry->class = rq->engine->uabi_class;
>  			   __entry->instance = rq->engine->instance;
>  			   __entry->ctx = rq->fence.context;
> @@ -853,9 +841,9 @@ TRACE_EVENT(i915_request_wait_begin,
>  			   __entry->flags = flags;
>  			   ),
>  
> -	    TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, flags=0x%x",
> +	    TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, flags=0x%x",
>  		      __entry->dev, __entry->class, __entry->instance,
> -		      __entry->hw_id, __entry->ctx, __entry->seqno,
> +		      __entry->ctx, __entry->seqno,
>  		      __entry->flags)
>  );
>  
> @@ -958,19 +946,17 @@ DECLARE_EVENT_CLASS(i915_context,
>  	TP_STRUCT__entry(
>  			__field(u32, dev)
>  			__field(struct i915_gem_context *, ctx)
> -			__field(u32, hw_id)
>  			__field(struct i915_address_space *, vm)
>  	),
>  
>  	TP_fast_assign(
>  			__entry->dev = ctx->i915->drm.primary->index;
>  			__entry->ctx = ctx;
> -			__entry->hw_id = ctx->hw_id;
>  			__entry->vm = ctx->vm;
>  	),
>  
> -	TP_printk("dev=%u, ctx=%p, ctx_vm=%p, hw_id=%u",
> -		  __entry->dev, __entry->ctx, __entry->vm, __entry->hw_id)
> +	TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
> +		  __entry->dev, __entry->ctx, __entry->vm)
>  )
>  
>  DEFINE_EVENT(i915_context, i915_context_create,
> diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
> index b6449d0a8c17..07ab822b224f 100644
> --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
> +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
> @@ -475,8 +475,8 @@ static int igt_evict_contexts(void *arg)
>  			if (IS_ERR(rq)) {
>  				/* When full, fail_if_busy will trigger EBUSY */
>  				if (PTR_ERR(rq) != -EBUSY) {
> -					pr_err("Unexpected error from request alloc (ctx hw id %u, on %s): %d\n",
> -					       ctx->hw_id, engine->name,
> +					pr_err("Unexpected error from request alloc (on %s): %d\n",
> +					       engine->name,
>  					       (int)PTR_ERR(rq));
>  					err = PTR_ERR(rq);
>  				}
> diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
> index fbc79b14823a..85008f4fc78c 100644
> --- a/drivers/gpu/drm/i915/selftests/i915_vma.c
> +++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
> @@ -170,7 +170,7 @@ static int igt_vma_create(void *arg)
>  		}
>  
>  		nc = 0;
> -		for_each_prime_number(num_ctx, MAX_CONTEXT_HW_ID) {
> +		for_each_prime_number(num_ctx, NUM_HW_TAG) {
>  			for (; nc < num_ctx; nc++) {
>  				ctx = mock_context(i915, "mock");
>  				if (!ctx)
> -- 
> 2.22.0
> 
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Thread overview: 33+ messages
2019-07-15  8:09 [PATCH 01/24] drm/i915: Hide unshrinkable context objects from the shrinker Chris Wilson
2019-07-15  8:09 ` [PATCH 02/24] drm/i915/gt: Move the [class][inst] lookup for engines onto the GT Chris Wilson
2019-07-15  8:09 ` [PATCH 03/24] drm/i915/gtt: Recursive ppgtt alloc for gen8 Chris Wilson
2019-07-15  8:09 ` [PATCH 04/24] drm/i915/gtt: Tidy up ppgtt insertion " Chris Wilson
2019-07-15  8:09 ` [PATCH 05/24] drm/i915/selftests: Ignore self-preemption suppression under gvt Chris Wilson
2019-07-15  9:11   ` Zhenyu Wang
2019-07-15  8:09 ` [PATCH 06/24] drm/i915: Lock the engine while dumping the active request Chris Wilson
2019-07-16 10:26   ` Tvrtko Ursulin
2019-07-15  8:09 ` [PATCH 07/24] drm/i915: Rely on spinlock protection for GPU error capture Chris Wilson
2019-07-16 10:48   ` Tvrtko Ursulin
2019-07-16 11:17     ` Chris Wilson
2019-07-15  8:09 ` [PATCH 08/24] drm/i915/oa: Reconfigure contexts on the fly Chris Wilson
2019-07-15  8:09 ` [PATCH 09/24] drm/i915: Add to timeline requires the timeline mutex Chris Wilson
2019-07-15  8:09 ` [PATCH 10/24] drm/i915: Teach execbuffer to take the engine wakeref not GT Chris Wilson
2019-07-15  8:09 ` [PATCH 11/24] drm/i915/gt: Track timeline activeness in enter/exit Chris Wilson
2019-07-15  8:09 ` [PATCH 12/24] drm/i915/gt: Convert timeline tracking to spinlock Chris Wilson
2019-07-15  8:09 ` [PATCH 13/24] drm/i915/gt: Guard timeline pinning with its own mutex Chris Wilson
2019-07-15  8:09 ` [PATCH 14/24] drm/i915: Protect request retirement with timeline->mutex Chris Wilson
2019-07-15  8:09 ` [PATCH 15/24] drm/i915: Replace struct_mutex for batch pool serialisation Chris Wilson
2019-07-15  8:09 ` [PATCH 16/24] drm/i915/gt: Mark context->active_count as protected by timeline->mutex Chris Wilson
2019-07-15  8:09 ` [PATCH 17/24] drm/i915: Forgo last_fence active request tracking Chris Wilson
2019-07-15  8:09 ` [PATCH 18/24] drm/i915/overlay: Switch to using i915_active tracking Chris Wilson
2019-07-15  8:09 ` [PATCH 19/24] drm/i915: Extract intel_frontbuffer active tracking Chris Wilson
2019-07-15  8:09 ` [PATCH 20/24] drm/i915: Markup expected timeline locks for i915_active Chris Wilson
2019-07-15  8:09 ` [PATCH 21/24] drm/i915: Remove logical HW ID Chris Wilson
2019-07-19 20:56   ` Kumar Valsan, Prathap
2019-07-15  8:09 ` [PATCH 22/24] drm/i915: Move context management under GEM Chris Wilson
2019-07-15  8:09 ` [PATCH 23/24] drm/i915: Make debugfs/per_file_stats scale better Chris Wilson
2019-07-15  8:09 ` [PATCH 24/24] drm/i915/gt: Extract GT runtime power management from intel_pm.c Chris Wilson
2019-07-15 10:11 ` ✗ Fi.CI.CHECKPATCH: warning for series starting with [01/24] drm/i915: Hide unshrinkable context objects from the shrinker Patchwork
2019-07-15 10:22 ` ✗ Fi.CI.SPARSE: " Patchwork
2019-07-15 11:00 ` ✓ Fi.CI.BAT: success " Patchwork
2019-07-15 13:36 ` ✗ Fi.CI.IGT: failure " Patchwork
