From: Chris Wilson <chris@chris-wilson.co.uk>
To: intel-gfx@lists.freedesktop.org
Subject: [CI 3/7] drm/i915: make more uncore functions work on intel_uncore
Date: Wed, 20 Mar 2019 21:14:03 +0000
Message-ID: <20190320211407.25316-3-chris@chris-wilson.co.uk>
In-Reply-To: <20190320211407.25316-1-chris@chris-wilson.co.uk>

From: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>

Move the init, fini, prune, suspend and resume functions to work on
intel_uncore instead of dev_priv.
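
To illustrate the conversion pattern outside of the diff, here is a minimal
standalone sketch (not part of the patch): the uncore routines now take a
struct intel_uncore pointer, and where device-level state is still needed
they reach back to the owning drm_i915_private via uncore_to_i915(), as the
patch does. The container_of() definition, the gen field and the printf()
call are illustrative stand-ins for the kernel equivalents.

/*
 * Standalone sketch of the refactoring pattern (not kernel code):
 * intel_uncore is embedded in drm_i915_private, the uncore routine
 * takes the uncore pointer directly, and container_of() recovers the
 * owning device where it is still needed (uncore_to_i915() in i915).
 */
#include <stddef.h>
#include <stdio.h>

struct intel_uncore {
	unsigned int fw_domains_saved;
};

struct drm_i915_private {
	int gen;			/* stand-in for device-wide state */
	struct intel_uncore uncore;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* mirrors the uncore_to_i915() helper used in the patch */
static struct drm_i915_private *uncore_to_i915(struct intel_uncore *uncore)
{
	return container_of(uncore, struct drm_i915_private, uncore);
}

/* new-style signature: operates on intel_uncore, not dev_priv */
static void intel_uncore_suspend(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore_to_i915(uncore);

	printf("suspending uncore of gen%d device\n", i915->gen);
}

int main(void)
{
	struct drm_i915_private dev_priv = { .gen = 11 };

	/* callers now pass the embedded uncore, as in i915_drm_suspend_late() */
	intel_uncore_suspend(&dev_priv.uncore);
	return 0;
}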

Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Reviewed-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190319183543.13679-5-daniele.ceraolospurio@intel.com
---
 drivers/gpu/drm/i915/i915_drv.c               |  20 +-
 drivers/gpu/drm/i915/intel_uncore.c           | 273 +++++++++---------
 drivers/gpu/drm/i915/intel_uncore.h           |  12 +-
 .../gpu/drm/i915/selftests/mock_gem_device.c  |   2 +-
 drivers/gpu/drm/i915/selftests/mock_uncore.c  |   6 +-
 drivers/gpu/drm/i915/selftests/mock_uncore.h  |   2 +-
 6 files changed, 159 insertions(+), 156 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index b540b33cbf73..70ee1b9e8208 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -998,11 +998,11 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
 	if (ret < 0)
 		goto err_bridge;
 
-	intel_uncore_init(dev_priv);
+	intel_uncore_init(&dev_priv->uncore);
 
 	intel_device_info_init_mmio(dev_priv);
 
-	intel_uncore_prune(dev_priv);
+	intel_uncore_prune(&dev_priv->uncore);
 
 	intel_uc_init_mmio(dev_priv);
 
@@ -1015,7 +1015,7 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
 	return 0;
 
 err_uncore:
-	intel_uncore_fini(dev_priv);
+	intel_uncore_fini(&dev_priv->uncore);
 	i915_mmio_cleanup(dev_priv);
 err_bridge:
 	pci_dev_put(dev_priv->bridge_dev);
@@ -1029,7 +1029,7 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
  */
 static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
 {
-	intel_uncore_fini(dev_priv);
+	intel_uncore_fini(&dev_priv->uncore);
 	i915_mmio_cleanup(dev_priv);
 	pci_dev_put(dev_priv->bridge_dev);
 }
@@ -2091,7 +2091,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
 
 	i915_gem_suspend_late(dev_priv);
 
-	intel_uncore_suspend(dev_priv);
+	intel_uncore_suspend(&dev_priv->uncore);
 
 	intel_power_domains_suspend(dev_priv,
 				    get_suspend_mode(dev_priv, hibernation));
@@ -2287,7 +2287,9 @@ static int i915_drm_resume_early(struct drm_device *dev)
 		DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
 			  ret);
 
-	intel_uncore_resume_early(dev_priv);
+	intel_uncore_resume_early(&dev_priv->uncore);
+
+	i915_check_and_clear_faults(dev_priv);
 
 	if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv)) {
 		gen9_sanitize_dc_state(dev_priv);
@@ -2857,7 +2859,7 @@ static int intel_runtime_suspend(struct device *kdev)
 
 	intel_runtime_pm_disable_interrupts(dev_priv);
 
-	intel_uncore_suspend(dev_priv);
+	intel_uncore_suspend(&dev_priv->uncore);
 
 	ret = 0;
 	if (INTEL_GEN(dev_priv) >= 11) {
@@ -2874,7 +2876,7 @@ static int intel_runtime_suspend(struct device *kdev)
 
 	if (ret) {
 		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
-		intel_uncore_runtime_resume(dev_priv);
+		intel_uncore_runtime_resume(&dev_priv->uncore);
 
 		intel_runtime_pm_enable_interrupts(dev_priv);
 
@@ -2971,7 +2973,7 @@ static int intel_runtime_resume(struct device *kdev)
 		ret = vlv_resume_prepare(dev_priv, true);
 	}
 
-	intel_uncore_runtime_resume(dev_priv);
+	intel_uncore_runtime_resume(&dev_priv->uncore);
 
 	intel_runtime_pm_enable_interrupts(dev_priv);
 
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 2e50697e72b4..d58f96ea774c 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -525,62 +525,58 @@ check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
 	return ret;
 }
 
-static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
+static void __intel_uncore_early_sanitize(struct intel_uncore *uncore,
 					  unsigned int restore_forcewake)
 {
+	struct drm_i915_private *i915 = uncore_to_i915(uncore);
+
 	/* clear out unclaimed reg detection bit */
-	if (check_for_unclaimed_mmio(dev_priv))
+	if (check_for_unclaimed_mmio(i915))
 		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
 
 	/* WaDisableShadowRegForCpd:chv */
-	if (IS_CHERRYVIEW(dev_priv)) {
-		__raw_i915_write32(dev_priv, GTFIFOCTL,
-				   __raw_i915_read32(dev_priv, GTFIFOCTL) |
+	if (IS_CHERRYVIEW(i915)) {
+		__raw_i915_write32(i915, GTFIFOCTL,
+				   __raw_i915_read32(i915, GTFIFOCTL) |
 				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
 				   GT_FIFO_CTL_RC6_POLICY_STALL);
 	}
 
 	iosf_mbi_punit_acquire();
-	intel_uncore_forcewake_reset(&dev_priv->uncore);
+	intel_uncore_forcewake_reset(uncore);
 	if (restore_forcewake) {
-		spin_lock_irq(&dev_priv->uncore.lock);
-		dev_priv->uncore.funcs.force_wake_get(&dev_priv->uncore,
-						      restore_forcewake);
-
-		if (IS_GEN_RANGE(dev_priv, 6, 7))
-			dev_priv->uncore.fifo_count =
-				fifo_free_entries(dev_priv);
-		spin_unlock_irq(&dev_priv->uncore.lock);
+		spin_lock_irq(&uncore->lock);
+		uncore->funcs.force_wake_get(uncore, restore_forcewake);
+
+		if (IS_GEN_RANGE(i915, 6, 7))
+			uncore->fifo_count = fifo_free_entries(i915);
+		spin_unlock_irq(&uncore->lock);
 	}
 	iosf_mbi_punit_release();
 }
 
-void intel_uncore_suspend(struct drm_i915_private *dev_priv)
+void intel_uncore_suspend(struct intel_uncore *uncore)
 {
 	iosf_mbi_punit_acquire();
 	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
-		&dev_priv->uncore.pmic_bus_access_nb);
-	dev_priv->uncore.fw_domains_saved =
-		intel_uncore_forcewake_reset(&dev_priv->uncore);
+		&uncore->pmic_bus_access_nb);
+	uncore->fw_domains_saved = intel_uncore_forcewake_reset(uncore);
 	iosf_mbi_punit_release();
 }
 
-void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
+void intel_uncore_resume_early(struct intel_uncore *uncore)
 {
 	unsigned int restore_forcewake;
 
-	restore_forcewake = fetch_and_zero(&dev_priv->uncore.fw_domains_saved);
-	__intel_uncore_early_sanitize(dev_priv, restore_forcewake);
+	restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved);
+	__intel_uncore_early_sanitize(uncore, restore_forcewake);
 
-	iosf_mbi_register_pmic_bus_access_notifier(
-		&dev_priv->uncore.pmic_bus_access_nb);
-	i915_check_and_clear_faults(dev_priv);
+	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
 }
 
-void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv)
+void intel_uncore_runtime_resume(struct intel_uncore *uncore)
 {
-	iosf_mbi_register_pmic_bus_access_notifier(
-		&dev_priv->uncore.pmic_bus_access_nb);
+	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
 }
 
 void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
@@ -1309,29 +1305,29 @@ __gen6_write(32)
 #undef GEN6_WRITE_FOOTER
 #undef GEN6_WRITE_HEADER
 
-#define ASSIGN_WRITE_MMIO_VFUNCS(i915, x) \
+#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \
 do { \
-	(i915)->uncore.funcs.mmio_writeb = x##_write8; \
-	(i915)->uncore.funcs.mmio_writew = x##_write16; \
-	(i915)->uncore.funcs.mmio_writel = x##_write32; \
+	(uncore)->funcs.mmio_writeb = x##_write8; \
+	(uncore)->funcs.mmio_writew = x##_write16; \
+	(uncore)->funcs.mmio_writel = x##_write32; \
 } while (0)
 
-#define ASSIGN_READ_MMIO_VFUNCS(i915, x) \
+#define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \
 do { \
-	(i915)->uncore.funcs.mmio_readb = x##_read8; \
-	(i915)->uncore.funcs.mmio_readw = x##_read16; \
-	(i915)->uncore.funcs.mmio_readl = x##_read32; \
-	(i915)->uncore.funcs.mmio_readq = x##_read64; \
+	(uncore)->funcs.mmio_readb = x##_read8; \
+	(uncore)->funcs.mmio_readw = x##_read16; \
+	(uncore)->funcs.mmio_readl = x##_read32; \
+	(uncore)->funcs.mmio_readq = x##_read64; \
 } while (0)
 
 
-static void fw_domain_init(struct drm_i915_private *dev_priv,
+static void fw_domain_init(struct intel_uncore *uncore,
 			   enum forcewake_domain_id domain_id,
 			   i915_reg_t reg_set,
 			   i915_reg_t reg_ack)
 {
-	struct intel_uncore *uncore = &dev_priv->uncore;
 	struct intel_uncore_forcewake_domain *d;
+	struct drm_i915_private *i915 = uncore_to_i915(uncore);
 
 	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
 		return;
@@ -1344,8 +1340,8 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
 	WARN_ON(!i915_mmio_reg_valid(reg_ack));
 
 	d->wake_count = 0;
-	d->reg_set = dev_priv->regs + i915_mmio_reg_offset(reg_set);
-	d->reg_ack = dev_priv->regs + i915_mmio_reg_offset(reg_ack);
+	d->reg_set = i915->regs + i915_mmio_reg_offset(reg_set);
+	d->reg_ack = i915->regs + i915_mmio_reg_offset(reg_ack);
 
 	d->id = domain_id;
 
@@ -1370,7 +1366,7 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
 	fw_domain_reset(d);
 }
 
-static void fw_domain_fini(struct drm_i915_private *dev_priv,
+static void fw_domain_fini(struct intel_uncore *uncore,
 			   enum forcewake_domain_id domain_id)
 {
 	struct intel_uncore_forcewake_domain *d;
@@ -1378,74 +1374,76 @@ static void fw_domain_fini(struct drm_i915_private *dev_priv,
 	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
 		return;
 
-	d = &dev_priv->uncore.fw_domain[domain_id];
+	d = &uncore->fw_domain[domain_id];
 
 	WARN_ON(d->wake_count);
 	WARN_ON(hrtimer_cancel(&d->timer));
 	memset(d, 0, sizeof(*d));
 
-	dev_priv->uncore.fw_domains &= ~BIT(domain_id);
+	uncore->fw_domains &= ~BIT(domain_id);
 }
 
-static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
+static void intel_uncore_fw_domains_init(struct intel_uncore *uncore)
 {
-	if (INTEL_GEN(dev_priv) <= 5 || intel_vgpu_active(dev_priv))
+	struct drm_i915_private *i915 = uncore_to_i915(uncore);
+
+	if (INTEL_GEN(i915) <= 5 || intel_vgpu_active(i915))
 		return;
 
-	if (INTEL_GEN(dev_priv) >= 11) {
+	if (INTEL_GEN(i915) >= 11) {
 		int i;
 
-		dev_priv->uncore.funcs.force_wake_get =
+		uncore->funcs.force_wake_get =
 			fw_domains_get_with_fallback;
-		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
-		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+		uncore->funcs.force_wake_put = fw_domains_put;
+		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
 			       FORCEWAKE_RENDER_GEN9,
 			       FORCEWAKE_ACK_RENDER_GEN9);
-		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
+		fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER,
 			       FORCEWAKE_BLITTER_GEN9,
 			       FORCEWAKE_ACK_BLITTER_GEN9);
 		for (i = 0; i < I915_MAX_VCS; i++) {
-			if (!HAS_ENGINE(dev_priv, _VCS(i)))
+			if (!HAS_ENGINE(i915, _VCS(i)))
 				continue;
 
-			fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
+			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
 				       FORCEWAKE_MEDIA_VDBOX_GEN11(i),
 				       FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i));
 		}
 		for (i = 0; i < I915_MAX_VECS; i++) {
-			if (!HAS_ENGINE(dev_priv, _VECS(i)))
+			if (!HAS_ENGINE(i915, _VECS(i)))
 				continue;
 
-			fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
+			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
 				       FORCEWAKE_MEDIA_VEBOX_GEN11(i),
 				       FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
 		}
-	} else if (IS_GEN_RANGE(dev_priv, 9, 10)) {
-		dev_priv->uncore.funcs.force_wake_get =
+	} else if (IS_GEN_RANGE(i915, 9, 10)) {
+		uncore->funcs.force_wake_get =
 			fw_domains_get_with_fallback;
-		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
-		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+		uncore->funcs.force_wake_put = fw_domains_put;
+		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
 			       FORCEWAKE_RENDER_GEN9,
 			       FORCEWAKE_ACK_RENDER_GEN9);
-		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
+		fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER,
 			       FORCEWAKE_BLITTER_GEN9,
 			       FORCEWAKE_ACK_BLITTER_GEN9);
-		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
+		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
 			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
-	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
-		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
-		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+		uncore->funcs.force_wake_get = fw_domains_get;
+		uncore->funcs.force_wake_put = fw_domains_put;
+		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
 			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
-		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
+		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
 			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
-	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
-		dev_priv->uncore.funcs.force_wake_get =
+	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
+		uncore->funcs.force_wake_get =
 			fw_domains_get_with_thread_status;
-		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
-		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+		uncore->funcs.force_wake_put = fw_domains_put;
+		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
 			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
-	} else if (IS_IVYBRIDGE(dev_priv)) {
+	} else if (IS_IVYBRIDGE(i915)) {
 		u32 ecobus;
 
 		/* IVB configs may use multi-threaded forcewake */
@@ -1457,9 +1455,9 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
 		 * (correctly) interpreted by the test below as MT
 		 * forcewake being disabled.
 		 */
-		dev_priv->uncore.funcs.force_wake_get =
+		uncore->funcs.force_wake_get =
 			fw_domains_get_with_thread_status;
-		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
+		uncore->funcs.force_wake_put = fw_domains_put;
 
 		/* We need to init first for ECOBUS access and then
 		 * determine later if we want to reinit, in case of MT access is
@@ -1468,41 +1466,41 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
 		 * before the ecobus check.
 		 */
 
-		__raw_i915_write32(dev_priv, FORCEWAKE, 0);
-		__raw_posting_read(dev_priv, ECOBUS);
+		__raw_i915_write32(i915, FORCEWAKE, 0);
+		__raw_posting_read(i915, ECOBUS);
 
-		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
 			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);
 
-		spin_lock_irq(&dev_priv->uncore.lock);
-		fw_domains_get_with_thread_status(&dev_priv->uncore, FORCEWAKE_RENDER);
-		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
-		fw_domains_put(&dev_priv->uncore, FORCEWAKE_RENDER);
-		spin_unlock_irq(&dev_priv->uncore.lock);
+		spin_lock_irq(&uncore->lock);
+		fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER);
+		ecobus = __raw_i915_read32(i915, ECOBUS);
+		fw_domains_put(uncore, FORCEWAKE_RENDER);
+		spin_unlock_irq(&uncore->lock);
 
 		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
 			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
 			DRM_INFO("when using vblank-synced partial screen updates.\n");
-			fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+			fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
 				       FORCEWAKE, FORCEWAKE_ACK);
 		}
-	} else if (IS_GEN(dev_priv, 6)) {
-		dev_priv->uncore.funcs.force_wake_get =
+	} else if (IS_GEN(i915, 6)) {
+		uncore->funcs.force_wake_get =
 			fw_domains_get_with_thread_status;
-		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
-		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+		uncore->funcs.force_wake_put = fw_domains_put;
+		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
 			       FORCEWAKE, FORCEWAKE_ACK);
 	}
 
 	/* All future platforms are expected to require complex power gating */
-	WARN_ON(dev_priv->uncore.fw_domains == 0);
+	WARN_ON(uncore->fw_domains == 0);
 }
 
-#define ASSIGN_FW_DOMAINS_TABLE(d) \
+#define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \
 { \
-	dev_priv->uncore.fw_domains_table = \
+	(uncore)->fw_domains_table = \
 			(struct intel_forcewake_range *)(d); \
-	dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \
+	(uncore)->fw_domains_table_entries = ARRAY_SIZE((d)); \
 }
 
 static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
@@ -1538,55 +1536,56 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
 	return NOTIFY_OK;
 }
 
-void intel_uncore_init(struct drm_i915_private *dev_priv)
+void intel_uncore_init(struct intel_uncore *uncore)
 {
-	i915_check_vgpu(dev_priv);
+	struct drm_i915_private *i915 = uncore_to_i915(uncore);
 
-	intel_uncore_edram_detect(dev_priv);
-	intel_uncore_fw_domains_init(dev_priv);
-	__intel_uncore_early_sanitize(dev_priv, 0);
+	i915_check_vgpu(i915);
 
-	dev_priv->uncore.unclaimed_mmio_check = 1;
-	dev_priv->uncore.pmic_bus_access_nb.notifier_call =
-		i915_pmic_bus_access_notifier;
+	intel_uncore_edram_detect(i915);
+	intel_uncore_fw_domains_init(uncore);
+	__intel_uncore_early_sanitize(uncore, 0);
 
-	if (IS_GEN_RANGE(dev_priv, 2, 4) || intel_vgpu_active(dev_priv)) {
-		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen2);
-		ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen2);
-	} else if (IS_GEN(dev_priv, 5)) {
-		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen5);
-		ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen5);
-	} else if (IS_GEN_RANGE(dev_priv, 6, 7)) {
-		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen6);
+	uncore->unclaimed_mmio_check = 1;
+	uncore->pmic_bus_access_nb.notifier_call =
+		i915_pmic_bus_access_notifier;
 
-		if (IS_VALLEYVIEW(dev_priv)) {
-			ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges);
-			ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
+	if (IS_GEN_RANGE(i915, 2, 4) || intel_vgpu_active(i915)) {
+		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen2);
+		ASSIGN_READ_MMIO_VFUNCS(uncore, gen2);
+	} else if (IS_GEN(i915, 5)) {
+		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen5);
+		ASSIGN_READ_MMIO_VFUNCS(uncore, gen5);
+	} else if (IS_GEN_RANGE(i915, 6, 7)) {
+		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
+
+		if (IS_VALLEYVIEW(i915)) {
+			ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges);
+			ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
 		} else {
-			ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
+			ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
 		}
-	} else if (IS_GEN(dev_priv, 8)) {
-		if (IS_CHERRYVIEW(dev_priv)) {
-			ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges);
-			ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
-			ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
+	} else if (IS_GEN(i915, 8)) {
+		if (IS_CHERRYVIEW(i915)) {
+			ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
+			ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
+			ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
 
 		} else {
-			ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen8);
-			ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
+			ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen8);
+			ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
 		}
-	} else if (IS_GEN_RANGE(dev_priv, 9, 10)) {
-		ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
-		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
-		ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
+	} else if (IS_GEN_RANGE(i915, 9, 10)) {
+		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
+		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
+		ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
 	} else {
-		ASSIGN_FW_DOMAINS_TABLE(__gen11_fw_ranges);
-		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen11_fwtable);
-		ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen11_fwtable);
+		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
+		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen11_fwtable);
+		ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable);
 	}
 
-	iosf_mbi_register_pmic_bus_access_notifier(
-		&dev_priv->uncore.pmic_bus_access_nb);
+	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
 }
 
 /*
@@ -1594,44 +1593,46 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
  * the forcewake domains. Prune them, to make sure they only reference existing
  * engines.
  */
-void intel_uncore_prune(struct drm_i915_private *dev_priv)
+void intel_uncore_prune(struct intel_uncore *uncore)
 {
-	if (INTEL_GEN(dev_priv) >= 11) {
-		enum forcewake_domains fw_domains = dev_priv->uncore.fw_domains;
+	struct drm_i915_private *i915 = uncore_to_i915(uncore);
+
+	if (INTEL_GEN(i915) >= 11) {
+		enum forcewake_domains fw_domains = uncore->fw_domains;
 		enum forcewake_domain_id domain_id;
 		int i;
 
 		for (i = 0; i < I915_MAX_VCS; i++) {
 			domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;
 
-			if (HAS_ENGINE(dev_priv, _VCS(i)))
+			if (HAS_ENGINE(i915, _VCS(i)))
 				continue;
 
 			if (fw_domains & BIT(domain_id))
-				fw_domain_fini(dev_priv, domain_id);
+				fw_domain_fini(uncore, domain_id);
 		}
 
 		for (i = 0; i < I915_MAX_VECS; i++) {
 			domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;
 
-			if (HAS_ENGINE(dev_priv, _VECS(i)))
+			if (HAS_ENGINE(i915, _VECS(i)))
 				continue;
 
 			if (fw_domains & BIT(domain_id))
-				fw_domain_fini(dev_priv, domain_id);
+				fw_domain_fini(uncore, domain_id);
 		}
 	}
 }
 
-void intel_uncore_fini(struct drm_i915_private *dev_priv)
+void intel_uncore_fini(struct intel_uncore *uncore)
 {
 	/* Paranoia: make sure we have disabled everything before we exit. */
-	intel_uncore_sanitize(dev_priv);
+	intel_uncore_sanitize(uncore_to_i915(uncore));
 
 	iosf_mbi_punit_acquire();
 	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
-		&dev_priv->uncore.pmic_bus_access_nb);
-	intel_uncore_forcewake_reset(&dev_priv->uncore);
+		&uncore->pmic_bus_access_nb);
+	intel_uncore_forcewake_reset(uncore);
 	iosf_mbi_punit_release();
 }
 
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index b9010184a780..b1b596c81451 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -142,14 +142,14 @@ forcewake_domain_to_uncore(const struct intel_uncore_forcewake_domain *d)
 }
 
 void intel_uncore_sanitize(struct drm_i915_private *dev_priv);
-void intel_uncore_init(struct drm_i915_private *dev_priv);
-void intel_uncore_prune(struct drm_i915_private *dev_priv);
+void intel_uncore_init(struct intel_uncore *uncore);
+void intel_uncore_prune(struct intel_uncore *uncore);
 bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
 bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
-void intel_uncore_fini(struct drm_i915_private *dev_priv);
-void intel_uncore_suspend(struct drm_i915_private *dev_priv);
-void intel_uncore_resume_early(struct drm_i915_private *dev_priv);
-void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv);
+void intel_uncore_fini(struct intel_uncore *uncore);
+void intel_uncore_suspend(struct intel_uncore *uncore);
+void intel_uncore_resume_early(struct intel_uncore *uncore);
+void intel_uncore_runtime_resume(struct intel_uncore *uncore);
 
 u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
 void assert_forcewakes_inactive(struct intel_uncore *uncore);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 54cfb611c0aa..60bbf8b4df40 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -182,7 +182,7 @@ struct drm_i915_private *mock_gem_device(void)
 		I915_GTT_PAGE_SIZE_64K |
 		I915_GTT_PAGE_SIZE_2M;
 
-	mock_uncore_init(i915);
+	mock_uncore_init(&i915->uncore);
 	i915_gem_init__mm(i915);
 
 	init_waitqueue_head(&i915->gpu_error.wait_queue);
diff --git a/drivers/gpu/drm/i915/selftests/mock_uncore.c b/drivers/gpu/drm/i915/selftests/mock_uncore.c
index 8ef14c7e5e38..c3896c1fd551 100644
--- a/drivers/gpu/drm/i915/selftests/mock_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/mock_uncore.c
@@ -39,8 +39,8 @@ __nop_read(16)
 __nop_read(32)
 __nop_read(64)
 
-void mock_uncore_init(struct drm_i915_private *i915)
+void mock_uncore_init(struct intel_uncore *uncore)
 {
-	ASSIGN_WRITE_MMIO_VFUNCS(i915, nop);
-	ASSIGN_READ_MMIO_VFUNCS(i915, nop);
+	ASSIGN_WRITE_MMIO_VFUNCS(uncore, nop);
+	ASSIGN_READ_MMIO_VFUNCS(uncore, nop);
 }
diff --git a/drivers/gpu/drm/i915/selftests/mock_uncore.h b/drivers/gpu/drm/i915/selftests/mock_uncore.h
index d79aa3ca4d51..dacb36b5ffcd 100644
--- a/drivers/gpu/drm/i915/selftests/mock_uncore.h
+++ b/drivers/gpu/drm/i915/selftests/mock_uncore.h
@@ -25,6 +25,6 @@
 #ifndef __MOCK_UNCORE_H
 #define __MOCK_UNCORE_H
 
-void mock_uncore_init(struct drm_i915_private *i915);
+void mock_uncore_init(struct intel_uncore *uncore);
 
 #endif /* !__MOCK_UNCORE_H */
-- 
2.20.1

