From: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
To: intel-gfx@lists.freedesktop.org
Subject: [PATCH v2 2/8] drm/i915: Remove rpm asserts that use i915
Date: Wed, 12 Jun 2019 11:37:41 -0700
Message-ID: <20190612183748.7693-3-daniele.ceraolospurio@intel.com>
In-Reply-To: <20190612183748.7693-1-daniele.ceraolospurio@intel.com>

Quite a few of the call sites have already switched to the variants that
work directly on the runtime_pm structure, so let's switch over the rest
and kill the i915-based asserts.
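
For illustration only (this sketch is not part of the diff below, and
example_hw_access is a made-up name), the caller-side change boils down
to passing the embedded runtime_pm struct instead of the i915 pointer:

	/* hypothetical caller, for illustration only */
	static void example_hw_access(struct drm_i915_private *i915)
	{
		/* before: assert_rpm_wakelock_held(i915); */
		assert_rpm_wakelock_held(&i915->runtime_pm);

		/* ... HW access that requires the device to be awake ... */
	}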

v2: rebase

Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_mman.c   |  2 +-
 drivers/gpu/drm/i915/gvt/aperture_gm.c     |  2 +-
 drivers/gpu/drm/i915/i915_gem_fence_reg.c  |  2 +-
 drivers/gpu/drm/i915/i915_irq.c            |  6 ++---
 drivers/gpu/drm/i915/i915_vma.c            |  2 +-
 drivers/gpu/drm/i915/intel_csr.c           |  2 +-
 drivers/gpu/drm/i915/intel_display_power.c |  4 ++--
 drivers/gpu/drm/i915/intel_drv.h           | 26 ++++++----------------
 drivers/gpu/drm/i915/intel_runtime_pm.c    | 10 ++++-----
 drivers/gpu/drm/i915/intel_uncore.c        | 12 +++++-----
 drivers/gpu/drm/i915/intel_wakeref.c       |  2 +-
 11 files changed, 29 insertions(+), 41 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index c7b9b34de01b..caa61f09f714 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -308,7 +308,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 		goto err_fence;
 
 	/* Mark as being mmapped into userspace for later revocation */
-	assert_rpm_wakelock_held(i915);
+	assert_rpm_wakelock_held(&i915->runtime_pm);
 	if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
 		list_add(&obj->userfault_link, &i915->mm.userfault_list);
 	if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 1fa2f65c3cd1..7f4e3456ce11 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -131,7 +131,7 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
 	struct drm_i915_fence_reg *reg;
 	i915_reg_t fence_reg_lo, fence_reg_hi;
 
-	assert_rpm_wakelock_held(dev_priv);
+	assert_rpm_wakelock_held(&dev_priv->runtime_pm);
 
 	if (WARN_ON(fence >= vgpu_fence_sz(vgpu)))
 		return;
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index 10aa6e350bfa..f2cef3fa3deb 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -360,7 +360,7 @@ i915_vma_pin_fence(struct i915_vma *vma)
 	 * Note that we revoke fences on runtime suspend. Therefore the user
 	 * must keep the device awake whilst using the fence.
 	 */
-	assert_rpm_wakelock_held(vma->vm->i915);
+	assert_rpm_wakelock_held(&vma->vm->i915->runtime_pm);
 
 	/* Just update our place in the LRU if our fence is getting reused. */
 	if (vma->fence) {
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 11c451358fb8..1fba5652f741 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -589,7 +589,7 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
 
 void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
 {
-	assert_rpm_wakelock_held(dev_priv);
+	assert_rpm_wakelock_held(&dev_priv->runtime_pm);
 
 	spin_lock_irq(&dev_priv->irq_lock);
 	gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
@@ -598,7 +598,7 @@ void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
 
 void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
 {
-	assert_rpm_wakelock_held(dev_priv);
+	assert_rpm_wakelock_held(&dev_priv->runtime_pm);
 
 	spin_lock_irq(&dev_priv->irq_lock);
 	if (!dev_priv->guc.interrupts.enabled) {
@@ -612,7 +612,7 @@ void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
 
 void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
 {
-	assert_rpm_wakelock_held(dev_priv);
+	assert_rpm_wakelock_held(&dev_priv->runtime_pm);
 
 	spin_lock_irq(&dev_priv->irq_lock);
 	dev_priv->guc.interrupts.enabled = false;
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 80050f6a0893..1fcdfdddf9a5 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -367,7 +367,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
 	int err;
 
 	/* Access through the GTT requires the device to be awake. */
-	assert_rpm_wakelock_held(vma->vm->i915);
+	assert_rpm_wakelock_held(&vma->vm->i915->runtime_pm);
 
 	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
 	if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index bf0eebd385b9..097153c7f3d4 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -273,7 +273,7 @@ void intel_csr_load_program(struct drm_i915_private *dev_priv)
 	}
 
 	fw_size = dev_priv->csr.dmc_fw_size;
-	assert_rpm_wakelock_held(dev_priv);
+	assert_rpm_wakelock_held(&dev_priv->runtime_pm);
 
 	preempt_disable();
 
diff --git a/drivers/gpu/drm/i915/intel_display_power.c b/drivers/gpu/drm/i915/intel_display_power.c
index 278a7edc94f5..2f9f4e1d5884 100644
--- a/drivers/gpu/drm/i915/intel_display_power.c
+++ b/drivers/gpu/drm/i915/intel_display_power.c
@@ -696,7 +696,7 @@ static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
 
 	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
 		  "DC5 already programmed to be enabled.\n");
-	assert_rpm_wakelock_held(dev_priv);
+	assert_rpm_wakelock_held(&dev_priv->runtime_pm);
 
 	assert_csr_loaded(dev_priv);
 }
@@ -1814,7 +1814,7 @@ release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
 	 * wakeref to make the state checker happy about the HW access during
 	 * power well disabling.
 	 */
-	assert_rpm_raw_wakeref_held(dev_priv);
+	assert_rpm_raw_wakeref_held(&dev_priv->runtime_pm);
 	wakeref = intel_runtime_pm_get(dev_priv);
 
 	for_each_power_domain(domain, mask) {
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index aec40adf4876..ac0bd6067864 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1640,7 +1640,7 @@ assert_rpm_device_not_suspended(struct i915_runtime_pm *rpm)
 }
 
 static inline void
-____assert_rpm_raw_wakeref_held(struct i915_runtime_pm *rpm, int wakeref_count)
+__assert_rpm_raw_wakeref_held(struct i915_runtime_pm *rpm, int wakeref_count)
 {
 	assert_rpm_device_not_suspended(rpm);
 	WARN_ONCE(!intel_rpm_raw_wakeref_count(wakeref_count),
@@ -1648,35 +1648,23 @@ ____assert_rpm_raw_wakeref_held(struct i915_runtime_pm *rpm, int wakeref_count)
 }
 
 static inline void
-____assert_rpm_wakelock_held(struct i915_runtime_pm *rpm, int wakeref_count)
+__assert_rpm_wakelock_held(struct i915_runtime_pm *rpm, int wakeref_count)
 {
-	____assert_rpm_raw_wakeref_held(rpm, wakeref_count);
+	__assert_rpm_raw_wakeref_held(rpm, wakeref_count);
 	WARN_ONCE(!intel_rpm_wakelock_count(wakeref_count),
 		  "RPM wakelock ref not held during HW access\n");
 }
 
 static inline void
-__assert_rpm_raw_wakeref_held(struct i915_runtime_pm *rpm)
+assert_rpm_raw_wakeref_held(struct i915_runtime_pm *rpm)
 {
-	____assert_rpm_raw_wakeref_held(rpm, atomic_read(&rpm->wakeref_count));
+	__assert_rpm_raw_wakeref_held(rpm, atomic_read(&rpm->wakeref_count));
 }
 
 static inline void
-assert_rpm_raw_wakeref_held(struct drm_i915_private *i915)
+assert_rpm_wakelock_held(struct i915_runtime_pm *rpm)
 {
-	__assert_rpm_raw_wakeref_held(&i915->runtime_pm);
-}
-
-static inline void
-__assert_rpm_wakelock_held(struct i915_runtime_pm *rpm)
-{
-	____assert_rpm_wakelock_held(rpm, atomic_read(&rpm->wakeref_count));
-}
-
-static inline void
-assert_rpm_wakelock_held(struct drm_i915_private *i915)
-{
-	__assert_rpm_wakelock_held(&i915->runtime_pm);
+	__assert_rpm_wakelock_held(rpm, atomic_read(&rpm->wakeref_count));
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 7c602f5c748d..ae60ae1c970e 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -335,10 +335,10 @@ intel_runtime_pm_acquire(struct i915_runtime_pm *rpm, bool wakelock)
 {
 	if (wakelock) {
 		atomic_add(1 + INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
-		__assert_rpm_wakelock_held(rpm);
+		assert_rpm_wakelock_held(rpm);
 	} else {
 		atomic_inc(&rpm->wakeref_count);
-		__assert_rpm_raw_wakeref_held(rpm);
+		assert_rpm_raw_wakeref_held(rpm);
 	}
 }
 
@@ -346,10 +346,10 @@ static void
 intel_runtime_pm_release(struct i915_runtime_pm *rpm, int wakelock)
 {
 	if (wakelock) {
-		__assert_rpm_wakelock_held(rpm);
+		assert_rpm_wakelock_held(rpm);
 		atomic_sub(INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
 	} else {
-		__assert_rpm_raw_wakeref_held(rpm);
+		assert_rpm_raw_wakeref_held(rpm);
 	}
 
 	__intel_wakeref_dec_and_check_tracking(rpm);
@@ -465,7 +465,7 @@ intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915)
 {
 	struct i915_runtime_pm *rpm = &i915->runtime_pm;
 
-	__assert_rpm_wakelock_held(rpm);
+	assert_rpm_wakelock_held(rpm);
 	pm_runtime_get_noresume(rpm->kdev);
 
 	intel_runtime_pm_acquire(rpm, true);
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 85171a8b866a..e1de51c4d84d 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -583,7 +583,7 @@ void intel_uncore_forcewake_get(struct intel_uncore *uncore,
 	if (!uncore->funcs.force_wake_get)
 		return;
 
-	__assert_rpm_wakelock_held(uncore->rpm);
+	assert_rpm_wakelock_held(uncore->rpm);
 
 	spin_lock_irqsave(&uncore->lock, irqflags);
 	__intel_uncore_forcewake_get(uncore, fw_domains);
@@ -737,7 +737,7 @@ void assert_forcewakes_active(struct intel_uncore *uncore,
 	if (!uncore->funcs.force_wake_get)
 		return;
 
-	__assert_rpm_wakelock_held(uncore->rpm);
+	assert_rpm_wakelock_held(uncore->rpm);
 
 	fw_domains &= uncore->fw_domains;
 	WARN(fw_domains & ~uncore->fw_domains_active,
@@ -1054,7 +1054,7 @@ unclaimed_reg_debug(struct intel_uncore *uncore,
 
 #define GEN2_READ_HEADER(x) \
 	u##x val = 0; \
-	__assert_rpm_wakelock_held(uncore->rpm);
+	assert_rpm_wakelock_held(uncore->rpm);
 
 #define GEN2_READ_FOOTER \
 	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
@@ -1096,7 +1096,7 @@ __gen2_read(64)
 	u32 offset = i915_mmio_reg_offset(reg); \
 	unsigned long irqflags; \
 	u##x val = 0; \
-	__assert_rpm_wakelock_held(uncore->rpm); \
+	assert_rpm_wakelock_held(uncore->rpm); \
 	spin_lock_irqsave(&uncore->lock, irqflags); \
 	unclaimed_reg_debug(uncore, reg, true, true)
 
@@ -1170,7 +1170,7 @@ __gen6_read(64)
 
 #define GEN2_WRITE_HEADER \
 	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
-	__assert_rpm_wakelock_held(uncore->rpm); \
+	assert_rpm_wakelock_held(uncore->rpm); \
 
 #define GEN2_WRITE_FOOTER
 
@@ -1208,7 +1208,7 @@ __gen2_write(32)
 	u32 offset = i915_mmio_reg_offset(reg); \
 	unsigned long irqflags; \
 	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
-	__assert_rpm_wakelock_held(uncore->rpm); \
+	assert_rpm_wakelock_held(uncore->rpm); \
 	spin_lock_irqsave(&uncore->lock, irqflags); \
 	unclaimed_reg_debug(uncore, reg, false, true)
 
diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c
index c25ba1b5e8ba..b6c7167ce154 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.c
+++ b/drivers/gpu/drm/i915/intel_wakeref.c
@@ -110,7 +110,7 @@ void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout)
 	}
 
 	/* Our mission is that we only extend an already active wakeref */
-	assert_rpm_wakelock_held(wf->i915);
+	assert_rpm_wakelock_held(&wf->i915->runtime_pm);
 
 	if (!refcount_inc_not_zero(&wf->count)) {
 		spin_lock_irqsave(&wf->lock, flags);
-- 
2.20.1
