* [RFC 0/4] PMU update for more than one card
@ 2019-08-01 10:18 Tvrtko Ursulin
  2019-08-01 10:18 ` [RFC 1/4] drm/i915/pmu: Make more struct i915_pmu centric Tvrtko Ursulin
                   ` (4 more replies)
  0 siblings, 5 replies; 17+ messages in thread
From: Tvrtko Ursulin @ 2019-08-01 10:18 UTC (permalink / raw)
  To: Intel-gfx

From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

A bit of tidying up first, with the last patch allowing more than one card
to register with the PMU.

Discussion points are appended to the last commit message.

Tvrtko Ursulin (4):
  drm/i915/pmu: Make more struct i915_pmu centric
  drm/i915/pmu: Convert engine sampling to uncore mmio
  drm/i915/pmu: Convert sampling to gt
  drm/i915/pmu: Support multiple GPUs

 drivers/gpu/drm/i915/i915_pmu.c | 274 ++++++++++++++++++--------------
 drivers/gpu/drm/i915/i915_pmu.h |   8 +
 2 files changed, 164 insertions(+), 118 deletions(-)

-- 
2.20.1


* [RFC 1/4] drm/i915/pmu: Make more struct i915_pmu centric
  2019-08-01 10:18 [RFC 0/4] PMU update for more than one card Tvrtko Ursulin
@ 2019-08-01 10:18 ` Tvrtko Ursulin
  2019-08-01 10:31   ` Chris Wilson
  2019-08-01 10:46   ` Michal Wajdeczko
  2019-08-01 10:18 ` [RFC 2/4] drm/i915/pmu: Convert engine sampling to uncore mmio Tvrtko Ursulin
                   ` (3 subsequent siblings)
  4 siblings, 2 replies; 17+ messages in thread
From: Tvrtko Ursulin @ 2019-08-01 10:18 UTC (permalink / raw)
  To: Intel-gfx

From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Just tidy the code a bit by removing a sea of overly verbose i915->pmu.*.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/i915_pmu.c | 194 +++++++++++++++++---------------
 1 file changed, 104 insertions(+), 90 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index eff86483bec0..12008966b00e 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -74,8 +74,9 @@ static unsigned int event_enabled_bit(struct perf_event *event)
 	return config_enabled_bit(event->attr.config);
 }
 
-static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active)
+static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active)
 {
+	struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
 	u64 enable;
 
 	/*
@@ -83,7 +84,7 @@ static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active)
 	 *
 	 * We start with a bitmask of all currently enabled events.
 	 */
-	enable = i915->pmu.enable;
+	enable = pmu->enable;
 
 	/*
 	 * Mask out all the ones which do not need the timer, or in
@@ -114,24 +115,26 @@ static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active)
 
 void i915_pmu_gt_parked(struct drm_i915_private *i915)
 {
-	if (!i915->pmu.base.event_init)
+	struct i915_pmu *pmu = &i915->pmu;
+
+	if (!pmu->base.event_init)
 		return;
 
-	spin_lock_irq(&i915->pmu.lock);
+	spin_lock_irq(&pmu->lock);
 	/*
 	 * Signal sampling timer to stop if only engine events are enabled and
 	 * GPU went idle.
 	 */
-	i915->pmu.timer_enabled = pmu_needs_timer(i915, false);
-	spin_unlock_irq(&i915->pmu.lock);
+	pmu->timer_enabled = pmu_needs_timer(pmu, false);
+	spin_unlock_irq(&pmu->lock);
 }
 
-static void __i915_pmu_maybe_start_timer(struct drm_i915_private *i915)
+static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu)
 {
-	if (!i915->pmu.timer_enabled && pmu_needs_timer(i915, true)) {
-		i915->pmu.timer_enabled = true;
-		i915->pmu.timer_last = ktime_get();
-		hrtimer_start_range_ns(&i915->pmu.timer,
+	if (!pmu->timer_enabled && pmu_needs_timer(pmu, true)) {
+		pmu->timer_enabled = true;
+		pmu->timer_last = ktime_get();
+		hrtimer_start_range_ns(&pmu->timer,
 				       ns_to_ktime(PERIOD), 0,
 				       HRTIMER_MODE_REL_PINNED);
 	}
@@ -139,15 +142,17 @@ static void __i915_pmu_maybe_start_timer(struct drm_i915_private *i915)
 
 void i915_pmu_gt_unparked(struct drm_i915_private *i915)
 {
-	if (!i915->pmu.base.event_init)
+	struct i915_pmu *pmu = &i915->pmu;
+
+	if (!pmu->base.event_init)
 		return;
 
-	spin_lock_irq(&i915->pmu.lock);
+	spin_lock_irq(&pmu->lock);
 	/*
 	 * Re-enable sampling timer when GPU goes active.
 	 */
-	__i915_pmu_maybe_start_timer(i915);
-	spin_unlock_irq(&i915->pmu.lock);
+	__i915_pmu_maybe_start_timer(pmu);
+	spin_unlock_irq(&pmu->lock);
 }
 
 static void
@@ -251,15 +256,16 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
 {
 	struct drm_i915_private *i915 =
 		container_of(hrtimer, struct drm_i915_private, pmu.timer);
+	struct i915_pmu *pmu = &i915->pmu;
 	unsigned int period_ns;
 	ktime_t now;
 
-	if (!READ_ONCE(i915->pmu.timer_enabled))
+	if (!READ_ONCE(pmu->timer_enabled))
 		return HRTIMER_NORESTART;
 
 	now = ktime_get();
-	period_ns = ktime_to_ns(ktime_sub(now, i915->pmu.timer_last));
-	i915->pmu.timer_last = now;
+	period_ns = ktime_to_ns(ktime_sub(now, pmu->timer_last));
+	pmu->timer_last = now;
 
 	/*
 	 * Strictly speaking the passed in period may not be 100% accurate for
@@ -443,6 +449,7 @@ static u64 get_rc6(struct drm_i915_private *i915)
 {
 #if IS_ENABLED(CONFIG_PM)
 	struct intel_runtime_pm *rpm = &i915->runtime_pm;
+	struct i915_pmu *pmu = &i915->pmu;
 	intel_wakeref_t wakeref;
 	unsigned long flags;
 	u64 val;
@@ -458,16 +465,16 @@ static u64 get_rc6(struct drm_i915_private *i915)
 		 * previously.
 		 */
 
-		spin_lock_irqsave(&i915->pmu.lock, flags);
+		spin_lock_irqsave(&pmu->lock, flags);
 
-		if (val >= i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
-			i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
-			i915->pmu.sample[__I915_SAMPLE_RC6].cur = val;
+		if (val >= pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
+			pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
+			pmu->sample[__I915_SAMPLE_RC6].cur = val;
 		} else {
-			val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
+			val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
 		}
 
-		spin_unlock_irqrestore(&i915->pmu.lock, flags);
+		spin_unlock_irqrestore(&pmu->lock, flags);
 	} else {
 		struct device *kdev = rpm->kdev;
 
@@ -478,7 +485,7 @@ static u64 get_rc6(struct drm_i915_private *i915)
 		 * on top of the last known real value, as the approximated RC6
 		 * counter value.
 		 */
-		spin_lock_irqsave(&i915->pmu.lock, flags);
+		spin_lock_irqsave(&pmu->lock, flags);
 
 		/*
 		 * After the above branch intel_runtime_pm_get_if_in_use failed
@@ -494,20 +501,20 @@ static u64 get_rc6(struct drm_i915_private *i915)
 		if (pm_runtime_status_suspended(kdev)) {
 			val = pm_runtime_suspended_time(kdev);
 
-			if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
-				i915->pmu.suspended_time_last = val;
+			if (!pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
+				pmu->suspended_time_last = val;
 
-			val -= i915->pmu.suspended_time_last;
-			val += i915->pmu.sample[__I915_SAMPLE_RC6].cur;
+			val -= pmu->suspended_time_last;
+			val += pmu->sample[__I915_SAMPLE_RC6].cur;
 
-			i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
-		} else if (i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
-			val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
+			pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
+		} else if (pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
+			val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
 		} else {
-			val = i915->pmu.sample[__I915_SAMPLE_RC6].cur;
+			val = pmu->sample[__I915_SAMPLE_RC6].cur;
 		}
 
-		spin_unlock_irqrestore(&i915->pmu.lock, flags);
+		spin_unlock_irqrestore(&pmu->lock, flags);
 	}
 
 	return val;
@@ -520,6 +527,7 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
 {
 	struct drm_i915_private *i915 =
 		container_of(event->pmu, typeof(*i915), pmu.base);
+	struct i915_pmu *pmu = &i915->pmu;
 	u64 val = 0;
 
 	if (is_engine_event(event)) {
@@ -542,12 +550,12 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
 		switch (event->attr.config) {
 		case I915_PMU_ACTUAL_FREQUENCY:
 			val =
-			   div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_ACT].cur,
+			   div_u64(pmu->sample[__I915_SAMPLE_FREQ_ACT].cur,
 				   USEC_PER_SEC /* to MHz */);
 			break;
 		case I915_PMU_REQUESTED_FREQUENCY:
 			val =
-			   div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_REQ].cur,
+			   div_u64(pmu->sample[__I915_SAMPLE_FREQ_REQ].cur,
 				   USEC_PER_SEC /* to MHz */);
 			break;
 		case I915_PMU_INTERRUPTS:
@@ -582,24 +590,25 @@ static void i915_pmu_enable(struct perf_event *event)
 	struct drm_i915_private *i915 =
 		container_of(event->pmu, typeof(*i915), pmu.base);
 	unsigned int bit = event_enabled_bit(event);
+	struct i915_pmu *pmu = &i915->pmu;
 	unsigned long flags;
 
-	spin_lock_irqsave(&i915->pmu.lock, flags);
+	spin_lock_irqsave(&pmu->lock, flags);
 
 	/*
 	 * Update the bitmask of enabled events and increment
 	 * the event reference counter.
 	 */
-	BUILD_BUG_ON(ARRAY_SIZE(i915->pmu.enable_count) != I915_PMU_MASK_BITS);
-	GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count));
-	GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0);
-	i915->pmu.enable |= BIT_ULL(bit);
-	i915->pmu.enable_count[bit]++;
+	BUILD_BUG_ON(ARRAY_SIZE(pmu->enable_count) != I915_PMU_MASK_BITS);
+	GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
+	GEM_BUG_ON(pmu->enable_count[bit] == ~0);
+	pmu->enable |= BIT_ULL(bit);
+	pmu->enable_count[bit]++;
 
 	/*
 	 * Start the sampling timer if needed and not already enabled.
 	 */
-	__i915_pmu_maybe_start_timer(i915);
+	__i915_pmu_maybe_start_timer(pmu);
 
 	/*
 	 * For per-engine events the bitmask and reference counting
@@ -625,7 +634,7 @@ static void i915_pmu_enable(struct perf_event *event)
 		engine->pmu.enable_count[sample]++;
 	}
 
-	spin_unlock_irqrestore(&i915->pmu.lock, flags);
+	spin_unlock_irqrestore(&pmu->lock, flags);
 
 	/*
 	 * Store the current counter value so we can report the correct delta
@@ -640,9 +649,10 @@ static void i915_pmu_disable(struct perf_event *event)
 	struct drm_i915_private *i915 =
 		container_of(event->pmu, typeof(*i915), pmu.base);
 	unsigned int bit = event_enabled_bit(event);
+	struct i915_pmu *pmu = &i915->pmu;
 	unsigned long flags;
 
-	spin_lock_irqsave(&i915->pmu.lock, flags);
+	spin_lock_irqsave(&pmu->lock, flags);
 
 	if (is_engine_event(event)) {
 		u8 sample = engine_event_sample(event);
@@ -664,18 +674,18 @@ static void i915_pmu_disable(struct perf_event *event)
 			engine->pmu.enable &= ~BIT(sample);
 	}
 
-	GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count));
-	GEM_BUG_ON(i915->pmu.enable_count[bit] == 0);
+	GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
+	GEM_BUG_ON(pmu->enable_count[bit] == 0);
 	/*
 	 * Decrement the reference count and clear the enabled
 	 * bitmask when the last listener on an event goes away.
 	 */
-	if (--i915->pmu.enable_count[bit] == 0) {
-		i915->pmu.enable &= ~BIT_ULL(bit);
-		i915->pmu.timer_enabled &= pmu_needs_timer(i915, true);
+	if (--pmu->enable_count[bit] == 0) {
+		pmu->enable &= ~BIT_ULL(bit);
+		pmu->timer_enabled &= pmu_needs_timer(pmu, true);
 	}
 
-	spin_unlock_irqrestore(&i915->pmu.lock, flags);
+	spin_unlock_irqrestore(&pmu->lock, flags);
 }
 
 static void i915_pmu_event_start(struct perf_event *event, int flags)
@@ -824,8 +834,9 @@ add_pmu_attr(struct perf_pmu_events_attr *attr, const char *name,
 }
 
 static struct attribute **
-create_event_attributes(struct drm_i915_private *i915)
+create_event_attributes(struct i915_pmu *pmu)
 {
+	struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
 	static const struct {
 		u64 config;
 		const char *name;
@@ -939,8 +950,8 @@ create_event_attributes(struct drm_i915_private *i915)
 		}
 	}
 
-	i915->pmu.i915_attr = i915_attr;
-	i915->pmu.pmu_attr = pmu_attr;
+	pmu->i915_attr = i915_attr;
+	pmu->pmu_attr = pmu_attr;
 
 	return attr;
 
@@ -956,7 +967,7 @@ err:;
 	return NULL;
 }
 
-static void free_event_attributes(struct drm_i915_private *i915)
+static void free_event_attributes(struct i915_pmu *pmu)
 {
 	struct attribute **attr_iter = i915_pmu_events_attr_group.attrs;
 
@@ -964,12 +975,12 @@ static void free_event_attributes(struct drm_i915_private *i915)
 		kfree((*attr_iter)->name);
 
 	kfree(i915_pmu_events_attr_group.attrs);
-	kfree(i915->pmu.i915_attr);
-	kfree(i915->pmu.pmu_attr);
+	kfree(pmu->i915_attr);
+	kfree(pmu->pmu_attr);
 
 	i915_pmu_events_attr_group.attrs = NULL;
-	i915->pmu.i915_attr = NULL;
-	i915->pmu.pmu_attr = NULL;
+	pmu->i915_attr = NULL;
+	pmu->pmu_attr = NULL;
 }
 
 static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
@@ -1006,7 +1017,7 @@ static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
 
 static enum cpuhp_state cpuhp_slot = CPUHP_INVALID;
 
-static int i915_pmu_register_cpuhp_state(struct drm_i915_private *i915)
+static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
 {
 	enum cpuhp_state slot;
 	int ret;
@@ -1019,7 +1030,7 @@ static int i915_pmu_register_cpuhp_state(struct drm_i915_private *i915)
 		return ret;
 
 	slot = ret;
-	ret = cpuhp_state_add_instance(slot, &i915->pmu.node);
+	ret = cpuhp_state_add_instance(slot, &pmu->node);
 	if (ret) {
 		cpuhp_remove_multi_state(slot);
 		return ret;
@@ -1029,15 +1040,16 @@ static int i915_pmu_register_cpuhp_state(struct drm_i915_private *i915)
 	return 0;
 }
 
-static void i915_pmu_unregister_cpuhp_state(struct drm_i915_private *i915)
+static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
 {
 	WARN_ON(cpuhp_slot == CPUHP_INVALID);
-	WARN_ON(cpuhp_state_remove_instance(cpuhp_slot, &i915->pmu.node));
+	WARN_ON(cpuhp_state_remove_instance(cpuhp_slot, &pmu->node));
 	cpuhp_remove_multi_state(cpuhp_slot);
 }
 
 void i915_pmu_register(struct drm_i915_private *i915)
 {
+	struct i915_pmu *pmu = &i915->pmu;
 	int ret;
 
 	if (INTEL_GEN(i915) <= 2) {
@@ -1045,56 +1057,58 @@ void i915_pmu_register(struct drm_i915_private *i915)
 		return;
 	}
 
-	i915_pmu_events_attr_group.attrs = create_event_attributes(i915);
+	i915_pmu_events_attr_group.attrs = create_event_attributes(pmu);
 	if (!i915_pmu_events_attr_group.attrs) {
 		ret = -ENOMEM;
 		goto err;
 	}
 
-	i915->pmu.base.attr_groups	= i915_pmu_attr_groups;
-	i915->pmu.base.task_ctx_nr	= perf_invalid_context;
-	i915->pmu.base.event_init	= i915_pmu_event_init;
-	i915->pmu.base.add		= i915_pmu_event_add;
-	i915->pmu.base.del		= i915_pmu_event_del;
-	i915->pmu.base.start		= i915_pmu_event_start;
-	i915->pmu.base.stop		= i915_pmu_event_stop;
-	i915->pmu.base.read		= i915_pmu_event_read;
-	i915->pmu.base.event_idx	= i915_pmu_event_event_idx;
-
-	spin_lock_init(&i915->pmu.lock);
-	hrtimer_init(&i915->pmu.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	i915->pmu.timer.function = i915_sample;
-
-	ret = perf_pmu_register(&i915->pmu.base, "i915", -1);
+	pmu->base.attr_groups	= i915_pmu_attr_groups;
+	pmu->base.task_ctx_nr	= perf_invalid_context;
+	pmu->base.event_init	= i915_pmu_event_init;
+	pmu->base.add		= i915_pmu_event_add;
+	pmu->base.del		= i915_pmu_event_del;
+	pmu->base.start		= i915_pmu_event_start;
+	pmu->base.stop		= i915_pmu_event_stop;
+	pmu->base.read		= i915_pmu_event_read;
+	pmu->base.event_idx	= i915_pmu_event_event_idx;
+
+	spin_lock_init(&pmu->lock);
+	hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	pmu->timer.function = i915_sample;
+
+	ret = perf_pmu_register(&pmu->base, "i915", -1);
 	if (ret)
 		goto err;
 
-	ret = i915_pmu_register_cpuhp_state(i915);
+	ret = i915_pmu_register_cpuhp_state(pmu);
 	if (ret)
 		goto err_unreg;
 
 	return;
 
 err_unreg:
-	perf_pmu_unregister(&i915->pmu.base);
+	perf_pmu_unregister(&pmu->base);
 err:
-	i915->pmu.base.event_init = NULL;
-	free_event_attributes(i915);
+	pmu->base.event_init = NULL;
+	free_event_attributes(pmu);
 	DRM_NOTE("Failed to register PMU! (err=%d)\n", ret);
 }
 
 void i915_pmu_unregister(struct drm_i915_private *i915)
 {
-	if (!i915->pmu.base.event_init)
+	struct i915_pmu *pmu = &i915->pmu;
+
+	if (!pmu->base.event_init)
 		return;
 
-	WARN_ON(i915->pmu.enable);
+	WARN_ON(pmu->enable);
 
-	hrtimer_cancel(&i915->pmu.timer);
+	hrtimer_cancel(&pmu->timer);
 
-	i915_pmu_unregister_cpuhp_state(i915);
+	i915_pmu_unregister_cpuhp_state(pmu);
 
-	perf_pmu_unregister(&i915->pmu.base);
-	i915->pmu.base.event_init = NULL;
-	free_event_attributes(i915);
+	perf_pmu_unregister(&pmu->base);
+	pmu->base.event_init = NULL;
+	free_event_attributes(pmu);
 }
-- 
2.20.1


* [RFC 2/4] drm/i915/pmu: Convert engine sampling to uncore mmio
  2019-08-01 10:18 [RFC 0/4] PMU update for more than one card Tvrtko Ursulin
  2019-08-01 10:18 ` [RFC 1/4] drm/i915/pmu: Make more struct i915_pmu centric Tvrtko Ursulin
@ 2019-08-01 10:18 ` Tvrtko Ursulin
  2019-08-01 10:33   ` Chris Wilson
  2019-08-01 10:18 ` [RFC 3/4] drm/i915/pmu: Convert sampling to gt Tvrtko Ursulin
                   ` (2 subsequent siblings)
  4 siblings, 1 reply; 17+ messages in thread
From: Tvrtko Ursulin @ 2019-08-01 10:18 UTC (permalink / raw)
  To: Intel-gfx

From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Drops one macro using implicit dev_priv.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/i915_pmu.c | 23 +++++++++++++----------
 1 file changed, 13 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 12008966b00e..bd9ad4f2b4e4 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -162,29 +162,30 @@ add_sample(struct i915_pmu_sample *sample, u32 val)
 }
 
 static void
-engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
+engines_sample(struct drm_i915_private *i915, unsigned int period_ns)
 {
+	struct intel_uncore *uncore = &i915->uncore;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 	intel_wakeref_t wakeref;
 	unsigned long flags;
 
-	if ((dev_priv->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
+	if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
 		return;
 
 	wakeref = 0;
-	if (READ_ONCE(dev_priv->gt.awake))
-		wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
+	if (READ_ONCE(i915->gt.awake))
+		wakeref = intel_runtime_pm_get_if_in_use(&i915->runtime_pm);
 	if (!wakeref)
 		return;
 
-	spin_lock_irqsave(&dev_priv->uncore.lock, flags);
-	for_each_engine(engine, dev_priv, id) {
+	spin_lock_irqsave(&uncore->lock, flags);
+	for_each_engine(engine, i915, id) {
 		struct intel_engine_pmu *pmu = &engine->pmu;
 		bool busy;
 		u32 val;
 
-		val = I915_READ_FW(RING_CTL(engine->mmio_base));
+		val = intel_uncore_read_fw(uncore, RING_CTL(engine->mmio_base));
 		if (val == 0) /* powerwell off => engine idle */
 			continue;
 
@@ -202,15 +203,17 @@ engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
 		 */
 		busy = val & (RING_WAIT_SEMAPHORE | RING_WAIT);
 		if (!busy) {
-			val = I915_READ_FW(RING_MI_MODE(engine->mmio_base));
+			val =
+			  intel_uncore_read_fw(uncore,
+					       RING_MI_MODE(engine->mmio_base));
 			busy = !(val & MODE_IDLE);
 		}
 		if (busy)
 			add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
 	}
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
+	spin_unlock_irqrestore(&uncore->lock, flags);
 
-	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 }
 
 static void
-- 
2.20.1


* [RFC 3/4] drm/i915/pmu: Convert sampling to gt
  2019-08-01 10:18 [RFC 0/4] PMU update for more than one card Tvrtko Ursulin
  2019-08-01 10:18 ` [RFC 1/4] drm/i915/pmu: Make more struct i915_pmu centric Tvrtko Ursulin
  2019-08-01 10:18 ` [RFC 2/4] drm/i915/pmu: Convert engine sampling to uncore mmio Tvrtko Ursulin
@ 2019-08-01 10:18 ` Tvrtko Ursulin
  2019-08-01 10:29   ` Chris Wilson
  2019-08-01 10:33   ` Chris Wilson
  2019-08-01 10:18 ` [RFC 4/4] drm/i915/pmu: Support multiple GPUs Tvrtko Ursulin
  2019-08-02 13:20 ` ✓ Fi.CI.IGT: success for PMU update for more than one card Patchwork
  4 siblings, 2 replies; 17+ messages in thread
From: Tvrtko Ursulin @ 2019-08-01 10:18 UTC (permalink / raw)
  To: Intel-gfx

From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Engines and frequencies are a GT thing so adjust sampling routines to
match.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/i915_pmu.c | 43 ++++++++++++++++++---------------
 1 file changed, 23 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index bd9ad4f2b4e4..7c48fcf0cccb 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -162,9 +162,10 @@ add_sample(struct i915_pmu_sample *sample, u32 val)
 }
 
 static void
-engines_sample(struct drm_i915_private *i915, unsigned int period_ns)
+engines_sample(struct intel_gt *gt, unsigned int period_ns)
 {
-	struct intel_uncore *uncore = &i915->uncore;
+	struct drm_i915_private *i915 = gt->i915;
+	struct intel_uncore *uncore = gt->uncore;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 	intel_wakeref_t wakeref;
@@ -174,7 +175,7 @@ engines_sample(struct drm_i915_private *i915, unsigned int period_ns)
 		return;
 
 	wakeref = 0;
-	if (READ_ONCE(i915->gt.awake))
+	if (READ_ONCE(gt->awake))
 		wakeref = intel_runtime_pm_get_if_in_use(&i915->runtime_pm);
 	if (!wakeref)
 		return;
@@ -223,34 +224,35 @@ add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul)
 }
 
 static void
-frequency_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
+frequency_sample(struct intel_gt *gt, unsigned int period_ns)
 {
-	if (dev_priv->pmu.enable &
-	    config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
+	struct drm_i915_private *i915 = gt->i915;
+	struct intel_uncore *uncore = gt->uncore;
+	struct i915_pmu *pmu = &i915->pmu;
+
+	if (pmu->enable & config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
 		u32 val;
 
-		val = dev_priv->gt_pm.rps.cur_freq;
-		if (dev_priv->gt.awake) {
+		val = i915->gt_pm.rps.cur_freq;
+		if (gt->awake) {
 			intel_wakeref_t wakeref;
 
-			with_intel_runtime_pm_if_in_use(&dev_priv->runtime_pm,
+			with_intel_runtime_pm_if_in_use(&i915->runtime_pm,
 							wakeref) {
-				val = intel_uncore_read_notrace(&dev_priv->uncore,
+				val = intel_uncore_read_notrace(uncore,
 								GEN6_RPSTAT1);
-				val = intel_get_cagf(dev_priv, val);
+				val = intel_get_cagf(i915, val);
 			}
 		}
 
-		add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
-				intel_gpu_freq(dev_priv, val),
+		add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_ACT],
+				intel_gpu_freq(i915, val),
 				period_ns / 1000);
 	}
 
-	if (dev_priv->pmu.enable &
-	    config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
-		add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_REQ],
-				intel_gpu_freq(dev_priv,
-					       dev_priv->gt_pm.rps.cur_freq),
+	if (pmu->enable & config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
+		add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_REQ],
+				intel_gpu_freq(i915, i915->gt_pm.rps.cur_freq),
 				period_ns / 1000);
 	}
 }
@@ -260,6 +262,7 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
 	struct drm_i915_private *i915 =
 		container_of(hrtimer, struct drm_i915_private, pmu.timer);
 	struct i915_pmu *pmu = &i915->pmu;
+	struct intel_gt *gt = &i915->gt;
 	unsigned int period_ns;
 	ktime_t now;
 
@@ -276,8 +279,8 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
 	 * grabbing the forcewake. However the potential error from timer call-
 	 * back delay greatly dominates this so we keep it simple.
 	 */
-	engines_sample(i915, period_ns);
-	frequency_sample(i915, period_ns);
+	engines_sample(gt, period_ns);
+	frequency_sample(gt, period_ns);
 
 	hrtimer_forward(hrtimer, now, ns_to_ktime(PERIOD));
 
-- 
2.20.1


* [RFC 4/4] drm/i915/pmu: Support multiple GPUs
  2019-08-01 10:18 [RFC 0/4] PMU update for more than one card Tvrtko Ursulin
                   ` (2 preceding siblings ...)
  2019-08-01 10:18 ` [RFC 3/4] drm/i915/pmu: Convert sampling to gt Tvrtko Ursulin
@ 2019-08-01 10:18 ` Tvrtko Ursulin
  2019-08-01 10:45   ` Chris Wilson
  2019-08-02 13:20 ` ✓ Fi.CI.IGT: success for PMU update for more than one card Patchwork
  4 siblings, 1 reply; 17+ messages in thread
From: Tvrtko Ursulin @ 2019-08-01 10:18 UTC (permalink / raw)
  To: Intel-gfx

From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

With discrete graphics a system can have both an integrated and a discrete
GPU handled by i915.

Currently we use a fixed name ("i915") when registering as the uncore PMU
provider, which stops working in this case.

Add a unique instance number in the form "i915-<instance>" to each
registered PMU to support this. The first instance keeps the old name to
preserve compatibility with existing userspace.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
---
One problem is how to tie cards and PMUs.

Should we append something better than the opaque instance (which
depends on probing order)? Like some bus slot id? But then we have to
say goodbye to preserving the "legacy" name "i915".

I couldn't find that perf supports anything for these scenarios.

Or maybe we can add a new file (sysfs?), which would be correctly under
the bus hierarchy, and would export the name of the PMU instance to
use?
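
For illustration, the sysfs option could look vaguely like the below
(untested sketch only; kdev_to_i915() is a stand-in for whatever
device-to-i915 lookup we would need, and the attribute would still have to
be registered next to perf_pmu_register()):

static ssize_t pmu_name_show(struct device *kdev,
			     struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev); /* stand-in lookup */

	return snprintf(buf, PAGE_SIZE, "%s\n", i915->pmu.name);
}
static DEVICE_ATTR_RO(pmu_name);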
---
 drivers/gpu/drm/i915/i915_pmu.c | 22 ++++++++++++++++++++--
 drivers/gpu/drm/i915/i915_pmu.h |  8 ++++++++
 2 files changed, 28 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 7c48fcf0cccb..105246ae5160 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -1053,6 +1053,8 @@ static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
 	cpuhp_remove_multi_state(cpuhp_slot);
 }
 
+static atomic_t i915_pmu_instance = ATOMIC_INIT(0);
+
 void i915_pmu_register(struct drm_i915_private *i915)
 {
 	struct i915_pmu *pmu = &i915->pmu;
@@ -1083,9 +1085,17 @@ void i915_pmu_register(struct drm_i915_private *i915)
 	hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	pmu->timer.function = i915_sample;
 
-	ret = perf_pmu_register(&pmu->base, "i915", -1);
+	pmu->instance = atomic_inc_return(&i915_pmu_instance) - 1;
+	if (pmu->instance)
+		pmu->name = kasprintf(GFP_KERNEL, "i915-%u", pmu->instance);
+	else
+		pmu->name = "i915";
+	if (!pmu->name)
+		goto err_instance;
+
+	ret = perf_pmu_register(&pmu->base, pmu->name, -1);
 	if (ret)
-		goto err;
+		goto err_name;
 
 	ret = i915_pmu_register_cpuhp_state(pmu);
 	if (ret)
@@ -1095,6 +1105,11 @@ void i915_pmu_register(struct drm_i915_private *i915)
 
 err_unreg:
 	perf_pmu_unregister(&pmu->base);
+err_name:
+	if (pmu->instance)
+		kfree(pmu->name);
+err_instance:
+	atomic_dec(&i915_pmu_instance);
 err:
 	pmu->base.event_init = NULL;
 	free_event_attributes(pmu);
@@ -1116,5 +1131,8 @@ void i915_pmu_unregister(struct drm_i915_private *i915)
 
 	perf_pmu_unregister(&pmu->base);
 	pmu->base.event_init = NULL;
+	atomic_dec(&i915_pmu_instance);
+	if (pmu->instance)
+		kfree(pmu->name);
 	free_event_attributes(pmu);
 }
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index 4fc4f2478301..5289d4fc82b5 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -46,6 +46,14 @@ struct i915_pmu {
 	 * @base: PMU base.
 	 */
 	struct pmu base;
+	/**
+	 * @instance: PMU instance number when multiple devices.
+	 */
+	unsigned int instance;
+	/**
+	 * @name: Name as registered with perf core.
+	 */
+	char *name;
 	/**
 	 * @lock: Lock protecting enable mask and ref count handling.
 	 */
-- 
2.20.1


* Re: [RFC 3/4] drm/i915/pmu: Convert sampling to gt
  2019-08-01 10:18 ` [RFC 3/4] drm/i915/pmu: Convert sampling to gt Tvrtko Ursulin
@ 2019-08-01 10:29   ` Chris Wilson
  2019-08-01 12:13     ` Tvrtko Ursulin
  2019-08-01 10:33   ` Chris Wilson
  1 sibling, 1 reply; 17+ messages in thread
From: Chris Wilson @ 2019-08-01 10:29 UTC (permalink / raw)
  To: Intel-gfx, Tvrtko Ursulin

Quoting Tvrtko Ursulin (2019-08-01 11:18:24)
>  static void
> -frequency_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
> +frequency_sample(struct intel_gt *gt, unsigned int period_ns)
>  {
> -       if (dev_priv->pmu.enable &
> -           config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
> +       struct drm_i915_private *i915 = gt->i915;
> +       struct intel_uncore *uncore = gt->uncore;
> +       struct i915_pmu *pmu = &i915->pmu;
> +
> +       if (pmu->enable & config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
>                 u32 val;
>  
> -               val = dev_priv->gt_pm.rps.cur_freq;
> -               if (dev_priv->gt.awake) {
> +               val = i915->gt_pm.rps.cur_freq;
> +               if (gt->awake) {
>                         intel_wakeref_t wakeref;
>  
> -                       with_intel_runtime_pm_if_in_use(&dev_priv->runtime_pm,
> +                       with_intel_runtime_pm_if_in_use(&i915->runtime_pm,
>                                                         wakeref) {

We can replace this wakeref with
	if (intel_gt_pm_get_if_awake()) {
instead of if (gt->awake);
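
i.e. roughly (sketch only, not compile tested; the helper's exact
signature may differ from what I remember):

		val = i915->gt_pm.rps.cur_freq;
		if (intel_gt_pm_get_if_awake(gt)) {
			val = intel_uncore_read_notrace(uncore, GEN6_RPSTAT1);
			val = intel_get_cagf(i915, val);
			intel_gt_pm_put(gt);
		}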

Which reminds me I had some patches to change the rc6 sampling over
runtime suspend -- from your last vacation! My timing is perfect.
-Chris

* Re: [RFC 1/4] drm/i915/pmu: Make more struct i915_pmu centric
  2019-08-01 10:18 ` [RFC 1/4] drm/i915/pmu: Make more struct i915_pmu centric Tvrtko Ursulin
@ 2019-08-01 10:31   ` Chris Wilson
  2019-08-01 10:46   ` Michal Wajdeczko
  1 sibling, 0 replies; 17+ messages in thread
From: Chris Wilson @ 2019-08-01 10:31 UTC (permalink / raw)
  To: Intel-gfx, Tvrtko Ursulin

Quoting Tvrtko Ursulin (2019-08-01 11:18:22)
> From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> 
> Just tidy the code a bit by removing a sea of overly verbose i915->pmu.*.
> 
> Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
-Chris

* Re: [RFC 2/4] drm/i915/pmu: Convert engine sampling to uncore mmio
  2019-08-01 10:18 ` [RFC 2/4] drm/i915/pmu: Convert engine sampling to uncore mmio Tvrtko Ursulin
@ 2019-08-01 10:33   ` Chris Wilson
  2019-08-01 12:03     ` Tvrtko Ursulin
  0 siblings, 1 reply; 17+ messages in thread
From: Chris Wilson @ 2019-08-01 10:33 UTC (permalink / raw)
  To: Intel-gfx, Tvrtko Ursulin

Quoting Tvrtko Ursulin (2019-08-01 11:18:23)
> From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> 
> Drops one macro using implicit dev_priv.
> 
> Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> ---
>  drivers/gpu/drm/i915/i915_pmu.c | 23 +++++++++++++----------
>  1 file changed, 13 insertions(+), 10 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
> index 12008966b00e..bd9ad4f2b4e4 100644
> --- a/drivers/gpu/drm/i915/i915_pmu.c
> +++ b/drivers/gpu/drm/i915/i915_pmu.c
> @@ -162,29 +162,30 @@ add_sample(struct i915_pmu_sample *sample, u32 val)
>  }
>  
>  static void
> -engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
> +engines_sample(struct drm_i915_private *i915, unsigned int period_ns)
>  {
> +       struct intel_uncore *uncore = &i915->uncore;
>         struct intel_engine_cs *engine;
>         enum intel_engine_id id;
>         intel_wakeref_t wakeref;
>         unsigned long flags;
>  
> -       if ((dev_priv->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
> +       if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
>                 return;
>  
>         wakeref = 0;
> -       if (READ_ONCE(dev_priv->gt.awake))
> -               wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
> +       if (READ_ONCE(i915->gt.awake))
> +               wakeref = intel_runtime_pm_get_if_in_use(&i915->runtime_pm);
>         if (!wakeref)
>                 return;
>  
> -       spin_lock_irqsave(&dev_priv->uncore.lock, flags);
> -       for_each_engine(engine, dev_priv, id) {
> +       spin_lock_irqsave(&uncore->lock, flags);
> +       for_each_engine(engine, i915, id) {
>                 struct intel_engine_pmu *pmu = &engine->pmu;
>                 bool busy;
>                 u32 val;
>  
> -               val = I915_READ_FW(RING_CTL(engine->mmio_base));
> +               val = intel_uncore_read_fw(uncore, RING_CTL(engine->mmio_base));

Could use ENGINE_READ_FW(engine, RING_CTL) ?
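
i.e. (sketch, assuming engine->uncore is already set up at this point):

		val = ENGINE_READ_FW(engine, RING_CTL);

and likewise for the MI_MODE read below:

		val = ENGINE_READ_FW(engine, RING_MI_MODE);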

Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
-Chris

* Re: [RFC 3/4] drm/i915/pmu: Convert sampling to gt
  2019-08-01 10:18 ` [RFC 3/4] drm/i915/pmu: Convert sampling to gt Tvrtko Ursulin
  2019-08-01 10:29   ` Chris Wilson
@ 2019-08-01 10:33   ` Chris Wilson
  1 sibling, 0 replies; 17+ messages in thread
From: Chris Wilson @ 2019-08-01 10:33 UTC (permalink / raw)
  To: Intel-gfx, Tvrtko Ursulin

Quoting Tvrtko Ursulin (2019-08-01 11:18:24)
> From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> 
> Engines and frequencies are a GT thing so adjust sampling routines to
> match.
> 
> Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
-Chris

* Re: [RFC 4/4] drm/i915/pmu: Support multiple GPUs
  2019-08-01 10:18 ` [RFC 4/4] drm/i915/pmu: Support multiple GPUs Tvrtko Ursulin
@ 2019-08-01 10:45   ` Chris Wilson
  2019-08-01 12:03     ` Tvrtko Ursulin
  0 siblings, 1 reply; 17+ messages in thread
From: Chris Wilson @ 2019-08-01 10:45 UTC (permalink / raw)
  To: Intel-gfx, Tvrtko Ursulin

Quoting Tvrtko Ursulin (2019-08-01 11:18:25)
> From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> 
> With discrete graphics a system can have both an integrated and a discrete
> GPU handled by i915.
> 
> Currently we use a fixed name ("i915") when registering as the uncore PMU
> provider, which stops working in this case.
> 
> Add a unique instance number in the form "i915-<instance>" to each
> registered PMU to support this. The first instance keeps the old name to
> preserve compatibility with existing userspace.
> 
> Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> Cc: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
> ---
> One problem is how to tie cards and PMUs.
> 
> Should we append something better than the opaque instance (which
> depends on probing order)? Like some bus slot id? But then we have to
> say goodbye to preserving the "legacy" name "i915".
> 
> I couldn't find that perf supports anything for these scenarios.
> 
> Or maybe we can add a new file (sysfs?), which would be correctly under
> the bus hierarchy, and would export the name of the PMU instance to
> use?

We definitely need to add some means of uniquely identifying i915 into
sysfs, and for us to link from bus/pci/devices/ It seems odd that this
is not a solved problem :| (Which means I've probably missed how it is
meant to be done.)

Storing a pmu-name inside sysfs seems reasonable, and postpones the
problem of persistent device names for i915 until later :)
(Fwiw, how about using i915-${pci_addr}, or even just ${pci_addr}? I
expect that perf will grow multiple lookup paths if more hotpluggable
devices start supporting perf events)
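
Something along these lines, perhaps (rough, untested sketch):

	pmu->name = kasprintf(GFP_KERNEL, "i915-%s", dev_name(i915->drm.dev));
	if (pmu->name)
		strreplace(pmu->name, ':', '_'); /* if ':' turns out awkward for tools */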

> ---
>  drivers/gpu/drm/i915/i915_pmu.c | 22 ++++++++++++++++++++--
>  drivers/gpu/drm/i915/i915_pmu.h |  8 ++++++++
>  2 files changed, 28 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
> index 7c48fcf0cccb..105246ae5160 100644
> --- a/drivers/gpu/drm/i915/i915_pmu.c
> +++ b/drivers/gpu/drm/i915/i915_pmu.c
> @@ -1053,6 +1053,8 @@ static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
>         cpuhp_remove_multi_state(cpuhp_slot);
>  }
>  
> +static atomic_t i915_pmu_instance = ATOMIC_INIT(0);
> +
>  void i915_pmu_register(struct drm_i915_private *i915)
>  {
>         struct i915_pmu *pmu = &i915->pmu;
> @@ -1083,9 +1085,17 @@ void i915_pmu_register(struct drm_i915_private *i915)
>         hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
>         pmu->timer.function = i915_sample;
>  
> -       ret = perf_pmu_register(&pmu->base, "i915", -1);
> +       pmu->instance = atomic_inc_return(&i915_pmu_instance) - 1;
> +       if (pmu->instance)
> +               pmu->name = kasprintf(GFP_KERNEL, "i915-%u", pmu->instance);
> +       else
> +               pmu->name = "i915";
> +       if (!pmu->name)
> +               goto err_instance;
> +
> +       ret = perf_pmu_register(&pmu->base, pmu->name, -1);

Iirc, the name is basically only used to construct the perf sysfs, and
the tools extract the event names and tags from the sysfs. So I wonder
if we could add a symlink...
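
Perhaps as little as (wild sketch, untested; assumes perf core populates
pmu->base.dev for named PMUs, which I haven't double checked):

	WARN_ON(sysfs_create_link(&i915->drm.dev->kobj,	 /* GPU's device dir */
				  &pmu->base.dev->kobj,	 /* /sys/devices/<pmu name> */
				  "pmu"));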
-Chris

* Re: [RFC 1/4] drm/i915/pmu: Make more struct i915_pmu centric
  2019-08-01 10:18 ` [RFC 1/4] drm/i915/pmu: Make more struct i915_pmu centric Tvrtko Ursulin
  2019-08-01 10:31   ` Chris Wilson
@ 2019-08-01 10:46   ` Michal Wajdeczko
  2019-08-01 10:53     ` Chris Wilson
  2019-08-01 12:10     ` Tvrtko Ursulin
  1 sibling, 2 replies; 17+ messages in thread
From: Michal Wajdeczko @ 2019-08-01 10:46 UTC (permalink / raw)
  To: Intel-gfx, Tvrtko Ursulin

On Thu, 01 Aug 2019 12:18:22 +0200, Tvrtko Ursulin  
<tvrtko.ursulin@linux.intel.com> wrote:

> From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
>
> Just tidy the code a bit by removing a sea of overly verbose i915->pmu.*.
>
> Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> ---
>  drivers/gpu/drm/i915/i915_pmu.c | 194 +++++++++++++++++---------------
>  1 file changed, 104 insertions(+), 90 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_pmu.c  
> b/drivers/gpu/drm/i915/i915_pmu.c
> index eff86483bec0..12008966b00e 100644
> --- a/drivers/gpu/drm/i915/i915_pmu.c
> +++ b/drivers/gpu/drm/i915/i915_pmu.c
> @@ -74,8 +74,9 @@ static unsigned int event_enabled_bit(struct  
> perf_event *event)
>  	return config_enabled_bit(event->attr.config);
>  }
> -static bool pmu_needs_timer(struct drm_i915_private *i915, bool  
> gpu_active)
> +static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active)
>  {
> +	struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);

maybe this can be promoted to pmu_to_i915() ?
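
e.g. something like (sketch):

static inline struct drm_i915_private *pmu_to_i915(struct i915_pmu *pmu)
{
	return container_of(pmu, struct drm_i915_private, pmu);
}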

>  	u64 enable;
> 	/*
> @@ -83,7 +84,7 @@ static bool pmu_needs_timer(struct drm_i915_private  
> *i915, bool gpu_active)
>  	 *
>  	 * We start with a bitmask of all currently enabled events.
>  	 */
> -	enable = i915->pmu.enable;
> +	enable = pmu->enable;
> 	/*
>  	 * Mask out all the ones which do not need the timer, or in
> @@ -114,24 +115,26 @@ static bool pmu_needs_timer(struct  
> drm_i915_private *i915, bool gpu_active)
> void i915_pmu_gt_parked(struct drm_i915_private *i915)

you should be more consistent and change this one too

>  {
> -	if (!i915->pmu.base.event_init)
> +	struct i915_pmu *pmu = &i915->pmu;
> +
> +	if (!pmu->base.event_init)
>  		return;
> -	spin_lock_irq(&i915->pmu.lock);
> +	spin_lock_irq(&pmu->lock);
>  	/*
>  	 * Signal sampling timer to stop if only engine events are enabled and
>  	 * GPU went idle.
>  	 */
> -	i915->pmu.timer_enabled = pmu_needs_timer(i915, false);
> -	spin_unlock_irq(&i915->pmu.lock);
> +	pmu->timer_enabled = pmu_needs_timer(pmu, false);
> +	spin_unlock_irq(&pmu->lock);
>  }
> -static void __i915_pmu_maybe_start_timer(struct drm_i915_private *i915)
> +static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu)
>  {
> -	if (!i915->pmu.timer_enabled && pmu_needs_timer(i915, true)) {
> -		i915->pmu.timer_enabled = true;
> -		i915->pmu.timer_last = ktime_get();
> -		hrtimer_start_range_ns(&i915->pmu.timer,
> +	if (!pmu->timer_enabled && pmu_needs_timer(pmu, true)) {
> +		pmu->timer_enabled = true;
> +		pmu->timer_last = ktime_get();
> +		hrtimer_start_range_ns(&pmu->timer,
>  				       ns_to_ktime(PERIOD), 0,
>  				       HRTIMER_MODE_REL_PINNED);
>  	}
> @@ -139,15 +142,17 @@ static void __i915_pmu_maybe_start_timer(struct  
> drm_i915_private *i915)
> void i915_pmu_gt_unparked(struct drm_i915_private *i915)

and here (and even some more in whole .c file)

>  {
> -	if (!i915->pmu.base.event_init)
> +	struct i915_pmu *pmu = &i915->pmu;
> +
> +	if (!pmu->base.event_init)
>  		return;
> -	spin_lock_irq(&i915->pmu.lock);
> +	spin_lock_irq(&pmu->lock);
>  	/*
>  	 * Re-enable sampling timer when GPU goes active.
>  	 */
> -	__i915_pmu_maybe_start_timer(i915);
> -	spin_unlock_irq(&i915->pmu.lock);
> +	__i915_pmu_maybe_start_timer(pmu);
> +	spin_unlock_irq(&pmu->lock);
>  }
> static void
> @@ -251,15 +256,16 @@ static enum hrtimer_restart i915_sample(struct  
> hrtimer *hrtimer)
>  {
>  	struct drm_i915_private *i915 =
>  		container_of(hrtimer, struct drm_i915_private, pmu.timer);
> +	struct i915_pmu *pmu = &i915->pmu;
>  	unsigned int period_ns;
>  	ktime_t now;
> -	if (!READ_ONCE(i915->pmu.timer_enabled))
> +	if (!READ_ONCE(pmu->timer_enabled))
>  		return HRTIMER_NORESTART;
> 	now = ktime_get();
> -	period_ns = ktime_to_ns(ktime_sub(now, i915->pmu.timer_last));
> -	i915->pmu.timer_last = now;
> +	period_ns = ktime_to_ns(ktime_sub(now, pmu->timer_last));
> +	pmu->timer_last = now;
> 	/*
>  	 * Strictly speaking the passed in period may not be 100% accurate for
> @@ -443,6 +449,7 @@ static u64 get_rc6(struct drm_i915_private *i915)

this can also take *pmu instead of *i915

>  {
>  #if IS_ENABLED(CONFIG_PM)
>  	struct intel_runtime_pm *rpm = &i915->runtime_pm;
> +	struct i915_pmu *pmu = &i915->pmu;
>  	intel_wakeref_t wakeref;
>  	unsigned long flags;
>  	u64 val;
> @@ -458,16 +465,16 @@ static u64 get_rc6(struct drm_i915_private *i915)
>  		 * previously.
>  		 */
> -		spin_lock_irqsave(&i915->pmu.lock, flags);
> +		spin_lock_irqsave(&pmu->lock, flags);
> -		if (val >= i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
> -			i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
> -			i915->pmu.sample[__I915_SAMPLE_RC6].cur = val;
> +		if (val >= pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
> +			pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
> +			pmu->sample[__I915_SAMPLE_RC6].cur = val;
>  		} else {
> -			val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
> +			val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
>  		}
> -		spin_unlock_irqrestore(&i915->pmu.lock, flags);
> +		spin_unlock_irqrestore(&pmu->lock, flags);
>  	} else {
>  		struct device *kdev = rpm->kdev;
> @@ -478,7 +485,7 @@ static u64 get_rc6(struct drm_i915_private *i915)
>  		 * on top of the last known real value, as the approximated RC6
>  		 * counter value.
>  		 */
> -		spin_lock_irqsave(&i915->pmu.lock, flags);
> +		spin_lock_irqsave(&pmu->lock, flags);
> 		/*
>  		 * After the above branch intel_runtime_pm_get_if_in_use failed
> @@ -494,20 +501,20 @@ static u64 get_rc6(struct drm_i915_private *i915)
>  		if (pm_runtime_status_suspended(kdev)) {
>  			val = pm_runtime_suspended_time(kdev);
> -			if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
> -				i915->pmu.suspended_time_last = val;
> +			if (!pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
> +				pmu->suspended_time_last = val;
> -			val -= i915->pmu.suspended_time_last;
> -			val += i915->pmu.sample[__I915_SAMPLE_RC6].cur;
> +			val -= pmu->suspended_time_last;
> +			val += pmu->sample[__I915_SAMPLE_RC6].cur;
> -			i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
> -		} else if (i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
> -			val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
> +			pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
> +		} else if (pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
> +			val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
>  		} else {
> -			val = i915->pmu.sample[__I915_SAMPLE_RC6].cur;
> +			val = pmu->sample[__I915_SAMPLE_RC6].cur;
>  		}
> -		spin_unlock_irqrestore(&i915->pmu.lock, flags);
> +		spin_unlock_irqrestore(&pmu->lock, flags);
>  	}
> 	return val;
> @@ -520,6 +527,7 @@ static u64 __i915_pmu_event_read(struct perf_event  
> *event)
>  {
>  	struct drm_i915_private *i915 =
>  		container_of(event->pmu, typeof(*i915), pmu.base);
> +	struct i915_pmu *pmu = &i915->pmu;
>  	u64 val = 0;
> 	if (is_engine_event(event)) {
> @@ -542,12 +550,12 @@ static u64 __i915_pmu_event_read(struct perf_event  
> *event)
>  		switch (event->attr.config) {
>  		case I915_PMU_ACTUAL_FREQUENCY:
>  			val =
> -			   div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_ACT].cur,
> +			   div_u64(pmu->sample[__I915_SAMPLE_FREQ_ACT].cur,
>  				   USEC_PER_SEC /* to MHz */);
>  			break;
>  		case I915_PMU_REQUESTED_FREQUENCY:
>  			val =
> -			   div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_REQ].cur,
> +			   div_u64(pmu->sample[__I915_SAMPLE_FREQ_REQ].cur,
>  				   USEC_PER_SEC /* to MHz */);
>  			break;
>  		case I915_PMU_INTERRUPTS:
> @@ -582,24 +590,25 @@ static void i915_pmu_enable(struct perf_event  
> *event)
>  	struct drm_i915_private *i915 =
>  		container_of(event->pmu, typeof(*i915), pmu.base);
>  	unsigned int bit = event_enabled_bit(event);
> +	struct i915_pmu *pmu = &i915->pmu;
>  	unsigned long flags;
> -	spin_lock_irqsave(&i915->pmu.lock, flags);
> +	spin_lock_irqsave(&pmu->lock, flags);
> 	/*
>  	 * Update the bitmask of enabled events and increment
>  	 * the event reference counter.
>  	 */
> -	BUILD_BUG_ON(ARRAY_SIZE(i915->pmu.enable_count) != I915_PMU_MASK_BITS);
> -	GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count));
> -	GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0);
> -	i915->pmu.enable |= BIT_ULL(bit);
> -	i915->pmu.enable_count[bit]++;
> +	BUILD_BUG_ON(ARRAY_SIZE(pmu->enable_count) != I915_PMU_MASK_BITS);
> +	GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
> +	GEM_BUG_ON(pmu->enable_count[bit] == ~0);
> +	pmu->enable |= BIT_ULL(bit);
> +	pmu->enable_count[bit]++;
> 	/*
>  	 * Start the sampling timer if needed and not already enabled.
>  	 */
> -	__i915_pmu_maybe_start_timer(i915);
> +	__i915_pmu_maybe_start_timer(pmu);
> 	/*
>  	 * For per-engine events the bitmask and reference counting
> @@ -625,7 +634,7 @@ static void i915_pmu_enable(struct perf_event *event)
>  		engine->pmu.enable_count[sample]++;
>  	}
> -	spin_unlock_irqrestore(&i915->pmu.lock, flags);
> +	spin_unlock_irqrestore(&pmu->lock, flags);
> 	/*
>  	 * Store the current counter value so we can report the correct delta
> @@ -640,9 +649,10 @@ static void i915_pmu_disable(struct perf_event  
> *event)
>  	struct drm_i915_private *i915 =
>  		container_of(event->pmu, typeof(*i915), pmu.base);
>  	unsigned int bit = event_enabled_bit(event);
> +	struct i915_pmu *pmu = &i915->pmu;
>  	unsigned long flags;
> -	spin_lock_irqsave(&i915->pmu.lock, flags);
> +	spin_lock_irqsave(&pmu->lock, flags);
> 	if (is_engine_event(event)) {
>  		u8 sample = engine_event_sample(event);
> @@ -664,18 +674,18 @@ static void i915_pmu_disable(struct perf_event  
> *event)
>  			engine->pmu.enable &= ~BIT(sample);
>  	}
> -	GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count));
> -	GEM_BUG_ON(i915->pmu.enable_count[bit] == 0);
> +	GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
> +	GEM_BUG_ON(pmu->enable_count[bit] == 0);
>  	/*
>  	 * Decrement the reference count and clear the enabled
>  	 * bitmask when the last listener on an event goes away.
>  	 */
> -	if (--i915->pmu.enable_count[bit] == 0) {
> -		i915->pmu.enable &= ~BIT_ULL(bit);
> -		i915->pmu.timer_enabled &= pmu_needs_timer(i915, true);
> +	if (--pmu->enable_count[bit] == 0) {
> +		pmu->enable &= ~BIT_ULL(bit);
> +		pmu->timer_enabled &= pmu_needs_timer(pmu, true);
>  	}
> -	spin_unlock_irqrestore(&i915->pmu.lock, flags);
> +	spin_unlock_irqrestore(&pmu->lock, flags);
>  }
> static void i915_pmu_event_start(struct perf_event *event, int flags)
> @@ -824,8 +834,9 @@ add_pmu_attr(struct perf_pmu_events_attr *attr,  
> const char *name,
>  }
> static struct attribute **
> -create_event_attributes(struct drm_i915_private *i915)
> +create_event_attributes(struct i915_pmu *pmu)
>  {
> +	struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
>  	static const struct {
>  		u64 config;
>  		const char *name;
> @@ -939,8 +950,8 @@ create_event_attributes(struct drm_i915_private  
> *i915)
>  		}
>  	}
> -	i915->pmu.i915_attr = i915_attr;
> -	i915->pmu.pmu_attr = pmu_attr;
> +	pmu->i915_attr = i915_attr;
> +	pmu->pmu_attr = pmu_attr;
> 	return attr;
> @@ -956,7 +967,7 @@ err:;
>  	return NULL;
>  }
> -static void free_event_attributes(struct drm_i915_private *i915)
> +static void free_event_attributes(struct i915_pmu *pmu)
>  {
>  	struct attribute **attr_iter = i915_pmu_events_attr_group.attrs;
> @@ -964,12 +975,12 @@ static void free_event_attributes(struct  
> drm_i915_private *i915)
>  		kfree((*attr_iter)->name);
> 	kfree(i915_pmu_events_attr_group.attrs);
> -	kfree(i915->pmu.i915_attr);
> -	kfree(i915->pmu.pmu_attr);
> +	kfree(pmu->i915_attr);
> +	kfree(pmu->pmu_attr);
> 	i915_pmu_events_attr_group.attrs = NULL;
> -	i915->pmu.i915_attr = NULL;
> -	i915->pmu.pmu_attr = NULL;
> +	pmu->i915_attr = NULL;
> +	pmu->pmu_attr = NULL;
>  }
> static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
> @@ -1006,7 +1017,7 @@ static int i915_pmu_cpu_offline(unsigned int cpu,  
> struct hlist_node *node)
> static enum cpuhp_state cpuhp_slot = CPUHP_INVALID;
> -static int i915_pmu_register_cpuhp_state(struct drm_i915_private *i915)
> +static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
>  {
>  	enum cpuhp_state slot;
>  	int ret;
> @@ -1019,7 +1030,7 @@ static int i915_pmu_register_cpuhp_state(struct  
> drm_i915_private *i915)
>  		return ret;
> 	slot = ret;
> -	ret = cpuhp_state_add_instance(slot, &i915->pmu.node);
> +	ret = cpuhp_state_add_instance(slot, &pmu->node);
>  	if (ret) {
>  		cpuhp_remove_multi_state(slot);
>  		return ret;
> @@ -1029,15 +1040,16 @@ static int i915_pmu_register_cpuhp_state(struct  
> drm_i915_private *i915)
>  	return 0;
>  }
> -static void i915_pmu_unregister_cpuhp_state(struct drm_i915_private  
> *i915)
> +static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
>  {
>  	WARN_ON(cpuhp_slot == CPUHP_INVALID);
> -	WARN_ON(cpuhp_state_remove_instance(cpuhp_slot, &i915->pmu.node));
> +	WARN_ON(cpuhp_state_remove_instance(cpuhp_slot, &pmu->node));
>  	cpuhp_remove_multi_state(cpuhp_slot);
>  }
> void i915_pmu_register(struct drm_i915_private *i915)

again

>  {
> +	struct i915_pmu *pmu = &i915->pmu;
>  	int ret;
> 	if (INTEL_GEN(i915) <= 2) {
> @@ -1045,56 +1057,58 @@ void i915_pmu_register(struct drm_i915_private  
> *i915)
>  		return;
>  	}
> -	i915_pmu_events_attr_group.attrs = create_event_attributes(i915);
> +	i915_pmu_events_attr_group.attrs = create_event_attributes(pmu);
>  	if (!i915_pmu_events_attr_group.attrs) {
>  		ret = -ENOMEM;
>  		goto err;
>  	}
> -	i915->pmu.base.attr_groups	= i915_pmu_attr_groups;
> -	i915->pmu.base.task_ctx_nr	= perf_invalid_context;
> -	i915->pmu.base.event_init	= i915_pmu_event_init;
> -	i915->pmu.base.add		= i915_pmu_event_add;
> -	i915->pmu.base.del		= i915_pmu_event_del;
> -	i915->pmu.base.start		= i915_pmu_event_start;
> -	i915->pmu.base.stop		= i915_pmu_event_stop;
> -	i915->pmu.base.read		= i915_pmu_event_read;
> -	i915->pmu.base.event_idx	= i915_pmu_event_event_idx;
> -
> -	spin_lock_init(&i915->pmu.lock);
> -	hrtimer_init(&i915->pmu.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
> -	i915->pmu.timer.function = i915_sample;
> -
> -	ret = perf_pmu_register(&i915->pmu.base, "i915", -1);
> +	pmu->base.attr_groups	= i915_pmu_attr_groups;
> +	pmu->base.task_ctx_nr	= perf_invalid_context;
> +	pmu->base.event_init	= i915_pmu_event_init;
> +	pmu->base.add		= i915_pmu_event_add;
> +	pmu->base.del		= i915_pmu_event_del;
> +	pmu->base.start		= i915_pmu_event_start;
> +	pmu->base.stop		= i915_pmu_event_stop;
> +	pmu->base.read		= i915_pmu_event_read;
> +	pmu->base.event_idx	= i915_pmu_event_event_idx;
> +
> +	spin_lock_init(&pmu->lock);
> +	hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
> +	pmu->timer.function = i915_sample;
> +
> +	ret = perf_pmu_register(&pmu->base, "i915", -1);
>  	if (ret)
>  		goto err;
> -	ret = i915_pmu_register_cpuhp_state(i915);
> +	ret = i915_pmu_register_cpuhp_state(pmu);
>  	if (ret)
>  		goto err_unreg;
> 	return;
> err_unreg:
> -	perf_pmu_unregister(&i915->pmu.base);
> +	perf_pmu_unregister(&pmu->base);
>  err:
> -	i915->pmu.base.event_init = NULL;
> -	free_event_attributes(i915);
> +	pmu->base.event_init = NULL;
> +	free_event_attributes(pmu);
>  	DRM_NOTE("Failed to register PMU! (err=%d)\n", ret);
>  }
> void i915_pmu_unregister(struct drm_i915_private *i915)

and again

>  {
> -	if (!i915->pmu.base.event_init)
> +	struct i915_pmu *pmu = &i915->pmu;
> +
> +	if (!pmu->base.event_init)
>  		return;
> -	WARN_ON(i915->pmu.enable);
> +	WARN_ON(pmu->enable);
> -	hrtimer_cancel(&i915->pmu.timer);
> +	hrtimer_cancel(&pmu->timer);
> -	i915_pmu_unregister_cpuhp_state(i915);
> +	i915_pmu_unregister_cpuhp_state(pmu);
> -	perf_pmu_unregister(&i915->pmu.base);
> -	i915->pmu.base.event_init = NULL;
> -	free_event_attributes(i915);
> +	perf_pmu_unregister(&pmu->base);
> +	pmu->base.event_init = NULL;
> +	free_event_attributes(pmu);
>  }

with all possible replacements,

Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com>

* Re: [RFC 1/4] drm/i915/pmu: Make more struct i915_pmu centric
  2019-08-01 10:46   ` Michal Wajdeczko
@ 2019-08-01 10:53     ` Chris Wilson
  2019-08-01 12:10     ` Tvrtko Ursulin
  1 sibling, 0 replies; 17+ messages in thread
From: Chris Wilson @ 2019-08-01 10:53 UTC (permalink / raw)
  To: Intel-gfx, Michal Wajdeczko, Tvrtko Ursulin

Quoting Michal Wajdeczko (2019-08-01 11:46:06)
> On Thu, 01 Aug 2019 12:18:22 +0200, Tvrtko Ursulin  
> <tvrtko.ursulin@linux.intel.com> wrote:
> 
> > From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> >
> > Just tidy the code a bit by removing a sea of overly verbose i915->pmu.*.
> >
> > Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> > ---
> >  drivers/gpu/drm/i915/i915_pmu.c | 194 +++++++++++++++++---------------
> >  1 file changed, 104 insertions(+), 90 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/i915/i915_pmu.c  
> > b/drivers/gpu/drm/i915/i915_pmu.c
> > index eff86483bec0..12008966b00e 100644
> > --- a/drivers/gpu/drm/i915/i915_pmu.c
> > +++ b/drivers/gpu/drm/i915/i915_pmu.c
> > @@ -74,8 +74,9 @@ static unsigned int event_enabled_bit(struct  
> > perf_event *event)
> >       return config_enabled_bit(event->attr.config);
> >  }
> > -static bool pmu_needs_timer(struct drm_i915_private *i915, bool  
> > gpu_active)
> > +static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active)
> >  {
> > +     struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
> 
> maybe this can be promoted to pmu_to_i915() ?
> 
> >       u64 enable;
> >       /*
> > @@ -83,7 +84,7 @@ static bool pmu_needs_timer(struct drm_i915_private  
> > *i915, bool gpu_active)
> >        *
> >        * We start with a bitmask of all currently enabled events.
> >        */
> > -     enable = i915->pmu.enable;
> > +     enable = pmu->enable;
> >       /*
> >        * Mask out all the ones which do not need the timer, or in
> > @@ -114,24 +115,26 @@ static bool pmu_needs_timer(struct  
> > drm_i915_private *i915, bool gpu_active)
> > void i915_pmu_gt_parked(struct drm_i915_private *i915)
> 
> you should be more consistent and change this one too

This is definitely not the final form, so I'm not bothered. I foresee
there being a mix of device-level pmu and gt-level pmu, with this being
part of the latter.
-Chris

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [RFC 4/4] drm/i915/pmu: Support multiple GPUs
  2019-08-01 10:45   ` Chris Wilson
@ 2019-08-01 12:03     ` Tvrtko Ursulin
  0 siblings, 0 replies; 17+ messages in thread
From: Tvrtko Ursulin @ 2019-08-01 12:03 UTC (permalink / raw)
  To: Chris Wilson, Intel-gfx


On 01/08/2019 11:45, Chris Wilson wrote:
> Quoting Tvrtko Ursulin (2019-08-01 11:18:25)
>> From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
>>
>> With discrete graphics, a system can have both an integrated and a discrete GPU
>> handled by i915.
>>
>> Currently we use a fixed name ("i915") when registering as the uncore PMU
>> provider, which stops working in this case.
>>
>> Add a unique instance number in the form of "i915-<instance>" to each
>> registered PMU to support this. The first instance keeps the old name to
>> preserve compatibility with existing userspace.
>>
>> Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
>> Cc: Chris Wilson <chris@chris-wilson.co.uk>
>> Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
>> ---
>> One problem is how to tie cards and PMUs.
>>
>> Should we append something better than the opaque instance (which
>> depends on probing order)? Like some bus slot id? But then we have to
>> say goodbye to preserving the "legacy" name "i915".
>>
>> I couldn't find that perf supports anything for these scenarios.
>>
>> Or maybe we can add a new file (sysfs?), which would be correctly under
>> the bus hierarchy, and would export the name of the PMU instance to
>> use?
> 
> We definitely need to add some means of uniquely identifying i915 into
> sysfs, and for us to link from bus/pci/devices/. It seems odd that this
> is not a solved problem :| (Which means I've probably missed how it is
> meant to be done.)

I don't think there is a "how it is meant to be done" part. For 
instance, even unplug/unload is not handled by the core. Which reminds me 
that I wanted to add a module_get/put somewhere in i915_pmu.c, to at 
least save myself from doing i915_reload with intel_gpu_top running in 
another window.. :)
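
(A minimal sketch of one way to do that, assuming the struct pmu ->module 
hook is suitable here -- perf core then pins the module for as long as any 
event is open against this PMU:)

    /* in i915_pmu_register(), before calling perf_pmu_register() */
    pmu->base.module = THIS_MODULE;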

> Storing a pmu-name inside sysfs seems reasonable, and postpones the
> problem of persistent device names for i915 until later :)
> (Fwiw, how about using i915-${pci_addr}, or even just ${pci_addr}? I
> expect that perf will grow multiple lookup paths if more hotpluggable
> devices start supporting perf events)

Yes, the PCI address looks like a good candidate, but how important is legacy 
compatibility? Because I don't think we have the ability to add a symlink at 
/sys/devices/i915 -> /sys/devices/i915-${pci}. Can we break 
intel_gpu_top or other tools which are only looking at 
/sys/devices/i915/events?
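
(To make the dependency concrete, a rough self-contained userspace sketch of 
the name-based lookup such tools rely on; the hard-coded sysfs path and the 
placeholder config are illustrative assumptions, not intel_gpu_top's actual 
code:)

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    /* no glibc wrapper exists for perf_event_open */
    static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                int cpu, int group_fd, unsigned long flags)
    {
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    int main(void)
    {
        struct perf_event_attr attr = { 0 };
        unsigned int type;
        FILE *f;

        /* the fixed "i915" name is exactly what "i915-<instance>" breaks */
        f = fopen("/sys/bus/event_source/devices/i915/type", "r");
        if (!f || fscanf(f, "%u", &type) != 1)
            return 1;
        fclose(f);

        attr.size = sizeof(attr);
        attr.type = type;
        attr.config = 0; /* real tools parse .../events/<name> for the config */

        /* uncore PMU: pid == -1, one valid cpu */
        return perf_event_open(&attr, -1, 0, -1, 0) < 0;
    }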

> 
>> ---
>>   drivers/gpu/drm/i915/i915_pmu.c | 22 ++++++++++++++++++++--
>>   drivers/gpu/drm/i915/i915_pmu.h |  8 ++++++++
>>   2 files changed, 28 insertions(+), 2 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
>> index 7c48fcf0cccb..105246ae5160 100644
>> --- a/drivers/gpu/drm/i915/i915_pmu.c
>> +++ b/drivers/gpu/drm/i915/i915_pmu.c
>> @@ -1053,6 +1053,8 @@ static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
>>          cpuhp_remove_multi_state(cpuhp_slot);
>>   }
>>   
>> +static atomic_t i915_pmu_instance = ATOMIC_INIT(0);
>> +
>>   void i915_pmu_register(struct drm_i915_private *i915)
>>   {
>>          struct i915_pmu *pmu = &i915->pmu;
>> @@ -1083,9 +1085,17 @@ void i915_pmu_register(struct drm_i915_private *i915)
>>          hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
>>          pmu->timer.function = i915_sample;
>>   
>> -       ret = perf_pmu_register(&pmu->base, "i915", -1);
>> +       pmu->instance = atomic_inc_return(&i915_pmu_instance) - 1;
>> +       if (pmu->instance)
>> +               pmu->name = kasprintf(GFP_KERNEL, "i915-%u", pmu->instance);
>> +       else
>> +               pmu->name = "i915";
>> +       if (!pmu->name)
>> +               goto err_instance;
>> +
>> +       ret = perf_pmu_register(&pmu->base, pmu->name, -1);
> 
> Iirc, the name is basically only used to construct the perf sysfs, and
> the tools extract the event names and tags from the sysfs. So I wonder
> if we could add a symlink...

Oh ok, so symlinks are an option.. yeah, I thought we couldn't add them. Can we?

Regards,

Tvrtko

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [RFC 2/4] drm/i915/pmu: Convert engine sampling to uncore mmio
  2019-08-01 10:33   ` Chris Wilson
@ 2019-08-01 12:03     ` Tvrtko Ursulin
  0 siblings, 0 replies; 17+ messages in thread
From: Tvrtko Ursulin @ 2019-08-01 12:03 UTC (permalink / raw)
  To: Chris Wilson, Intel-gfx


On 01/08/2019 11:33, Chris Wilson wrote:
> Quoting Tvrtko Ursulin (2019-08-01 11:18:23)
>> From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
>>
>> Drops one macro using implicit dev_priv.
>>
>> Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
>> ---
>>   drivers/gpu/drm/i915/i915_pmu.c | 23 +++++++++++++----------
>>   1 file changed, 13 insertions(+), 10 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
>> index 12008966b00e..bd9ad4f2b4e4 100644
>> --- a/drivers/gpu/drm/i915/i915_pmu.c
>> +++ b/drivers/gpu/drm/i915/i915_pmu.c
>> @@ -162,29 +162,30 @@ add_sample(struct i915_pmu_sample *sample, u32 val)
>>   }
>>   
>>   static void
>> -engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
>> +engines_sample(struct drm_i915_private *i915, unsigned int period_ns)
>>   {
>> +       struct intel_uncore *uncore = &i915->uncore;
>>          struct intel_engine_cs *engine;
>>          enum intel_engine_id id;
>>          intel_wakeref_t wakeref;
>>          unsigned long flags;
>>   
>> -       if ((dev_priv->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
>> +       if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
>>                  return;
>>   
>>          wakeref = 0;
>> -       if (READ_ONCE(dev_priv->gt.awake))
>> -               wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
>> +       if (READ_ONCE(i915->gt.awake))
>> +               wakeref = intel_runtime_pm_get_if_in_use(&i915->runtime_pm);
>>          if (!wakeref)
>>                  return;
>>   
>> -       spin_lock_irqsave(&dev_priv->uncore.lock, flags);
>> -       for_each_engine(engine, dev_priv, id) {
>> +       spin_lock_irqsave(&uncore->lock, flags);
>> +       for_each_engine(engine, i915, id) {
>>                  struct intel_engine_pmu *pmu = &engine->pmu;
>>                  bool busy;
>>                  u32 val;
>>   
>> -               val = I915_READ_FW(RING_CTL(engine->mmio_base));
>> +               val = intel_uncore_read_fw(uncore, RING_CTL(engine->mmio_base));
> 
> Could use ENGINE_READ_FW(engine, RING_CTL) ?

D-oh!
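
(For reference, the suggested form inside engines_sample() -- assuming 
ENGINE_READ_FW() resolves the engine's uncore and mmio base internally, 
which is what makes it a drop-in for the intel_uncore_read_fw() call:)

    val = ENGINE_READ_FW(engine, RING_CTL);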

> Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>

Thanks!

Tvrtko


^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [RFC 1/4] drm/i915/pmu: Make more struct i915_pmu centric
  2019-08-01 10:46   ` Michal Wajdeczko
  2019-08-01 10:53     ` Chris Wilson
@ 2019-08-01 12:10     ` Tvrtko Ursulin
  1 sibling, 0 replies; 17+ messages in thread
From: Tvrtko Ursulin @ 2019-08-01 12:10 UTC (permalink / raw)
  To: Michal Wajdeczko, Intel-gfx


On 01/08/2019 11:46, Michal Wajdeczko wrote:
> On Thu, 01 Aug 2019 12:18:22 +0200, Tvrtko Ursulin 
> <tvrtko.ursulin@linux.intel.com> wrote:
> 
>> From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
>>
>> Just tidy the code a bit by removing a sea of overly verbose i915->pmu.*.
>>
>> Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
>> ---
>>  drivers/gpu/drm/i915/i915_pmu.c | 194 +++++++++++++++++---------------
>>  1 file changed, 104 insertions(+), 90 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/i915/i915_pmu.c 
>> b/drivers/gpu/drm/i915/i915_pmu.c
>> index eff86483bec0..12008966b00e 100644
>> --- a/drivers/gpu/drm/i915/i915_pmu.c
>> +++ b/drivers/gpu/drm/i915/i915_pmu.c
>> @@ -74,8 +74,9 @@ static unsigned int event_enabled_bit(struct 
>> perf_event *event)
>>      return config_enabled_bit(event->attr.config);
>>  }
>> -static bool pmu_needs_timer(struct drm_i915_private *i915, bool 
>> gpu_active)
>> +static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active)
>>  {
>> +    struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), 
>> pmu);
> 
> maybe this can be promoted to pmu_to_i915() ?

Could do, but I thought we were moving away from such constructs?
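
(For what it's worth, a minimal sketch of the suggested helper -- the name 
is the reviewer's proposal, and the body just mirrors the container_of() 
already used above:)

    static inline struct drm_i915_private *pmu_to_i915(struct i915_pmu *pmu)
    {
        return container_of(pmu, struct drm_i915_private, pmu);
    }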

>>      u64 enable;
>>     /*
>> @@ -83,7 +84,7 @@ static bool pmu_needs_timer(struct drm_i915_private 
>> *i915, bool gpu_active)
>>       *
>>       * We start with a bitmask of all currently enabled events.
>>       */
>> -    enable = i915->pmu.enable;
>> +    enable = pmu->enable;
>>     /*
>>       * Mask out all the ones which do not need the timer, or in
>> @@ -114,24 +115,26 @@ static bool pmu_needs_timer(struct 
>> drm_i915_private *i915, bool gpu_active)
>> void i915_pmu_gt_parked(struct drm_i915_private *i915)
> 
> you should be more consistent and change this one too

I did not want to let the tentacles out of i915_pmu.c at this stage 
since it is a bit unknown how some bits will look in the future.

The ones I did seemed like the safe ones, and it satisfied me that after 
the series there are no more i915->pmu.<something> accesses in 
i915_pmu.c. (Well, there is a singleton in engines_sample but I couldn't 
justify adding a local for one access.)

> 
>>  {
>> -    if (!i915->pmu.base.event_init)
>> +    struct i915_pmu *pmu = &i915->pmu;
>> +
>> +    if (!pmu->base.event_init)
>>          return;
>> -    spin_lock_irq(&i915->pmu.lock);
>> +    spin_lock_irq(&pmu->lock);
>>      /*
>>       * Signal sampling timer to stop if only engine events are 
>> enabled and
>>       * GPU went idle.
>>       */
>> -    i915->pmu.timer_enabled = pmu_needs_timer(i915, false);
>> -    spin_unlock_irq(&i915->pmu.lock);
>> +    pmu->timer_enabled = pmu_needs_timer(pmu, false);
>> +    spin_unlock_irq(&pmu->lock);
>>  }
>> -static void __i915_pmu_maybe_start_timer(struct drm_i915_private *i915)
>> +static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu)
>>  {
>> -    if (!i915->pmu.timer_enabled && pmu_needs_timer(i915, true)) {
>> -        i915->pmu.timer_enabled = true;
>> -        i915->pmu.timer_last = ktime_get();
>> -        hrtimer_start_range_ns(&i915->pmu.timer,
>> +    if (!pmu->timer_enabled && pmu_needs_timer(pmu, true)) {
>> +        pmu->timer_enabled = true;
>> +        pmu->timer_last = ktime_get();
>> +        hrtimer_start_range_ns(&pmu->timer,
>>                         ns_to_ktime(PERIOD), 0,
>>                         HRTIMER_MODE_REL_PINNED);
>>      }
>> @@ -139,15 +142,17 @@ static void __i915_pmu_maybe_start_timer(struct 
>> drm_i915_private *i915)
>> void i915_pmu_gt_unparked(struct drm_i915_private *i915)
> 
> and here (and even some more in whole .c file)
> 
>>  {
>> -    if (!i915->pmu.base.event_init)
>> +    struct i915_pmu *pmu = &i915->pmu;
>> +
>> +    if (!pmu->base.event_init)
>>          return;
>> -    spin_lock_irq(&i915->pmu.lock);
>> +    spin_lock_irq(&pmu->lock);
>>      /*
>>       * Re-enable sampling timer when GPU goes active.
>>       */
>> -    __i915_pmu_maybe_start_timer(i915);
>> -    spin_unlock_irq(&i915->pmu.lock);
>> +    __i915_pmu_maybe_start_timer(pmu);
>> +    spin_unlock_irq(&pmu->lock);
>>  }
>> static void
>> @@ -251,15 +256,16 @@ static enum hrtimer_restart i915_sample(struct 
>> hrtimer *hrtimer)
>>  {
>>      struct drm_i915_private *i915 =
>>          container_of(hrtimer, struct drm_i915_private, pmu.timer);
>> +    struct i915_pmu *pmu = &i915->pmu;
>>      unsigned int period_ns;
>>      ktime_t now;
>> -    if (!READ_ONCE(i915->pmu.timer_enabled))
>> +    if (!READ_ONCE(pmu->timer_enabled))
>>          return HRTIMER_NORESTART;
>>     now = ktime_get();
>> -    period_ns = ktime_to_ns(ktime_sub(now, i915->pmu.timer_last));
>> -    i915->pmu.timer_last = now;
>> +    period_ns = ktime_to_ns(ktime_sub(now, pmu->timer_last));
>> +    pmu->timer_last = now;
>>     /*
>>       * Strictly speaking the passed in period may not be 100% 
>> accurate for
>> @@ -443,6 +449,7 @@ static u64 get_rc6(struct drm_i915_private *i915)
> 
> this can also take *pmu instead of *i915

Or even better, gt? I think gt makes sense.

> 
>>  {
>>  #if IS_ENABLED(CONFIG_PM)
>>      struct intel_runtime_pm *rpm = &i915->runtime_pm;
>> +    struct i915_pmu *pmu = &i915->pmu;
>>      intel_wakeref_t wakeref;
>>      unsigned long flags;
>>      u64 val;
>> @@ -458,16 +465,16 @@ static u64 get_rc6(struct drm_i915_private *i915)
>>           * previously.
>>           */
>> -        spin_lock_irqsave(&i915->pmu.lock, flags);
>> +        spin_lock_irqsave(&pmu->lock, flags);
>> -        if (val >= i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
>> -            i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
>> -            i915->pmu.sample[__I915_SAMPLE_RC6].cur = val;
>> +        if (val >= pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
>> +            pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
>> +            pmu->sample[__I915_SAMPLE_RC6].cur = val;
>>          } else {
>> -            val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
>> +            val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
>>          }
>> -        spin_unlock_irqrestore(&i915->pmu.lock, flags);
>> +        spin_unlock_irqrestore(&pmu->lock, flags);
>>      } else {
>>          struct device *kdev = rpm->kdev;
>> @@ -478,7 +485,7 @@ static u64 get_rc6(struct drm_i915_private *i915)
>>           * on top of the last known real value, as the approximated RC6
>>           * counter value.
>>           */
>> -        spin_lock_irqsave(&i915->pmu.lock, flags);
>> +        spin_lock_irqsave(&pmu->lock, flags);
>>         /*
>>           * After the above branch intel_runtime_pm_get_if_in_use failed
>> @@ -494,20 +501,20 @@ static u64 get_rc6(struct drm_i915_private *i915)
>>          if (pm_runtime_status_suspended(kdev)) {
>>              val = pm_runtime_suspended_time(kdev);
>> -            if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
>> -                i915->pmu.suspended_time_last = val;
>> +            if (!pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
>> +                pmu->suspended_time_last = val;
>> -            val -= i915->pmu.suspended_time_last;
>> -            val += i915->pmu.sample[__I915_SAMPLE_RC6].cur;
>> +            val -= pmu->suspended_time_last;
>> +            val += pmu->sample[__I915_SAMPLE_RC6].cur;
>> -            i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
>> -        } else if (i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
>> -            val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
>> +            pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
>> +        } else if (pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
>> +            val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
>>          } else {
>> -            val = i915->pmu.sample[__I915_SAMPLE_RC6].cur;
>> +            val = pmu->sample[__I915_SAMPLE_RC6].cur;
>>          }
>> -        spin_unlock_irqrestore(&i915->pmu.lock, flags);
>> +        spin_unlock_irqrestore(&pmu->lock, flags);
>>      }
>>     return val;
>> @@ -520,6 +527,7 @@ static u64 __i915_pmu_event_read(struct perf_event 
>> *event)
>>  {
>>      struct drm_i915_private *i915 =
>>          container_of(event->pmu, typeof(*i915), pmu.base);
>> +    struct i915_pmu *pmu = &i915->pmu;
>>      u64 val = 0;
>>     if (is_engine_event(event)) {
>> @@ -542,12 +550,12 @@ static u64 __i915_pmu_event_read(struct 
>> perf_event *event)
>>          switch (event->attr.config) {
>>          case I915_PMU_ACTUAL_FREQUENCY:
>>              val =
>> -               div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_ACT].cur,
>> +               div_u64(pmu->sample[__I915_SAMPLE_FREQ_ACT].cur,
>>                     USEC_PER_SEC /* to MHz */);
>>              break;
>>          case I915_PMU_REQUESTED_FREQUENCY:
>>              val =
>> -               div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_REQ].cur,
>> +               div_u64(pmu->sample[__I915_SAMPLE_FREQ_REQ].cur,
>>                     USEC_PER_SEC /* to MHz */);
>>              break;
>>          case I915_PMU_INTERRUPTS:
>> @@ -582,24 +590,25 @@ static void i915_pmu_enable(struct perf_event 
>> *event)
>>      struct drm_i915_private *i915 =
>>          container_of(event->pmu, typeof(*i915), pmu.base);
>>      unsigned int bit = event_enabled_bit(event);
>> +    struct i915_pmu *pmu = &i915->pmu;
>>      unsigned long flags;
>> -    spin_lock_irqsave(&i915->pmu.lock, flags);
>> +    spin_lock_irqsave(&pmu->lock, flags);
>>     /*
>>       * Update the bitmask of enabled events and increment
>>       * the event reference counter.
>>       */
>> -    BUILD_BUG_ON(ARRAY_SIZE(i915->pmu.enable_count) != 
>> I915_PMU_MASK_BITS);
>> -    GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count));
>> -    GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0);
>> -    i915->pmu.enable |= BIT_ULL(bit);
>> -    i915->pmu.enable_count[bit]++;
>> +    BUILD_BUG_ON(ARRAY_SIZE(pmu->enable_count) != I915_PMU_MASK_BITS);
>> +    GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
>> +    GEM_BUG_ON(pmu->enable_count[bit] == ~0);
>> +    pmu->enable |= BIT_ULL(bit);
>> +    pmu->enable_count[bit]++;
>>     /*
>>       * Start the sampling timer if needed and not already enabled.
>>       */
>> -    __i915_pmu_maybe_start_timer(i915);
>> +    __i915_pmu_maybe_start_timer(pmu);
>>     /*
>>       * For per-engine events the bitmask and reference counting
>> @@ -625,7 +634,7 @@ static void i915_pmu_enable(struct perf_event *event)
>>          engine->pmu.enable_count[sample]++;
>>      }
>> -    spin_unlock_irqrestore(&i915->pmu.lock, flags);
>> +    spin_unlock_irqrestore(&pmu->lock, flags);
>>     /*
>>       * Store the current counter value so we can report the correct 
>> delta
>> @@ -640,9 +649,10 @@ static void i915_pmu_disable(struct perf_event 
>> *event)
>>      struct drm_i915_private *i915 =
>>          container_of(event->pmu, typeof(*i915), pmu.base);
>>      unsigned int bit = event_enabled_bit(event);
>> +    struct i915_pmu *pmu = &i915->pmu;
>>      unsigned long flags;
>> -    spin_lock_irqsave(&i915->pmu.lock, flags);
>> +    spin_lock_irqsave(&pmu->lock, flags);
>>     if (is_engine_event(event)) {
>>          u8 sample = engine_event_sample(event);
>> @@ -664,18 +674,18 @@ static void i915_pmu_disable(struct perf_event 
>> *event)
>>              engine->pmu.enable &= ~BIT(sample);
>>      }
>> -    GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count));
>> -    GEM_BUG_ON(i915->pmu.enable_count[bit] == 0);
>> +    GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
>> +    GEM_BUG_ON(pmu->enable_count[bit] == 0);
>>      /*
>>       * Decrement the reference count and clear the enabled
>>       * bitmask when the last listener on an event goes away.
>>       */
>> -    if (--i915->pmu.enable_count[bit] == 0) {
>> -        i915->pmu.enable &= ~BIT_ULL(bit);
>> -        i915->pmu.timer_enabled &= pmu_needs_timer(i915, true);
>> +    if (--pmu->enable_count[bit] == 0) {
>> +        pmu->enable &= ~BIT_ULL(bit);
>> +        pmu->timer_enabled &= pmu_needs_timer(pmu, true);
>>      }
>> -    spin_unlock_irqrestore(&i915->pmu.lock, flags);
>> +    spin_unlock_irqrestore(&pmu->lock, flags);
>>  }
>> static void i915_pmu_event_start(struct perf_event *event, int flags)
>> @@ -824,8 +834,9 @@ add_pmu_attr(struct perf_pmu_events_attr *attr, 
>> const char *name,
>>  }
>> static struct attribute **
>> -create_event_attributes(struct drm_i915_private *i915)
>> +create_event_attributes(struct i915_pmu *pmu)
>>  {
>> +    struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), 
>> pmu);
>>      static const struct {
>>          u64 config;
>>          const char *name;
>> @@ -939,8 +950,8 @@ create_event_attributes(struct drm_i915_private 
>> *i915)
>>          }
>>      }
>> -    i915->pmu.i915_attr = i915_attr;
>> -    i915->pmu.pmu_attr = pmu_attr;
>> +    pmu->i915_attr = i915_attr;
>> +    pmu->pmu_attr = pmu_attr;
>>     return attr;
>> @@ -956,7 +967,7 @@ err:;
>>      return NULL;
>>  }
>> -static void free_event_attributes(struct drm_i915_private *i915)
>> +static void free_event_attributes(struct i915_pmu *pmu)
>>  {
>>      struct attribute **attr_iter = i915_pmu_events_attr_group.attrs;
>> @@ -964,12 +975,12 @@ static void free_event_attributes(struct 
>> drm_i915_private *i915)
>>          kfree((*attr_iter)->name);
>>     kfree(i915_pmu_events_attr_group.attrs);
>> -    kfree(i915->pmu.i915_attr);
>> -    kfree(i915->pmu.pmu_attr);
>> +    kfree(pmu->i915_attr);
>> +    kfree(pmu->pmu_attr);
>>     i915_pmu_events_attr_group.attrs = NULL;
>> -    i915->pmu.i915_attr = NULL;
>> -    i915->pmu.pmu_attr = NULL;
>> +    pmu->i915_attr = NULL;
>> +    pmu->pmu_attr = NULL;
>>  }
>> static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
>> @@ -1006,7 +1017,7 @@ static int i915_pmu_cpu_offline(unsigned int 
>> cpu, struct hlist_node *node)
>> static enum cpuhp_state cpuhp_slot = CPUHP_INVALID;
>> -static int i915_pmu_register_cpuhp_state(struct drm_i915_private *i915)
>> +static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
>>  {
>>      enum cpuhp_state slot;
>>      int ret;
>> @@ -1019,7 +1030,7 @@ static int i915_pmu_register_cpuhp_state(struct 
>> drm_i915_private *i915)
>>          return ret;
>>     slot = ret;
>> -    ret = cpuhp_state_add_instance(slot, &i915->pmu.node);
>> +    ret = cpuhp_state_add_instance(slot, &pmu->node);
>>      if (ret) {
>>          cpuhp_remove_multi_state(slot);
>>          return ret;
>> @@ -1029,15 +1040,16 @@ static int 
>> i915_pmu_register_cpuhp_state(struct drm_i915_private *i915)
>>      return 0;
>>  }
>> -static void i915_pmu_unregister_cpuhp_state(struct drm_i915_private 
>> *i915)
>> +static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
>>  {
>>      WARN_ON(cpuhp_slot == CPUHP_INVALID);
>> -    WARN_ON(cpuhp_state_remove_instance(cpuhp_slot, &i915->pmu.node));
>> +    WARN_ON(cpuhp_state_remove_instance(cpuhp_slot, &pmu->node));
>>      cpuhp_remove_multi_state(cpuhp_slot);
>>  }
>> void i915_pmu_register(struct drm_i915_private *i915)
> 
> again
> 
>>  {
>> +    struct i915_pmu *pmu = &i915->pmu;
>>      int ret;
>>     if (INTEL_GEN(i915) <= 2) {
>> @@ -1045,56 +1057,58 @@ void i915_pmu_register(struct drm_i915_private 
>> *i915)
>>          return;
>>      }
>> -    i915_pmu_events_attr_group.attrs = create_event_attributes(i915);
>> +    i915_pmu_events_attr_group.attrs = create_event_attributes(pmu);
>>      if (!i915_pmu_events_attr_group.attrs) {
>>          ret = -ENOMEM;
>>          goto err;
>>      }
>> -    i915->pmu.base.attr_groups    = i915_pmu_attr_groups;
>> -    i915->pmu.base.task_ctx_nr    = perf_invalid_context;
>> -    i915->pmu.base.event_init    = i915_pmu_event_init;
>> -    i915->pmu.base.add        = i915_pmu_event_add;
>> -    i915->pmu.base.del        = i915_pmu_event_del;
>> -    i915->pmu.base.start        = i915_pmu_event_start;
>> -    i915->pmu.base.stop        = i915_pmu_event_stop;
>> -    i915->pmu.base.read        = i915_pmu_event_read;
>> -    i915->pmu.base.event_idx    = i915_pmu_event_event_idx;
>> -
>> -    spin_lock_init(&i915->pmu.lock);
>> -    hrtimer_init(&i915->pmu.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
>> -    i915->pmu.timer.function = i915_sample;
>> -
>> -    ret = perf_pmu_register(&i915->pmu.base, "i915", -1);
>> +    pmu->base.attr_groups    = i915_pmu_attr_groups;
>> +    pmu->base.task_ctx_nr    = perf_invalid_context;
>> +    pmu->base.event_init    = i915_pmu_event_init;
>> +    pmu->base.add        = i915_pmu_event_add;
>> +    pmu->base.del        = i915_pmu_event_del;
>> +    pmu->base.start        = i915_pmu_event_start;
>> +    pmu->base.stop        = i915_pmu_event_stop;
>> +    pmu->base.read        = i915_pmu_event_read;
>> +    pmu->base.event_idx    = i915_pmu_event_event_idx;
>> +
>> +    spin_lock_init(&pmu->lock);
>> +    hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
>> +    pmu->timer.function = i915_sample;
>> +
>> +    ret = perf_pmu_register(&pmu->base, "i915", -1);
>>      if (ret)
>>          goto err;
>> -    ret = i915_pmu_register_cpuhp_state(i915);
>> +    ret = i915_pmu_register_cpuhp_state(pmu);
>>      if (ret)
>>          goto err_unreg;
>>     return;
>> err_unreg:
>> -    perf_pmu_unregister(&i915->pmu.base);
>> +    perf_pmu_unregister(&pmu->base);
>>  err:
>> -    i915->pmu.base.event_init = NULL;
>> -    free_event_attributes(i915);
>> +    pmu->base.event_init = NULL;
>> +    free_event_attributes(pmu);
>>      DRM_NOTE("Failed to register PMU! (err=%d)\n", ret);
>>  }
>> void i915_pmu_unregister(struct drm_i915_private *i915)
> 
> and again
> 
>>  {
>> -    if (!i915->pmu.base.event_init)
>> +    struct i915_pmu *pmu = &i915->pmu;
>> +
>> +    if (!pmu->base.event_init)
>>          return;
>> -    WARN_ON(i915->pmu.enable);
>> +    WARN_ON(pmu->enable);
>> -    hrtimer_cancel(&i915->pmu.timer);
>> +    hrtimer_cancel(&pmu->timer);
>> -    i915_pmu_unregister_cpuhp_state(i915);
>> +    i915_pmu_unregister_cpuhp_state(pmu);
>> -    perf_pmu_unregister(&i915->pmu.base);
>> -    i915->pmu.base.event_init = NULL;
>> -    free_event_attributes(i915);
>> +    perf_pmu_unregister(&pmu->base);
>> +    pmu->base.event_init = NULL;
>> +    free_event_attributes(pmu);
>>  }
> 
> with all possible replacements,
> 
> Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com>

If I convert get_rc6 to gt and leave the external API taking i915 for 
now, would you be happy?

Regards,

Tvrtko

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [RFC 3/4] drm/i915/pmu: Convert sampling to gt
  2019-08-01 10:29   ` Chris Wilson
@ 2019-08-01 12:13     ` Tvrtko Ursulin
  0 siblings, 0 replies; 17+ messages in thread
From: Tvrtko Ursulin @ 2019-08-01 12:13 UTC (permalink / raw)
  To: Chris Wilson, Intel-gfx


On 01/08/2019 11:29, Chris Wilson wrote:
> Quoting Tvrtko Ursulin (2019-08-01 11:18:24)
>>   static void
>> -frequency_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
>> +frequency_sample(struct intel_gt *gt, unsigned int period_ns)
>>   {
>> -       if (dev_priv->pmu.enable &
>> -           config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
>> +       struct drm_i915_private *i915 = gt->i915;
>> +       struct intel_uncore *uncore = gt->uncore;
>> +       struct i915_pmu *pmu = &i915->pmu;
>> +
>> +       if (pmu->enable & config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
>>                  u32 val;
>>   
>> -               val = dev_priv->gt_pm.rps.cur_freq;
>> -               if (dev_priv->gt.awake) {
>> +               val = i915->gt_pm.rps.cur_freq;
>> +               if (gt->awake) {
>>                          intel_wakeref_t wakeref;
>>   
>> -                       with_intel_runtime_pm_if_in_use(&dev_priv->runtime_pm,
>> +                       with_intel_runtime_pm_if_in_use(&i915->runtime_pm,
>>                                                          wakeref) {
> 
> We can replace this wakeref with
> 	if (intel_gt_pm_get_if_awake()) {
> instead of if (gt->awake);

Don't see it in my tree?
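
(Rough shape of the suggestion, assuming an intel_gt_pm_get_if_awake() / 
intel_gt_pm_put() pair is available -- per the reply above it had not yet 
landed in this tree, so treat it purely as a sketch:)

    if (intel_gt_pm_get_if_awake(gt)) {
        /* sample the actual frequency under the wakeref */
        intel_gt_pm_put(gt);
    }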

> Which reminds me I had some patches to change the rc6 sampling over
> runtime suspend -- from your last vacation! My timing is perfect.

Okay, maybe doable. Can I put it on top of this series, if I manage to merge 
the trivial refactors quickly today?

Regards,

Tvrtko

^ permalink raw reply	[flat|nested] 17+ messages in thread

* ✓ Fi.CI.IGT: success for PMU update for more than one card
  2019-08-01 10:18 [RFC 0/4] PMU update for more than one card Tvrtko Ursulin
                   ` (3 preceding siblings ...)
  2019-08-01 10:18 ` [RFC 4/4] drm/i915/pmu: Support multiple GPUs Tvrtko Ursulin
@ 2019-08-02 13:20 ` Patchwork
  4 siblings, 0 replies; 17+ messages in thread
From: Patchwork @ 2019-08-02 13:20 UTC (permalink / raw)
  To: Tvrtko Ursulin; +Cc: intel-gfx

== Series Details ==

Series: PMU update for more than one card
URL   : https://patchwork.freedesktop.org/series/64530/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_6605_full -> Patchwork_13833_full
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  

Known issues
------------

  Here are the changes found in Patchwork_13833_full that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@gem_eio@in-flight-contexts-immediate:
    - shard-glk:          [PASS][1] -> [FAIL][2] ([fdo#105957])
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6605/shard-glk2/igt@gem_eio@in-flight-contexts-immediate.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13833/shard-glk5/igt@gem_eio@in-flight-contexts-immediate.html

  * igt@gem_eio@reset-stress:
    - shard-kbl:          [PASS][3] -> [FAIL][4] ([fdo#109661])
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6605/shard-kbl2/igt@gem_eio@reset-stress.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13833/shard-kbl3/igt@gem_eio@reset-stress.html

  * igt@gem_softpin@noreloc-s3:
    - shard-apl:          [PASS][5] -> [DMESG-WARN][6] ([fdo#108566]) +1 similar issue
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6605/shard-apl4/igt@gem_softpin@noreloc-s3.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13833/shard-apl6/igt@gem_softpin@noreloc-s3.html

  * igt@i915_pm_rpm@i2c:
    - shard-hsw:          [PASS][7] -> [FAIL][8] ([fdo#104097])
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6605/shard-hsw7/igt@i915_pm_rpm@i2c.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13833/shard-hsw2/igt@i915_pm_rpm@i2c.html

  * igt@kms_color@pipe-c-ctm-blue-to-red:
    - shard-apl:          [PASS][9] -> [FAIL][10] ([fdo#107201])
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6605/shard-apl7/igt@kms_color@pipe-c-ctm-blue-to-red.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13833/shard-apl5/igt@kms_color@pipe-c-ctm-blue-to-red.html

  * igt@kms_cursor_crc@pipe-a-cursor-suspend:
    - shard-kbl:          [PASS][11] -> [DMESG-WARN][12] ([fdo#108566]) +4 similar issues
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6605/shard-kbl4/igt@kms_cursor_crc@pipe-a-cursor-suspend.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13833/shard-kbl6/igt@kms_cursor_crc@pipe-a-cursor-suspend.html

  * igt@kms_cursor_crc@pipe-c-cursor-256x256-sliding:
    - shard-skl:          [PASS][13] -> [FAIL][14] ([fdo#103232])
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6605/shard-skl6/igt@kms_cursor_crc@pipe-c-cursor-256x256-sliding.html
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13833/shard-skl2/igt@kms_cursor_crc@pipe-c-cursor-256x256-sliding.html

  * igt@kms_fbcon_fbt@psr-suspend:
    - shard-skl:          [PASS][15] -> [INCOMPLETE][16] ([fdo#104108])
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6605/shard-skl7/igt@kms_fbcon_fbt@psr-suspend.html
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13833/shard-skl4/igt@kms_fbcon_fbt@psr-suspend.html

  * igt@kms_flip@flip-vs-expired-vblank:
    - shard-skl:          [PASS][17] -> [FAIL][18] ([fdo#105363])
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6605/shard-skl8/igt@kms_flip@flip-vs-expired-vblank.html
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13833/shard-skl4/igt@kms_flip@flip-vs-expired-vblank.html

  * igt@kms_flip@flip-vs-expired-vblank-interruptible:
    - shard-glk:          [PASS][19] -> [FAIL][20] ([fdo#105363])
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6605/shard-glk2/igt@kms_flip@flip-vs-expired-vblank-interruptible.html
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13833/shard-glk6/igt@kms_flip@flip-vs-expired-vblank-interruptible.html

  * igt@kms_frontbuffer_tracking@fbc-1p-primscrn-pri-indfb-draw-blt:
    - shard-iclb:         [PASS][21] -> [FAIL][22] ([fdo#103167]) +3 similar issues
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6605/shard-iclb4/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-pri-indfb-draw-blt.html
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13833/shard-iclb2/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-pri-indfb-draw-blt.html

  * igt@kms_frontbuffer_tracking@fbc-1p-primscrn-spr-indfb-draw-mmap-wc:
    - shard-snb:          [PASS][23] -> [SKIP][24] ([fdo#109271]) +1 similar issue
   [23]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6605/shard-snb1/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-spr-indfb-draw-mmap-wc.html
   [24]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13833/shard-snb1/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-spr-indfb-draw-mmap-wc.html

  * igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-cur-indfb-onoff:
    - shard-iclb:         [PASS][25] -> [INCOMPLETE][26] ([fdo#106978] / [fdo#107713])
   [25]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6605/shard-iclb5/igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-cur-indfb-onoff.html
   [26]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13833/shard-iclb1/igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-cur-indfb-onoff.html

  * igt@kms_plane_multiple@atomic-pipe-c-tiling-yf:
    - shard-skl:          [PASS][27] -> [DMESG-WARN][28] ([fdo#106885])
   [27]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6605/shard-skl3/igt@kms_plane_multiple@atomic-pipe-c-tiling-yf.html
   [28]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13833/shard-skl5/igt@kms_plane_multiple@atomic-pipe-c-tiling-yf.html

  * igt@kms_psr@psr2_basic:
    - shard-iclb:         [PASS][29] -> [SKIP][30] ([fdo#109441])
   [29]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6605/shard-iclb2/igt@kms_psr@psr2_basic.html
   [30]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13833/shard-iclb8/igt@kms_psr@psr2_basic.html

  * igt@kms_rotation_crc@multiplane-rotation:
    - shard-kbl:          [PASS][31] -> [FAIL][32] ([fdo#109016])
   [31]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6605/shard-kbl4/igt@kms_rotation_crc@multiplane-rotation.html
   [32]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13833/shard-kbl6/igt@kms_rotation_crc@multiplane-rotation.html

  
#### Possible fixes ####

  * igt@kms_color@pipe-c-ctm-0-5:
    - shard-skl:          [FAIL][33] ([fdo#108682]) -> [PASS][34]
   [33]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6605/shard-skl8/igt@kms_color@pipe-c-ctm-0-5.html
   [34]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13833/shard-skl4/igt@kms_color@pipe-c-ctm-0-5.html

  * igt@kms_flip@flip-vs-expired-vblank:
    - shard-apl:          [FAIL][35] ([fdo#105363]) -> [PASS][36]
   [35]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6605/shard-apl2/igt@kms_flip@flip-vs-expired-vblank.html
   [36]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13833/shard-apl4/igt@kms_flip@flip-vs-expired-vblank.html
    - shard-glk:          [FAIL][37] ([fdo#105363]) -> [PASS][38]
   [37]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6605/shard-glk8/igt@kms_flip@flip-vs-expired-vblank.html
   [38]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13833/shard-glk6/igt@kms_flip@flip-vs-expired-vblank.html

  * igt@kms_flip@flip-vs-expired-vblank-interruptible:
    - shard-skl:          [FAIL][39] ([fdo#105363]) -> [PASS][40]
   [39]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6605/shard-skl6/igt@kms_flip@flip-vs-expired-vblank-interruptible.html
   [40]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13833/shard-skl2/igt@kms_flip@flip-vs-expired-vblank-interruptible.html

  * igt@kms_flip@flip-vs-suspend-interruptible:
    - shard-apl:          [DMESG-WARN][41] ([fdo#108566]) -> [PASS][42] +1 similar issue
   [41]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6605/shard-apl1/igt@kms_flip@flip-vs-suspend-interruptible.html
   [42]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13833/shard-apl7/igt@kms_flip@flip-vs-suspend-interruptible.html

  * igt@kms_frontbuffer_tracking@fbc-1p-pri-indfb-multidraw:
    - shard-iclb:         [FAIL][43] ([fdo#103167]) -> [PASS][44] +3 similar issues
   [43]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6605/shard-iclb4/igt@kms_frontbuffer_tracking@fbc-1p-pri-indfb-multidraw.html
   [44]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13833/shard-iclb4/igt@kms_frontbuffer_tracking@fbc-1p-pri-indfb-multidraw.html

  * igt@kms_plane@pixel-format-pipe-b-planes-source-clamping:
    - shard-iclb:         [INCOMPLETE][45] ([fdo#107713] / [fdo#110036]) -> [PASS][46]
   [45]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6605/shard-iclb3/igt@kms_plane@pixel-format-pipe-b-planes-source-clamping.html
   [46]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13833/shard-iclb2/igt@kms_plane@pixel-format-pipe-b-planes-source-clamping.html

  * igt@kms_plane@plane-panning-bottom-right-suspend-pipe-c-planes:
    - shard-kbl:          [DMESG-WARN][47] ([fdo#108566]) -> [PASS][48] +1 similar issue
   [47]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6605/shard-kbl6/igt@kms_plane@plane-panning-bottom-right-suspend-pipe-c-planes.html
   [48]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13833/shard-kbl3/igt@kms_plane@plane-panning-bottom-right-suspend-pipe-c-planes.html

  * igt@kms_plane_lowres@pipe-a-tiling-y:
    - shard-iclb:         [FAIL][49] ([fdo#103166]) -> [PASS][50]
   [49]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6605/shard-iclb4/igt@kms_plane_lowres@pipe-a-tiling-y.html
   [50]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13833/shard-iclb6/igt@kms_plane_lowres@pipe-a-tiling-y.html

  * igt@kms_psr@psr2_cursor_plane_move:
    - shard-iclb:         [SKIP][51] ([fdo#109441]) -> [PASS][52]
   [51]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6605/shard-iclb3/igt@kms_psr@psr2_cursor_plane_move.html
   [52]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13833/shard-iclb2/igt@kms_psr@psr2_cursor_plane_move.html

  * igt@kms_setmode@basic:
    - shard-apl:          [FAIL][53] ([fdo#99912]) -> [PASS][54]
   [53]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6605/shard-apl3/igt@kms_setmode@basic.html
   [54]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13833/shard-apl1/igt@kms_setmode@basic.html

  * igt@kms_vblank@pipe-a-ts-continuation-dpms-suspend:
    - shard-kbl:          [INCOMPLETE][55] ([fdo#103665]) -> [PASS][56]
   [55]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6605/shard-kbl4/igt@kms_vblank@pipe-a-ts-continuation-dpms-suspend.html
   [56]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13833/shard-kbl6/igt@kms_vblank@pipe-a-ts-continuation-dpms-suspend.html

  * igt@kms_vblank@pipe-a-ts-continuation-suspend:
    - shard-skl:          [INCOMPLETE][57] ([fdo#104108]) -> [PASS][58] +1 similar issue
   [57]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6605/shard-skl9/igt@kms_vblank@pipe-a-ts-continuation-suspend.html
   [58]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13833/shard-skl1/igt@kms_vblank@pipe-a-ts-continuation-suspend.html

  * igt@prime_vgem@sync-render:
    - shard-apl:          [FAIL][59] ([fdo#111276]) -> [PASS][60]
   [59]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6605/shard-apl4/igt@prime_vgem@sync-render.html
   [60]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13833/shard-apl6/igt@prime_vgem@sync-render.html

  * igt@tools_test@sysfs_l3_parity:
    - shard-hsw:          [SKIP][61] ([fdo#109271]) -> [PASS][62]
   [61]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6605/shard-hsw4/igt@tools_test@sysfs_l3_parity.html
   [62]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13833/shard-hsw4/igt@tools_test@sysfs_l3_parity.html

  
#### Warnings ####

  * igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-indfb-draw-mmap-gtt:
    - shard-skl:          [FAIL][63] ([fdo#108040]) -> [FAIL][64] ([fdo#103167])
   [63]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6605/shard-skl9/igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-indfb-draw-mmap-gtt.html
   [64]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13833/shard-skl3/igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-indfb-draw-mmap-gtt.html

  
  [fdo#103166]: https://bugs.freedesktop.org/show_bug.cgi?id=103166
  [fdo#103167]: https://bugs.freedesktop.org/show_bug.cgi?id=103167
  [fdo#103232]: https://bugs.freedesktop.org/show_bug.cgi?id=103232
  [fdo#103665]: https://bugs.freedesktop.org/show_bug.cgi?id=103665
  [fdo#104097]: https://bugs.freedesktop.org/show_bug.cgi?id=104097
  [fdo#104108]: https://bugs.freedesktop.org/show_bug.cgi?id=104108
  [fdo#105363]: https://bugs.freedesktop.org/show_bug.cgi?id=105363
  [fdo#105957]: https://bugs.freedesktop.org/show_bug.cgi?id=105957
  [fdo#106885]: https://bugs.freedesktop.org/show_bug.cgi?id=106885
  [fdo#106978]: https://bugs.freedesktop.org/show_bug.cgi?id=106978
  [fdo#107201]: https://bugs.freedesktop.org/show_bug.cgi?id=107201
  [fdo#107713]: https://bugs.freedesktop.org/show_bug.cgi?id=107713
  [fdo#108040]: https://bugs.freedesktop.org/show_bug.cgi?id=108040
  [fdo#108566]: https://bugs.freedesktop.org/show_bug.cgi?id=108566
  [fdo#108682]: https://bugs.freedesktop.org/show_bug.cgi?id=108682
  [fdo#109016]: https://bugs.freedesktop.org/show_bug.cgi?id=109016
  [fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
  [fdo#109441]: https://bugs.freedesktop.org/show_bug.cgi?id=109441
  [fdo#109661]: https://bugs.freedesktop.org/show_bug.cgi?id=109661
  [fdo#110036]: https://bugs.freedesktop.org/show_bug.cgi?id=110036
  [fdo#111276]: https://bugs.freedesktop.org/show_bug.cgi?id=111276
  [fdo#99912]: https://bugs.freedesktop.org/show_bug.cgi?id=99912


Participating hosts (10 -> 10)
------------------------------

  No changes in participating hosts


Build changes
-------------

  * CI: CI-20190529 -> None
  * Linux: CI_DRM_6605 -> Patchwork_13833

  CI-20190529: 20190529
  CI_DRM_6605: 09970f7b8f1336416254cfac87f196578e3c1d13 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5120: b3138fbea79d5d7935e53530b90efe3e816236f4 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_13833: 0911640555c2d2a7377cf7d39c95bbe514bdfb89 @ git://anongit.freedesktop.org/gfx-ci/linux
  piglit_4509: fdc5a4ca11124ab8413c7988896eec4c97336694 @ git://anongit.freedesktop.org/piglit

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13833/

^ permalink raw reply	[flat|nested] 17+ messages in thread

end of thread, other threads:[~2019-08-02 13:20 UTC | newest]

Thread overview: 17+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-08-01 10:18 [RFC 0/4] PMU update for more than one card Tvrtko Ursulin
2019-08-01 10:18 ` [RFC 1/4] drm/i915/pmu: Make more struct i915_pmu centric Tvrtko Ursulin
2019-08-01 10:31   ` Chris Wilson
2019-08-01 10:46   ` Michal Wajdeczko
2019-08-01 10:53     ` Chris Wilson
2019-08-01 12:10     ` Tvrtko Ursulin
2019-08-01 10:18 ` [RFC 2/4] drm/i915/pmu: Convert engine sampling to uncore mmio Tvrtko Ursulin
2019-08-01 10:33   ` Chris Wilson
2019-08-01 12:03     ` Tvrtko Ursulin
2019-08-01 10:18 ` [RFC 3/4] drm/i915/pmu: Convert sampling to gt Tvrtko Ursulin
2019-08-01 10:29   ` Chris Wilson
2019-08-01 12:13     ` Tvrtko Ursulin
2019-08-01 10:33   ` Chris Wilson
2019-08-01 10:18 ` [RFC 4/4] drm/i915/pmu: Support multiple GPUs Tvrtko Ursulin
2019-08-01 10:45   ` Chris Wilson
2019-08-01 12:03     ` Tvrtko Ursulin
2019-08-02 13:20 ` ✓ Fi.CI.IGT: success for PMU update for more than one card Patchwork
