All of lore.kernel.org
 help / color / mirror / Atom feed
* [RFC v3 0/3] Support perf stat with i915 PMU
@ 2017-08-30 12:37 Dmitry Rogozhkin
  2017-08-30 12:37 ` [RFC v3 1/3] drm/i915/pmu: reorder function to suite next patch Dmitry Rogozhkin
                   ` (2 more replies)
  0 siblings, 3 replies; 4+ messages in thread
From: Dmitry Rogozhkin @ 2017-08-30 12:37 UTC (permalink / raw)
  To: intel-gfx; +Cc: Peter Zijlstra

These patches depend on the RFC patches enabling i915 PMU from Tvrtko:
  https://patchwork.freedesktop.org/series/27488/
Thus, CI failure to build them is expected. I think that my patches should
be squashed into Tvrtko's one actually.

The first patch simply reorders functions and does nothing comparing to
Tvrtko's patches. Next patches add fixes according to PMU API comments
and clarifications from PMU aware engineers.

v1: Make busy_stats refcounted instead of the whole pmu.

v2: Expose cpumask for the i915 pmu to prevent creation of multiple events
    of the same type. Remove perf-driver level sampling.

v3: Add CPUs online/offline tracking to form cpumask.

Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Peter Zijlstra <peterz@infradead.org>

Dmitry Rogozhkin (3):
  drm/i915/pmu: reorder function to suite next patch
  drm/i915/pmu: serve global events and support perf stat
  drm/i915/pmu: deny perf driver level sampling of i915 PMU

 drivers/gpu/drm/i915/i915_drv.h         |  19 +-
 drivers/gpu/drm/i915/i915_pmu.c         | 368 ++++++++++++++++----------------
 drivers/gpu/drm/i915/intel_ringbuffer.h |   2 +-
 3 files changed, 196 insertions(+), 193 deletions(-)

-- 
1.8.3.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 4+ messages in thread

* [RFC v3 1/3] drm/i915/pmu: reorder function to suite next patch
  2017-08-30 12:37 [RFC v3 0/3] Support perf stat with i915 PMU Dmitry Rogozhkin
@ 2017-08-30 12:37 ` Dmitry Rogozhkin
  2017-08-30 12:37 ` [RFC v3 2/3] drm/i915/pmu: serve global events and support perf stat Dmitry Rogozhkin
  2017-08-30 12:37 ` [RFC v3 3/3] drm/i915/pmu: deny perf driver level sampling of i915 PMU Dmitry Rogozhkin
  2 siblings, 0 replies; 4+ messages in thread
From: Dmitry Rogozhkin @ 2017-08-30 12:37 UTC (permalink / raw)
  To: intel-gfx; +Cc: Peter Zijlstra

This patch does nothing except reorder functions to highlight
changes in the next patch.

Change-Id: I0cd298780503ae8f6f8035b86c59fc8b5191356b
Signed-off-by: Dmitry Rogozhkin <dmitry.v.rogozhkin@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
---
 drivers/gpu/drm/i915/i915_pmu.c | 180 ++++++++++++++++++++--------------------
 1 file changed, 90 insertions(+), 90 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 3272ec0..bcdf2bc 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -363,6 +363,88 @@ static bool engine_needs_busy_stats(struct intel_engine_cs *engine)
 	       (engine->pmu.enable & BIT(I915_SAMPLE_BUSY));
 }
 
+static u64 count_interrupts(struct drm_i915_private *i915)
+{
+	/* open-coded kstat_irqs() */
+	struct irq_desc *desc = irq_to_desc(i915->drm.pdev->irq);
+	u64 sum = 0;
+	int cpu;
+
+	if (!desc || !desc->kstat_irqs)
+		return 0;
+
+	for_each_possible_cpu(cpu)
+		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
+
+	return sum;
+}
+
+static void i915_pmu_event_read(struct perf_event *event)
+{
+	struct drm_i915_private *i915 =
+		container_of(event->pmu, typeof(*i915), pmu.base);
+	u64 val = 0;
+
+	if (is_engine_event(event)) {
+		u8 sample = engine_event_sample(event);
+		struct intel_engine_cs *engine;
+
+		engine = intel_engine_lookup_user(i915,
+						  engine_event_class(event),
+						  engine_event_instance(event));
+
+		if (WARN_ON_ONCE(!engine)) {
+			/* Do nothing */
+		} else if (sample == I915_SAMPLE_BUSY &&
+			   engine->pmu.busy_stats) {
+			val = ktime_to_ns(intel_engine_get_busy_time(engine));
+		} else {
+			val = engine->pmu.sample[sample];
+		}
+	} else switch (event->attr.config) {
+	case I915_PMU_ACTUAL_FREQUENCY:
+		val = i915->pmu.sample[__I915_SAMPLE_FREQ_ACT];
+		break;
+	case I915_PMU_REQUESTED_FREQUENCY:
+		val = i915->pmu.sample[__I915_SAMPLE_FREQ_REQ];
+		break;
+	case I915_PMU_ENERGY:
+		val = intel_energy_uJ(i915);
+		break;
+	case I915_PMU_INTERRUPTS:
+		val = count_interrupts(i915);
+		break;
+
+	case I915_PMU_RC6_RESIDENCY:
+		if (!i915->gt.awake)
+			return;
+
+		val = intel_rc6_residency_ns(i915,
+					     IS_VALLEYVIEW(i915) ?
+					     VLV_GT_RENDER_RC6 :
+					     GEN6_GT_GFX_RC6);
+		break;
+
+	case I915_PMU_RC6p_RESIDENCY:
+		if (!i915->gt.awake)
+			return;
+
+		if (!IS_VALLEYVIEW(i915))
+			val = intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6p);
+		break;
+
+	case I915_PMU_RC6pp_RESIDENCY:
+		if (!i915->gt.awake)
+			return;
+
+		if (!IS_VALLEYVIEW(i915))
+			val = intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6pp);
+		break;
+	}
+
+	local64_set(&event->count, val);
+}
+
 static void i915_pmu_enable(struct perf_event *event)
 {
 	struct drm_i915_private *i915 =
@@ -440,23 +522,6 @@ static void i915_pmu_disable(struct perf_event *event)
 	i915_pmu_timer_cancel(event);
 }
 
-static int i915_pmu_event_add(struct perf_event *event, int flags)
-{
-	struct hw_perf_event *hwc = &event->hw;
-
-	if (flags & PERF_EF_START)
-		i915_pmu_enable(event);
-
-	hwc->state = !(flags & PERF_EF_START);
-
-	return 0;
-}
-
-static void i915_pmu_event_del(struct perf_event *event, int flags)
-{
-	i915_pmu_disable(event);
-}
-
 static void i915_pmu_event_start(struct perf_event *event, int flags)
 {
 	i915_pmu_enable(event);
@@ -467,86 +532,21 @@ static void i915_pmu_event_stop(struct perf_event *event, int flags)
 	i915_pmu_disable(event);
 }
 
-static u64 count_interrupts(struct drm_i915_private *i915)
+static int i915_pmu_event_add(struct perf_event *event, int flags)
 {
-	/* open-coded kstat_irqs() */
-	struct irq_desc *desc = irq_to_desc(i915->drm.pdev->irq);
-	u64 sum = 0;
-	int cpu;
+	struct hw_perf_event *hwc = &event->hw;
 
-	if (!desc || !desc->kstat_irqs)
-		return 0;
+	if (flags & PERF_EF_START)
+		i915_pmu_enable(event);
 
-	for_each_possible_cpu(cpu)
-		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
+	hwc->state = !(flags & PERF_EF_START);
 
-	return sum;
+	return 0;
 }
 
-static void i915_pmu_event_read(struct perf_event *event)
+static void i915_pmu_event_del(struct perf_event *event, int flags)
 {
-	struct drm_i915_private *i915 =
-		container_of(event->pmu, typeof(*i915), pmu.base);
-	u64 val = 0;
-
-	if (is_engine_event(event)) {
-		u8 sample = engine_event_sample(event);
-		struct intel_engine_cs *engine;
-
-		engine = intel_engine_lookup_user(i915,
-						  engine_event_class(event),
-						  engine_event_instance(event));
-
-		if (WARN_ON_ONCE(!engine)) {
-			/* Do nothing */
-		} else if (sample == I915_SAMPLE_BUSY &&
-			   engine->pmu.busy_stats) {
-			val = ktime_to_ns(intel_engine_get_busy_time(engine));
-		} else {
-			val = engine->pmu.sample[sample];
-		}
-	} else switch (event->attr.config) {
-	case I915_PMU_ACTUAL_FREQUENCY:
-		val = i915->pmu.sample[__I915_SAMPLE_FREQ_ACT];
-		break;
-	case I915_PMU_REQUESTED_FREQUENCY:
-		val = i915->pmu.sample[__I915_SAMPLE_FREQ_REQ];
-		break;
-	case I915_PMU_ENERGY:
-		val = intel_energy_uJ(i915);
-		break;
-	case I915_PMU_INTERRUPTS:
-		val = count_interrupts(i915);
-		break;
-
-	case I915_PMU_RC6_RESIDENCY:
-		if (!i915->gt.awake)
-			return;
-
-		val = intel_rc6_residency_ns(i915,
-					     IS_VALLEYVIEW(i915) ?
-					     VLV_GT_RENDER_RC6 :
-					     GEN6_GT_GFX_RC6);
-		break;
-
-	case I915_PMU_RC6p_RESIDENCY:
-		if (!i915->gt.awake)
-			return;
-
-		if (!IS_VALLEYVIEW(i915))
-			val = intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6p);
-		break;
-
-	case I915_PMU_RC6pp_RESIDENCY:
-		if (!i915->gt.awake)
-			return;
-
-		if (!IS_VALLEYVIEW(i915))
-			val = intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6pp);
-		break;
-	}
-
-	local64_set(&event->count, val);
+	i915_pmu_disable(event);
 }
 
 static int i915_pmu_event_event_idx(struct perf_event *event)
-- 
1.8.3.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 4+ messages in thread

* [RFC v3 2/3] drm/i915/pmu: serve global events and support perf stat
  2017-08-30 12:37 [RFC v3 0/3] Support perf stat with i915 PMU Dmitry Rogozhkin
  2017-08-30 12:37 ` [RFC v3 1/3] drm/i915/pmu: reorder function to suite next patch Dmitry Rogozhkin
@ 2017-08-30 12:37 ` Dmitry Rogozhkin
  2017-08-30 12:37 ` [RFC v3 3/3] drm/i915/pmu: deny perf driver level sampling of i915 PMU Dmitry Rogozhkin
  2 siblings, 0 replies; 4+ messages in thread
From: Dmitry Rogozhkin @ 2017-08-30 12:37 UTC (permalink / raw)
  To: intel-gfx; +Cc: Peter Zijlstra

This patch should probably be squashed with Tvrtko's PMU enabling
patch...

Making perf-stat workable with i915 PMU. The major point is that
current implementation of i915 PMU exposes a global counter rather
than per-task counters. Thus, required changes are:
* Register PMU with .task_ctx_nr=perf_invalid_context
* Expose cpumask for the PMU with the single CPU in the mask
* Properly support pmu->stop(): it should call pmu->read()
* Properly support pmu->del(): it should call stop(event, PERF_EF_UPDATE)

Examples:
perf stat -e i915/rcs0-busy/ workload.sh
   This should error out with "failed to read counter" because the
requested counter can't be queried in per-task mode (which is the case).

perf stat -e i915/rcs0-busy/ -a workload.sh
perf stat -e i915/rcs0-busy/ -a -C0 workload.sh
   These 2 commands should give the same result with the correct counter
values. The counter value will be queried once at the end of the workload.
The example output would be:

 Performance counter stats for 'system wide':
       649,547,987      i915/rcs0-busy/

       1.895530680 seconds time elapsed

perf stat -e i915/rcs0-busy/ -a -I 100 workload.sh
   This will query the counter periodically (each 100ms) and dump output:

     0.100108369          4,137,438      i915/rcs0-busy/
i915/rcs0-busy/: 37037414 100149071 100149071
     0.200249024         37,037,414      i915/rcs0-busy/
i915/rcs0-busy/: 36935429 100145077 100145077
     0.300391916         36,935,429      i915/rcs0-busy/
i915/rcs0-busy/: 34262017 100126136 100126136
     0.400518037         34,262,017      i915/rcs0-busy/
i915/rcs0-busy/: 34539960 100126217 100126217

v1: Make pmu.busy_stats a refcounter to avoid busy stats going away
    with some deleted event.

v2: Expose cpumask for i915 PMU to avoid multiple events creation of
    the same type followed by counter aggregation by perf-stat.

v3: Track CPUs getting online/offline to migrate perf context. If (likely)
    cpumask will initially set CPU0, CONFIG_BOOTPARAM_HOTPLUG_CPU0 will be
    needed to see effect of CPU status tracking.

Change-Id: I7d1abe747a4399196e72253f7b66441a6528dbee
Signed-off-by: Dmitry Rogozhkin <dmitry.v.rogozhkin@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>

cpumask

Change-Id: I145f59240b75f2b703e0531ec81af6cd05aae95c
Signed-off-by: Dmitry Rogozhkin <dmitry.v.rogozhkin@intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h         |  19 +++---
 drivers/gpu/drm/i915/i915_pmu.c         | 115 +++++++++++++++++++++++++++-----
 drivers/gpu/drm/i915/intel_ringbuffer.h |   2 +-
 3 files changed, 110 insertions(+), 26 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b59da2c..e629e5e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2153,6 +2153,16 @@ enum {
 	__I915_NUM_PMU_SAMPLERS
 };
 
+struct i915_pmu {
+	struct hlist_node node;
+	struct pmu base;
+	spinlock_t lock;
+	struct hrtimer timer;
+	bool timer_enabled;
+	u64 enable;
+	u64 sample[__I915_NUM_PMU_SAMPLERS];
+};
+
 struct drm_i915_private {
 	struct drm_device drm;
 
@@ -2660,14 +2670,7 @@ struct drm_i915_private {
 		int	irq;
 	} lpe_audio;
 
-	struct {
-		struct pmu base;
-		spinlock_t lock;
-		struct hrtimer timer;
-		bool timer_enabled;
-		u64 enable;
-		u64 sample[__I915_NUM_PMU_SAMPLERS];
-	} pmu;
+	struct i915_pmu pmu;
 
 	/*
 	 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index bcdf2bc..7bfedb7 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -15,6 +15,8 @@
 
 #define ENGINE_SAMPLE_BITS (16)
 
+static cpumask_t i915_pmu_cpumask = CPU_MASK_NONE;
+
 static u8 engine_config_sample(u64 config)
 {
 	return config & I915_PMU_SAMPLE_MASK;
@@ -285,16 +287,24 @@ static int i915_pmu_event_init(struct perf_event *event)
 {
 	struct drm_i915_private *i915 =
 		container_of(event->pmu, typeof(*i915), pmu.base);
+	int cpu;
 	int ret;
 
-	/* XXX ideally only want pid == -1 && cpu == -1 */
-
 	if (event->attr.type != event->pmu->type)
 		return -ENOENT;
 
 	if (has_branch_stack(event))
 		return -EOPNOTSUPP;
 
+	if (event->cpu < 0)
+		return -EINVAL;
+
+	cpu = cpumask_any_and(&i915_pmu_cpumask,
+		topology_sibling_cpumask(event->cpu));
+
+	if (cpu >= nr_cpu_ids)
+		return -ENODEV;
+
 	ret = 0;
 	if (is_engine_event(event)) {
 		ret = engine_event_init(event);
@@ -314,6 +324,7 @@ static int i915_pmu_event_init(struct perf_event *event)
 	if (ret)
 		return ret;
 
+	event->cpu = cpu;
 	if (!event->parent)
 		event->destroy = i915_pmu_event_destroy;
 
@@ -469,11 +480,16 @@ static void i915_pmu_enable(struct perf_event *event)
 				       ns_to_ktime(PERIOD), 0,
 				       HRTIMER_MODE_REL_PINNED);
 		i915->pmu.timer_enabled = true;
-	} else if (is_engine_event(event) && engine_needs_busy_stats(engine) &&
-		   !engine->pmu.busy_stats) {
-		engine->pmu.busy_stats = true;
-		if (!cancel_delayed_work(&engine->pmu.disable_busy_stats))
-			queue_work(i915->wq, &engine->pmu.enable_busy_stats);
+	} else if (is_engine_event(event) && engine_needs_busy_stats(engine)) {
+		/* Enable busy stats for the first event demanding it, next
+		 * events just reference counter. So, if some events will go
+		 * away, we will still have busy stats enabled till remaining
+		 * events still use them.
+		 */
+		if (engine->pmu.busy_stats++ == 0) {
+			if (!cancel_delayed_work(&engine->pmu.disable_busy_stats))
+				queue_work(i915->wq, &engine->pmu.enable_busy_stats);
+		}
 	}
 
 	spin_unlock_irqrestore(&i915->pmu.lock, flags);
@@ -495,16 +511,16 @@ static void i915_pmu_disable(struct perf_event *event)
 		enum intel_engine_id id;
 
 		engine = intel_engine_lookup_user(i915,
-						  engine_event_class(event),
-						  engine_event_instance(event));
+						engine_event_class(event),
+						engine_event_instance(event));
 		GEM_BUG_ON(!engine);
 		engine->pmu.enable &= ~BIT(engine_event_sample(event));
-		if (engine->pmu.busy_stats &&
-		    !engine_needs_busy_stats(engine)) {
-			engine->pmu.busy_stats = false;
-			queue_delayed_work(i915->wq,
-					   &engine->pmu.disable_busy_stats,
-					   round_jiffies_up_relative(2 * HZ));
+		if (!engine_needs_busy_stats(engine)) {
+			if (engine->pmu.busy_stats && --engine->pmu.busy_stats == 0) {
+				queue_delayed_work(i915->wq,
+						&engine->pmu.disable_busy_stats,
+						round_jiffies_up_relative(2 * HZ));
+			}
 		}
 		mask = 0;
 		for_each_engine(engine, i915, id)
@@ -529,6 +545,8 @@ static void i915_pmu_event_start(struct perf_event *event, int flags)
 
 static void i915_pmu_event_stop(struct perf_event *event, int flags)
 {
+	if (flags & PERF_EF_UPDATE)
+		i915_pmu_event_read(event);
 	i915_pmu_disable(event);
 }
 
@@ -546,7 +564,7 @@ static int i915_pmu_event_add(struct perf_event *event, int flags)
 
 static void i915_pmu_event_del(struct perf_event *event, int flags)
 {
-	i915_pmu_disable(event);
+	i915_pmu_event_stop(event, PERF_EF_UPDATE);
 }
 
 static int i915_pmu_event_event_idx(struct perf_event *event)
@@ -656,9 +674,28 @@ static ssize_t i915_pmu_event_show(struct device *dev,
         .attrs = i915_pmu_events_attrs,
 };
 
+static ssize_t i915_pmu_get_attr_cpumask(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	return cpumap_print_to_pagebuf(true, buf, &i915_pmu_cpumask);
+}
+
+static DEVICE_ATTR(cpumask, S_IRUGO, i915_pmu_get_attr_cpumask, NULL);
+
+static struct attribute *i915_cpumask_attrs[] = {
+	&dev_attr_cpumask.attr,
+	NULL,
+};
+
+static struct attribute_group i915_pmu_cpumask_attr_group = {
+	.attrs = i915_cpumask_attrs,
+};
+
 static const struct attribute_group *i915_pmu_attr_groups[] = {
         &i915_pmu_format_attr_group,
         &i915_pmu_events_attr_group,
+        &i915_pmu_cpumask_attr_group,
         NULL
 };
 
@@ -678,16 +715,57 @@ static void __disable_busy_stats(struct work_struct *work)
 	intel_disable_engine_stats(engine);
 }
 
+static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
+{
+	unsigned int target;
+
+	target = cpumask_any_and(&i915_pmu_cpumask, &i915_pmu_cpumask);
+	/* Select the first online CPU as a designated reader. */
+	if (target >= nr_cpu_ids) {
+		cpumask_set_cpu(cpu, &i915_pmu_cpumask);
+	}
+	return 0;
+}
+
+static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
+{
+	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);
+	unsigned int target;
+
+	if (cpumask_test_and_clear_cpu(cpu, &i915_pmu_cpumask)) {
+		target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
+		/* Migrate events if there is a valid target */
+		if (target < nr_cpu_ids) {
+			cpumask_set_cpu(target, &i915_pmu_cpumask);
+			perf_pmu_migrate_context(&pmu->base, cpu, target);
+		}
+	}
+	return 0;
+}
+
 void i915_pmu_register(struct drm_i915_private *i915)
 {
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
+	int err;
 
 	if (INTEL_GEN(i915) <= 2)
 		return;
 
+	err = cpuhp_setup_state_multi(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
+		"perf/x86/intel/i915:online",
+		i915_pmu_cpu_online,
+		i915_pmu_cpu_offline);
+	if (err)
+		return;
+
+	err = cpuhp_state_add_instance(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
+		&i915->pmu.node);
+	if (err)
+		return;
+
 	i915->pmu.base.attr_groups	= i915_pmu_attr_groups;
-	i915->pmu.base.task_ctx_nr	= perf_sw_context;
+	i915->pmu.base.task_ctx_nr	= perf_invalid_context;
 	i915->pmu.base.event_init	= i915_pmu_event_init;
 	i915->pmu.base.add		= i915_pmu_event_add;
 	i915->pmu.base.del		= i915_pmu_event_del;
@@ -731,4 +809,7 @@ void i915_pmu_unregister(struct drm_i915_private *i915)
 		flush_work(&engine->pmu.enable_busy_stats);
 		flush_delayed_work(&engine->pmu.disable_busy_stats);
 	}
+
+	cpuhp_state_remove_instance(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
+		&i915->pmu.node);
 }
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index fd5d838..0530e4e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -248,7 +248,7 @@ struct intel_engine_cs {
 	struct {
 		u32 enable;
 		u64 sample[4];
-		bool busy_stats;
+		unsigned int busy_stats;
 		struct work_struct enable_busy_stats;
 		struct delayed_work disable_busy_stats;
 	} pmu;
-- 
1.8.3.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 4+ messages in thread

* [RFC v3 3/3] drm/i915/pmu: deny perf driver level sampling of i915 PMU
  2017-08-30 12:37 [RFC v3 0/3] Support perf stat with i915 PMU Dmitry Rogozhkin
  2017-08-30 12:37 ` [RFC v3 1/3] drm/i915/pmu: reorder function to suite next patch Dmitry Rogozhkin
  2017-08-30 12:37 ` [RFC v3 2/3] drm/i915/pmu: serve global events and support perf stat Dmitry Rogozhkin
@ 2017-08-30 12:37 ` Dmitry Rogozhkin
  2 siblings, 0 replies; 4+ messages in thread
From: Dmitry Rogozhkin @ 2017-08-30 12:37 UTC (permalink / raw)
  To: intel-gfx; +Cc: Peter Zijlstra

This patch should probably be squashed with Tvrtko's PMU enabling patch...

As per discussion with Peter, i915 PMU is an example of uncore PMU which
are prohibited to support perf driver level sampling. This patch removes
hrtimer which we expose to perf core and denies events creation with
non-zero event->attr.sampling_period.

Mind that this patch does _not_ remove i915 PMU _internal_ sampling timer.
So, sampling metrics are still gathered, but can be accessed only by
explicit request to get metric counter, i.e. by sys_read().

Change-Id: I33f345f679f0a5a8ecc9867f9e7c1bfb357e708d
Signed-off-by: Dmitry Rogozhkin <dmitry.v.rogozhkin@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Peter Zijlstra <peterz@infradead.org>
---
 drivers/gpu/drm/i915/i915_pmu.c | 89 ++---------------------------------------
 1 file changed, 4 insertions(+), 85 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 7bfedb7..edd1e27 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -239,50 +239,6 @@ static int engine_event_init(struct perf_event *event)
 	return 0;
 }
 
-static DEFINE_PER_CPU(struct pt_regs, i915_pmu_pt_regs);
-
-static enum hrtimer_restart hrtimer_sample(struct hrtimer *hrtimer)
-{
-	struct pt_regs *regs = this_cpu_ptr(&i915_pmu_pt_regs);
-	struct perf_sample_data data;
-	struct perf_event *event;
-	u64 period;
-
-	event = container_of(hrtimer, struct perf_event, hw.hrtimer);
-	if (event->state != PERF_EVENT_STATE_ACTIVE)
-		return HRTIMER_NORESTART;
-
-	event->pmu->read(event);
-
-	perf_sample_data_init(&data, 0, event->hw.last_period);
-	perf_event_overflow(event, &data, regs);
-
-	period = max_t(u64, 10000, event->hw.sample_period);
-	hrtimer_forward_now(hrtimer, ns_to_ktime(period));
-	return HRTIMER_RESTART;
-}
-
-static void init_hrtimer(struct perf_event *event)
-{
-	struct hw_perf_event *hwc = &event->hw;
-
-	if (!is_sampling_event(event))
-		return;
-
-	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	hwc->hrtimer.function = hrtimer_sample;
-
-	if (event->attr.freq) {
-		long freq = event->attr.sample_freq;
-
-		event->attr.sample_period = NSEC_PER_SEC / freq;
-		hwc->sample_period = event->attr.sample_period;
-		local64_set(&hwc->period_left, hwc->sample_period);
-		hwc->last_period = hwc->sample_period;
-		event->attr.freq = 0;
-	}
-}
-
 static int i915_pmu_event_init(struct perf_event *event)
 {
 	struct drm_i915_private *i915 =
@@ -293,6 +249,10 @@ static int i915_pmu_event_init(struct perf_event *event)
 	if (event->attr.type != event->pmu->type)
 		return -ENOENT;
 
+	/* unsupported modes and filters */
+	if (event->attr.sample_period) /* no sampling */
+		return -EINVAL;
+
 	if (has_branch_stack(event))
 		return -EOPNOTSUPP;
 
@@ -328,46 +288,9 @@ static int i915_pmu_event_init(struct perf_event *event)
 	if (!event->parent)
 		event->destroy = i915_pmu_event_destroy;
 
-	init_hrtimer(event);
-
 	return 0;
 }
 
-static void i915_pmu_timer_start(struct perf_event *event)
-{
-	struct hw_perf_event *hwc = &event->hw;
-	s64 period;
-
-	if (!is_sampling_event(event))
-		return;
-
-	period = local64_read(&hwc->period_left);
-	if (period) {
-		if (period < 0)
-			period = 10000;
-
-		local64_set(&hwc->period_left, 0);
-	} else {
-		period = max_t(u64, 10000, hwc->sample_period);
-	}
-
-	hrtimer_start_range_ns(&hwc->hrtimer,
-			       ns_to_ktime(period), 0,
-			       HRTIMER_MODE_REL_PINNED);
-}
-
-static void i915_pmu_timer_cancel(struct perf_event *event)
-{
-	struct hw_perf_event *hwc = &event->hw;
-
-	if (!is_sampling_event(event))
-		return;
-
-	local64_set(&hwc->period_left,
-		    ktime_to_ns(hrtimer_get_remaining(&hwc->hrtimer)));
-	hrtimer_cancel(&hwc->hrtimer);
-}
-
 static bool engine_needs_busy_stats(struct intel_engine_cs *engine)
 {
 	return supports_busy_stats() &&
@@ -493,8 +416,6 @@ static void i915_pmu_enable(struct perf_event *event)
 	}
 
 	spin_unlock_irqrestore(&i915->pmu.lock, flags);
-
-	i915_pmu_timer_start(event);
 }
 
 static void i915_pmu_disable(struct perf_event *event)
@@ -534,8 +455,6 @@ static void i915_pmu_disable(struct perf_event *event)
 	i915->pmu.timer_enabled &= pmu_needs_timer(i915, true);
 
 	spin_unlock_irqrestore(&i915->pmu.lock, flags);
-
-	i915_pmu_timer_cancel(event);
 }
 
 static void i915_pmu_event_start(struct perf_event *event, int flags)
-- 
1.8.3.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 4+ messages in thread

end of thread, other threads:[~2017-08-30 20:40 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-08-30 12:37 [RFC v3 0/3] Support perf stat with i915 PMU Dmitry Rogozhkin
2017-08-30 12:37 ` [RFC v3 1/3] drm/i915/pmu: reorder function to suite next patch Dmitry Rogozhkin
2017-08-30 12:37 ` [RFC v3 2/3] drm/i915/pmu: serve global events and support perf stat Dmitry Rogozhkin
2017-08-30 12:37 ` [RFC v3 3/3] drm/i915/pmu: deny perf driver level sampling of i915 PMU Dmitry Rogozhkin

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.