* [CI 01/10] drm/i915: Extract intel_get_cagf
@ 2017-10-10 15:18 Tvrtko Ursulin
  2017-10-10 15:18 ` [CI 02/10] drm/i915/pmu: Expose a PMU interface for perf queries Tvrtko Ursulin
                   ` (9 more replies)
  0 siblings, 10 replies; 11+ messages in thread
From: Tvrtko Ursulin @ 2017-10-10 15:18 UTC (permalink / raw)
  To: Intel-gfx

From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Code to be shared between debugfs and the PMU implementation.
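
For reference, the shared call pattern the helper enables looks like this
(the same shape appears in the debugfs and sysfs hunks below):

  cagf = intel_gpu_freq(dev_priv,
                        intel_get_cagf(dev_priv, I915_READ(GEN6_RPSTAT1)));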

v2: Checkpatch cleanup.
v3: Also consolidate i915_sysfs.c/gt_act_freq_mhz_show.
v4: Rebase.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk> (v2)
---
 drivers/gpu/drm/i915/i915_debugfs.c |  9 ++-------
 drivers/gpu/drm/i915/i915_drv.h     |  2 ++
 drivers/gpu/drm/i915/i915_sysfs.c   | 11 +++--------
 drivers/gpu/drm/i915/intel_pm.c     | 14 ++++++++++++++
 4 files changed, 21 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 5b58d2b897c7..43a1d75bbe47 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1167,13 +1167,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
 		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
 		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
-		if (INTEL_GEN(dev_priv) >= 9)
-			cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
-		else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
-		else
-			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
-		cagf = intel_gpu_freq(dev_priv, cagf);
+		cagf = intel_gpu_freq(dev_priv,
+				      intel_get_cagf(dev_priv, rpstat));
 
 		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 770305bdeabb..8c5d89860894 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -4173,6 +4173,8 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
 u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
 			   const i915_reg_t reg);
 
+u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat1);
+
 #define I915_READ8(reg)		dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
 #define I915_WRITE8(reg, val)	dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
 
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index d61c8727f756..3bcf1c8c0dd9 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -252,14 +252,9 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
 		freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
 		ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff);
 	} else {
-		u32 rpstat = I915_READ(GEN6_RPSTAT1);
-		if (INTEL_GEN(dev_priv) >= 9)
-			ret = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
-		else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-			ret = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
-		else
-			ret = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
-		ret = intel_gpu_freq(dev_priv, ret);
+		ret = intel_gpu_freq(dev_priv,
+				     intel_get_cagf(dev_priv,
+						    I915_READ(GEN6_RPSTAT1)));
 	}
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 9d0ca2656a23..ab9491178c57 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -9379,3 +9379,17 @@ u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
 	intel_runtime_pm_put(dev_priv);
 	return DIV_ROUND_UP_ULL(time_hw * units, div);
 }
+
+u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat)
+{
+	u32 cagf;
+
+	if (INTEL_GEN(dev_priv) >= 9)
+		cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
+	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+		cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
+	else
+		cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
+
+	return cagf;
+}
-- 
2.9.5

* [CI 02/10] drm/i915/pmu: Expose a PMU interface for perf queries
  2017-10-10 15:18 [CI 01/10] drm/i915: Extract intel_get_cagf Tvrtko Ursulin
@ 2017-10-10 15:18 ` Tvrtko Ursulin
  2017-10-10 15:19 ` [CI 03/10] drm/i915/pmu: Suspend sampling when GPU is idle Tvrtko Ursulin
                   ` (8 subsequent siblings)
  9 siblings, 0 replies; 11+ messages in thread
From: Tvrtko Ursulin @ 2017-10-10 15:18 UTC (permalink / raw)
  To: Intel-gfx

From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

From: Chris Wilson <chris@chris-wilson.co.uk>
From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
From: Dmitry Rogozhkin <dmitry.v.rogozhkin@intel.com>

The first goal is to be able to measure GPU (and individual ring) busyness
without having to poll registers from userspace. (Polling not only requires
holding the forcewake lock indefinitely, perturbing the system, but also
runs the risk of hanging the machine.) As an alternative we can use the
perf event counter interface to sample the ring registers periodically
and send those results to userspace.

The functionality is exported to userspace via the existing perf PMU
API and can be exercised with the existing tools. For example:

  perf stat -a -e i915/rcs0-busy/ -I 1000

will print the render engine busyness once per second. All the performance
counters can be enumerated (perf list) and have their unit of measure
correctly reported in sysfs.
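
For programmatic access, here is a minimal userspace sketch (not part of the
patch) of reading one such counter via perf_event_open(2); the sysfs path
follows the standard perf event_source convention and the config value uses
the uAPI macros added below:

  #include <drm/i915_drm.h>	/* I915_PMU_ENGINE_BUSY(), assumed installed */
  #include <linux/perf_event.h>
  #include <sys/syscall.h>
  #include <unistd.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
      struct perf_event_attr attr;
      uint64_t busy_ns;
      int fd, type;
      FILE *f;

      /* The perf core exports the dynamically allocated PMU type id here. */
      f = fopen("/sys/bus/event_source/devices/i915/type", "r");
      if (!f || fscanf(f, "%d", &type) != 1)
          return 1;
      fclose(f);

      memset(&attr, 0, sizeof(attr));
      attr.type = type;
      attr.size = sizeof(attr);
      attr.config = I915_PMU_ENGINE_BUSY(I915_ENGINE_CLASS_RENDER, 0);

      /* System-wide counter; CPU0 is normally in the exported cpumask. */
      fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
      if (fd < 0)
          return 1;

      sleep(1);
      if (read(fd, &busy_ns, sizeof(busy_ns)) == sizeof(busy_ns))
          printf("rcs0 busy: %llu ns\n", (unsigned long long)busy_ns);
      close(fd);

      return 0;
  }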

v1-v2 (Chris Wilson):

v2: Use a common timer for the ring sampling.

v3: (Tvrtko Ursulin)
 * Decouple uAPI from i915 engine ids.
 * Complete uAPI defines.
 * Refactor some code to helpers for clarity.
 * Skip sampling disabled engines.
 * Expose counters in sysfs.
 * Pass in fake regs to avoid null ptr deref in perf core.
 * Convert to class/instance uAPI.
 * Use shared driver code for rc6 residency, power and frequency.

v4: (Dmitry Rogozhkin)
 * Register PMU with .task_ctx_nr=perf_invalid_context
 * Expose cpumask for the PMU with the single CPU in the mask
 * Properly support pmu->stop(): it should call pmu->read()
 * Properly support pmu->del(): it should call stop(event, PERF_EF_UPDATE)
 * Introduce refcounting of event subscriptions.
 * Make pmu.busy_stats a refcounter to avoid busy stats going away
   with some deleted event.
 * Expose a cpumask for the i915 PMU to avoid creation of multiple events
   of the same type followed by counter aggregation by perf-stat.
 * Track CPUs going online/offline to migrate the perf context. Since the
   cpumask will (most likely) initially contain only CPU0,
   CONFIG_BOOTPARAM_HOTPLUG_CPU0 will be needed to see the effect of the
   CPU status tracking.
 * End result is that only global events are supported and perf stat
   works correctly.
 * Deny perf driver level sampling - it is prohibited for uncore PMU.

v5: (Tvrtko Ursulin)

 * Don't hardcode number of engine samplers.
 * Rewrite event ref-counting for correctness and simplicity.
 * Store initial counter value when starting already enabled events
   to correctly report values to all listeners.
 * Fix RC6 residency readout.
 * Comments, GPL header.

v6:
 * Add missing entry to v4 changelog.
 * Fix accounting in CPU hotplug case by copying the approach from
   arch/x86/events/intel/cstate.c. (Dmitry Rogozhkin)

v7:
 * Log failure message only on failure.
 * Remove CPU hotplug notification state on unregister.

v8:
 * Fix error unwind on failed registration.
 * Checkpatch cleanup.

v9:
 * Drop the energy metric, it is available via intel_rapl_perf.
   (Ville Syrjälä)
 * Use HAS_RC6(p). (Chris Wilson)
 * Handle unsupported non-engine events. (Dmitry Rogozhkin)
 * Rebase for intel_rc6_residency_ns needing caller managed
   runtime pm.
 * Drop HAS_RC6 checks from the read callback since creating those
   events will be rejected at init time already.
 * Add counter units to sysfs so perf stat output is nicer.
 * Cleanup the attribute tables for brevity and readability.

v10:
 * Fixed queued accounting.

v11:
 * Move intel_engine_lookup_user to intel_engine_cs.c
 * Commit update. (Joonas Lahtinen)

v12:
 * More accurate sampling. (Chris Wilson)
 * Store and report frequency in MHz for better usability from
   perf stat.
 * Removed metrics: queued, interrupts, rc6 counters.
 * Sample engine busyness based on seqno difference only
   for less MMIO (and forcewake) on all platforms. (Chris Wilson)

v13:
 * Comment spelling, use mul_u32_u32 to work around potential GCC
   issue and some code alignment changes. (Chris Wilson)

v14:
 * Rebase.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Signed-off-by: Dmitry Rogozhkin <dmitry.v.rogozhkin@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Dmitry Rogozhkin <dmitry.v.rogozhkin@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/Makefile           |   1 +
 drivers/gpu/drm/i915/i915_drv.c         |   2 +
 drivers/gpu/drm/i915/i915_drv.h         |  14 +
 drivers/gpu/drm/i915/i915_pmu.c         | 658 ++++++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/i915_pmu.h         |  96 +++++
 drivers/gpu/drm/i915/i915_reg.h         |   3 +
 drivers/gpu/drm/i915/intel_engine_cs.c  |  35 ++
 drivers/gpu/drm/i915/intel_ringbuffer.h |  26 ++
 include/uapi/drm/i915_drm.h             |  48 +++
 9 files changed, 883 insertions(+)
 create mode 100644 drivers/gpu/drm/i915/i915_pmu.c
 create mode 100644 drivers/gpu/drm/i915/i915_pmu.h

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 66d23b619db1..e94cd69b715a 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -26,6 +26,7 @@ i915-y := i915_drv.o \
 
 i915-$(CONFIG_COMPAT)   += i915_ioc32.o
 i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o intel_pipe_crc.o
+i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o
 
 # GEM code
 i915-y += i915_cmd_parser.o \
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 66fc156b294a..d373a840566a 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1215,6 +1215,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
 	struct drm_device *dev = &dev_priv->drm;
 
 	i915_gem_shrinker_init(dev_priv);
+	i915_pmu_register(dev_priv);
 
 	/*
 	 * Notify a valid surface after modesetting,
@@ -1269,6 +1270,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
 	intel_opregion_unregister(dev_priv);
 
 	i915_perf_unregister(dev_priv);
+	i915_pmu_unregister(dev_priv);
 
 	i915_teardown_sysfs(dev_priv);
 	i915_guc_log_unregister(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8c5d89860894..c634a8d193b3 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -40,6 +40,7 @@
 #include <linux/hash.h>
 #include <linux/intel-iommu.h>
 #include <linux/kref.h>
+#include <linux/perf_event.h>
 #include <linux/pm_qos.h>
 #include <linux/reservation.h>
 #include <linux/shmem_fs.h>
@@ -2266,6 +2267,8 @@ struct drm_i915_private {
 	struct i915_gem_context *kernel_context;
 	/* Context only to be used for injecting preemption commands */
 	struct i915_gem_context *preempt_context;
+	struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
+					    [MAX_ENGINE_INSTANCE + 1];
 	struct i915_vma *semaphore;
 
 	struct drm_dma_handle *status_page_dmah;
@@ -2728,6 +2731,8 @@ struct drm_i915_private {
 		int	irq;
 	} lpe_audio;
 
+	struct i915_pmu pmu;
+
 	/*
 	 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
 	 * will be rejected. Instead look for a better place.
@@ -3974,6 +3979,15 @@ extern void i915_perf_fini(struct drm_i915_private *dev_priv);
 extern void i915_perf_register(struct drm_i915_private *dev_priv);
 extern void i915_perf_unregister(struct drm_i915_private *dev_priv);
 
+/* i915_pmu.c */
+#ifdef CONFIG_PERF_EVENTS
+void i915_pmu_register(struct drm_i915_private *i915);
+void i915_pmu_unregister(struct drm_i915_private *i915);
+#else
+static inline void i915_pmu_register(struct drm_i915_private *i915) {}
+static inline void i915_pmu_unregister(struct drm_i915_private *i915) {}
+#endif
+
 /* i915_suspend.c */
 extern int i915_save_state(struct drm_i915_private *dev_priv);
 extern int i915_restore_state(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
new file mode 100644
index 000000000000..f51730bc49e2
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -0,0 +1,658 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/perf_event.h>
+#include <linux/pm_runtime.h>
+
+#include "i915_drv.h"
+#include "i915_pmu.h"
+#include "intel_ringbuffer.h"
+
+/* Frequency for the sampling timer for events which need it. */
+#define FREQUENCY 200
+#define PERIOD max_t(u64, 10000, NSEC_PER_SEC / FREQUENCY)
+
+#define ENGINE_SAMPLE_MASK \
+	(BIT(I915_SAMPLE_BUSY) | \
+	 BIT(I915_SAMPLE_WAIT) | \
+	 BIT(I915_SAMPLE_SEMA))
+
+#define ENGINE_SAMPLE_BITS (1 << I915_PMU_SAMPLE_BITS)
+
+static cpumask_t i915_pmu_cpumask = CPU_MASK_NONE;
+
+static u8 engine_config_sample(u64 config)
+{
+	return config & I915_PMU_SAMPLE_MASK;
+}
+
+static u8 engine_event_sample(struct perf_event *event)
+{
+	return engine_config_sample(event->attr.config);
+}
+
+static u8 engine_event_class(struct perf_event *event)
+{
+	return (event->attr.config >> I915_PMU_CLASS_SHIFT) & 0xff;
+}
+
+static u8 engine_event_instance(struct perf_event *event)
+{
+	return (event->attr.config >> I915_PMU_SAMPLE_BITS) & 0xff;
+}
+
+static bool is_engine_config(u64 config)
+{
+	return config < __I915_PMU_OTHER(0);
+}
+
+static unsigned int config_enabled_bit(u64 config)
+{
+	if (is_engine_config(config))
+		return engine_config_sample(config);
+	else
+		return ENGINE_SAMPLE_BITS + (config - __I915_PMU_OTHER(0));
+}
+
+static u64 config_enabled_mask(u64 config)
+{
+	return BIT_ULL(config_enabled_bit(config));
+}
+
+static bool is_engine_event(struct perf_event *event)
+{
+	return is_engine_config(event->attr.config);
+}
+
+static unsigned int event_enabled_bit(struct perf_event *event)
+{
+	return config_enabled_bit(event->attr.config);
+}
+
+static bool grab_forcewake(struct drm_i915_private *i915, bool fw)
+{
+	if (!fw)
+		intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
+
+	return true;
+}
+
+static void
+update_sample(struct i915_pmu_sample *sample, u32 unit, u32 val)
+{
+	/*
+	 * Since we are doing stochastic sampling for these counters,
+	 * average the delta with the previous value for better accuracy.
+	 */
+	sample->cur += div_u64(mul_u32_u32(sample->prev + val, unit), 2);
+	sample->prev = val;
+}
+
+static void engines_sample(struct drm_i915_private *dev_priv)
+{
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	bool fw = false;
+
+	if ((dev_priv->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
+		return;
+
+	if (!dev_priv->gt.awake)
+		return;
+
+	if (!intel_runtime_pm_get_if_in_use(dev_priv))
+		return;
+
+	for_each_engine(engine, dev_priv, id) {
+		u32 current_seqno = intel_engine_get_seqno(engine);
+		u32 last_seqno = intel_engine_last_submit(engine);
+		u32 val;
+
+		val = !i915_seqno_passed(current_seqno, last_seqno);
+
+		update_sample(&engine->pmu.sample[I915_SAMPLE_BUSY],
+			      PERIOD, val);
+
+		if (val && (engine->pmu.enable &
+		    (BIT(I915_SAMPLE_WAIT) | BIT(I915_SAMPLE_SEMA)))) {
+			fw = grab_forcewake(dev_priv, fw);
+
+			val = I915_READ_FW(RING_CTL(engine->mmio_base));
+		} else {
+			val = 0;
+		}
+
+		update_sample(&engine->pmu.sample[I915_SAMPLE_WAIT],
+			      PERIOD, !!(val & RING_WAIT));
+
+		update_sample(&engine->pmu.sample[I915_SAMPLE_SEMA],
+			      PERIOD, !!(val & RING_WAIT_SEMAPHORE));
+	}
+
+	if (fw)
+		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+
+	intel_runtime_pm_put(dev_priv);
+}
+
+static void frequency_sample(struct drm_i915_private *dev_priv)
+{
+	if (dev_priv->pmu.enable &
+	    config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
+		u32 val;
+
+		val = dev_priv->rps.cur_freq;
+		if (dev_priv->gt.awake &&
+		    intel_runtime_pm_get_if_in_use(dev_priv)) {
+			val = intel_get_cagf(dev_priv,
+					     I915_READ_NOTRACE(GEN6_RPSTAT1));
+			intel_runtime_pm_put(dev_priv);
+		}
+
+		update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
+			      1, intel_gpu_freq(dev_priv, val));
+	}
+
+	if (dev_priv->pmu.enable &
+	    config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
+		update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_REQ], 1,
+			      intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
+	}
+}
+
+static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
+{
+	struct drm_i915_private *i915 =
+		container_of(hrtimer, struct drm_i915_private, pmu.timer);
+
+	if (i915->pmu.enable == 0)
+		return HRTIMER_NORESTART;
+
+	engines_sample(i915);
+	frequency_sample(i915);
+
+	hrtimer_forward_now(hrtimer, ns_to_ktime(PERIOD));
+	return HRTIMER_RESTART;
+}
+
+static void i915_pmu_event_destroy(struct perf_event *event)
+{
+	WARN_ON(event->parent);
+}
+
+static int engine_event_init(struct perf_event *event)
+{
+	struct drm_i915_private *i915 =
+		container_of(event->pmu, typeof(*i915), pmu.base);
+
+	if (!intel_engine_lookup_user(i915, engine_event_class(event),
+				      engine_event_instance(event)))
+		return -ENODEV;
+
+	switch (engine_event_sample(event)) {
+	case I915_SAMPLE_BUSY:
+	case I915_SAMPLE_WAIT:
+		break;
+	case I915_SAMPLE_SEMA:
+		if (INTEL_GEN(i915) < 6)
+			return -ENODEV;
+		break;
+	default:
+		return -ENOENT;
+	}
+
+	return 0;
+}
+
+static int i915_pmu_event_init(struct perf_event *event)
+{
+	struct drm_i915_private *i915 =
+		container_of(event->pmu, typeof(*i915), pmu.base);
+	int cpu, ret;
+
+	if (event->attr.type != event->pmu->type)
+		return -ENOENT;
+
+	/* unsupported modes and filters */
+	if (event->attr.sample_period) /* no sampling */
+		return -EINVAL;
+
+	if (has_branch_stack(event))
+		return -EOPNOTSUPP;
+
+	if (event->cpu < 0)
+		return -EINVAL;
+
+	cpu = cpumask_any_and(&i915_pmu_cpumask,
+			      topology_sibling_cpumask(event->cpu));
+	if (cpu >= nr_cpu_ids)
+		return -ENODEV;
+
+	if (is_engine_event(event)) {
+		ret = engine_event_init(event);
+	} else {
+		ret = 0;
+		switch (event->attr.config) {
+		case I915_PMU_ACTUAL_FREQUENCY:
+			if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+				 /* Requires a mutex for sampling! */
+				ret = -ENODEV;
+		case I915_PMU_REQUESTED_FREQUENCY:
+			if (INTEL_GEN(i915) < 6)
+				ret = -ENODEV;
+			break;
+		default:
+			ret = -ENOENT;
+			break;
+		}
+	}
+	if (ret)
+		return ret;
+
+	event->cpu = cpu;
+	if (!event->parent)
+		event->destroy = i915_pmu_event_destroy;
+
+	return 0;
+}
+
+static u64 __i915_pmu_event_read(struct perf_event *event)
+{
+	struct drm_i915_private *i915 =
+		container_of(event->pmu, typeof(*i915), pmu.base);
+	u64 val = 0;
+
+	if (is_engine_event(event)) {
+		u8 sample = engine_event_sample(event);
+		struct intel_engine_cs *engine;
+
+		engine = intel_engine_lookup_user(i915,
+						  engine_event_class(event),
+						  engine_event_instance(event));
+
+		if (WARN_ON_ONCE(!engine)) {
+			/* Do nothing */
+		} else {
+			val = engine->pmu.sample[sample].cur;
+		}
+	} else {
+		switch (event->attr.config) {
+		case I915_PMU_ACTUAL_FREQUENCY:
+			val =
+			   div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_ACT].cur,
+				   FREQUENCY);
+			break;
+		case I915_PMU_REQUESTED_FREQUENCY:
+			val =
+			   div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_REQ].cur,
+				   FREQUENCY);
+			break;
+		}
+	}
+
+	return val;
+}
+
+static void i915_pmu_event_read(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	u64 prev, new;
+
+again:
+	prev = local64_read(&hwc->prev_count);
+	new = __i915_pmu_event_read(event);
+
+	if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev)
+		goto again;
+
+	local64_add(new - prev, &event->count);
+}
+
+static void i915_pmu_enable(struct perf_event *event)
+{
+	struct drm_i915_private *i915 =
+		container_of(event->pmu, typeof(*i915), pmu.base);
+	unsigned int bit = event_enabled_bit(event);
+	unsigned long flags;
+
+	spin_lock_irqsave(&i915->pmu.lock, flags);
+
+	/*
+	 * Start the sampling timer when enabling the first event.
+	 */
+	if (i915->pmu.enable == 0)
+		hrtimer_start_range_ns(&i915->pmu.timer,
+				       ns_to_ktime(PERIOD), 0,
+				       HRTIMER_MODE_REL_PINNED);
+
+	/*
+	 * Update the bitmask of enabled events and increment
+	 * the event reference counter.
+	 */
+	GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
+	GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0);
+	i915->pmu.enable |= BIT_ULL(bit);
+	i915->pmu.enable_count[bit]++;
+
+	/*
+	 * For per-engine events the bitmask and reference counting
+	 * is stored per engine.
+	 */
+	if (is_engine_event(event)) {
+		u8 sample = engine_event_sample(event);
+		struct intel_engine_cs *engine;
+
+		engine = intel_engine_lookup_user(i915,
+						  engine_event_class(event),
+						  engine_event_instance(event));
+		GEM_BUG_ON(!engine);
+		engine->pmu.enable |= BIT(sample);
+
+		GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
+		GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);
+		engine->pmu.enable_count[sample]++;
+	}
+
+	/*
+	 * Store the current counter value so we can report the correct delta
+	 * for all listeners. Even when the event was already enabled and has
+	 * an existing non-zero value.
+	 */
+	local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));
+
+	spin_unlock_irqrestore(&i915->pmu.lock, flags);
+}
+
+static void i915_pmu_disable(struct perf_event *event)
+{
+	struct drm_i915_private *i915 =
+		container_of(event->pmu, typeof(*i915), pmu.base);
+	unsigned int bit = event_enabled_bit(event);
+	unsigned long flags;
+
+	spin_lock_irqsave(&i915->pmu.lock, flags);
+
+	if (is_engine_event(event)) {
+		u8 sample = engine_event_sample(event);
+		struct intel_engine_cs *engine;
+
+		engine = intel_engine_lookup_user(i915,
+						  engine_event_class(event),
+						  engine_event_instance(event));
+		GEM_BUG_ON(!engine);
+		GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
+		GEM_BUG_ON(engine->pmu.enable_count[sample] == 0);
+		/*
+		 * Decrement the reference count and clear the enabled
+		 * bitmask when the last listener on an event goes away.
+		 */
+		if (--engine->pmu.enable_count[sample] == 0)
+			engine->pmu.enable &= ~BIT(sample);
+	}
+
+	GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
+	GEM_BUG_ON(i915->pmu.enable_count[bit] == 0);
+	/*
+	 * Decrement the reference count and clear the enabled
+	 * bitmask when the last listener on an event goes away.
+	 */
+	if (--i915->pmu.enable_count[bit] == 0)
+		i915->pmu.enable &= ~BIT_ULL(bit);
+
+	spin_unlock_irqrestore(&i915->pmu.lock, flags);
+}
+
+static void i915_pmu_event_start(struct perf_event *event, int flags)
+{
+	i915_pmu_enable(event);
+	event->hw.state = 0;
+}
+
+static void i915_pmu_event_stop(struct perf_event *event, int flags)
+{
+	if (flags & PERF_EF_UPDATE)
+		i915_pmu_event_read(event);
+	i915_pmu_disable(event);
+	event->hw.state = PERF_HES_STOPPED;
+}
+
+static int i915_pmu_event_add(struct perf_event *event, int flags)
+{
+	if (flags & PERF_EF_START)
+		i915_pmu_event_start(event, flags);
+
+	return 0;
+}
+
+static void i915_pmu_event_del(struct perf_event *event, int flags)
+{
+	i915_pmu_event_stop(event, PERF_EF_UPDATE);
+}
+
+static int i915_pmu_event_event_idx(struct perf_event *event)
+{
+	return 0;
+}
+
+static ssize_t i915_pmu_format_show(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	struct dev_ext_attribute *eattr;
+
+	eattr = container_of(attr, struct dev_ext_attribute, attr);
+	return sprintf(buf, "%s\n", (char *)eattr->var);
+}
+
+#define I915_PMU_FORMAT_ATTR(_name, _config) \
+	(&((struct dev_ext_attribute[]) { \
+		{ .attr = __ATTR(_name, 0444, i915_pmu_format_show, NULL), \
+		  .var = (void *)_config, } \
+	})[0].attr.attr)
+
+static struct attribute *i915_pmu_format_attrs[] = {
+	I915_PMU_FORMAT_ATTR(i915_eventid, "config:0-20"),
+	NULL,
+};
+
+static const struct attribute_group i915_pmu_format_attr_group = {
+	.name = "format",
+	.attrs = i915_pmu_format_attrs,
+};
+
+static ssize_t i915_pmu_event_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct dev_ext_attribute *eattr;
+
+	eattr = container_of(attr, struct dev_ext_attribute, attr);
+	return sprintf(buf, "config=0x%lx\n", (unsigned long)eattr->var);
+}
+
+#define I915_EVENT_ATTR(_name, _config) \
+	(&((struct dev_ext_attribute[]) { \
+		{ .attr = __ATTR(_name, 0444, i915_pmu_event_show, NULL), \
+		  .var = (void *)_config, } \
+	})[0].attr.attr)
+
+#define I915_EVENT_STR(_name, _str) \
+	(&((struct perf_pmu_events_attr[]) { \
+		{ .attr	     = __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \
+		  .id	     = 0, \
+		  .event_str = _str, } \
+	})[0].attr.attr)
+
+#define I915_EVENT(_name, _config, _unit) \
+	I915_EVENT_ATTR(_name, _config), \
+	I915_EVENT_STR(_name.unit, _unit)
+
+#define I915_ENGINE_EVENT(_name, _class, _instance, _sample) \
+	I915_EVENT_ATTR(_name, __I915_PMU_ENGINE(_class, _instance, _sample)), \
+	I915_EVENT_STR(_name.unit, "ns")
+
+#define I915_ENGINE_EVENTS(_name, _class, _instance) \
+	I915_ENGINE_EVENT(_name##_instance-busy, _class, _instance, I915_SAMPLE_BUSY), \
+	I915_ENGINE_EVENT(_name##_instance-sema, _class, _instance, I915_SAMPLE_SEMA), \
+	I915_ENGINE_EVENT(_name##_instance-wait, _class, _instance, I915_SAMPLE_WAIT)
+
+static struct attribute *i915_pmu_events_attrs[] = {
+	I915_ENGINE_EVENTS(rcs, I915_ENGINE_CLASS_RENDER, 0),
+	I915_ENGINE_EVENTS(bcs, I915_ENGINE_CLASS_COPY, 0),
+	I915_ENGINE_EVENTS(vcs, I915_ENGINE_CLASS_VIDEO, 0),
+	I915_ENGINE_EVENTS(vcs, I915_ENGINE_CLASS_VIDEO, 1),
+	I915_ENGINE_EVENTS(vecs, I915_ENGINE_CLASS_VIDEO_ENHANCE, 0),
+
+	I915_EVENT(actual-frequency,    I915_PMU_ACTUAL_FREQUENCY,    "MHz"),
+	I915_EVENT(requested-frequency, I915_PMU_REQUESTED_FREQUENCY, "MHz"),
+
+	NULL,
+};
+
+static const struct attribute_group i915_pmu_events_attr_group = {
+	.name = "events",
+	.attrs = i915_pmu_events_attrs,
+};
+
+static ssize_t
+i915_pmu_get_attr_cpumask(struct device *dev,
+			  struct device_attribute *attr,
+			  char *buf)
+{
+	return cpumap_print_to_pagebuf(true, buf, &i915_pmu_cpumask);
+}
+
+static DEVICE_ATTR(cpumask, 0444, i915_pmu_get_attr_cpumask, NULL);
+
+static struct attribute *i915_cpumask_attrs[] = {
+	&dev_attr_cpumask.attr,
+	NULL,
+};
+
+static struct attribute_group i915_pmu_cpumask_attr_group = {
+	.attrs = i915_cpumask_attrs,
+};
+
+static const struct attribute_group *i915_pmu_attr_groups[] = {
+	&i915_pmu_format_attr_group,
+	&i915_pmu_events_attr_group,
+	&i915_pmu_cpumask_attr_group,
+	NULL
+};
+
+static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
+{
+	unsigned int target;
+
+	target = cpumask_any_and(&i915_pmu_cpumask, &i915_pmu_cpumask);
+	/* Select the first online CPU as a designated reader. */
+	if (target >= nr_cpu_ids)
+		cpumask_set_cpu(cpu, &i915_pmu_cpumask);
+
+	return 0;
+}
+
+static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
+{
+	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);
+	unsigned int target;
+
+	if (cpumask_test_and_clear_cpu(cpu, &i915_pmu_cpumask)) {
+		target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
+		/* Migrate events if there is a valid target */
+		if (target < nr_cpu_ids) {
+			cpumask_set_cpu(target, &i915_pmu_cpumask);
+			perf_pmu_migrate_context(&pmu->base, cpu, target);
+		}
+	}
+
+	return 0;
+}
+
+void i915_pmu_register(struct drm_i915_private *i915)
+{
+	int ret;
+
+	if (INTEL_GEN(i915) <= 2) {
+		DRM_INFO("PMU not supported for this GPU.");
+		return;
+	}
+
+	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
+				      "perf/x86/intel/i915:online",
+				      i915_pmu_cpu_online,
+				      i915_pmu_cpu_offline);
+	if (ret)
+		goto err_hp_state;
+
+	ret = cpuhp_state_add_instance(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
+				       &i915->pmu.node);
+	if (ret)
+		goto err_hp_instance;
+
+	i915->pmu.base.attr_groups	= i915_pmu_attr_groups;
+	i915->pmu.base.task_ctx_nr	= perf_invalid_context;
+	i915->pmu.base.event_init	= i915_pmu_event_init;
+	i915->pmu.base.add		= i915_pmu_event_add;
+	i915->pmu.base.del		= i915_pmu_event_del;
+	i915->pmu.base.start		= i915_pmu_event_start;
+	i915->pmu.base.stop		= i915_pmu_event_stop;
+	i915->pmu.base.read		= i915_pmu_event_read;
+	i915->pmu.base.event_idx	= i915_pmu_event_event_idx;
+
+	spin_lock_init(&i915->pmu.lock);
+	hrtimer_init(&i915->pmu.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	i915->pmu.timer.function = i915_sample;
+	i915->pmu.enable = 0;
+
+	ret = perf_pmu_register(&i915->pmu.base, "i915", -1);
+	if (ret == 0)
+		return;
+
+	i915->pmu.base.event_init = NULL;
+
+	WARN_ON(cpuhp_state_remove_instance(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
+					    &i915->pmu.node));
+
+err_hp_instance:
+	cpuhp_remove_multi_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
+
+err_hp_state:
+	DRM_NOTE("Failed to register PMU! (err=%d)\n", ret);
+}
+
+void i915_pmu_unregister(struct drm_i915_private *i915)
+{
+	if (!i915->pmu.base.event_init)
+		return;
+
+	WARN_ON(cpuhp_state_remove_instance(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
+					    &i915->pmu.node));
+	cpuhp_remove_multi_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
+
+	i915->pmu.enable = 0;
+
+	hrtimer_cancel(&i915->pmu.timer);
+
+	perf_pmu_unregister(&i915->pmu.base);
+	i915->pmu.base.event_init = NULL;
+}
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
new file mode 100644
index 000000000000..0f884dd3496a
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+#ifndef __I915_PMU_H__
+#define __I915_PMU_H__
+
+enum {
+	__I915_SAMPLE_FREQ_ACT = 0,
+	__I915_SAMPLE_FREQ_REQ,
+	__I915_NUM_PMU_SAMPLERS
+};
+
+/**
+ * How many different events we track in the global PMU mask.
+ *
+ * It is also used to know the needed number of event reference counters.
+ */
+#define I915_PMU_MASK_BITS \
+	((1 << I915_PMU_SAMPLE_BITS) + \
+	 (I915_PMU_LAST + 1 - __I915_PMU_OTHER(0)))
+
+struct i915_pmu_sample {
+	u64 cur;
+	u32 prev;
+};
+
+struct i915_pmu {
+	/**
+	 * @node: List node for CPU hotplug handling.
+	 */
+	struct hlist_node node;
+	/**
+	 * @base: PMU base.
+	 */
+	struct pmu base;
+	/**
+	 * @lock: Lock protecting enable mask and ref count handling.
+	 */
+	spinlock_t lock;
+	/**
+	 * @timer: Timer for internal i915 PMU sampling.
+	 */
+	struct hrtimer timer;
+	/**
+	 * @enable: Bitmask of all currently enabled events.
+	 *
+	 * Bits are derived from uAPI event numbers in a way that low 16 bits
+	 * correspond to engine event _sample_ _type_ (I915_SAMPLE_BUSY is
+	 * bit 0), and higher bits correspond to other events (for instance
+	 * I915_PMU_ACTUAL_FREQUENCY is bit 16 etc).
+	 *
+	 * In other words, low 16 bits are not per engine but per engine
+	 * sampler type, while the upper bits are directly mapped to other
+	 * event types.
+	 */
+	u64 enable;
+	/**
+	 * @enable_count: Reference counts for the enabled events.
+	 *
+	 * Array indices are mapped in the same way as bits in the @enable field
+	 * and they are used to control sampling on/off when multiple clients
+	 * are using the PMU API.
+	 */
+	unsigned int enable_count[I915_PMU_MASK_BITS];
+	/**
+	 * @sample: Current and previous (raw) counters for sampling events.
+	 *
+	 * These counters are updated from the i915 PMU sampling timer.
+	 *
+	 * Only global counters are held here, while the per-engine ones are in
+	 * struct intel_engine_cs.
+	 */
+	struct i915_pmu_sample sample[__I915_NUM_PMU_SAMPLERS];
+};
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 50e65c98ca6c..ec9f68457582 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -186,6 +186,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define VIDEO_ENHANCEMENT_CLASS	2
 #define COPY_ENGINE_CLASS	3
 #define OTHER_CLASS		4
+#define MAX_ENGINE_CLASS	4
+
+#define MAX_ENGINE_INSTANCE    1
 
 /* PCI config space */
 
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index a59b2a30ff5a..4c7d7ade99d8 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -202,6 +202,15 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
 	GEM_BUG_ON(info->class >= ARRAY_SIZE(intel_engine_classes));
 	class_info = &intel_engine_classes[info->class];
 
+	if (GEM_WARN_ON(info->class > MAX_ENGINE_CLASS))
+		return -EINVAL;
+
+	if (GEM_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
+		return -EINVAL;
+
+	if (GEM_WARN_ON(dev_priv->engine_class[info->class][info->instance]))
+		return -EINVAL;
+
 	GEM_BUG_ON(dev_priv->engine[id]);
 	engine = kzalloc(sizeof(*engine), GFP_KERNEL);
 	if (!engine)
@@ -229,6 +238,7 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
 
 	ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
 
+	dev_priv->engine_class[info->class][info->instance] = engine;
 	dev_priv->engine[id] = engine;
 	return 0;
 }
@@ -1776,6 +1786,31 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
 	drm_printf(m, "\n");
 }
 
+static u8 user_class_map[I915_ENGINE_CLASS_MAX] = {
+	[I915_ENGINE_CLASS_OTHER] = OTHER_CLASS,
+	[I915_ENGINE_CLASS_RENDER] = RENDER_CLASS,
+	[I915_ENGINE_CLASS_COPY] = COPY_ENGINE_CLASS,
+	[I915_ENGINE_CLASS_VIDEO] = VIDEO_DECODE_CLASS,
+	[I915_ENGINE_CLASS_VIDEO_ENHANCE] = VIDEO_ENHANCEMENT_CLASS,
+};
+
+struct intel_engine_cs *
+intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
+{
+	if (class >= ARRAY_SIZE(user_class_map))
+		return NULL;
+
+	class = user_class_map[class];
+
+	if (WARN_ON_ONCE(class > MAX_ENGINE_CLASS))
+		return NULL;
+
+	if (instance > MAX_ENGINE_INSTANCE)
+		return NULL;
+
+	return i915->engine_class[class][instance];
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/mock_engine.c"
 #endif
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 17186f067408..289bb25dd964 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -5,6 +5,7 @@
 #include "i915_gem_batch_pool.h"
 #include "i915_gem_request.h"
 #include "i915_gem_timeline.h"
+#include "i915_pmu.h"
 #include "i915_selftest.h"
 
 struct drm_printer;
@@ -337,6 +338,28 @@ struct intel_engine_cs {
 		I915_SELFTEST_DECLARE(bool mock : 1);
 	} breadcrumbs;
 
+	struct {
+		/**
+		 * @enable: Bitmask of enabled sample events on this engine.
+		 *
+		 * Bits correspond to sample event types, for instance
+		 * I915_SAMPLE_BUSY is bit 0 etc.
+		 */
+		u32 enable;
+		/**
+		 * @enable_count: Reference count for the enabled samplers.
+		 *
+		 * Index number corresponds to the bit number from @enable.
+		 */
+		unsigned int enable_count[I915_PMU_SAMPLE_BITS];
+		/**
+		 * @sample: Counter values for sampling events.
+		 *
+		 * Our internal timer stores the current counters in this field.
+		 */
+		struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_MAX];
+	} pmu;
+
 	/*
 	 * A pool of objects to use as shadow copies of client batch buffers
 	 * when the command parser is enabled. Prevents the client from
@@ -843,4 +866,7 @@ bool intel_engine_can_store_dword(struct intel_engine_cs *engine);
 
 void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *p);
 
+struct intel_engine_cs *
+intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance);
+
 #endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 125bde7d9504..d35af9ff635c 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -86,6 +86,54 @@ enum i915_mocs_table_index {
 	I915_MOCS_CACHED,
 };
 
+enum drm_i915_gem_engine_class {
+	I915_ENGINE_CLASS_OTHER = 0,
+	I915_ENGINE_CLASS_RENDER = 1,
+	I915_ENGINE_CLASS_COPY = 2,
+	I915_ENGINE_CLASS_VIDEO = 3,
+	I915_ENGINE_CLASS_VIDEO_ENHANCE = 4,
+	I915_ENGINE_CLASS_MAX /* non-ABI */
+};
+
+/**
+ * DOC: perf_events exposed by i915 through /sys/bus/event_source/devices/i915
+ *
+ */
+
+enum drm_i915_pmu_engine_sample {
+	I915_SAMPLE_BUSY = 0,
+	I915_SAMPLE_WAIT = 1,
+	I915_SAMPLE_SEMA = 2,
+	I915_ENGINE_SAMPLE_MAX /* non-ABI */
+};
+
+#define I915_PMU_SAMPLE_BITS (4)
+#define I915_PMU_SAMPLE_MASK (0xf)
+#define I915_PMU_SAMPLE_INSTANCE_BITS (8)
+#define I915_PMU_CLASS_SHIFT \
+	(I915_PMU_SAMPLE_BITS + I915_PMU_SAMPLE_INSTANCE_BITS)
+
+#define __I915_PMU_ENGINE(class, instance, sample) \
+	((class) << I915_PMU_CLASS_SHIFT | \
+	(instance) << I915_PMU_SAMPLE_BITS | \
+	(sample))
+
+#define I915_PMU_ENGINE_BUSY(class, instance) \
+	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_BUSY)
+
+#define I915_PMU_ENGINE_WAIT(class, instance) \
+	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_WAIT)
+
+#define I915_PMU_ENGINE_SEMA(class, instance) \
+	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA)
+
+#define __I915_PMU_OTHER(x) (__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x))
+
+#define I915_PMU_ACTUAL_FREQUENCY	__I915_PMU_OTHER(0)
+#define I915_PMU_REQUESTED_FREQUENCY	__I915_PMU_OTHER(1)
+
+#define I915_PMU_LAST I915_PMU_REQUESTED_FREQUENCY
+
 /* Each region is a minimum of 16k, and there are at most 255 of them.
  */
 #define I915_NR_TEX_REGIONS 255	/* table size 2k - maximum due to use
-- 
2.9.5

* [CI 03/10] drm/i915/pmu: Suspend sampling when GPU is idle
  2017-10-10 15:18 [CI 01/10] drm/i915: Extract intel_get_cagf Tvrtko Ursulin
  2017-10-10 15:18 ` [CI 02/10] drm/i915/pmu: Expose a PMU interface for perf queries Tvrtko Ursulin
@ 2017-10-10 15:19 ` Tvrtko Ursulin
  2017-10-10 15:19 ` [CI 04/10] drm/i915: Wrap context schedule notification Tvrtko Ursulin
                   ` (7 subsequent siblings)
  9 siblings, 0 replies; 11+ messages in thread
From: Tvrtko Ursulin @ 2017-10-10 15:19 UTC (permalink / raw)
  To: Intel-gfx

From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

If only a subset of events is enabled, we can afford to suspend the
sampling timer when the GPU is idle and so save some cycles and power.
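
For illustration, with only an engine busy counter enabled the helper added
below behaves as follows when the GPU goes idle:

  /*
   * enable = BIT(I915_SAMPLE_BUSY)
   * enable &= FREQ masks | ENGINE_SAMPLE_MASK   -> BIT(I915_SAMPLE_BUSY)
   * !gpu_active: enable &= ~ENGINE_SAMPLE_MASK  -> 0
   *
   * pmu_needs_timer() therefore returns false, timer_enabled is cleared
   * and i915_sample() lets the hrtimer expire without restarting it.
   */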

v2: Rebase and limit timer even more.
v3: Rebase.
v4: Rebase.
v5: Skip action if perf PMU failed to register.
v6: Checkpatch cleanup.
v7:
 * Add a common helper to start the timer if needed. (Chris Wilson)
 * Add comment explaining bitwise logic in pmu_needs_timer.
v8: Fix some comments styles. (Chris Wilson)
v9: Rebase.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_drv.h         |  4 ++
 drivers/gpu/drm/i915/i915_gem.c         |  1 +
 drivers/gpu/drm/i915/i915_gem_request.c |  1 +
 drivers/gpu/drm/i915/i915_pmu.c         | 88 +++++++++++++++++++++++++++++----
 drivers/gpu/drm/i915/i915_pmu.h         |  4 ++
 5 files changed, 88 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index c634a8d193b3..2904ac4b2ae5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3983,9 +3983,13 @@ extern void i915_perf_unregister(struct drm_i915_private *dev_priv);
 #ifdef CONFIG_PERF_EVENTS
 void i915_pmu_register(struct drm_i915_private *i915);
 void i915_pmu_unregister(struct drm_i915_private *i915);
+void i915_pmu_gt_idle(struct drm_i915_private *i915);
+void i915_pmu_gt_active(struct drm_i915_private *i915);
 #else
 static inline void i915_pmu_register(struct drm_i915_private *i915) {}
 static inline void i915_pmu_unregister(struct drm_i915_private *i915) {}
+static inline void i915_pmu_gt_idle(struct drm_i915_private *i915) {}
+static inline void i915_pmu_gt_active(struct drm_i915_private *i915) {}
 #endif
 
 /* i915_suspend.c */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e829e8c900e8..34a16ffe2ba0 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3268,6 +3268,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
 
 	intel_engines_mark_idle(dev_priv);
 	i915_gem_timelines_mark_idle(dev_priv);
+	i915_pmu_gt_idle(dev_priv);
 
 	GEM_BUG_ON(!dev_priv->gt.awake);
 	dev_priv->gt.awake = false;
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index b100b38f1dd2..c96e56d0fd9e 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -258,6 +258,7 @@ static void mark_busy(struct drm_i915_private *i915)
 	i915_update_gfx_val(i915);
 	if (INTEL_GEN(i915) >= 6)
 		gen6_rps_busy(i915);
+	i915_pmu_gt_active(i915);
 
 	queue_delayed_work(i915->wq,
 			   &i915->gt.retire_work,
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index f51730bc49e2..a1a3c630c95e 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -90,6 +90,75 @@ static unsigned int event_enabled_bit(struct perf_event *event)
 	return config_enabled_bit(event->attr.config);
 }
 
+static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active)
+{
+	u64 enable;
+
+	/*
+	 * Only some counters need the sampling timer.
+	 *
+	 * We start with a bitmask of all currently enabled events.
+	 */
+	enable = i915->pmu.enable;
+
+	/*
+	 * Mask out all the ones which do not need the timer, or in
+	 * other words keep all the ones that could need the timer.
+	 */
+	enable &= config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY) |
+		  config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY) |
+		  ENGINE_SAMPLE_MASK;
+
+	/*
+	 * When the GPU is idle per-engine counters do not need to be
+	 * running so clear those bits out.
+	 */
+	if (!gpu_active)
+		enable &= ~ENGINE_SAMPLE_MASK;
+
+	/*
+	 * If some bits remain it means we need the sampling timer running.
+	 */
+	return enable;
+}
+
+void i915_pmu_gt_idle(struct drm_i915_private *i915)
+{
+	if (!i915->pmu.base.event_init)
+		return;
+
+	spin_lock_irq(&i915->pmu.lock);
+	/*
+	 * Signal sampling timer to stop if only engine events are enabled and
+	 * GPU went idle.
+	 */
+	i915->pmu.timer_enabled = pmu_needs_timer(i915, false);
+	spin_unlock_irq(&i915->pmu.lock);
+}
+
+static void __i915_pmu_maybe_start_timer(struct drm_i915_private *i915)
+{
+	if (!i915->pmu.timer_enabled && pmu_needs_timer(i915, true)) {
+		i915->pmu.timer_enabled = true;
+		hrtimer_start_range_ns(&i915->pmu.timer,
+				       ns_to_ktime(PERIOD), 0,
+				       HRTIMER_MODE_REL_PINNED);
+	}
+}
+
+void i915_pmu_gt_active(struct drm_i915_private *i915)
+{
+	if (!i915->pmu.base.event_init)
+		return;
+
+	spin_lock_irq(&i915->pmu.lock);
+	/*
+	 * Re-enable sampling timer when GPU goes active.
+	 */
+	__i915_pmu_maybe_start_timer(i915);
+	spin_unlock_irq(&i915->pmu.lock);
+}
+
 static bool grab_forcewake(struct drm_i915_private *i915, bool fw)
 {
 	if (!fw)
@@ -186,7 +255,7 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
 	struct drm_i915_private *i915 =
 		container_of(hrtimer, struct drm_i915_private, pmu.timer);
 
-	if (i915->pmu.enable == 0)
+	if (!READ_ONCE(i915->pmu.timer_enabled))
 		return HRTIMER_NORESTART;
 
 	engines_sample(i915);
@@ -339,14 +408,6 @@ static void i915_pmu_enable(struct perf_event *event)
 	spin_lock_irqsave(&i915->pmu.lock, flags);
 
 	/*
-	 * Start the sampling timer when enabling the first event.
-	 */
-	if (i915->pmu.enable == 0)
-		hrtimer_start_range_ns(&i915->pmu.timer,
-				       ns_to_ktime(PERIOD), 0,
-				       HRTIMER_MODE_REL_PINNED);
-
-	/*
 	 * Update the bitmask of enabled events and increment
 	 * the event reference counter.
 	 */
@@ -356,6 +417,11 @@ static void i915_pmu_enable(struct perf_event *event)
 	i915->pmu.enable_count[bit]++;
 
 	/*
+	 * Start the sampling timer if needed and not already enabled.
+	 */
+	__i915_pmu_maybe_start_timer(i915);
+
+	/*
 	 * For per-engine events the bitmask and reference counting
 	 * is stored per engine.
 	 */
@@ -417,8 +483,10 @@ static void i915_pmu_disable(struct perf_event *event)
 	 * Decrement the reference count and clear the enabled
 	 * bitmask when the last listener on an event goes away.
 	 */
-	if (--i915->pmu.enable_count[bit] == 0)
+	if (--i915->pmu.enable_count[bit] == 0) {
 		i915->pmu.enable &= ~BIT_ULL(bit);
+		i915->pmu.timer_enabled &= pmu_needs_timer(i915, true);
+	}
 
 	spin_unlock_irqrestore(&i915->pmu.lock, flags);
 }
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index 0f884dd3496a..89be7eb4af1c 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -83,6 +83,10 @@ struct i915_pmu {
 	 */
 	unsigned int enable_count[I915_PMU_MASK_BITS];
 	/**
+	 * @timer_enabled: Should the internal sampling timer be running.
+	 */
+	bool timer_enabled;
+	/**
 	 * @sample: Current and previous (raw) counters for sampling events.
 	 *
 	 * These counters are updated from the i915 PMU sampling timer.
-- 
2.9.5

* [CI 04/10] drm/i915: Wrap context schedule notification
  2017-10-10 15:18 [CI 01/10] drm/i915: Extract intel_get_cagf Tvrtko Ursulin
  2017-10-10 15:18 ` [CI 02/10] drm/i915/pmu: Expose a PMU interface for perf queries Tvrtko Ursulin
  2017-10-10 15:19 ` [CI 03/10] drm/i915/pmu: Suspend sampling when GPU is idle Tvrtko Ursulin
@ 2017-10-10 15:19 ` Tvrtko Ursulin
  2017-10-10 15:19 ` [CI 05/10] drm/i915: Engine busy time tracking Tvrtko Ursulin
                   ` (6 subsequent siblings)
  9 siblings, 0 replies; 11+ messages in thread
From: Tvrtko Ursulin @ 2017-10-10 15:19 UTC (permalink / raw)
  To: Intel-gfx

From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

No functional change, just something which will be handy in the
following patch.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/intel_lrc.c | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 721432ddf403..7a1243875f90 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -400,6 +400,18 @@ execlists_context_status_change(struct drm_i915_gem_request *rq,
 				   status, rq);
 }
 
+static inline void
+execlists_context_schedule_in(struct drm_i915_gem_request *rq)
+{
+	execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
+}
+
+static inline void
+execlists_context_schedule_out(struct drm_i915_gem_request *rq)
+{
+	execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
+}
+
 static void
 execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
 {
@@ -451,7 +463,7 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
 		if (rq) {
 			GEM_BUG_ON(count > !n);
 			if (!count++)
-				execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
+				execlists_context_schedule_in(rq);
 			port_set(&port[n], port_pack(rq, count));
 			desc = execlists_update_context(rq);
 			GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc));
@@ -879,8 +891,7 @@ static void intel_lrc_irq_handler(unsigned long data)
 			if (--count == 0) {
 				GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
 				GEM_BUG_ON(!i915_gem_request_completed(rq));
-				execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
-
+				execlists_context_schedule_out(rq);
 				trace_i915_gem_request_out(rq);
 				rq->priotree.priority = INT_MAX;
 				i915_gem_request_put(rq);
-- 
2.9.5

* [CI 05/10] drm/i915: Engine busy time tracking
  2017-10-10 15:18 [CI 01/10] drm/i915: Extract intel_get_cagf Tvrtko Ursulin
                   ` (2 preceding siblings ...)
  2017-10-10 15:19 ` [CI 04/10] drm/i915: Wrap context schedule notification Tvrtko Ursulin
@ 2017-10-10 15:19 ` Tvrtko Ursulin
  2017-10-10 15:19 ` [CI 06/10] drm/i915/pmu: Wire up engine busy stats to PMU Tvrtko Ursulin
                   ` (5 subsequent siblings)
  9 siblings, 0 replies; 11+ messages in thread
From: Tvrtko Ursulin @ 2017-10-10 15:19 UTC (permalink / raw)
  To: Intel-gfx

From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Track the total time requests have been executing on the hardware.

We add a new kernel API to allow software tracking of the time GPU
engines spend executing requests.

Both a per-engine and a global API are added, with the latter also
being exported for use by external users.
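
A minimal sketch of how an in-kernel user could consume this API (the caller,
interval and debug message here are purely illustrative):

  static void report_rcs0_busyness(struct drm_i915_private *i915)
  {
  	struct intel_engine_cs *engine = i915->engine[RCS];
  	ktime_t t0, t1;

  	if (intel_enable_engine_stats(engine))
  		return; /* no execlists, or too many listeners */

  	t0 = intel_engine_get_busy_time(engine);
  	msleep(1000);
  	t1 = intel_engine_get_busy_time(engine);

  	intel_disable_engine_stats(engine);

  	DRM_DEBUG_DRIVER("rcs0 busy for %lldns in the last second\n",
  			 ktime_to_ns(ktime_sub(t1, t0)));
  }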

v2:
 * Squashed with the internal API.
 * Dropped static key.
 * Made per-engine.
 * Store time in monotonic ktime.

v3: Moved stats clearing to disable.

v4:
 * Comments.
 * Don't export the API just yet.

v5: Whitespace cleanup.

v6:
 * Rename ref to active.
 * Drop engine aggregate stats for now.
 * Account initial busy period after enabling stats.

v7:
 * Rebase.

v8:
 * Move context in notification after the notifier. (Chris Wilson)

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/intel_engine_cs.c  | 84 ++++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/intel_lrc.c        |  2 +
 drivers/gpu/drm/i915/intel_ringbuffer.h | 92 +++++++++++++++++++++++++++++++++
 3 files changed, 178 insertions(+)

diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 4c7d7ade99d8..51885629d3df 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -236,6 +236,8 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
 	/* Nothing to do here, execute in order of dependencies */
 	engine->schedule = NULL;
 
+	spin_lock_init(&engine->stats.lock);
+
 	ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
 
 	dev_priv->engine_class[info->class][info->instance] = engine;
@@ -1811,6 +1813,88 @@ intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
 	return i915->engine_class[class][instance];
 }
 
+/**
+ * intel_enable_engine_stats() - Enable engine busy tracking on engine
+ * @engine: engine to enable stats collection
+ *
+ * Start collecting the engine busyness data for @engine.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int intel_enable_engine_stats(struct intel_engine_cs *engine)
+{
+	unsigned long flags;
+
+	if (!i915_modparams.enable_execlists)
+		return -ENODEV;
+
+	spin_lock_irqsave(&engine->stats.lock, flags);
+	if (engine->stats.enabled == ~0)
+		goto busy;
+	if (engine->stats.enabled++ == 0)
+		engine->stats.enabled_at = ktime_get();
+	spin_unlock_irqrestore(&engine->stats.lock, flags);
+
+	return 0;
+
+busy:
+	spin_unlock_irqrestore(&engine->stats.lock, flags);
+
+	return -EBUSY;
+}
+
+/**
+ * intel_disable_engine_stats() - Disable engine busy tracking on engine
+ * @engine: engine to disable stats collection
+ *
+ * Stops collecting the engine busyness data for @engine.
+ */
+void intel_disable_engine_stats(struct intel_engine_cs *engine)
+{
+	unsigned long flags;
+
+	if (!i915_modparams.enable_execlists)
+		return;
+
+	spin_lock_irqsave(&engine->stats.lock, flags);
+	WARN_ON_ONCE(engine->stats.enabled == 0);
+	if (--engine->stats.enabled == 0) {
+		engine->stats.enabled_at = 0;
+		engine->stats.active = 0;
+		engine->stats.start = 0;
+		engine->stats.total = 0;
+	}
+	spin_unlock_irqrestore(&engine->stats.lock, flags);
+}
+
+/**
+ * intel_engine_get_busy_time() - Return current accumulated engine busyness
+ * @engine: engine to report on
+ *
+ * Returns accumulated time @engine was busy since engine stats were enabled.
+ */
+ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine)
+{
+	ktime_t total;
+	unsigned long flags;
+
+	spin_lock_irqsave(&engine->stats.lock, flags);
+
+	total = engine->stats.total;
+
+	/*
+	 * If the engine is executing something at the moment
+	 * add it to the total.
+	 */
+	if (engine->stats.active)
+		total = ktime_add(total,
+				  ktime_sub(ktime_get(), engine->stats.start));
+
+	spin_unlock_irqrestore(&engine->stats.lock, flags);
+
+	return total;
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/mock_engine.c"
 #endif
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 7a1243875f90..1da5bd3b9db5 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -404,11 +404,13 @@ static inline void
 execlists_context_schedule_in(struct drm_i915_gem_request *rq)
 {
 	execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
+	intel_engine_context_in(rq->engine);
 }
 
 static inline void
 execlists_context_schedule_out(struct drm_i915_gem_request *rq)
 {
+	intel_engine_context_out(rq->engine);
 	execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 289bb25dd964..850620699468 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -546,6 +546,38 @@ struct intel_engine_cs {
 	 * certain bits to encode the command length in the header).
 	 */
 	u32 (*get_cmd_length_mask)(u32 cmd_header);
+
+	struct {
+		/**
+		 * @lock: Lock protecting the below fields.
+		 */
+		spinlock_t lock;
+		/**
+		 * @enabled: Reference count indicating number of listeners.
+		 */
+		unsigned int enabled;
+		/**
+		 * @active: Number of contexts currently scheduled in.
+		 */
+		unsigned int active;
+		/**
+		 * @enabled_at: Timestamp when busy stats were enabled.
+		 */
+		ktime_t enabled_at;
+		/**
+		 * @start: Timestamp of the last idle to active transition.
+		 *
+		 * Idle is defined as active == 0, active is active > 0.
+		 */
+		ktime_t start;
+		/**
+		 * @total: Total time this engine was busy.
+		 *
+		 * Accumulated time not counting the most recent block in cases
+		 * where engine is currently busy (active > 0).
+		 */
+		ktime_t total;
+	} stats;
 };
 
 static inline unsigned int
@@ -869,4 +901,64 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *p);
 struct intel_engine_cs *
 intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance);
 
+static inline void intel_engine_context_in(struct intel_engine_cs *engine)
+{
+	unsigned long flags;
+
+	if (READ_ONCE(engine->stats.enabled) == 0)
+		return;
+
+	spin_lock_irqsave(&engine->stats.lock, flags);
+
+	if (engine->stats.enabled > 0) {
+		if (engine->stats.active++ == 0)
+			engine->stats.start = ktime_get();
+		GEM_BUG_ON(engine->stats.active == 0);
+	}
+
+	spin_unlock_irqrestore(&engine->stats.lock, flags);
+}
+
+static inline void intel_engine_context_out(struct intel_engine_cs *engine)
+{
+	unsigned long flags;
+
+	if (READ_ONCE(engine->stats.enabled) == 0)
+		return;
+
+	spin_lock_irqsave(&engine->stats.lock, flags);
+
+	if (engine->stats.enabled > 0) {
+		ktime_t last, now = ktime_get();
+
+		if (engine->stats.active && --engine->stats.active == 0) {
+			/*
+			 * Decrement the active context count and in case GPU
+			 * is now idle add up to the running total.
+			 */
+			last = ktime_sub(now, engine->stats.start);
+
+			engine->stats.total = ktime_add(engine->stats.total,
+							last);
+		} else if (engine->stats.active == 0) {
+			/*
+			 * After turning on engine stats, context out might be
+			 * the first event in which case we account from the
+			 * time stats gathering was turned on.
+			 */
+			last = ktime_sub(now, engine->stats.enabled_at);
+
+			engine->stats.total = ktime_add(engine->stats.total,
+							last);
+		}
+	}
+
+	spin_unlock_irqrestore(&engine->stats.lock, flags);
+}
+
+int intel_enable_engine_stats(struct intel_engine_cs *engine);
+void intel_disable_engine_stats(struct intel_engine_cs *engine);
+
+ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine);
+
 #endif /* _INTEL_RINGBUFFER_H_ */
-- 
2.9.5
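
A minimal sketch of how a hypothetical in-kernel consumer could use the
interface added above to turn the accumulated busy time into a busyness
percentage over an interval. The helper name, the msleep()-based sampling
and the percentage maths are purely illustrative and not part of the
series:

/* Needs <linux/ktime.h>, <linux/delay.h> and <linux/math64.h>. */
static u64 sample_engine_busyness_pct(struct intel_engine_cs *engine,
				      unsigned int period_ms)
{
	ktime_t t0, t1, busy0, busy1;
	u64 busy_ns, wall_ns;

	if (intel_enable_engine_stats(engine))
		return 0; /* no execlists, or enable refcount saturated */

	t0 = ktime_get();
	busy0 = intel_engine_get_busy_time(engine);

	msleep(period_ms);

	t1 = ktime_get();
	busy1 = intel_engine_get_busy_time(engine);

	intel_disable_engine_stats(engine);

	busy_ns = ktime_to_ns(ktime_sub(busy1, busy0));
	wall_ns = ktime_to_ns(ktime_sub(t1, t0));

	return div64_u64(100 * busy_ns, wall_ns);
}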


* [CI 06/10] drm/i915/pmu: Wire up engine busy stats to PMU
  2017-10-10 15:18 [CI 01/10] drm/i915: Extract intel_get_cagf Tvrtko Ursulin
                   ` (3 preceding siblings ...)
  2017-10-10 15:19 ` [CI 05/10] drm/i915: Engine busy time tracking Tvrtko Ursulin
@ 2017-10-10 15:19 ` Tvrtko Ursulin
  2017-10-10 15:19 ` [CI 07/10] drm/i915: Gate engine stats collection with a static key Tvrtko Ursulin
                   ` (4 subsequent siblings)
  9 siblings, 0 replies; 11+ messages in thread
From: Tvrtko Ursulin @ 2017-10-10 15:19 UTC (permalink / raw)
  To: Intel-gfx

From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

We can use engine busy stats instead of the sampling timer for
better accuracy.

By doing this we replace the stochastic sampling with a busyness
metric derived directly from engine activity. This is context
switch interrupt driven, so it is as accurate as we can get from
software tracking.

As a secondary benefit, we also avoid running the sampling timer
when only the busyness metric is enabled.
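
To illustrate the consumer side, a minimal userspace sketch which reads
the render engine busyness in nanoseconds through perf. It assumes the
I915_PMU_ENGINE_BUSY() config macro from the PMU uapi additions earlier
in the series; the header path, the class/instance pair (0, 0) taken to
be the render engine, and the missing error handling are simplifications
for illustration only:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>
#include <drm/i915_drm.h>	/* assumed location of I915_PMU_ENGINE_BUSY() */

static int i915_pmu_type(void)
{
	/* The i915 PMU registers with a dynamically allocated type id. */
	FILE *file = fopen("/sys/bus/event_source/devices/i915/type", "r");
	int type = -1;

	if (file) {
		fscanf(file, "%d", &type);
		fclose(file);
	}

	return type;
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t busy_ns = 0;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = i915_pmu_type();
	attr.config = I915_PMU_ENGINE_BUSY(0, 0);

	/* System-wide counter, opened on CPU 0; no task context. */
	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (fd < 0)
		return EXIT_FAILURE;

	sleep(1);
	read(fd, &busy_ns, sizeof(busy_ns));
	printf("render busy: %" PRIu64 " ns over ~1s\n", busy_ns);
	close(fd);

	return EXIT_SUCCESS;
}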

v2: Rebase.
v3:
 * Rebase, comments.
 * Leave engine busyness controls out of workers.
v4: Checkpatch cleanup.
v5: Added comment to pmu_needs_timer change.
v6:
 * Rebase.
 * Fix style of some comments. (Chris Wilson)
v7: Rebase and commit message update. (Chris Wilson)

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_pmu.c         | 37 +++++++++++++++++++++++++++++++--
 drivers/gpu/drm/i915/intel_ringbuffer.h |  5 +++++
 2 files changed, 40 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index a1a3c630c95e..2671e3640639 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -90,6 +90,11 @@ static unsigned int event_enabled_bit(struct perf_event *event)
 	return config_enabled_bit(event->attr.config);
 }
 
+static bool supports_busy_stats(void)
+{
+	return i915_modparams.enable_execlists;
+}
+
 static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active)
 {
 	u64 enable;
@@ -115,6 +120,12 @@ static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active)
 	 */
 	if (!gpu_active)
 		enable &= ~ENGINE_SAMPLE_MASK;
+	/*
+	 * Also, if software busyness tracking is available we do not
+	 * need the timer for the I915_SAMPLE_BUSY counter.
+	 */
+	else if (supports_busy_stats())
+		enable &= ~BIT(I915_SAMPLE_BUSY);
 
 	/*
 	 * If some bits remain it means we need the sampling timer running.
@@ -362,6 +373,9 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
 
 		if (WARN_ON_ONCE(!engine)) {
 			/* Do nothing */
+		} else if (sample == I915_SAMPLE_BUSY &&
+			   engine->pmu.busy_stats) {
+			val = ktime_to_ns(intel_engine_get_busy_time(engine));
 		} else {
 			val = engine->pmu.sample[sample].cur;
 		}
@@ -398,6 +412,12 @@ static void i915_pmu_event_read(struct perf_event *event)
 	local64_add(new - prev, &event->count);
 }
 
+static bool engine_needs_busy_stats(struct intel_engine_cs *engine)
+{
+	return supports_busy_stats() &&
+	       (engine->pmu.enable & BIT(I915_SAMPLE_BUSY));
+}
+
 static void i915_pmu_enable(struct perf_event *event)
 {
 	struct drm_i915_private *i915 =
@@ -437,7 +457,14 @@ static void i915_pmu_enable(struct perf_event *event)
 
 		GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
 		GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);
-		engine->pmu.enable_count[sample]++;
+		if (engine->pmu.enable_count[sample]++ == 0) {
+			if (engine_needs_busy_stats(engine) &&
+			    !engine->pmu.busy_stats) {
+				engine->pmu.busy_stats =
+					intel_enable_engine_stats(engine) == 0;
+				WARN_ON_ONCE(!engine->pmu.busy_stats);
+			}
+		}
 	}
 
 	/*
@@ -473,8 +500,14 @@ static void i915_pmu_disable(struct perf_event *event)
 		 * Decrement the reference count and clear the enabled
 		 * bitmask when the last listener on an event goes away.
 		 */
-		if (--engine->pmu.enable_count[sample] == 0)
+		if (--engine->pmu.enable_count[sample] == 0) {
 			engine->pmu.enable &= ~BIT(sample);
+			if (!engine_needs_busy_stats(engine) &&
+			    engine->pmu.busy_stats) {
+				engine->pmu.busy_stats = false;
+				intel_disable_engine_stats(engine);
+			}
+		}
 	}
 
 	GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 850620699468..b7f0cd449824 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -358,6 +358,11 @@ struct intel_engine_cs {
 		 * Our internal timer stores the current counters in this field.
 		 */
 		struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_MAX];
+		/**
+		 * @busy_stats: Has enablement of engine stats tracking been
+		 * 		requested.
+		 */
+		bool busy_stats;
 	} pmu;
 
 	/*
-- 
2.9.5


* [CI 07/10] drm/i915: Gate engine stats collection with a static key
  2017-10-10 15:18 [CI 01/10] drm/i915: Extract intel_get_cagf Tvrtko Ursulin
                   ` (4 preceding siblings ...)
  2017-10-10 15:19 ` [CI 06/10] drm/i915/pmu: Wire up engine busy stats to PMU Tvrtko Ursulin
@ 2017-10-10 15:19 ` Tvrtko Ursulin
  2017-10-10 15:19 ` [CI 08/10] drm/i915/pmu: Add interrupt count metric Tvrtko Ursulin
                   ` (3 subsequent siblings)
  9 siblings, 0 replies; 11+ messages in thread
From: Tvrtko Ursulin @ 2017-10-10 15:19 UTC (permalink / raw)
  To: Intel-gfx

From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

This reduces the cost of the software engine busyness tracking
to a single no-op instruction when there are no listeners.

We add a new i915 ordered workqueue to be used only for tasks
not needing struct mutex.
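
For reference, the static key pattern this relies on, as a generic
self-contained sketch (the names are made up and this is not the driver
code): the hot path branch is patched to a no-op while the key is
disabled, and flipping the key may sleep, which is why the actual
enable/disable is pushed to a workqueue instead of being done from the
perf callbacks.

#include <linux/jump_label.h>
#include <linux/mutex.h>

static DEFINE_STATIC_KEY_FALSE(example_stats_key);
static DEFINE_MUTEX(example_stats_mutex);
static int example_stats_ref;

/* Hot path: a single no-op until the key is switched on. */
static inline void example_hot_path(void)
{
	if (static_branch_unlikely(&example_stats_key)) {
		/* expensive accounting, only when someone is listening */
	}
}

/* Slow path: static_branch_enable/disable may sleep. */
static void example_get(void)
{
	mutex_lock(&example_stats_mutex);
	if (example_stats_ref++ == 0)
		static_branch_enable(&example_stats_key);
	mutex_unlock(&example_stats_mutex);
}

static void example_put(void)
{
	mutex_lock(&example_stats_mutex);
	if (--example_stats_ref == 0)
		static_branch_disable(&example_stats_key);
	mutex_unlock(&example_stats_mutex);
}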

v2: Rebase and some comments.
v3: Rebase.
v4: Checkpatch fixes.
v5: Rebase.
v6: Use system_long_wq to avoid being blocked by struct_mutex
    users.
v7: Fix bad conflict resolution from last rebase. (Dmitry Rogozhkin)
v8: Rebase.
v9:
 * Fix race between unordered enable followed by disable.
   (Chris Wilson)
 * Prettify order of local variable declarations. (Chris Wilson)
v10:
 Chris Wilson:
  * Fix workqueue allocation failure handling.
  * Expand comment about critical workqueue ordering.
  * Add some GEM_BUG_ONs to document impossible conditions.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_drv.c         |  13 +++-
 drivers/gpu/drm/i915/i915_drv.h         |   5 ++
 drivers/gpu/drm/i915/i915_pmu.c         |  65 ++++++++++++++++++--
 drivers/gpu/drm/i915/intel_engine_cs.c  |  17 ++++++
 drivers/gpu/drm/i915/intel_ringbuffer.h | 101 ++++++++++++++++++++------------
 5 files changed, 158 insertions(+), 43 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index d373a840566a..3853038c6fdd 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -804,12 +804,22 @@ static int i915_workqueues_init(struct drm_i915_private *dev_priv)
 	if (dev_priv->wq == NULL)
 		goto out_err;
 
+	/*
+	 * Ordered workqueue for tasks not needing the global mutex but
+	 * which still need to be serialized.
+	 */
+	dev_priv->wq_misc = alloc_ordered_workqueue("i915-misc", 0);
+	if (dev_priv->wq_misc == NULL)
+		goto out_free_wq;
+
 	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
 	if (dev_priv->hotplug.dp_wq == NULL)
-		goto out_free_wq;
+		goto out_free_wq_misc;
 
 	return 0;
 
+out_free_wq_misc:
+	destroy_workqueue(dev_priv->wq_misc);
 out_free_wq:
 	destroy_workqueue(dev_priv->wq);
 out_err:
@@ -830,6 +840,7 @@ static void i915_engines_cleanup(struct drm_i915_private *i915)
 static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
 {
 	destroy_workqueue(dev_priv->hotplug.dp_wq);
+	destroy_workqueue(dev_priv->wq_misc);
 	destroy_workqueue(dev_priv->wq);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2904ac4b2ae5..99989cd778a9 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2356,6 +2356,11 @@ struct drm_i915_private {
 	 */
 	struct workqueue_struct *wq;
 
+	/**
+	 * @wq_misc: Workqueue for serialized tasks not needing struct mutex.
+	 */
+	struct workqueue_struct *wq_misc;
+
 	/* Display functions */
 	struct drm_i915_display_funcs display;
 
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 2671e3640639..d552e415becd 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -458,11 +458,20 @@ static void i915_pmu_enable(struct perf_event *event)
 		GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
 		GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);
 		if (engine->pmu.enable_count[sample]++ == 0) {
+			/*
+			 * Enable engine busy stats tracking if needed or
+			 * alternatively cancel the scheduled disabling of the
+			 * same.
+			 *
+			 * If a delayed disable was pending, cancelling it is
+			 * enough and there is no need to queue a new enable.
+			 */
 			if (engine_needs_busy_stats(engine) &&
 			    !engine->pmu.busy_stats) {
-				engine->pmu.busy_stats =
-					intel_enable_engine_stats(engine) == 0;
-				WARN_ON_ONCE(!engine->pmu.busy_stats);
+				engine->pmu.busy_stats = true;
+				if (!cancel_delayed_work(&engine->pmu.disable_busy_stats))
+					queue_work(i915->wq_misc,
+						   &engine->pmu.enable_busy_stats);
 			}
 		}
 	}
@@ -505,7 +514,22 @@ static void i915_pmu_disable(struct perf_event *event)
 			if (!engine_needs_busy_stats(engine) &&
 			    engine->pmu.busy_stats) {
 				engine->pmu.busy_stats = false;
-				intel_disable_engine_stats(engine);
+				/*
+				 * We request a delayed disable so that the
+				 * rapid on/off event cycles, which can happen
+				 * when tools like perf stat start up, are
+				 * handled more gracefully.
+				 *
+				 * PMU enable and disable callbacks are
+				 * serialized with the spinlock, and since we do
+				 * them in decoupled fashion from workers, it is
+				 * critical to use an ordered work queue to
+				 * preserve this ordering between the two
+				 * events.
+				 */
+				queue_delayed_work(i915->wq_misc,
+						   &engine->pmu.disable_busy_stats,
+						   round_jiffies_up_relative(HZ));
 			}
 		}
 	}
@@ -689,8 +713,26 @@ static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
 	return 0;
 }
 
+static void __enable_busy_stats(struct work_struct *work)
+{
+	struct intel_engine_cs *engine =
+		container_of(work, typeof(*engine), pmu.enable_busy_stats);
+
+	WARN_ON_ONCE(intel_enable_engine_stats(engine));
+}
+
+static void __disable_busy_stats(struct work_struct *work)
+{
+	struct intel_engine_cs *engine =
+	       container_of(work, typeof(*engine), pmu.disable_busy_stats.work);
+
+	intel_disable_engine_stats(engine);
+}
+
 void i915_pmu_register(struct drm_i915_private *i915)
 {
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 	int ret;
 
 	if (INTEL_GEN(i915) <= 2) {
@@ -725,6 +767,12 @@ void i915_pmu_register(struct drm_i915_private *i915)
 	i915->pmu.timer.function = i915_sample;
 	i915->pmu.enable = 0;
 
+	for_each_engine(engine, i915, id) {
+		INIT_WORK(&engine->pmu.enable_busy_stats, __enable_busy_stats);
+		INIT_DELAYED_WORK(&engine->pmu.disable_busy_stats,
+				  __disable_busy_stats);
+	}
+
 	ret = perf_pmu_register(&i915->pmu.base, "i915", -1);
 	if (ret == 0)
 		return;
@@ -743,6 +791,9 @@ void i915_pmu_register(struct drm_i915_private *i915)
 
 void i915_pmu_unregister(struct drm_i915_private *i915)
 {
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+
 	if (!i915->pmu.base.event_init)
 		return;
 
@@ -754,6 +805,12 @@ void i915_pmu_unregister(struct drm_i915_private *i915)
 
 	hrtimer_cancel(&i915->pmu.timer);
 
+	for_each_engine(engine, i915, id) {
+		GEM_BUG_ON(engine->pmu.busy_stats);
+		GEM_BUG_ON(flush_work(&engine->pmu.enable_busy_stats));
+		flush_delayed_work(&engine->pmu.disable_busy_stats);
+	}
+
 	perf_pmu_unregister(&i915->pmu.base);
 	i915->pmu.base.event_init = NULL;
 }
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 51885629d3df..80a7b55184df 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -21,6 +21,7 @@
  * IN THE SOFTWARE.
  *
  */
+#include <linux/static_key.h>
 
 #include <drm/drm_print.h>
 
@@ -1813,6 +1814,10 @@ intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
 	return i915->engine_class[class][instance];
 }
 
+DEFINE_STATIC_KEY_FALSE(i915_engine_stats_key);
+static DEFINE_MUTEX(i915_engine_stats_mutex);
+static int i915_engine_stats_ref;
+
 /**
  * intel_enable_engine_stats() - Enable engine busy tracking on engine
  * @engine: engine to enable stats collection
@@ -1828,6 +1833,8 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
 	if (!i915_modparams.enable_execlists)
 		return -ENODEV;
 
+	mutex_lock(&i915_engine_stats_mutex);
+
 	spin_lock_irqsave(&engine->stats.lock, flags);
 	if (engine->stats.enabled == ~0)
 		goto busy;
@@ -1835,10 +1842,16 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
 		engine->stats.enabled_at = ktime_get();
 	spin_unlock_irqrestore(&engine->stats.lock, flags);
 
+	if (i915_engine_stats_ref++ == 0)
+		static_branch_enable(&i915_engine_stats_key);
+
+	mutex_unlock(&i915_engine_stats_mutex);
+
 	return 0;
 
 busy:
 	spin_unlock_irqrestore(&engine->stats.lock, flags);
+	mutex_unlock(&i915_engine_stats_mutex);
 
 	return -EBUSY;
 }
@@ -1856,6 +1869,7 @@ void intel_disable_engine_stats(struct intel_engine_cs *engine)
 	if (!i915_modparams.enable_execlists)
 		return;
 
+	mutex_lock(&i915_engine_stats_mutex);
 	spin_lock_irqsave(&engine->stats.lock, flags);
 	WARN_ON_ONCE(engine->stats.enabled == 0);
 	if (--engine->stats.enabled == 0) {
@@ -1865,6 +1879,9 @@ void intel_disable_engine_stats(struct intel_engine_cs *engine)
 		engine->stats.total = 0;
 	}
 	spin_unlock_irqrestore(&engine->stats.lock, flags);
+	if (--i915_engine_stats_ref == 0)
+		static_branch_disable(&i915_engine_stats_key);
+	mutex_unlock(&i915_engine_stats_mutex);
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index b7f0cd449824..0202b1fa42fd 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -363,6 +363,22 @@ struct intel_engine_cs {
 		 * 		requested.
 		 */
 		bool busy_stats;
+		/**
+		 * @enable_busy_stats: Work item for engine busy stats enabling.
+		 *
+		 * Since the action can sleep it needs to be decoupled from the
+		 * perf API callback.
+		 */
+		struct work_struct enable_busy_stats;
+		/**
+		 * @disable_busy_stats: Work item for busy stats disabling.
+		 *
+		 * Same as with @enable_busy_stats action, with the difference
+		 * that we delay it in case there are rapid enable-disable
+		 * actions, which can happen during tool startup (like perf
+		 * stat).
+		 */
+		struct delayed_work disable_busy_stats;
 	} pmu;
 
 	/*
@@ -906,59 +922,68 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *p);
 struct intel_engine_cs *
 intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance);
 
+DECLARE_STATIC_KEY_FALSE(i915_engine_stats_key);
+
 static inline void intel_engine_context_in(struct intel_engine_cs *engine)
 {
 	unsigned long flags;
 
-	if (READ_ONCE(engine->stats.enabled) == 0)
-		return;
+	if (static_branch_unlikely(&i915_engine_stats_key)) {
+		if (READ_ONCE(engine->stats.enabled) == 0)
+			return;
 
-	spin_lock_irqsave(&engine->stats.lock, flags);
+		spin_lock_irqsave(&engine->stats.lock, flags);
 
-	if (engine->stats.enabled > 0) {
-		if (engine->stats.active++ == 0)
-			engine->stats.start = ktime_get();
-		GEM_BUG_ON(engine->stats.active == 0);
-	}
+		if (engine->stats.enabled > 0) {
+			if (engine->stats.active++ == 0)
+				engine->stats.start = ktime_get();
+			GEM_BUG_ON(engine->stats.active == 0);
+		}
 
-	spin_unlock_irqrestore(&engine->stats.lock, flags);
+		spin_unlock_irqrestore(&engine->stats.lock, flags);
+	}
 }
 
 static inline void intel_engine_context_out(struct intel_engine_cs *engine)
 {
 	unsigned long flags;
 
-	if (READ_ONCE(engine->stats.enabled) == 0)
-		return;
-
-	spin_lock_irqsave(&engine->stats.lock, flags);
-
-	if (engine->stats.enabled > 0) {
-		ktime_t last, now = ktime_get();
-
-		if (engine->stats.active && --engine->stats.active == 0) {
-			/*
-			 * Decrement the active context count and in case GPU
-			 * is now idle add up to the running total.
-			 */
-			last = ktime_sub(now, engine->stats.start);
-
-			engine->stats.total = ktime_add(engine->stats.total,
-							last);
-		} else if (engine->stats.active == 0) {
-			/*
-			 * After turning on engine stats, context out might be
-			 * the first event in which case we account from the
-			 * time stats gathering was turned on.
-			 */
-			last = ktime_sub(now, engine->stats.enabled_at);
-
-			engine->stats.total = ktime_add(engine->stats.total,
-							last);
+	if (static_branch_unlikely(&i915_engine_stats_key)) {
+		if (READ_ONCE(engine->stats.enabled) == 0)
+			return;
+
+		spin_lock_irqsave(&engine->stats.lock, flags);
+
+		if (engine->stats.enabled > 0) {
+			ktime_t last, now = ktime_get();
+
+			if (engine->stats.active &&
+			    --engine->stats.active == 0) {
+				/*
+				 * Decrement the active context count and in
+				 * case GPU is now idle add up to the running
+				 * total.
+				 */
+				last = ktime_sub(now, engine->stats.start);
+
+				engine->stats.total =
+					ktime_add(engine->stats.total, last);
+			} else if (engine->stats.active == 0) {
+				/*
+				 * After turning on engine stats, context out
+				 * might be the first event in which case we
+				 * account from the time stats gathering was
+				 * turned on.
+				 */
+				last = ktime_sub(now, engine->stats.enabled_at);
+
+				engine->stats.total =
+					ktime_add(engine->stats.total, last);
+			}
 		}
-	}
 
-	spin_unlock_irqrestore(&engine->stats.lock, flags);
+		spin_unlock_irqrestore(&engine->stats.lock, flags);
+	}
 }
 
 int intel_enable_engine_stats(struct intel_engine_cs *engine);
-- 
2.9.5


* [CI 08/10] drm/i915/pmu: Add interrupt count metric
  2017-10-10 15:18 [CI 01/10] drm/i915: Extract intel_get_cagf Tvrtko Ursulin
                   ` (5 preceding siblings ...)
  2017-10-10 15:19 ` [CI 07/10] drm/i915: Gate engine stats collection with a static key Tvrtko Ursulin
@ 2017-10-10 15:19 ` Tvrtko Ursulin
  2017-10-10 15:19 ` [CI 09/10] drm/i915: Convert intel_rc6_residency_us to ns Tvrtko Ursulin
                   ` (2 subsequent siblings)
  9 siblings, 0 replies; 11+ messages in thread
From: Tvrtko Ursulin @ 2017-10-10 15:19 UTC (permalink / raw)
  To: Intel-gfx

From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

For clients like intel-gpu-overlay it is easier to read the
count via the perf API than having to parse /proc.
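
Once wired up the counter appears as a regular PMU event alias, so
something along the lines of "perf stat -e i915/interrupts/ -a sleep 1"
should be enough to read it; the exact invocation depends on the perf
tool in use.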

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_pmu.c | 23 +++++++++++++++++++++++
 include/uapi/drm/i915_drm.h     |  4 +++-
 2 files changed, 26 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index d552e415becd..3d4216485770 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -276,6 +276,22 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
 	return HRTIMER_RESTART;
 }
 
+static u64 count_interrupts(struct drm_i915_private *i915)
+{
+	/* open-coded kstat_irqs() */
+	struct irq_desc *desc = irq_to_desc(i915->drm.pdev->irq);
+	u64 sum = 0;
+	int cpu;
+
+	if (!desc || !desc->kstat_irqs)
+		return 0;
+
+	for_each_possible_cpu(cpu)
+		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
+
+	return sum;
+}
+
 static void i915_pmu_event_destroy(struct perf_event *event)
 {
 	WARN_ON(event->parent);
@@ -342,6 +358,8 @@ static int i915_pmu_event_init(struct perf_event *event)
 			if (INTEL_GEN(i915) < 6)
 				ret = -ENODEV;
 			break;
+		case I915_PMU_INTERRUPTS:
+			break;
 		default:
 			ret = -ENOENT;
 			break;
@@ -391,6 +409,9 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
 			   div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_REQ].cur,
 				   FREQUENCY);
 			break;
+		case I915_PMU_INTERRUPTS:
+			val = count_interrupts(i915);
+			break;
 		}
 	}
 
@@ -650,6 +671,8 @@ static struct attribute *i915_pmu_events_attrs[] = {
 	I915_EVENT(actual-frequency,    I915_PMU_ACTUAL_FREQUENCY,    "MHz"),
 	I915_EVENT(requested-frequency, I915_PMU_REQUESTED_FREQUENCY, "MHz"),
 
+	I915_EVENT_ATTR(interrupts, I915_PMU_INTERRUPTS),
+
 	NULL,
 };
 
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index d35af9ff635c..8aceb7f57e0c 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -132,7 +132,9 @@ enum drm_i915_pmu_engine_sample {
 #define I915_PMU_ACTUAL_FREQUENCY	__I915_PMU_OTHER(0)
 #define I915_PMU_REQUESTED_FREQUENCY	__I915_PMU_OTHER(1)
 
-#define I915_PMU_LAST I915_PMU_REQUESTED_FREQUENCY
+#define I915_PMU_INTERRUPTS		__I915_PMU_OTHER(2)
+
+#define I915_PMU_LAST I915_PMU_INTERRUPTS
 
 /* Each region is a minimum of 16k, and there are at most 255 of them.
  */
-- 
2.9.5


* [CI 09/10] drm/i915: Convert intel_rc6_residency_us to ns
  2017-10-10 15:18 [CI 01/10] drm/i915: Extract intel_get_cagf Tvrtko Ursulin
                   ` (6 preceding siblings ...)
  2017-10-10 15:19 ` [CI 08/10] drm/i915/pmu: Add interrupt count metric Tvrtko Ursulin
@ 2017-10-10 15:19 ` Tvrtko Ursulin
  2017-10-10 15:19 ` [CI 10/10] drm/i915/pmu: Add RC6 residency metrics Tvrtko Ursulin
  2017-10-10 16:57 ` ✗ Fi.CI.BAT: failure for series starting with [CI,01/10] drm/i915: Extract intel_get_cagf Patchwork
  9 siblings, 0 replies; 11+ messages in thread
From: Tvrtko Ursulin @ 2017-10-10 15:19 UTC (permalink / raw)
  To: Intel-gfx

From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Will be used for exposing the PMU counters.

v2:
 * Move intel_runtime_pm_get/put to the callers. (Chris Wilson)
 * Restore full unit conversion precision.
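
For reference, keeping the counter in ns keeps every per-platform
conversion in the diff below exact: 1.28 us units become
time_hw * 1280 ns, the 833.33 ns units on Gen9 LP become
time_hw * 10000 / 12 ns (833.33... ns == 10000/12 ns), and the VLV/CHV
CZ clock units become time_hw * 1000000 / czclk_freq ns with czclk_freq
in kHz. The intel_rc6_residency_us() wrapper then only has to divide
the result by 1000.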

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_drv.h   |  8 +++++++-
 drivers/gpu/drm/i915/i915_sysfs.c |  9 +++++++--
 drivers/gpu/drm/i915/intel_pm.c   | 27 +++++++++++++--------------
 3 files changed, 27 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 99989cd778a9..7e14b43e4ee0 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -4193,11 +4193,17 @@ void vlv_phy_reset_lanes(struct intel_encoder *encoder);
 
 int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
 int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
-u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
+u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
 			   const i915_reg_t reg);
 
 u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat1);
 
+static inline u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
+					 const i915_reg_t reg)
+{
+	return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(dev_priv, reg), 1000);
+}
+
 #define I915_READ8(reg)		dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
 #define I915_WRITE8(reg, val)	dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
 
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 3bcf1c8c0dd9..2f74ae55332d 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -42,8 +42,13 @@ static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev)
 static u32 calc_residency(struct drm_i915_private *dev_priv,
 			  i915_reg_t reg)
 {
-	return DIV_ROUND_CLOSEST_ULL(intel_rc6_residency_us(dev_priv, reg),
-				     1000);
+	u64 res;
+
+	intel_runtime_pm_get(dev_priv);
+	res = intel_rc6_residency_us(dev_priv, reg);
+	intel_runtime_pm_put(dev_priv);
+
+	return DIV_ROUND_CLOSEST_ULL(res, 1000);
 }
 
 static ssize_t
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index ab9491178c57..0c63488d1fc1 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -9348,36 +9348,35 @@ static u64 vlv_residency_raw(struct drm_i915_private *dev_priv,
 	return lower | (u64)upper << 8;
 }
 
-u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
+u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
 			   const i915_reg_t reg)
 {
-	u64 time_hw, units, div;
+	u64 time_hw;
+	u32 mul, div;
 
 	if (!intel_enable_rc6())
 		return 0;
 
-	intel_runtime_pm_get(dev_priv);
-
 	/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-		units = 1000;
+		mul = 1000000;
 		div = dev_priv->czclk_freq;
-
 		time_hw = vlv_residency_raw(dev_priv, reg);
-	} else if (IS_GEN9_LP(dev_priv)) {
-		units = 1000;
-		div = 1200;		/* 833.33ns */
 
-		time_hw = I915_READ(reg);
 	} else {
-		units = 128000; /* 1.28us */
-		div = 100000;
+		/* 833.33ns units on Gen9LP, 1.28us elsewhere. */
+		if (IS_GEN9_LP(dev_priv)) {
+			mul = 10000;
+			div = 12;
+		} else {
+			mul = 1280;
+			div = 1;
+		}
 
 		time_hw = I915_READ(reg);
 	}
 
-	intel_runtime_pm_put(dev_priv);
-	return DIV_ROUND_UP_ULL(time_hw * units, div);
+	return DIV_ROUND_UP_ULL(time_hw * mul, div);
 }
 
 u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat)
-- 
2.9.5


* [CI 10/10] drm/i915/pmu: Add RC6 residency metrics
  2017-10-10 15:18 [CI 01/10] drm/i915: Extract intel_get_cagf Tvrtko Ursulin
                   ` (7 preceding siblings ...)
  2017-10-10 15:19 ` [CI 09/10] drm/i915: Convert intel_rc6_residency_us to ns Tvrtko Ursulin
@ 2017-10-10 15:19 ` Tvrtko Ursulin
  2017-10-10 16:57 ` ✗ Fi.CI.BAT: failure for series starting with [CI,01/10] drm/i915: Extract intel_get_cagf Patchwork
  9 siblings, 0 replies; 11+ messages in thread
From: Tvrtko Ursulin @ 2017-10-10 15:19 UTC (permalink / raw)
  To: Intel-gfx

From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

For clients like intel-gpu-overlay it is easier to read the
counters via the perf API than having to parse sysfs.
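
Since the events are exported in ns, a client can read a counter twice
and divide the delta by the elapsed wall time to get an RC6 residency
percentage. From the command line, something like
"perf stat -e i915/rc6-residency/ -a sleep 1" should report the value
directly, the exact invocation depending on the perf tool in use.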

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_pmu.c | 31 +++++++++++++++++++++++++++++++
 include/uapi/drm/i915_drm.h     |  6 +++++-
 2 files changed, 36 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 3d4216485770..ee3f15327dd6 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -360,6 +360,15 @@ static int i915_pmu_event_init(struct perf_event *event)
 			break;
 		case I915_PMU_INTERRUPTS:
 			break;
+		case I915_PMU_RC6_RESIDENCY:
+			if (!HAS_RC6(i915))
+				ret = -ENODEV;
+			break;
+		case I915_PMU_RC6p_RESIDENCY:
+		case I915_PMU_RC6pp_RESIDENCY:
+			if (!HAS_RC6p(i915))
+				ret = -ENODEV;
+			break;
 		default:
 			ret = -ENOENT;
 			break;
@@ -412,6 +421,24 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
 		case I915_PMU_INTERRUPTS:
 			val = count_interrupts(i915);
 			break;
+		case I915_PMU_RC6_RESIDENCY:
+			intel_runtime_pm_get(i915);
+			val = intel_rc6_residency_ns(i915,
+						     IS_VALLEYVIEW(i915) ?
+						     VLV_GT_RENDER_RC6 :
+						     GEN6_GT_GFX_RC6);
+			intel_runtime_pm_put(i915);
+			break;
+		case I915_PMU_RC6p_RESIDENCY:
+			intel_runtime_pm_get(i915);
+			val = intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6p);
+			intel_runtime_pm_put(i915);
+			break;
+		case I915_PMU_RC6pp_RESIDENCY:
+			intel_runtime_pm_get(i915);
+			val = intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6pp);
+			intel_runtime_pm_put(i915);
+			break;
 		}
 	}
 
@@ -673,6 +700,10 @@ static struct attribute *i915_pmu_events_attrs[] = {
 
 	I915_EVENT_ATTR(interrupts, I915_PMU_INTERRUPTS),
 
+	I915_EVENT(rc6-residency,   I915_PMU_RC6_RESIDENCY,   "ns"),
+	I915_EVENT(rc6p-residency,  I915_PMU_RC6p_RESIDENCY,  "ns"),
+	I915_EVENT(rc6pp-residency, I915_PMU_RC6pp_RESIDENCY, "ns"),
+
 	NULL,
 };
 
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 8aceb7f57e0c..709f28fc0970 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -134,7 +134,11 @@ enum drm_i915_pmu_engine_sample {
 
 #define I915_PMU_INTERRUPTS		__I915_PMU_OTHER(2)
 
-#define I915_PMU_LAST I915_PMU_INTERRUPTS
+#define I915_PMU_RC6_RESIDENCY		__I915_PMU_OTHER(3)
+#define I915_PMU_RC6p_RESIDENCY		__I915_PMU_OTHER(4)
+#define I915_PMU_RC6pp_RESIDENCY	__I915_PMU_OTHER(5)
+
+#define I915_PMU_LAST I915_PMU_RC6pp_RESIDENCY
 
 /* Each region is a minimum of 16k, and there are at most 255 of them.
  */
-- 
2.9.5


* ✗ Fi.CI.BAT: failure for series starting with [CI,01/10] drm/i915: Extract intel_get_cagf
  2017-10-10 15:18 [CI 01/10] drm/i915: Extract intel_get_cagf Tvrtko Ursulin
                   ` (8 preceding siblings ...)
  2017-10-10 15:19 ` [CI 10/10] drm/i915/pmu: Add RC6 residency metrics Tvrtko Ursulin
@ 2017-10-10 16:57 ` Patchwork
  9 siblings, 0 replies; 11+ messages in thread
From: Patchwork @ 2017-10-10 16:57 UTC (permalink / raw)
  To: Tvrtko Ursulin; +Cc: intel-gfx

== Series Details ==

Series: series starting with [CI,01/10] drm/i915: Extract intel_get_cagf
URL   : https://patchwork.freedesktop.org/series/31667/
State : failure

== Summary ==

Series 31667v1 series starting with [CI,01/10] drm/i915: Extract intel_get_cagf
https://patchwork.freedesktop.org/api/1.0/series/31667/revisions/1/mbox/

Test chamelium:
        Subgroup dp-crc-fast:
                fail       -> PASS       (fi-kbl-7500u) fdo#102514
Test gem_exec_suspend:
        Subgroup basic-s3:
                pass       -> INCOMPLETE (fi-byt-j1900)
                pass       -> INCOMPLETE (fi-glk-1)
                pass       -> INCOMPLETE (fi-cnl-y) fdo#103070
Test kms_pipe_crc_basic:
        Subgroup suspend-read-crc-pipe-b:
                incomplete -> PASS       (fi-kbl-7560u) fdo#102846

fdo#102514 https://bugs.freedesktop.org/show_bug.cgi?id=102514
fdo#103070 https://bugs.freedesktop.org/show_bug.cgi?id=103070
fdo#102846 https://bugs.freedesktop.org/show_bug.cgi?id=102846

fi-bdw-5557u     total:289  pass:268  dwarn:0   dfail:0   fail:0   skip:21  time:451s
fi-bdw-gvtdvm    total:289  pass:265  dwarn:0   dfail:0   fail:0   skip:24  time:473s
fi-blb-e6850     total:289  pass:223  dwarn:1   dfail:0   fail:0   skip:65  time:395s
fi-bsw-n3050     total:289  pass:243  dwarn:0   dfail:0   fail:0   skip:46  time:580s
fi-bwr-2160      total:289  pass:183  dwarn:0   dfail:0   fail:0   skip:106 time:283s
fi-bxt-dsi       total:289  pass:259  dwarn:0   dfail:0   fail:0   skip:30  time:518s
fi-bxt-j4205     total:289  pass:260  dwarn:0   dfail:0   fail:0   skip:29  time:524s
fi-byt-j1900     total:118  pass:96   dwarn:0   dfail:0   fail:0   skip:21 
fi-byt-n2820     total:289  pass:249  dwarn:1   dfail:0   fail:0   skip:39  time:516s
fi-cfl-s         total:289  pass:253  dwarn:4   dfail:0   fail:0   skip:32  time:557s
fi-cnl-y         total:118  pass:97   dwarn:0   dfail:0   fail:0   skip:20 
fi-elk-e7500     total:289  pass:229  dwarn:0   dfail:0   fail:0   skip:60  time:436s
fi-glk-1         total:118  pass:96   dwarn:0   dfail:0   fail:0   skip:21 
fi-hsw-4770      total:289  pass:262  dwarn:0   dfail:0   fail:0   skip:27  time:434s
fi-hsw-4770r     total:289  pass:262  dwarn:0   dfail:0   fail:0   skip:27  time:417s
fi-ilk-650       total:289  pass:228  dwarn:0   dfail:0   fail:0   skip:61  time:459s
fi-ivb-3520m     total:289  pass:260  dwarn:0   dfail:0   fail:0   skip:29  time:496s
fi-ivb-3770      total:289  pass:260  dwarn:0   dfail:0   fail:0   skip:29  time:469s
fi-kbl-7500u     total:289  pass:264  dwarn:1   dfail:0   fail:0   skip:24  time:507s
fi-kbl-7560u     total:289  pass:270  dwarn:0   dfail:0   fail:0   skip:19  time:581s
fi-kbl-7567u     total:289  pass:265  dwarn:4   dfail:0   fail:0   skip:20  time:490s
fi-kbl-r         total:289  pass:262  dwarn:0   dfail:0   fail:0   skip:27  time:593s
fi-pnv-d510      total:289  pass:222  dwarn:1   dfail:0   fail:0   skip:66  time:653s
fi-skl-6260u     total:289  pass:269  dwarn:0   dfail:0   fail:0   skip:20  time:461s
fi-skl-6700hq    total:289  pass:263  dwarn:0   dfail:0   fail:0   skip:26  time:658s
fi-skl-6700k     total:289  pass:265  dwarn:0   dfail:0   fail:0   skip:24  time:533s
fi-skl-6770hq    total:289  pass:269  dwarn:0   dfail:0   fail:0   skip:20  time:514s
fi-skl-gvtdvm    total:289  pass:266  dwarn:0   dfail:0   fail:0   skip:23  time:478s
fi-snb-2520m     total:289  pass:250  dwarn:0   dfail:0   fail:0   skip:39  time:583s
fi-snb-2600      total:289  pass:249  dwarn:0   dfail:0   fail:0   skip:40  time:426s

cc58e6d2bc38542af8f8031d4b79f4069bba6afc drm-tip: 2017y-10m-10d-15h-40m-22s UTC integration manifest
295e2b3f602a drm/i915/pmu: Add RC6 residency metrics
61b3010492b3 drm/i915: Convert intel_rc6_residency_us to ns
44328c271e16 drm/i915/pmu: Add interrupt count metric
6a2b29848366 drm/i915: Gate engine stats collection with a static key
f2c2bb645669 drm/i915/pmu: Wire up engine busy stats to PMU
abd84e9b8fa3 drm/i915: Engine busy time tracking
5888f77f106e drm/i915: Wrap context schedule notification
2ac1945af3d7 drm/i915/pmu: Suspend sampling when GPU is idle
fe0d9bc497d8 drm/i915/pmu: Expose a PMU interface for perf queries
2fc0bc406db8 drm/i915: Extract intel_get_cagf

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_5979/
