* [PATCH 1/2] drm/i915/pmu: Add a name to the execlists stats
From: Umesh Nerlige Ramappa @ 2021-10-27  0:48 UTC
  To: intel-gfx, dri-devel
  Cc: john.c.harrison, Tvrtko Ursulin, daniel.vetter, Matthew Brost

In preparation for GuC pmu stats, add a name to the execlists stats
structure so that it can be differentiated from the GuC stats.

Signed-off-by: Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>
---
 drivers/gpu/drm/i915/gt/intel_engine_cs.c    | 14 +++---
 drivers/gpu/drm/i915/gt/intel_engine_stats.h | 33 +++++++------
 drivers/gpu/drm/i915/gt/intel_engine_types.h | 52 +++++++++++---------
 3 files changed, 53 insertions(+), 46 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index ff6753ccb129..2de396e34d83 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -363,7 +363,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id,
 		DRIVER_CAPS(i915)->has_logical_contexts = true;
 
 	ewma__engine_latency_init(&engine->latency);
-	seqcount_init(&engine->stats.lock);
+	seqcount_init(&engine->stats.execlists.lock);
 
 	ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
 
@@ -1918,15 +1918,16 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine,
 					    ktime_t *now)
 {
-	ktime_t total = engine->stats.total;
+	struct intel_engine_execlists_stats *stats = &engine->stats.execlists;
+	ktime_t total = stats->total;
 
 	/*
 	 * If the engine is executing something at the moment
 	 * add it to the total.
 	 */
 	*now = ktime_get();
-	if (READ_ONCE(engine->stats.active))
-		total = ktime_add(total, ktime_sub(*now, engine->stats.start));
+	if (READ_ONCE(stats->active))
+		total = ktime_add(total, ktime_sub(*now, stats->start));
 
 	return total;
 }
@@ -1940,13 +1941,14 @@ static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine,
  */
 ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now)
 {
+	struct intel_engine_execlists_stats *stats = &engine->stats.execlists;
 	unsigned int seq;
 	ktime_t total;
 
 	do {
-		seq = read_seqcount_begin(&engine->stats.lock);
+		seq = read_seqcount_begin(&stats->lock);
 		total = __intel_engine_get_busy_time(engine, now);
-	} while (read_seqcount_retry(&engine->stats.lock, seq));
+	} while (read_seqcount_retry(&stats->lock, seq));
 
 	return total;
 }
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_stats.h b/drivers/gpu/drm/i915/gt/intel_engine_stats.h
index 24fbdd94351a..8e762d683e50 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_stats.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_stats.h
@@ -15,45 +15,46 @@
 
 static inline void intel_engine_context_in(struct intel_engine_cs *engine)
 {
+	struct intel_engine_execlists_stats *stats = &engine->stats.execlists;
 	unsigned long flags;
 
-	if (engine->stats.active) {
-		engine->stats.active++;
+	if (stats->active) {
+		stats->active++;
 		return;
 	}
 
 	/* The writer is serialised; but the pmu reader may be from hardirq */
 	local_irq_save(flags);
-	write_seqcount_begin(&engine->stats.lock);
+	write_seqcount_begin(&stats->lock);
 
-	engine->stats.start = ktime_get();
-	engine->stats.active++;
+	stats->start = ktime_get();
+	stats->active++;
 
-	write_seqcount_end(&engine->stats.lock);
+	write_seqcount_end(&stats->lock);
 	local_irq_restore(flags);
 
-	GEM_BUG_ON(!engine->stats.active);
+	GEM_BUG_ON(!stats->active);
 }
 
 static inline void intel_engine_context_out(struct intel_engine_cs *engine)
 {
+	struct intel_engine_execlists_stats *stats = &engine->stats.execlists;
 	unsigned long flags;
 
-	GEM_BUG_ON(!engine->stats.active);
-	if (engine->stats.active > 1) {
-		engine->stats.active--;
+	GEM_BUG_ON(!stats->active);
+	if (stats->active > 1) {
+		stats->active--;
 		return;
 	}
 
 	local_irq_save(flags);
-	write_seqcount_begin(&engine->stats.lock);
+	write_seqcount_begin(&stats->lock);
 
-	engine->stats.active--;
-	engine->stats.total =
-		ktime_add(engine->stats.total,
-			  ktime_sub(ktime_get(), engine->stats.start));
+	stats->active--;
+	stats->total = ktime_add(stats->total,
+				 ktime_sub(ktime_get(), stats->start));
 
-	write_seqcount_end(&engine->stats.lock);
+	write_seqcount_end(&stats->lock);
 	local_irq_restore(flags);
 }
 
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index e0f773585c29..24fa7fb0e7de 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -257,6 +257,33 @@ struct intel_engine_execlists {
 
 #define INTEL_ENGINE_CS_MAX_NAME 8
 
+struct intel_engine_execlists_stats {
+	/**
+	 * @active: Number of contexts currently scheduled in.
+	 */
+	unsigned int active;
+
+	/**
+	 * @lock: Lock protecting the below fields.
+	 */
+	seqcount_t lock;
+
+	/**
+	 * @total: Total time this engine was busy.
+	 *
+	 * Accumulated time not counting the most recent block in cases where
+	 * engine is currently busy (active > 0).
+	 */
+	ktime_t total;
+
+	/**
+	 * @start: Timestamp of the last idle to active transition.
+	 *
+	 * Idle is defined as active == 0, active is active > 0.
+	 */
+	ktime_t start;
+};
+
 struct intel_engine_cs {
 	struct drm_i915_private *i915;
 	struct intel_gt *gt;
@@ -488,30 +515,7 @@ struct intel_engine_cs {
 	u32 (*get_cmd_length_mask)(u32 cmd_header);
 
 	struct {
-		/**
-		 * @active: Number of contexts currently scheduled in.
-		 */
-		unsigned int active;
-
-		/**
-		 * @lock: Lock protecting the below fields.
-		 */
-		seqcount_t lock;
-
-		/**
-		 * @total: Total time this engine was busy.
-		 *
-		 * Accumulated time not counting the most recent block in cases
-		 * where engine is currently busy (active > 0).
-		 */
-		ktime_t total;
-
-		/**
-		 * @start: Timestamp of the last idle to active transition.
-		 *
-		 * Idle is defined as active == 0, active is active > 0.
-		 */
-		ktime_t start;
+		struct intel_engine_execlists_stats execlists;
 
 		/**
 		 * @rps: Utilisation at last RPS sampling.
-- 
2.20.1


* [PATCH 2/2] drm/i915/pmu: Connect engine busyness stats from GuC to pmu
From: Umesh Nerlige Ramappa @ 2021-10-27  0:48 UTC
  To: intel-gfx, dri-devel
  Cc: john.c.harrison, Tvrtko Ursulin, daniel.vetter, Matthew Brost

With GuC handling scheduling, i915 is not aware of the time that a
context is scheduled in and out of the engine. Since i915 pmu relies on
this info to provide engine busyness to the user, GuC shares this info
with i915 for all engines using shared memory. For each engine, this
info contains:

- total busyness: total time that the context was running (total)
- id: id of the running context (id)
- start timestamp: timestamp when the context started running (start)

At the time (now) of sampling the engine busyness, if the id is valid
(!= ~0) and start is non-zero, the context is considered active and the
engine busyness is calculated using the equation below:

	engine busyness = total + (now - start)

All times are obtained from the gt clock base. For inactive contexts,
engine busyness is just equal to the total.
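
For illustration only (not part of the patch), a minimal C sketch of the
sampling rule above; the struct and names here are hypothetical stand-ins
for the shared-memory record:

  #include <stdint.h>

  /* Hypothetical mirror of the per-engine record GuC shares with i915. */
  struct engine_usage_sample {
          uint32_t id;     /* running context id, ~0 when idle */
          uint32_t start;  /* gt clock stamp of the last switch-in */
          uint32_t total;  /* accumulated busy gt clock cycles */
  };

  /* busyness = total + (now - start) while a context is active */
  static uint64_t sample_busyness(const struct engine_usage_sample *s,
                                  uint32_t now)
  {
          uint64_t busy = s->total;

          if (s->id != ~0u && s->start)
                  busy += now - s->start;

          return busy;
  }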

The start and total values provided by GuC are 32 bits and wrap around
in a few minutes. Since perf pmu provides busyness as 64-bit
monotonically increasing values, this implementation must account for
overflows and extend the time to 64 bits before returning busyness to
the user. To do that, a worker runs periodically with a period of 1/8th
of the time it takes for the timestamp to wrap. As an example, that
would be once every 27 seconds for a gt clock frequency of 19.2 MHz.
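
A quick, purely illustrative check of that arithmetic (the 19.2 MHz
value is just the example frequency quoted above):

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
          uint64_t wrap_clks = UINT32_MAX;      /* 32-bit timestamp wrap */
          uint64_t poll_clks = wrap_clks >> 3;  /* sample at 1/8th of that */
          uint64_t gt_clk_hz = 19200000;        /* 19.2 MHz gt clock */

          /* prints a period of ~27 seconds */
          printf("worker period ~%llu s\n",
                 (unsigned long long)(poll_clks / gt_clk_hz));
          return 0;
  }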

Note:
Busyness may be over-accounted because GuC can update the total and
start values while the KMD is reading them (i.e. the KMD may read the
updated total together with a stale start). In such a case, the user
may see a higher busyness value followed by smaller ones, which
eventually catch up to the higher value.

v2: (Tvrtko)
- Include details in commit message
- Move intel engine busyness function into execlist code
- Use union inside engine->stats
- Use natural type for ping delay jiffies
- Drop active_work condition checks
- Use for_each_engine if iterating all engines
- Drop seq locking, use spinlock at GuC level to update engine stats
- Document worker specific details

v3: (Tvrtko/Umesh)
- Demarcate GuC and execlist stat objects with comments
- Document known over-accounting issue in commit
- Provide a consistent view of GuC state
- Add hooks to gt park/unpark for GuC busyness
- Stop/start worker in gt park/unpark path
- Drop inline
- Move spinlock and worker inits to GuC initialization
- Drop helpers that are called only once

v4: (Tvrtko/Matt/Umesh)
- Drop addressed opens from commit message
- Get runtime pm in ping, remove from the park path
- Use cancel_delayed_work_sync in disable_submission path
- Update stats during reset prepare
- Skip ping if reset in progress
- Explicitly name execlists and GuC stats objects
- Since disable_submission is called from many places, move resetting
  stats to intel_guc_submission_reset_prepare

v5: (Tvrtko)
- Add a trylock helper that does not sleep and synchronize PMU event
  callbacks and worker with gt reset

v6: (CI BAT failures)
- DUTs using execlist submission failed to boot since __gt_unpark is
  called during i915 load. This ends up calling the GuC busyness unpark
  hook and results in kick-starting an uninitialized worker. Let
  park/unpark hooks check if GuC submission has been initialized.
- Drop cant_sleep() from the trylock helper since rcu_read_lock takes
  care of that.

v7: (CI) Fix igt@i915_selftest@live@gt_engines
- For GuC mode of submission the engine busyness is derived from gt time
  domain. Use gt time elapsed as reference in the selftest.
- Increase busyness calculation to 10ms duration to ensure batch runs
  longer and falls within the busyness tolerances in selftest.

v8:
- Use ktime_get in selftest as before
- intel_reset_trylock_no_wait results in a lockdep splat that is not
  trivial to fix since the PMU callback runs in irq context and the
  reset paths are tightly knit into the driver. The test that uncovers
  this is igt@perf_pmu@faulting-read. Drop intel_reset_trylock_no_wait,
  instead use the reset_count to synchronize with gt reset during pmu
  callback. For the ping, continue to use intel_reset_trylock since ping
  is not run in irq context.

- GuC PM timestamp does not tick when GuC is idle. This can potentially
  result in wrong busyness values when a context is active on the
  engine, but GuC is idle. Use the RING TIMESTAMP as GPU timestamp to
  process the GuC busyness stats. This works since both GuC timestamp and
  RING timestamp are synced with the same clock.

- The busyness stats may get updated after the batch starts running.
  This delay causes the busyness reported for 100us duration to fall
  below 95% in the selftest. The only option at this time is to wait for
  GuC busyness to change from idle to active before we sample busyness
  over a 100us period.

Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Signed-off-by: Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>
Acked-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/gt/intel_engine_cs.c     |  28 +-
 drivers/gpu/drm/i915/gt/intel_engine_types.h  |  33 ++-
 .../drm/i915/gt/intel_execlists_submission.c  |  34 +++
 drivers/gpu/drm/i915/gt/intel_gt_pm.c         |   2 +
 drivers/gpu/drm/i915/gt/selftest_engine_pm.c  |  33 +++
 .../gpu/drm/i915/gt/uc/abi/guc_actions_abi.h  |   1 +
 drivers/gpu/drm/i915/gt/uc/intel_guc.h        |  30 ++
 drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c    |  21 ++
 drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h    |   5 +
 drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h   |  13 +
 .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 277 ++++++++++++++++++
 .../gpu/drm/i915/gt/uc/intel_guc_submission.h |   2 +
 drivers/gpu/drm/i915/i915_reg.h               |   2 +
 13 files changed, 453 insertions(+), 28 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 2de396e34d83..332756036007 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -1915,23 +1915,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 	intel_engine_print_breadcrumbs(engine, m);
 }
 
-static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine,
-					    ktime_t *now)
-{
-	struct intel_engine_execlists_stats *stats = &engine->stats.execlists;
-	ktime_t total = stats->total;
-
-	/*
-	 * If the engine is executing something at the moment
-	 * add it to the total.
-	 */
-	*now = ktime_get();
-	if (READ_ONCE(stats->active))
-		total = ktime_add(total, ktime_sub(*now, stats->start));
-
-	return total;
-}
-
 /**
  * intel_engine_get_busy_time() - Return current accumulated engine busyness
  * @engine: engine to report on
@@ -1941,16 +1924,7 @@ static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine,
  */
 ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now)
 {
-	struct intel_engine_execlists_stats *stats = &engine->stats.execlists;
-	unsigned int seq;
-	ktime_t total;
-
-	do {
-		seq = read_seqcount_begin(&stats->lock);
-		total = __intel_engine_get_busy_time(engine, now);
-	} while (read_seqcount_retry(&stats->lock, seq));
-
-	return total;
+	return engine->busyness(engine, now);
 }
 
 struct intel_context *
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 24fa7fb0e7de..5732e0d71513 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -284,6 +284,28 @@ struct intel_engine_execlists_stats {
 	ktime_t start;
 };
 
+struct intel_engine_guc_stats {
+	/**
+	 * @running: Active state of the engine when busyness was last sampled.
+	 */
+	bool running;
+
+	/**
+	 * @prev_total: Previous value of total runtime clock cycles.
+	 */
+	u32 prev_total;
+
+	/**
+	 * @total_gt_clks: Total gt clock cycles this engine was busy.
+	 */
+	u64 total_gt_clks;
+
+	/**
+	 * @start_gt_clk: GT clock time of last idle to active transition.
+	 */
+	u64 start_gt_clk;
+};
+
 struct intel_engine_cs {
 	struct drm_i915_private *i915;
 	struct intel_gt *gt;
@@ -466,6 +488,12 @@ struct intel_engine_cs {
 	void		(*add_active_request)(struct i915_request *rq);
 	void		(*remove_active_request)(struct i915_request *rq);
 
+	/*
+	 * Get engine busyness and the time at which the busyness was sampled.
+	 */
+	ktime_t		(*busyness)(struct intel_engine_cs *engine,
+				    ktime_t *now);
+
 	struct intel_engine_execlists execlists;
 
 	/*
@@ -515,7 +543,10 @@ struct intel_engine_cs {
 	u32 (*get_cmd_length_mask)(u32 cmd_header);
 
 	struct {
-		struct intel_engine_execlists_stats execlists;
+		union {
+			struct intel_engine_execlists_stats execlists;
+			struct intel_engine_guc_stats guc;
+		};
 
 		/**
 		 * @rps: Utilisation at last RPS sampling.
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index bedb80057046..ca03880fa7e4 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -3293,6 +3293,38 @@ static void execlists_release(struct intel_engine_cs *engine)
 	lrc_fini_wa_ctx(engine);
 }
 
+static ktime_t __execlists_engine_busyness(struct intel_engine_cs *engine,
+					   ktime_t *now)
+{
+	struct intel_engine_execlists_stats *stats = &engine->stats.execlists;
+	ktime_t total = stats->total;
+
+	/*
+	 * If the engine is executing something at the moment
+	 * add it to the total.
+	 */
+	*now = ktime_get();
+	if (READ_ONCE(stats->active))
+		total = ktime_add(total, ktime_sub(*now, stats->start));
+
+	return total;
+}
+
+static ktime_t execlists_engine_busyness(struct intel_engine_cs *engine,
+					 ktime_t *now)
+{
+	struct intel_engine_execlists_stats *stats = &engine->stats.execlists;
+	unsigned int seq;
+	ktime_t total;
+
+	do {
+		seq = read_seqcount_begin(&stats->lock);
+		total = __execlists_engine_busyness(engine, now);
+	} while (read_seqcount_retry(&stats->lock, seq));
+
+	return total;
+}
+
 static void
 logical_ring_default_vfuncs(struct intel_engine_cs *engine)
 {
@@ -3349,6 +3381,8 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
 		engine->emit_bb_start = gen8_emit_bb_start;
 	else
 		engine->emit_bb_start = gen8_emit_bb_start_noarb;
+
+	engine->busyness = execlists_engine_busyness;
 }
 
 static void logical_ring_default_irqs(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 524eaf678790..b4a8594bc46c 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -86,6 +86,7 @@ static int __gt_unpark(struct intel_wakeref *wf)
 	intel_rc6_unpark(&gt->rc6);
 	intel_rps_unpark(&gt->rps);
 	i915_pmu_gt_unparked(i915);
+	intel_guc_busyness_unpark(gt);
 
 	intel_gt_unpark_requests(gt);
 	runtime_begin(gt);
@@ -104,6 +105,7 @@ static int __gt_park(struct intel_wakeref *wf)
 	runtime_end(gt);
 	intel_gt_park_requests(gt);
 
+	intel_guc_busyness_park(gt);
 	i915_vma_parked(gt);
 	i915_pmu_gt_parked(i915);
 	intel_rps_park(&gt->rps);
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
index 75569666105d..0bfd738dbf3a 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
@@ -214,6 +214,31 @@ static int live_engine_timestamps(void *arg)
 	return 0;
 }
 
+static int __spin_until_busier(struct intel_engine_cs *engine, ktime_t busyness)
+{
+	ktime_t start, unused, dt;
+
+	if (!intel_engine_uses_guc(engine))
+		return 0;
+
+	/*
+	 * In GuC mode of submission, the busyness stats may get updated after
+	 * the batch starts running. Poll for a change in busyness and timeout
+	 * after 500 us.
+	 */
+	start = ktime_get();
+	while (intel_engine_get_busy_time(engine, &unused) == busyness) {
+		dt = ktime_get() - start;
+		if (dt > 500000) {
+			pr_err("active wait timed out %lld\n", dt);
+			ENGINE_TRACE(engine, "active wait time out %lld\n", dt);
+			return -ETIME;
+		}
+	}
+
+	return 0;
+}
+
 static int live_engine_busy_stats(void *arg)
 {
 	struct intel_gt *gt = arg;
@@ -232,6 +257,7 @@ static int live_engine_busy_stats(void *arg)
 	GEM_BUG_ON(intel_gt_pm_is_awake(gt));
 	for_each_engine(engine, gt, id) {
 		struct i915_request *rq;
+		ktime_t busyness, dummy;
 		ktime_t de, dt;
 		ktime_t t[2];
 
@@ -274,12 +300,19 @@ static int live_engine_busy_stats(void *arg)
 		}
 		i915_request_add(rq);
 
+		busyness = intel_engine_get_busy_time(engine, &dummy);
 		if (!igt_wait_for_spinner(&spin, rq)) {
 			intel_gt_set_wedged(engine->gt);
 			err = -ETIME;
 			goto end;
 		}
 
+		err = __spin_until_busier(engine, busyness);
+		if (err) {
+			GEM_TRACE_DUMP();
+			goto end;
+		}
+
 		ENGINE_TRACE(engine, "measuring busy time\n");
 		preempt_disable();
 		de = intel_engine_get_busy_time(engine, &t[0]);
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
index ba10bd374cee..fe5d7d261797 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
@@ -144,6 +144,7 @@ enum intel_guc_action {
 	INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE = 0x4600,
 	INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC = 0x4601,
 	INTEL_GUC_ACTION_RESET_CLIENT = 0x5507,
+	INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF = 0x550A,
 	INTEL_GUC_ACTION_LIMIT
 };
 
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 31cf9fb48c7e..1cb46098030d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -138,6 +138,8 @@ struct intel_guc {
 	u32 ads_regset_size;
 	/** @ads_golden_ctxt_size: size of the golden contexts in the ADS */
 	u32 ads_golden_ctxt_size;
+	/** @ads_engine_usage_size: size of engine usage in the ADS */
+	u32 ads_engine_usage_size;
 
 	/** @lrc_desc_pool: object allocated to hold the GuC LRC descriptor pool */
 	struct i915_vma *lrc_desc_pool;
@@ -172,6 +174,34 @@ struct intel_guc {
 
 	/** @send_mutex: used to serialize the intel_guc_send actions */
 	struct mutex send_mutex;
+
+	/**
+	 * @timestamp: GT timestamp object that stores a copy of the timestamp
+	 * and adjusts it for overflow using a worker.
+	 */
+	struct {
+		/**
+		 * @lock: Lock protecting the below fields and the engine stats.
+		 */
+		spinlock_t lock;
+
+		/**
+		 * @gt_stamp: 64 bit extended value of the GT timestamp.
+		 */
+		u64 gt_stamp;
+
+		/**
+		 * @ping_delay: Period for polling the GT timestamp for
+		 * overflow.
+		 */
+		unsigned long ping_delay;
+
+		/**
+		 * @work: Periodic work to adjust GT timestamp, engine and
+		 * context usage for overflows.
+		 */
+		struct delayed_work work;
+	} timestamp;
 };
 
 static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
index 621c893a009f..1a1edae67e4e 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -26,6 +26,8 @@
  *      | guc_policies                          |
  *      +---------------------------------------+
  *      | guc_gt_system_info                    |
+ *      +---------------------------------------+
+ *      | guc_engine_usage                      |
  *      +---------------------------------------+ <== static
  *      | guc_mmio_reg[countA] (engine 0.0)     |
  *      | guc_mmio_reg[countB] (engine 0.1)     |
@@ -47,6 +49,7 @@ struct __guc_ads_blob {
 	struct guc_ads ads;
 	struct guc_policies policies;
 	struct guc_gt_system_info system_info;
+	struct guc_engine_usage engine_usage;
 	/* From here on, location is dynamic! Refer to above diagram. */
 	struct guc_mmio_reg regset[0];
 } __packed;
@@ -628,3 +631,21 @@ void intel_guc_ads_reset(struct intel_guc *guc)
 
 	guc_ads_private_data_reset(guc);
 }
+
+u32 intel_guc_engine_usage_offset(struct intel_guc *guc)
+{
+	struct __guc_ads_blob *blob = guc->ads_blob;
+	u32 base = intel_guc_ggtt_offset(guc, guc->ads_vma);
+	u32 offset = base + ptr_offset(blob, engine_usage);
+
+	return offset;
+}
+
+struct guc_engine_usage_record *intel_guc_engine_usage(struct intel_engine_cs *engine)
+{
+	struct intel_guc *guc = &engine->gt->uc.guc;
+	struct __guc_ads_blob *blob = guc->ads_blob;
+	u8 guc_class = engine_class_to_guc_class(engine->class);
+
+	return &blob->engine_usage.engines[guc_class][ilog2(engine->logical_mask)];
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h
index 3d85051d57e4..e74c110facff 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h
@@ -6,8 +6,11 @@
 #ifndef _INTEL_GUC_ADS_H_
 #define _INTEL_GUC_ADS_H_
 
+#include <linux/types.h>
+
 struct intel_guc;
 struct drm_printer;
+struct intel_engine_cs;
 
 int intel_guc_ads_create(struct intel_guc *guc);
 void intel_guc_ads_destroy(struct intel_guc *guc);
@@ -15,5 +18,7 @@ void intel_guc_ads_init_late(struct intel_guc *guc);
 void intel_guc_ads_reset(struct intel_guc *guc);
 void intel_guc_ads_print_policy_info(struct intel_guc *guc,
 				     struct drm_printer *p);
+struct guc_engine_usage_record *intel_guc_engine_usage(struct intel_engine_cs *engine);
+u32 intel_guc_engine_usage_offset(struct intel_guc *guc);
 
 #endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index 722933e26347..7072e30e99f4 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -294,6 +294,19 @@ struct guc_ads {
 	u32 reserved[15];
 } __packed;
 
+/* Engine usage stats */
+struct guc_engine_usage_record {
+	u32 current_context_index;
+	u32 last_switch_in_stamp;
+	u32 reserved0;
+	u32 total_runtime;
+	u32 reserved1[4];
+} __packed;
+
+struct guc_engine_usage {
+	struct guc_engine_usage_record engines[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];
+} __packed;
+
 /* GuC logging structures */
 
 enum guc_log_buffer_type {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 38b47e73e35d..5cc49c0b3889 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -13,6 +13,7 @@
 #include "gt/intel_engine_heartbeat.h"
 #include "gt/intel_gpu_commands.h"
 #include "gt/intel_gt.h"
+#include "gt/intel_gt_clock_utils.h"
 #include "gt/intel_gt_irq.h"
 #include "gt/intel_gt_pm.h"
 #include "gt/intel_gt_requests.h"
@@ -21,6 +22,7 @@
 #include "gt/intel_mocs.h"
 #include "gt/intel_ring.h"
 
+#include "intel_guc_ads.h"
 #include "intel_guc_submission.h"
 
 #include "i915_drv.h"
@@ -1077,6 +1079,272 @@ static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc)
 	xa_unlock_irqrestore(&guc->context_lookup, flags);
 }
 
+/*
+ * GuC stores busyness stats for each engine at context in/out boundaries. A
+ * context 'in' logs execution start time, 'out' adds in -> out delta to total.
+ * i915/kmd accesses 'start', 'total' and 'context id' from memory shared with
+ * GuC.
+ *
+ * __i915_pmu_event_read samples engine busyness. When sampling, if context id
+ * is valid (!= ~0) and start is non-zero, the engine is considered to be
+ * active. For an active engine total busyness = total + (now - start), where
+ * 'now' is the time at which the busyness is sampled. For inactive engine,
+ * total busyness = total.
+ *
+ * All times are captured from GUCPMTIMESTAMP reg and are in gt clock domain.
+ *
+ * The start and total values provided by GuC are 32 bits and wrap around in a
+ * few minutes. Since perf pmu provides busyness as 64 bit monotonically
+ * increasing ns values, there is a need for this implementation to account for
+ * overflows and extend the GuC provided values to 64 bits before returning
+ * busyness to the user. In order to do that, a worker runs periodically at
+ * frequency = 1/8th the time it takes for the timestamp to wrap (i.e. once in
+ * 27 seconds for a gt clock frequency of 19.2 MHz).
+ */
+
+#define WRAP_TIME_CLKS U32_MAX
+#define POLL_TIME_CLKS (WRAP_TIME_CLKS >> 3)
+
+static void
+__extend_last_switch(struct intel_guc *guc, u64 *prev_start, u32 new_start)
+{
+	u32 gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp);
+	u32 gt_stamp_last = lower_32_bits(guc->timestamp.gt_stamp);
+
+	if (new_start == lower_32_bits(*prev_start))
+		return;
+
+	if (new_start < gt_stamp_last &&
+	    (new_start - gt_stamp_last) <= POLL_TIME_CLKS)
+		gt_stamp_hi++;
+
+	if (new_start > gt_stamp_last &&
+	    (gt_stamp_last - new_start) <= POLL_TIME_CLKS && gt_stamp_hi)
+		gt_stamp_hi--;
+
+	*prev_start = ((u64)gt_stamp_hi << 32) | new_start;
+}
+
+static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
+{
+	struct guc_engine_usage_record *rec = intel_guc_engine_usage(engine);
+	struct intel_engine_guc_stats *stats = &engine->stats.guc;
+	struct intel_guc *guc = &engine->gt->uc.guc;
+	u32 last_switch = rec->last_switch_in_stamp;
+	u32 ctx_id = rec->current_context_index;
+	u32 total = rec->total_runtime;
+
+	lockdep_assert_held(&guc->timestamp.lock);
+
+	stats->running = ctx_id != ~0U && last_switch;
+	if (stats->running)
+		__extend_last_switch(guc, &stats->start_gt_clk, last_switch);
+
+	/*
+	 * Instead of adjusting the total for overflow, just add the
+	 * difference from previous sample stats->total_gt_clks
+	 */
+	if (total && total != ~0U) {
+		stats->total_gt_clks += (u32)(total - stats->prev_total);
+		stats->prev_total = total;
+	}
+}
+
+static void guc_update_pm_timestamp(struct intel_guc *guc,
+				    struct intel_engine_cs *engine,
+				    ktime_t *now)
+{
+	u32 gt_stamp_now, gt_stamp_hi;
+
+	lockdep_assert_held(&guc->timestamp.lock);
+
+	gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp);
+	gt_stamp_now = intel_uncore_read(engine->uncore,
+					 RING_TIMESTAMP(engine->mmio_base));
+	*now = ktime_get();
+
+	if (gt_stamp_now < lower_32_bits(guc->timestamp.gt_stamp))
+		gt_stamp_hi++;
+
+	guc->timestamp.gt_stamp = ((u64)gt_stamp_hi << 32) | gt_stamp_now;
+}
+
+/*
+ * Unlike the execlist mode of submission total and active times are in terms of
+ * gt clocks. The *now parameter is retained to return the cpu time at which the
+ * busyness was sampled.
+ */
+static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
+{
+	struct intel_engine_guc_stats stats_saved, *stats = &engine->stats.guc;
+	struct i915_gpu_error *gpu_error = &engine->i915->gpu_error;
+	struct intel_gt *gt = engine->gt;
+	struct intel_guc *guc = &gt->uc.guc;
+	u64 total, gt_stamp_saved;
+	unsigned long flags;
+	u32 reset_count;
+
+	spin_lock_irqsave(&guc->timestamp.lock, flags);
+
+	/*
+	 * If a reset happened, we risk reading partially updated
+	 * engine busyness from GuC, so we just use the driver stored
+	 * copy of busyness. Synchronize with gt reset using reset_count.
+	 */
+	reset_count = i915_reset_count(gpu_error);
+
+	*now = ktime_get();
+
+	/*
+	 * The active busyness depends on start_gt_clk and gt_stamp.
+	 * gt_stamp is updated by i915 only when gt is awake and the
+	 * start_gt_clk is derived from GuC state. To get a consistent
+	 * view of activity, we query the GuC state only if gt is awake.
+	 */
+	stats_saved = *stats;
+	gt_stamp_saved = guc->timestamp.gt_stamp;
+	if (intel_gt_pm_get_if_awake(gt)) {
+		guc_update_engine_gt_clks(engine);
+		guc_update_pm_timestamp(guc, engine, now);
+		intel_gt_pm_put_async(gt);
+		if (i915_reset_count(gpu_error) != reset_count) {
+			*stats = stats_saved;
+			guc->timestamp.gt_stamp = gt_stamp_saved;
+		}
+	}
+
+	total = intel_gt_clock_interval_to_ns(gt, stats->total_gt_clks);
+	if (stats->running) {
+		u64 clk = guc->timestamp.gt_stamp - stats->start_gt_clk;
+
+		total += intel_gt_clock_interval_to_ns(gt, clk);
+	}
+
+	spin_unlock_irqrestore(&guc->timestamp.lock, flags);
+
+	return ns_to_ktime(total);
+}
+
+static void __reset_guc_busyness_stats(struct intel_guc *guc)
+{
+	struct intel_gt *gt = guc_to_gt(guc);
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	unsigned long flags;
+	ktime_t unused;
+
+	cancel_delayed_work_sync(&guc->timestamp.work);
+
+	spin_lock_irqsave(&guc->timestamp.lock, flags);
+
+	for_each_engine(engine, gt, id) {
+		guc_update_pm_timestamp(guc, engine, &unused);
+		guc_update_engine_gt_clks(engine);
+		engine->stats.guc.prev_total = 0;
+	}
+
+	spin_unlock_irqrestore(&guc->timestamp.lock, flags);
+}
+
+static void __update_guc_busyness_stats(struct intel_guc *guc)
+{
+	struct intel_gt *gt = guc_to_gt(guc);
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	ktime_t unused;
+
+	for_each_engine(engine, gt, id) {
+		guc_update_pm_timestamp(guc, engine, &unused);
+		guc_update_engine_gt_clks(engine);
+	}
+}
+
+static void guc_timestamp_ping(struct work_struct *wrk)
+{
+	struct intel_guc *guc = container_of(wrk, typeof(*guc),
+					     timestamp.work.work);
+	struct intel_uc *uc = container_of(guc, typeof(*uc), guc);
+	struct intel_gt *gt = guc_to_gt(guc);
+	intel_wakeref_t wakeref;
+	unsigned long flags;
+	int srcu, ret;
+
+	/*
+	 * Synchronize with gt reset to make sure the worker does not
+	 * corrupt the engine/guc stats.
+	 */
+	ret = intel_gt_reset_trylock(gt, &srcu);
+	if (ret)
+		return;
+
+	spin_lock_irqsave(&guc->timestamp.lock, flags);
+
+	with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
+		__update_guc_busyness_stats(guc);
+
+	spin_unlock_irqrestore(&guc->timestamp.lock, flags);
+
+	intel_gt_reset_unlock(gt, srcu);
+
+	mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
+			 guc->timestamp.ping_delay);
+}
+
+static int guc_action_enable_usage_stats(struct intel_guc *guc)
+{
+	u32 offset = intel_guc_engine_usage_offset(guc);
+	u32 action[] = {
+		INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF,
+		offset,
+		0,
+	};
+
+	return intel_guc_send(guc, action, ARRAY_SIZE(action));
+}
+
+static void guc_init_engine_stats(struct intel_guc *guc)
+{
+	struct intel_gt *gt = guc_to_gt(guc);
+	intel_wakeref_t wakeref;
+
+	mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
+			 guc->timestamp.ping_delay);
+
+	with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref) {
+		int ret = guc_action_enable_usage_stats(guc);
+
+		if (ret)
+			drm_err(&gt->i915->drm,
+				"Failed to enable usage stats: %d!\n", ret);
+	}
+}
+
+void intel_guc_busyness_park(struct intel_gt *gt)
+{
+	struct intel_guc *guc = &gt->uc.guc;
+	unsigned long flags;
+
+	if (!guc_submission_initialized(guc))
+		return;
+
+	cancel_delayed_work(&guc->timestamp.work);
+
+	spin_lock_irqsave(&guc->timestamp.lock, flags);
+	__update_guc_busyness_stats(guc);
+	spin_unlock_irqrestore(&guc->timestamp.lock, flags);
+}
+
+void intel_guc_busyness_unpark(struct intel_gt *gt)
+{
+	struct intel_guc *guc = &gt->uc.guc;
+
+	if (!guc_submission_initialized(guc))
+		return;
+
+	mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
+			 guc->timestamp.ping_delay);
+}
+
 static inline bool
 submission_disabled(struct intel_guc *guc)
 {
@@ -1138,6 +1406,7 @@ void intel_guc_submission_reset_prepare(struct intel_guc *guc)
 	intel_gt_park_heartbeats(guc_to_gt(guc));
 	disable_submission(guc);
 	guc->interrupts.disable(guc);
+	__reset_guc_busyness_stats(guc);
 
 	/* Flush IRQ handler */
 	spin_lock_irq(&guc_to_gt(guc)->irq_lock);
@@ -1484,6 +1753,7 @@ static void destroyed_worker_func(struct work_struct *w);
  */
 int intel_guc_submission_init(struct intel_guc *guc)
 {
+	struct intel_gt *gt = guc_to_gt(guc);
 	int ret;
 
 	if (guc->lrc_desc_pool)
@@ -1512,6 +1782,10 @@ int intel_guc_submission_init(struct intel_guc *guc)
 	if (!guc->submission_state.guc_ids_bitmap)
 		return -ENOMEM;
 
+	spin_lock_init(&guc->timestamp.lock);
+	INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping);
+	guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
+
 	return 0;
 }
 
@@ -3369,7 +3643,9 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine)
 		engine->emit_flush = gen12_emit_flush_xcs;
 	}
 	engine->set_default_submission = guc_set_default_submission;
+	engine->busyness = guc_engine_busyness;
 
+	engine->flags |= I915_ENGINE_SUPPORTS_STATS;
 	engine->flags |= I915_ENGINE_HAS_PREEMPTION;
 	engine->flags |= I915_ENGINE_HAS_TIMESLICES;
 
@@ -3468,6 +3744,7 @@ int intel_guc_submission_setup(struct intel_engine_cs *engine)
 void intel_guc_submission_enable(struct intel_guc *guc)
 {
 	guc_init_lrc_mapping(guc);
+	guc_init_engine_stats(guc);
 }
 
 void intel_guc_submission_disable(struct intel_guc *guc)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
index c7ef44fa0c36..5a95a9f0a8e3 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
@@ -28,6 +28,8 @@ void intel_guc_submission_print_context_info(struct intel_guc *guc,
 void intel_guc_dump_active_requests(struct intel_engine_cs *engine,
 				    struct i915_request *hung_rq,
 				    struct drm_printer *m);
+void intel_guc_busyness_park(struct intel_gt *gt);
+void intel_guc_busyness_unpark(struct intel_gt *gt);
 
 bool intel_guc_virtual_engine_has_heartbeat(const struct intel_engine_cs *ve);
 
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index d9f7a729333f..f7927f6dac6e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2662,6 +2662,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define   RING_WAIT		(1 << 11) /* gen3+, PRBx_CTL */
 #define   RING_WAIT_SEMAPHORE	(1 << 10) /* gen6+ */
 
+#define GUCPMTIMESTAMP          _MMIO(0xC3E8)
+
 /* There are 16 64-bit CS General Purpose Registers per-engine on Gen8+ */
 #define GEN8_RING_CS_GPR(base, n)	_MMIO((base) + 0x600 + (n) * 8)
 #define GEN8_RING_CS_GPR_UDW(base, n)	_MMIO((base) + 0x600 + (n) * 8 + 4)
-- 
2.20.1


 				     struct drm_printer *p);
+struct guc_engine_usage_record *intel_guc_engine_usage(struct intel_engine_cs *engine);
+u32 intel_guc_engine_usage_offset(struct intel_guc *guc);
 
 #endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index 722933e26347..7072e30e99f4 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -294,6 +294,19 @@ struct guc_ads {
 	u32 reserved[15];
 } __packed;
 
+/* Engine usage stats */
+struct guc_engine_usage_record {
+	u32 current_context_index;
+	u32 last_switch_in_stamp;
+	u32 reserved0;
+	u32 total_runtime;
+	u32 reserved1[4];
+} __packed;
+
+struct guc_engine_usage {
+	struct guc_engine_usage_record engines[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];
+} __packed;
+
 /* GuC logging structures */
 
 enum guc_log_buffer_type {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 38b47e73e35d..5cc49c0b3889 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -13,6 +13,7 @@
 #include "gt/intel_engine_heartbeat.h"
 #include "gt/intel_gpu_commands.h"
 #include "gt/intel_gt.h"
+#include "gt/intel_gt_clock_utils.h"
 #include "gt/intel_gt_irq.h"
 #include "gt/intel_gt_pm.h"
 #include "gt/intel_gt_requests.h"
@@ -21,6 +22,7 @@
 #include "gt/intel_mocs.h"
 #include "gt/intel_ring.h"
 
+#include "intel_guc_ads.h"
 #include "intel_guc_submission.h"
 
 #include "i915_drv.h"
@@ -1077,6 +1079,272 @@ static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc)
 	xa_unlock_irqrestore(&guc->context_lookup, flags);
 }
 
+/*
+ * GuC stores busyness stats for each engine at context in/out boundaries. A
+ * context 'in' logs execution start time, 'out' adds in -> out delta to total.
+ * i915/kmd accesses 'start', 'total' and 'context id' from memory shared with
+ * GuC.
+ *
+ * __i915_pmu_event_read samples engine busyness. When sampling, if context id
+ * is valid (!= ~0) and start is non-zero, the engine is considered to be
+ * active. For an active engine, total busyness = total + (now - start), where
+ * 'now' is the time at which the busyness is sampled. For an inactive engine,
+ * total busyness = total.
+ *
+ * All times are captured from GUCPMTIMESTAMP reg and are in gt clock domain.
+ *
+ * The start and total values provided by GuC are 32 bits and wrap around in a
+ * few minutes. Since the perf pmu provides busyness as a 64 bit monotonically
+ * increasing ns value, this implementation must account for overflows and
+ * extend the GuC provided values to 64 bits before returning busyness to the
+ * user. To do that, a worker runs periodically with a period of 1/8th the
+ * time it takes for the timestamp to wrap (i.e. once every ~27 seconds for a
+ * gt clock frequency of 19.2 MHz).
+ */
+
+#define WRAP_TIME_CLKS U32_MAX
+#define POLL_TIME_CLKS (WRAP_TIME_CLKS >> 3)
+
+static void
+__extend_last_switch(struct intel_guc *guc, u64 *prev_start, u32 new_start)
+{
+	u32 gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp);
+	u32 gt_stamp_last = lower_32_bits(guc->timestamp.gt_stamp);
+
+	if (new_start == lower_32_bits(*prev_start))
+		return;
+
+	if (new_start < gt_stamp_last &&
+	    (new_start - gt_stamp_last) <= POLL_TIME_CLKS)
+		gt_stamp_hi++;
+
+	if (new_start > gt_stamp_last &&
+	    (gt_stamp_last - new_start) <= POLL_TIME_CLKS && gt_stamp_hi)
+		gt_stamp_hi--;
+
+	*prev_start = ((u64)gt_stamp_hi << 32) | new_start;
+}
+
+static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
+{
+	struct guc_engine_usage_record *rec = intel_guc_engine_usage(engine);
+	struct intel_engine_guc_stats *stats = &engine->stats.guc;
+	struct intel_guc *guc = &engine->gt->uc.guc;
+	u32 last_switch = rec->last_switch_in_stamp;
+	u32 ctx_id = rec->current_context_index;
+	u32 total = rec->total_runtime;
+
+	lockdep_assert_held(&guc->timestamp.lock);
+
+	stats->running = ctx_id != ~0U && last_switch;
+	if (stats->running)
+		__extend_last_switch(guc, &stats->start_gt_clk, last_switch);
+
+	/*
+	 * Instead of adjusting the total for overflow, just add the
+	 * difference from the previous sample to stats->total_gt_clks.
+	 */
+	if (total && total != ~0U) {
+		stats->total_gt_clks += (u32)(total - stats->prev_total);
+		stats->prev_total = total;
+	}
+}
+
+static void guc_update_pm_timestamp(struct intel_guc *guc,
+				    struct intel_engine_cs *engine,
+				    ktime_t *now)
+{
+	u32 gt_stamp_now, gt_stamp_hi;
+
+	lockdep_assert_held(&guc->timestamp.lock);
+
+	gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp);
+	gt_stamp_now = intel_uncore_read(engine->uncore,
+					 RING_TIMESTAMP(engine->mmio_base));
+	*now = ktime_get();
+
+	if (gt_stamp_now < lower_32_bits(guc->timestamp.gt_stamp))
+		gt_stamp_hi++;
+
+	guc->timestamp.gt_stamp = ((u64)gt_stamp_hi << 32) | gt_stamp_now;
+}
+
+/*
+ * Unlike the execlist mode of submission, total and active times are in terms of
+ * gt clocks. The *now parameter is retained to return the cpu time at which the
+ * busyness was sampled.
+ */
+static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
+{
+	struct intel_engine_guc_stats stats_saved, *stats = &engine->stats.guc;
+	struct i915_gpu_error *gpu_error = &engine->i915->gpu_error;
+	struct intel_gt *gt = engine->gt;
+	struct intel_guc *guc = &gt->uc.guc;
+	u64 total, gt_stamp_saved;
+	unsigned long flags;
+	u32 reset_count;
+
+	spin_lock_irqsave(&guc->timestamp.lock, flags);
+
+	/*
+	 * If a reset happened, we risk reading partially updated
+	 * engine busyness from GuC, so we just use the driver stored
+	 * copy of busyness. Synchronize with gt reset using reset_count.
+	 */
+	reset_count = i915_reset_count(gpu_error);
+
+	*now = ktime_get();
+
+	/*
+	 * The active busyness depends on start_gt_clk and gt_stamp.
+	 * gt_stamp is updated by i915 only when gt is awake and the
+	 * start_gt_clk is derived from GuC state. To get a consistent
+	 * view of activity, we query the GuC state only if gt is awake.
+	 */
+	stats_saved = *stats;
+	gt_stamp_saved = guc->timestamp.gt_stamp;
+	if (intel_gt_pm_get_if_awake(gt)) {
+		guc_update_engine_gt_clks(engine);
+		guc_update_pm_timestamp(guc, engine, now);
+		intel_gt_pm_put_async(gt);
+		if (i915_reset_count(gpu_error) != reset_count) {
+			*stats = stats_saved;
+			guc->timestamp.gt_stamp = gt_stamp_saved;
+		}
+	}
+
+	total = intel_gt_clock_interval_to_ns(gt, stats->total_gt_clks);
+	if (stats->running) {
+		u64 clk = guc->timestamp.gt_stamp - stats->start_gt_clk;
+
+		total += intel_gt_clock_interval_to_ns(gt, clk);
+	}
+
+	spin_unlock_irqrestore(&guc->timestamp.lock, flags);
+
+	return ns_to_ktime(total);
+}
+
+static void __reset_guc_busyness_stats(struct intel_guc *guc)
+{
+	struct intel_gt *gt = guc_to_gt(guc);
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	unsigned long flags;
+	ktime_t unused;
+
+	cancel_delayed_work_sync(&guc->timestamp.work);
+
+	spin_lock_irqsave(&guc->timestamp.lock, flags);
+
+	for_each_engine(engine, gt, id) {
+		guc_update_pm_timestamp(guc, engine, &unused);
+		guc_update_engine_gt_clks(engine);
+		engine->stats.guc.prev_total = 0;
+	}
+
+	spin_unlock_irqrestore(&guc->timestamp.lock, flags);
+}
+
+static void __update_guc_busyness_stats(struct intel_guc *guc)
+{
+	struct intel_gt *gt = guc_to_gt(guc);
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	ktime_t unused;
+
+	for_each_engine(engine, gt, id) {
+		guc_update_pm_timestamp(guc, engine, &unused);
+		guc_update_engine_gt_clks(engine);
+	}
+}
+
+static void guc_timestamp_ping(struct work_struct *wrk)
+{
+	struct intel_guc *guc = container_of(wrk, typeof(*guc),
+					     timestamp.work.work);
+	struct intel_uc *uc = container_of(guc, typeof(*uc), guc);
+	struct intel_gt *gt = guc_to_gt(guc);
+	intel_wakeref_t wakeref;
+	unsigned long flags;
+	int srcu, ret;
+
+	/*
+	 * Synchronize with gt reset to make sure the worker does not
+	 * corrupt the engine/guc stats.
+	 */
+	ret = intel_gt_reset_trylock(gt, &srcu);
+	if (ret)
+		return;
+
+	spin_lock_irqsave(&guc->timestamp.lock, flags);
+
+	with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
+		__update_guc_busyness_stats(guc);
+
+	spin_unlock_irqrestore(&guc->timestamp.lock, flags);
+
+	intel_gt_reset_unlock(gt, srcu);
+
+	mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
+			 guc->timestamp.ping_delay);
+}
+
+static int guc_action_enable_usage_stats(struct intel_guc *guc)
+{
+	u32 offset = intel_guc_engine_usage_offset(guc);
+	u32 action[] = {
+		INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF,
+		offset,
+		0,
+	};
+
+	return intel_guc_send(guc, action, ARRAY_SIZE(action));
+}
+
+static void guc_init_engine_stats(struct intel_guc *guc)
+{
+	struct intel_gt *gt = guc_to_gt(guc);
+	intel_wakeref_t wakeref;
+
+	mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
+			 guc->timestamp.ping_delay);
+
+	with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref) {
+		int ret = guc_action_enable_usage_stats(guc);
+
+		if (ret)
+			drm_err(&gt->i915->drm,
+				"Failed to enable usage stats: %d!\n", ret);
+	}
+}
+
+void intel_guc_busyness_park(struct intel_gt *gt)
+{
+	struct intel_guc *guc = &gt->uc.guc;
+	unsigned long flags;
+
+	if (!guc_submission_initialized(guc))
+		return;
+
+	cancel_delayed_work(&guc->timestamp.work);
+
+	spin_lock_irqsave(&guc->timestamp.lock, flags);
+	__update_guc_busyness_stats(guc);
+	spin_unlock_irqrestore(&guc->timestamp.lock, flags);
+}
+
+void intel_guc_busyness_unpark(struct intel_gt *gt)
+{
+	struct intel_guc *guc = &gt->uc.guc;
+
+	if (!guc_submission_initialized(guc))
+		return;
+
+	mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
+			 guc->timestamp.ping_delay);
+}
+
 static inline bool
 submission_disabled(struct intel_guc *guc)
 {
@@ -1138,6 +1406,7 @@ void intel_guc_submission_reset_prepare(struct intel_guc *guc)
 	intel_gt_park_heartbeats(guc_to_gt(guc));
 	disable_submission(guc);
 	guc->interrupts.disable(guc);
+	__reset_guc_busyness_stats(guc);
 
 	/* Flush IRQ handler */
 	spin_lock_irq(&guc_to_gt(guc)->irq_lock);
@@ -1484,6 +1753,7 @@ static void destroyed_worker_func(struct work_struct *w);
  */
 int intel_guc_submission_init(struct intel_guc *guc)
 {
+	struct intel_gt *gt = guc_to_gt(guc);
 	int ret;
 
 	if (guc->lrc_desc_pool)
@@ -1512,6 +1782,10 @@ int intel_guc_submission_init(struct intel_guc *guc)
 	if (!guc->submission_state.guc_ids_bitmap)
 		return -ENOMEM;
 
+	spin_lock_init(&guc->timestamp.lock);
+	INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping);
+	guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
+
 	return 0;
 }
 
@@ -3369,7 +3643,9 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine)
 		engine->emit_flush = gen12_emit_flush_xcs;
 	}
 	engine->set_default_submission = guc_set_default_submission;
+	engine->busyness = guc_engine_busyness;
 
+	engine->flags |= I915_ENGINE_SUPPORTS_STATS;
 	engine->flags |= I915_ENGINE_HAS_PREEMPTION;
 	engine->flags |= I915_ENGINE_HAS_TIMESLICES;
 
@@ -3468,6 +3744,7 @@ int intel_guc_submission_setup(struct intel_engine_cs *engine)
 void intel_guc_submission_enable(struct intel_guc *guc)
 {
 	guc_init_lrc_mapping(guc);
+	guc_init_engine_stats(guc);
 }
 
 void intel_guc_submission_disable(struct intel_guc *guc)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
index c7ef44fa0c36..5a95a9f0a8e3 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
@@ -28,6 +28,8 @@ void intel_guc_submission_print_context_info(struct intel_guc *guc,
 void intel_guc_dump_active_requests(struct intel_engine_cs *engine,
 				    struct i915_request *hung_rq,
 				    struct drm_printer *m);
+void intel_guc_busyness_park(struct intel_gt *gt);
+void intel_guc_busyness_unpark(struct intel_gt *gt);
 
 bool intel_guc_virtual_engine_has_heartbeat(const struct intel_engine_cs *ve);
 
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index d9f7a729333f..f7927f6dac6e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2662,6 +2662,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define   RING_WAIT		(1 << 11) /* gen3+, PRBx_CTL */
 #define   RING_WAIT_SEMAPHORE	(1 << 10) /* gen6+ */
 
+#define GUCPMTIMESTAMP          _MMIO(0xC3E8)
+
 /* There are 16 64-bit CS General Purpose Registers per-engine on Gen8+ */
 #define GEN8_RING_CS_GPR(base, n)	_MMIO((base) + 0x600 + (n) * 8)
 #define GEN8_RING_CS_GPR_UDW(base, n)	_MMIO((base) + 0x600 + (n) * 8 + 4)
-- 
2.20.1
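
The 32 bit to 64 bit extension scheme described in the busyness comment block of
this patch can be sketched outside the driver. The snippet below is only an
illustration of the arithmetic, not the patch's implementation: the names
extend_gt_stamp() and busy_ns() and the hard-coded 19.2 MHz clock are assumptions
made for the example, while the real code goes through guc_update_pm_timestamp(),
__extend_last_switch() and intel_gt_clock_interval_to_ns(). The sketch mirrors the
simple monotonic case handled by guc_update_pm_timestamp(); __extend_last_switch()
additionally handles a context-switch stamp that may be slightly behind the
already-extended gt_stamp. It also only stays correct if samples are taken more
often than once per 32 bit wrap, which is the reason for the 1/8th-wrap worker
period.

#include <stdint.h>
#include <stdio.h>

/* Illustrative gt clock frequency, matching the 19.2 MHz quoted above. */
#define GT_CLK_HZ	19200000ull

/*
 * Extend a fresh 32 bit timestamp sample against the previously extended
 * 64 bit value. Correct as long as samples arrive more often than once per
 * 32 bit wrap (~224 s at 19.2 MHz), hence the 1/8th-wrap worker period.
 */
static uint64_t extend_gt_stamp(uint64_t prev, uint32_t sample)
{
	uint32_t hi = (uint32_t)(prev >> 32);

	if (sample < (uint32_t)prev)	/* counter wrapped since the last read */
		hi++;

	return ((uint64_t)hi << 32) | sample;
}

/* total/start are extended gt clocks; returns busyness in nanoseconds. */
static uint64_t busy_ns(uint64_t total_clks, uint64_t start_clk,
			uint64_t now_clk, int running)
{
	if (running)
		total_clks += now_clk - start_clk;

	return total_clks * 1000000000ull / GT_CLK_HZ;
}

int main(void)
{
	uint64_t stamp = 0xfffffff0ull;		/* just below a 32 bit wrap */

	stamp = extend_gt_stamp(stamp, 0x10);	/* wrapped: 0x1_00000010 */

	printf("stamp 0x%llx, busy %llu ns\n",
	       (unsigned long long)stamp,
	       (unsigned long long)busy_ns(0, 0xfffffff0ull, stamp, 1));

	return 0;
}

For a 19.2 MHz gt clock the wrap time is 2^32 / 19.2e6, roughly 224 s, so a worker
period of 1/8th of that comes out at about 28 s, in line with the ~27 s figure
quoted in the patch comment.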


^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [Intel-gfx] ✗ Fi.CI.SPARSE: warning for series starting with [1/2] drm/i915/pmu: Add a name to the execlists stats
  2021-10-27  0:48 ` [Intel-gfx] " Umesh Nerlige Ramappa
  (?)
  (?)
@ 2021-10-27  0:58 ` Patchwork
  -1 siblings, 0 replies; 16+ messages in thread
From: Patchwork @ 2021-10-27  0:58 UTC (permalink / raw)
  To: Umesh Nerlige Ramappa; +Cc: intel-gfx

== Series Details ==

Series: series starting with [1/2] drm/i915/pmu: Add a name to the execlists stats
URL   : https://patchwork.freedesktop.org/series/96324/
State : warning

== Summary ==

$ dim sparse --fast origin/drm-tip
Sparse version: v0.6.2
Fast mode used, each commit won't be checked separately.
+drivers/gpu/drm/i915/gt/intel_engine_stats.h:28:9: warning: trying to copy expression type 31
+drivers/gpu/drm/i915/gt/intel_engine_stats.h:28:9: warning: trying to copy expression type 31
+drivers/gpu/drm/i915/gt/intel_engine_stats.h:28:9: warning: trying to copy expression type 31
+drivers/gpu/drm/i915/gt/intel_engine_stats.h:33:9: warning: trying to copy expression type 31
+drivers/gpu/drm/i915/gt/intel_engine_stats.h:33:9: warning: trying to copy expression type 31
+drivers/gpu/drm/i915/gt/intel_engine_stats.h:51:9: warning: trying to copy expression type 31
+drivers/gpu/drm/i915/gt/intel_engine_stats.h:51:9: warning: trying to copy expression type 31
+drivers/gpu/drm/i915/gt/intel_engine_stats.h:51:9: warning: trying to copy expression type 31
+drivers/gpu/drm/i915/gt/intel_engine_stats.h:57:9: warning: trying to copy expression type 31
+drivers/gpu/drm/i915/gt/intel_engine_stats.h:57:9: warning: trying to copy expression type 31
-O:drivers/gpu/drm/i915/gt/intel_engine_stats.h:27:9: warning: trying to copy expression type 31
-O:drivers/gpu/drm/i915/gt/intel_engine_stats.h:27:9: warning: trying to copy expression type 31
-O:drivers/gpu/drm/i915/gt/intel_engine_stats.h:27:9: warning: trying to copy expression type 31
-O:drivers/gpu/drm/i915/gt/intel_engine_stats.h:32:9: warning: trying to copy expression type 31
-O:drivers/gpu/drm/i915/gt/intel_engine_stats.h:32:9: warning: trying to copy expression type 31
-O:drivers/gpu/drm/i915/gt/intel_engine_stats.h:49:9: warning: trying to copy expression type 31
-O:drivers/gpu/drm/i915/gt/intel_engine_stats.h:49:9: warning: trying to copy expression type 31
-O:drivers/gpu/drm/i915/gt/intel_engine_stats.h:49:9: warning: trying to copy expression type 31
-O:drivers/gpu/drm/i915/gt/intel_engine_stats.h:56:9: warning: trying to copy expression type 31
-O:drivers/gpu/drm/i915/gt/intel_engine_stats.h:56:9: warning: trying to copy expression type 31



^ permalink raw reply	[flat|nested] 16+ messages in thread

* [Intel-gfx] ✓ Fi.CI.BAT: success for series starting with [1/2] drm/i915/pmu: Add a name to the execlists stats
  2021-10-27  0:48 ` [Intel-gfx] " Umesh Nerlige Ramappa
                   ` (2 preceding siblings ...)
  (?)
@ 2021-10-27  1:27 ` Patchwork
  -1 siblings, 0 replies; 16+ messages in thread
From: Patchwork @ 2021-10-27  1:27 UTC (permalink / raw)
  To: Umesh Nerlige Ramappa; +Cc: intel-gfx

[-- Attachment #1: Type: text/plain, Size: 3447 bytes --]

== Series Details ==

Series: series starting with [1/2] drm/i915/pmu: Add a name to the execlists stats
URL   : https://patchwork.freedesktop.org/series/96324/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_10792 -> Patchwork_21455
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/index.html

Participating hosts (38 -> 33)
------------------------------

  Additional (1): fi-ilk-650 
  Missing    (6): bat-dg1-6 bat-dg1-5 fi-icl-u2 bat-adlp-4 fi-ctg-p8600 fi-elk-e7500 

Known issues
------------

  Here are the changes found in Patchwork_21455 that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@amdgpu/amd_cs_nop@nop-compute0:
    - fi-ilk-650:         NOTRUN -> [SKIP][1] ([fdo#109271]) +35 similar issues
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/fi-ilk-650/igt@amdgpu/amd_cs_nop@nop-compute0.html

  * igt@gem_exec_suspend@basic-s0:
    - fi-tgl-1115g4:      [PASS][2] -> [FAIL][3] ([i915#1888])
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/fi-tgl-1115g4/igt@gem_exec_suspend@basic-s0.html
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/fi-tgl-1115g4/igt@gem_exec_suspend@basic-s0.html

  * igt@kms_chamelium@dp-hpd-fast:
    - fi-ilk-650:         NOTRUN -> [SKIP][4] ([fdo#109271] / [fdo#111827]) +8 similar issues
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/fi-ilk-650/igt@kms_chamelium@dp-hpd-fast.html

  * igt@runner@aborted:
    - fi-bxt-dsi:         NOTRUN -> [FAIL][5] ([i915#2426] / [i915#3363])
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/fi-bxt-dsi/igt@runner@aborted.html

  
#### Possible fixes ####

  * igt@kms_frontbuffer_tracking@basic:
    - {fi-hsw-gt1}:       [DMESG-WARN][6] ([i915#4290]) -> [PASS][7]
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/fi-hsw-gt1/igt@kms_frontbuffer_tracking@basic.html
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/fi-hsw-gt1/igt@kms_frontbuffer_tracking@basic.html

  
  {name}: This element is suppressed. This means it is ignored when computing
          the status of the difference (SUCCESS, WARNING, or FAILURE).

  [fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
  [fdo#111827]: https://bugs.freedesktop.org/show_bug.cgi?id=111827
  [i915#1888]: https://gitlab.freedesktop.org/drm/intel/issues/1888
  [i915#1982]: https://gitlab.freedesktop.org/drm/intel/issues/1982
  [i915#2426]: https://gitlab.freedesktop.org/drm/intel/issues/2426
  [i915#3363]: https://gitlab.freedesktop.org/drm/intel/issues/3363
  [i915#4290]: https://gitlab.freedesktop.org/drm/intel/issues/4290


Build changes
-------------

  * Linux: CI_DRM_10792 -> Patchwork_21455

  CI-20190529: 20190529
  CI_DRM_10792: 299777ddcc06c9a0ea7b95a0823ccaca268d16b8 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_6262: d1c793b26e31cc6ae3f9fa3239805a9bbcc749fb @ https://gitlab.freedesktop.org/drm/igt-gpu-tools.git
  Patchwork_21455: 8d0300f5d14fc7a068b618ba28b6eabfb45b294b @ git://anongit.freedesktop.org/gfx-ci/linux


== Linux commits ==

8d0300f5d14f drm/i915/pmu: Connect engine busyness stats from GuC to pmu
b27b1a409734 drm/i915/pmu: Add a name to the execlists stats

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/index.html

[-- Attachment #2: Type: text/html, Size: 4172 bytes --]

^ permalink raw reply	[flat|nested] 16+ messages in thread

* [Intel-gfx] ✗ Fi.CI.IGT: failure for series starting with [1/2] drm/i915/pmu: Add a name to the execlists stats
  2021-10-27  0:48 ` [Intel-gfx] " Umesh Nerlige Ramappa
                   ` (3 preceding siblings ...)
  (?)
@ 2021-10-27  2:49 ` Patchwork
  -1 siblings, 0 replies; 16+ messages in thread
From: Patchwork @ 2021-10-27  2:49 UTC (permalink / raw)
  To: Umesh Nerlige Ramappa; +Cc: intel-gfx

[-- Attachment #1: Type: text/plain, Size: 30297 bytes --]

== Series Details ==

Series: series starting with [1/2] drm/i915/pmu: Add a name to the execlists stats
URL   : https://patchwork.freedesktop.org/series/96324/
State : failure

== Summary ==

CI Bug Log - changes from CI_DRM_10792_full -> Patchwork_21455_full
====================================================

Summary
-------

  **FAILURE**

  Serious unknown changes coming with Patchwork_21455_full absolutely need to be
  verified manually.
  
  If you think the reported changes have nothing to do with the changes
  introduced in Patchwork_21455_full, please notify your bug team to allow them
  to document this new failure mode, which will reduce false positives in CI.

  

Participating hosts (10 -> 10)
------------------------------

  No changes in participating hosts

Possible new issues
-------------------

  Here are the unknown changes that may have been introduced in Patchwork_21455_full:

### IGT changes ###

#### Possible regressions ####

  * igt@kms_cursor_legacy@all-pipes-forked-bo:
    - shard-tglb:         [PASS][1] -> [INCOMPLETE][2]
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-tglb7/igt@kms_cursor_legacy@all-pipes-forked-bo.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-tglb5/igt@kms_cursor_legacy@all-pipes-forked-bo.html

  
Known issues
------------

  Here are the changes found in Patchwork_21455_full that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@drm_mm@all@insert_range:
    - shard-skl:          [PASS][3] -> [INCOMPLETE][4] ([i915#2485])
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-skl4/igt@drm_mm@all@insert_range.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-skl3/igt@drm_mm@all@insert_range.html

  * igt@gem_ctx_isolation@preservation-s3@rcs0:
    - shard-skl:          [PASS][5] -> [INCOMPLETE][6] ([i915#198])
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-skl6/igt@gem_ctx_isolation@preservation-s3@rcs0.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-skl4/igt@gem_ctx_isolation@preservation-s3@rcs0.html

  * igt@gem_exec_fair@basic-deadline:
    - shard-apl:          NOTRUN -> [FAIL][7] ([i915#2846])
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-apl1/igt@gem_exec_fair@basic-deadline.html

  * igt@gem_exec_fair@basic-flow@rcs0:
    - shard-tglb:         [PASS][8] -> [FAIL][9] ([i915#2842]) +2 similar issues
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-tglb7/igt@gem_exec_fair@basic-flow@rcs0.html
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-tglb3/igt@gem_exec_fair@basic-flow@rcs0.html

  * igt@gem_exec_fair@basic-none-solo@rcs0:
    - shard-kbl:          NOTRUN -> [FAIL][10] ([i915#2842])
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-kbl4/igt@gem_exec_fair@basic-none-solo@rcs0.html

  * igt@gem_exec_fair@basic-none@vecs0:
    - shard-apl:          [PASS][11] -> [FAIL][12] ([i915#2842] / [i915#3468])
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-apl8/igt@gem_exec_fair@basic-none@vecs0.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-apl8/igt@gem_exec_fair@basic-none@vecs0.html

  * igt@gem_exec_fair@basic-pace@vecs0:
    - shard-kbl:          [PASS][13] -> [SKIP][14] ([fdo#109271])
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-kbl6/igt@gem_exec_fair@basic-pace@vecs0.html
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-kbl6/igt@gem_exec_fair@basic-pace@vecs0.html

  * igt@gem_exec_suspend@basic-s3:
    - shard-kbl:          [PASS][15] -> [DMESG-WARN][16] ([i915#180]) +2 similar issues
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-kbl3/igt@gem_exec_suspend@basic-s3.html
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-kbl1/igt@gem_exec_suspend@basic-s3.html

  * igt@gem_exec_whisper@basic-contexts-forked-all:
    - shard-glk:          [PASS][17] -> [DMESG-WARN][18] ([i915#118])
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-glk5/igt@gem_exec_whisper@basic-contexts-forked-all.html
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-glk4/igt@gem_exec_whisper@basic-contexts-forked-all.html

  * igt@gem_workarounds@suspend-resume:
    - shard-tglb:         [PASS][19] -> [INCOMPLETE][20] ([i915#456])
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-tglb8/igt@gem_workarounds@suspend-resume.html
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-tglb7/igt@gem_workarounds@suspend-resume.html

  * igt@i915_suspend@debugfs-reader:
    - shard-apl:          [PASS][21] -> [DMESG-WARN][22] ([i915#180]) +1 similar issue
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-apl3/igt@i915_suspend@debugfs-reader.html
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-apl7/igt@i915_suspend@debugfs-reader.html

  * igt@i915_suspend@forcewake:
    - shard-tglb:         [PASS][23] -> [INCOMPLETE][24] ([i915#2411] / [i915#456])
   [23]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-tglb3/igt@i915_suspend@forcewake.html
   [24]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-tglb7/igt@i915_suspend@forcewake.html

  * igt@kms_big_fb@linear-16bpp-rotate-90:
    - shard-apl:          NOTRUN -> [SKIP][25] ([fdo#109271]) +75 similar issues
   [25]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-apl8/igt@kms_big_fb@linear-16bpp-rotate-90.html

  * igt@kms_big_fb@y-tiled-max-hw-stride-32bpp-rotate-180-hflip:
    - shard-apl:          NOTRUN -> [SKIP][26] ([fdo#109271] / [i915#3777]) +1 similar issue
   [26]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-apl1/igt@kms_big_fb@y-tiled-max-hw-stride-32bpp-rotate-180-hflip.html

  * igt@kms_big_fb@yf-tiled-max-hw-stride-32bpp-rotate-180-async-flip:
    - shard-skl:          NOTRUN -> [FAIL][27] ([i915#3743])
   [27]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-skl3/igt@kms_big_fb@yf-tiled-max-hw-stride-32bpp-rotate-180-async-flip.html

  * igt@kms_ccs@pipe-b-random-ccs-data-y_tiled_gen12_mc_ccs:
    - shard-kbl:          NOTRUN -> [SKIP][28] ([fdo#109271] / [i915#3886]) +5 similar issues
   [28]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-kbl7/igt@kms_ccs@pipe-b-random-ccs-data-y_tiled_gen12_mc_ccs.html

  * igt@kms_ccs@pipe-c-ccs-on-another-bo-y_tiled_gen12_rc_ccs_cc:
    - shard-apl:          NOTRUN -> [SKIP][29] ([fdo#109271] / [i915#3886]) +4 similar issues
   [29]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-apl8/igt@kms_ccs@pipe-c-ccs-on-another-bo-y_tiled_gen12_rc_ccs_cc.html

  * igt@kms_ccs@pipe-c-crc-primary-rotation-180-y_tiled_gen12_mc_ccs:
    - shard-skl:          NOTRUN -> [SKIP][30] ([fdo#109271] / [i915#3886]) +1 similar issue
   [30]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-skl9/igt@kms_ccs@pipe-c-crc-primary-rotation-180-y_tiled_gen12_mc_ccs.html

  * igt@kms_chamelium@vga-edid-read:
    - shard-apl:          NOTRUN -> [SKIP][31] ([fdo#109271] / [fdo#111827]) +5 similar issues
   [31]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-apl1/igt@kms_chamelium@vga-edid-read.html

  * igt@kms_color_chamelium@pipe-d-ctm-max:
    - shard-kbl:          NOTRUN -> [SKIP][32] ([fdo#109271] / [fdo#111827]) +10 similar issues
   [32]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-kbl3/igt@kms_color_chamelium@pipe-d-ctm-max.html

  * igt@kms_content_protection@atomic:
    - shard-kbl:          NOTRUN -> [TIMEOUT][33] ([i915#1319]) +1 similar issue
   [33]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-kbl7/igt@kms_content_protection@atomic.html
    - shard-apl:          NOTRUN -> [TIMEOUT][34] ([i915#1319])
   [34]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-apl4/igt@kms_content_protection@atomic.html

  * igt@kms_cursor_crc@pipe-a-cursor-suspend:
    - shard-kbl:          NOTRUN -> [DMESG-WARN][35] ([i915#180]) +3 similar issues
   [35]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-kbl7/igt@kms_cursor_crc@pipe-a-cursor-suspend.html

  * igt@kms_cursor_crc@pipe-c-cursor-suspend:
    - shard-apl:          NOTRUN -> [DMESG-WARN][36] ([i915#180])
   [36]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-apl4/igt@kms_cursor_crc@pipe-c-cursor-suspend.html

  * igt@kms_cursor_legacy@flip-vs-cursor-atomic-transitions:
    - shard-skl:          [PASS][37] -> [FAIL][38] ([i915#2346])
   [37]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-skl1/igt@kms_cursor_legacy@flip-vs-cursor-atomic-transitions.html
   [38]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-skl1/igt@kms_cursor_legacy@flip-vs-cursor-atomic-transitions.html

  * igt@kms_fbcon_fbt@fbc-suspend:
    - shard-apl:          [PASS][39] -> [INCOMPLETE][40] ([i915#180] / [i915#1982])
   [39]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-apl1/igt@kms_fbcon_fbt@fbc-suspend.html
   [40]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-apl4/igt@kms_fbcon_fbt@fbc-suspend.html

  * igt@kms_flip@flip-vs-expired-vblank@c-edp1:
    - shard-skl:          [PASS][41] -> [FAIL][42] ([i915#79])
   [41]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-skl6/igt@kms_flip@flip-vs-expired-vblank@c-edp1.html
   [42]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-skl9/igt@kms_flip@flip-vs-expired-vblank@c-edp1.html

  * igt@kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile:
    - shard-skl:          NOTRUN -> [SKIP][43] ([fdo#109271]) +53 similar issues
   [43]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-skl9/igt@kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile.html

  * igt@kms_frontbuffer_tracking@psr-rgb101010-draw-mmap-wc:
    - shard-kbl:          NOTRUN -> [SKIP][44] ([fdo#109271]) +133 similar issues
   [44]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-kbl6/igt@kms_frontbuffer_tracking@psr-rgb101010-draw-mmap-wc.html

  * igt@kms_pipe_crc_basic@read-crc-pipe-d-frame-sequence:
    - shard-apl:          NOTRUN -> [SKIP][45] ([fdo#109271] / [i915#533]) +1 similar issue
   [45]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-apl4/igt@kms_pipe_crc_basic@read-crc-pipe-d-frame-sequence.html
    - shard-kbl:          NOTRUN -> [SKIP][46] ([fdo#109271] / [i915#533])
   [46]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-kbl7/igt@kms_pipe_crc_basic@read-crc-pipe-d-frame-sequence.html

  * igt@kms_plane_alpha_blend@pipe-a-constant-alpha-max:
    - shard-kbl:          NOTRUN -> [FAIL][47] ([fdo#108145] / [i915#265])
   [47]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-kbl3/igt@kms_plane_alpha_blend@pipe-a-constant-alpha-max.html

  * igt@kms_plane_alpha_blend@pipe-b-alpha-7efc:
    - shard-apl:          NOTRUN -> [FAIL][48] ([fdo#108145] / [i915#265]) +1 similar issue
   [48]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-apl8/igt@kms_plane_alpha_blend@pipe-b-alpha-7efc.html

  * igt@kms_plane_alpha_blend@pipe-b-alpha-transparent-fb:
    - shard-kbl:          NOTRUN -> [FAIL][49] ([i915#265])
   [49]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-kbl6/igt@kms_plane_alpha_blend@pipe-b-alpha-transparent-fb.html

  * igt@kms_psr2_sf@overlay-plane-update-sf-dmg-area-5:
    - shard-kbl:          NOTRUN -> [SKIP][50] ([fdo#109271] / [i915#658])
   [50]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-kbl6/igt@kms_psr2_sf@overlay-plane-update-sf-dmg-area-5.html

  * igt@kms_psr2_su@page_flip:
    - shard-skl:          NOTRUN -> [SKIP][51] ([fdo#109271] / [i915#658])
   [51]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-skl3/igt@kms_psr2_su@page_flip.html

  * igt@kms_psr@psr2_cursor_plane_move:
    - shard-iclb:         [PASS][52] -> [SKIP][53] ([fdo#109441]) +1 similar issue
   [52]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-iclb2/igt@kms_psr@psr2_cursor_plane_move.html
   [53]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-iclb5/igt@kms_psr@psr2_cursor_plane_move.html

  * igt@kms_writeback@writeback-check-output:
    - shard-apl:          NOTRUN -> [SKIP][54] ([fdo#109271] / [i915#2437])
   [54]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-apl4/igt@kms_writeback@writeback-check-output.html
    - shard-kbl:          NOTRUN -> [SKIP][55] ([fdo#109271] / [i915#2437])
   [55]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-kbl7/igt@kms_writeback@writeback-check-output.html

  * igt@perf@polling-parameterized:
    - shard-skl:          [PASS][56] -> [FAIL][57] ([i915#1542])
   [56]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-skl10/igt@perf@polling-parameterized.html
   [57]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-skl6/igt@perf@polling-parameterized.html

  * igt@perf@polling-small-buf:
    - shard-skl:          [PASS][58] -> [FAIL][59] ([i915#1722])
   [58]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-skl7/igt@perf@polling-small-buf.html
   [59]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-skl4/igt@perf@polling-small-buf.html

  * igt@sysfs_clients@fair-3:
    - shard-apl:          NOTRUN -> [SKIP][60] ([fdo#109271] / [i915#2994])
   [60]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-apl1/igt@sysfs_clients@fair-3.html

  * igt@sysfs_clients@sema-50:
    - shard-kbl:          NOTRUN -> [SKIP][61] ([fdo#109271] / [i915#2994])
   [61]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-kbl4/igt@sysfs_clients@sema-50.html

  
#### Possible fixes ####

  * igt@gem_exec_fair@basic-none@vcs0:
    - shard-kbl:          [FAIL][62] ([i915#2842]) -> [PASS][63] +2 similar issues
   [62]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-kbl7/igt@gem_exec_fair@basic-none@vcs0.html
   [63]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-kbl4/igt@gem_exec_fair@basic-none@vcs0.html
    - shard-apl:          [FAIL][64] ([i915#2842]) -> [PASS][65] +1 similar issue
   [64]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-apl8/igt@gem_exec_fair@basic-none@vcs0.html
   [65]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-apl8/igt@gem_exec_fair@basic-none@vcs0.html

  * igt@gem_exec_fair@basic-pace-solo@rcs0:
    - shard-tglb:         [FAIL][66] ([i915#2842]) -> [PASS][67]
   [66]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-tglb6/igt@gem_exec_fair@basic-pace-solo@rcs0.html
   [67]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-tglb8/igt@gem_exec_fair@basic-pace-solo@rcs0.html

  * igt@gem_exec_fair@basic-pace@vcs0:
    - shard-kbl:          [SKIP][68] ([fdo#109271]) -> [PASS][69]
   [68]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-kbl6/igt@gem_exec_fair@basic-pace@vcs0.html
   [69]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-kbl6/igt@gem_exec_fair@basic-pace@vcs0.html

  * igt@gem_exec_fence@syncobj-invalid-flags:
    - shard-skl:          [DMESG-WARN][70] ([i915#1982]) -> [PASS][71]
   [70]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-skl8/igt@gem_exec_fence@syncobj-invalid-flags.html
   [71]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-skl8/igt@gem_exec_fence@syncobj-invalid-flags.html

  * igt@gem_ppgtt@flink-and-close-vma-leak:
    - shard-glk:          [FAIL][72] ([i915#644]) -> [PASS][73]
   [72]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-glk3/igt@gem_ppgtt@flink-and-close-vma-leak.html
   [73]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-glk3/igt@gem_ppgtt@flink-and-close-vma-leak.html

  * igt@i915_suspend@sysfs-reader:
    - shard-apl:          [DMESG-WARN][74] ([i915#180]) -> [PASS][75] +2 similar issues
   [74]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-apl4/igt@i915_suspend@sysfs-reader.html
   [75]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-apl8/igt@i915_suspend@sysfs-reader.html

  * igt@kms_cursor_legacy@flip-vs-cursor-busy-crc-legacy:
    - shard-skl:          [FAIL][76] ([i915#2346]) -> [PASS][77]
   [76]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-skl3/igt@kms_cursor_legacy@flip-vs-cursor-busy-crc-legacy.html
   [77]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-skl9/igt@kms_cursor_legacy@flip-vs-cursor-busy-crc-legacy.html

  * igt@kms_fbcon_fbt@fbc-suspend:
    - shard-kbl:          [INCOMPLETE][78] ([i915#180] / [i915#636]) -> [PASS][79]
   [78]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-kbl6/igt@kms_fbcon_fbt@fbc-suspend.html
   [79]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-kbl7/igt@kms_fbcon_fbt@fbc-suspend.html

  * igt@kms_flip@flip-vs-suspend@b-edp1:
    - shard-skl:          [INCOMPLETE][80] ([i915#198]) -> [PASS][81]
   [80]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-skl9/igt@kms_flip@flip-vs-suspend@b-edp1.html
   [81]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-skl10/igt@kms_flip@flip-vs-suspend@b-edp1.html

  * igt@kms_flip@flip-vs-wf_vblank-interruptible@a-edp1:
    - shard-skl:          [FAIL][82] ([i915#2122]) -> [PASS][83]
   [82]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-skl4/igt@kms_flip@flip-vs-wf_vblank-interruptible@a-edp1.html
   [83]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-skl10/igt@kms_flip@flip-vs-wf_vblank-interruptible@a-edp1.html

  * igt@kms_flip_scaled_crc@flip-32bpp-ytile-to-32bpp-ytileccs:
    - shard-iclb:         [SKIP][84] ([i915#3701]) -> [PASS][85]
   [84]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-iclb2/igt@kms_flip_scaled_crc@flip-32bpp-ytile-to-32bpp-ytileccs.html
   [85]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-iclb5/igt@kms_flip_scaled_crc@flip-32bpp-ytile-to-32bpp-ytileccs.html

  * igt@kms_hdr@bpc-switch:
    - shard-skl:          [FAIL][86] ([i915#1188]) -> [PASS][87]
   [86]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-skl7/igt@kms_hdr@bpc-switch.html
   [87]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-skl4/igt@kms_hdr@bpc-switch.html

  * igt@kms_hdr@bpc-switch-suspend:
    - shard-kbl:          [DMESG-WARN][88] ([i915#180]) -> [PASS][89] +6 similar issues
   [88]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-kbl4/igt@kms_hdr@bpc-switch-suspend.html
   [89]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-kbl4/igt@kms_hdr@bpc-switch-suspend.html

  * igt@kms_plane_alpha_blend@pipe-b-coverage-7efc:
    - shard-skl:          [FAIL][90] ([fdo#108145] / [i915#265]) -> [PASS][91] +1 similar issue
   [90]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-skl2/igt@kms_plane_alpha_blend@pipe-b-coverage-7efc.html
   [91]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-skl7/igt@kms_plane_alpha_blend@pipe-b-coverage-7efc.html

  * igt@kms_psr@psr2_cursor_mmap_cpu:
    - shard-iclb:         [SKIP][92] ([fdo#109441]) -> [PASS][93] +1 similar issue
   [92]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-iclb5/igt@kms_psr@psr2_cursor_mmap_cpu.html
   [93]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-iclb2/igt@kms_psr@psr2_cursor_mmap_cpu.html

  
#### Warnings ####

  * igt@i915_pm_rc6_residency@rc6-idle:
    - shard-iclb:         [WARN][94] ([i915#1804] / [i915#2684]) -> [WARN][95] ([i915#2684])
   [94]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-iclb4/igt@i915_pm_rc6_residency@rc6-idle.html
   [95]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-iclb8/igt@i915_pm_rc6_residency@rc6-idle.html

  * igt@kms_flip@2x-flip-vs-expired-vblank@ab-hdmi-a1-hdmi-a2:
    - shard-glk:          [FAIL][96] ([i915#79]) -> [FAIL][97] ([i915#2122])
   [96]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-glk4/igt@kms_flip@2x-flip-vs-expired-vblank@ab-hdmi-a1-hdmi-a2.html
   [97]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-glk7/igt@kms_flip@2x-flip-vs-expired-vblank@ab-hdmi-a1-hdmi-a2.html

  * igt@kms_psr2_sf@overlay-primary-update-sf-dmg-area-4:
    - shard-iclb:         [SKIP][98] ([i915#2920]) -> [SKIP][99] ([i915#658]) +1 similar issue
   [98]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-iclb2/igt@kms_psr2_sf@overlay-primary-update-sf-dmg-area-4.html
   [99]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-iclb6/igt@kms_psr2_sf@overlay-primary-update-sf-dmg-area-4.html

  * igt@runner@aborted:
    - shard-kbl:          ([FAIL][100], [FAIL][101], [FAIL][102], [FAIL][103], [FAIL][104], [FAIL][105], [FAIL][106], [FAIL][107], [FAIL][108], [FAIL][109], [FAIL][110], [FAIL][111], [FAIL][112], [FAIL][113], [FAIL][114]) ([i915#1436] / [i915#180] / [i915#1814] / [i915#3002] / [i915#3363] / [i915#4312] / [i915#602] / [i915#92]) -> ([FAIL][115], [FAIL][116], [FAIL][117], [FAIL][118], [FAIL][119], [FAIL][120], [FAIL][121], [FAIL][122], [FAIL][123], [FAIL][124], [FAIL][125], [FAIL][126], [FAIL][127]) ([i915#1436] / [i915#180] / [i915#1814] / [i915#3002] / [i915#3363] / [i915#4312] / [i915#602])
   [100]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-kbl6/igt@runner@aborted.html
   [101]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-kbl4/igt@runner@aborted.html
   [102]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-kbl6/igt@runner@aborted.html
   [103]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-kbl1/igt@runner@aborted.html
   [104]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-kbl1/igt@runner@aborted.html
   [105]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-kbl4/igt@runner@aborted.html
   [106]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-kbl3/igt@runner@aborted.html
   [107]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-kbl6/igt@runner@aborted.html
   [108]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-kbl6/igt@runner@aborted.html
   [109]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-kbl3/igt@runner@aborted.html
   [110]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-kbl7/igt@runner@aborted.html
   [111]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-kbl7/igt@runner@aborted.html
   [112]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-kbl7/igt@runner@aborted.html
   [113]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-kbl7/igt@runner@aborted.html
   [114]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-kbl6/igt@runner@aborted.html
   [115]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-kbl4/igt@runner@aborted.html
   [116]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-kbl6/igt@runner@aborted.html
   [117]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-kbl4/igt@runner@aborted.html
   [118]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-kbl1/igt@runner@aborted.html
   [119]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-kbl7/igt@runner@aborted.html
   [120]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-kbl1/igt@runner@aborted.html
   [121]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-kbl1/igt@runner@aborted.html
   [122]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-kbl6/igt@runner@aborted.html
   [123]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-kbl6/igt@runner@aborted.html
   [124]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-kbl7/igt@runner@aborted.html
   [125]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-kbl7/igt@runner@aborted.html
   [126]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-kbl1/igt@runner@aborted.html
   [127]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-kbl3/igt@runner@aborted.html
    - shard-apl:          ([FAIL][128], [FAIL][129], [FAIL][130], [FAIL][131], [FAIL][132], [FAIL][133], [FAIL][134]) ([fdo#109271] / [i915#180] / [i915#1814] / [i915#3002] / [i915#3363] / [i915#4312]) -> ([FAIL][135], [FAIL][136], [FAIL][137], [FAIL][138], [FAIL][139], [FAIL][140], [FAIL][141]) ([i915#180] / [i915#1814] / [i915#3002] / [i915#3363] / [i915#4312])
   [128]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-apl8/igt@runner@aborted.html
   [129]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-apl4/igt@runner@aborted.html
   [130]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-apl1/igt@runner@aborted.html
   [131]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-apl3/igt@runner@aborted.html
   [132]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-apl3/igt@runner@aborted.html
   [133]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-apl3/igt@runner@aborted.html
   [134]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-apl4/igt@runner@aborted.html
   [135]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-apl3/igt@runner@aborted.html
   [136]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-apl7/igt@runner@aborted.html
   [137]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-apl1/igt@runner@aborted.html
   [138]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-apl4/igt@runner@aborted.html
   [139]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-apl7/igt@runner@aborted.html
   [140]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-apl4/igt@runner@aborted.html
   [141]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-apl8/igt@runner@aborted.html
    - shard-skl:          ([FAIL][142], [FAIL][143], [FAIL][144]) ([i915#1436] / [i915#1814] / [i915#2029] / [i915#3002] / [i915#3363] / [i915#4312]) -> ([FAIL][145], [FAIL][146], [FAIL][147]) ([i915#1436] / [i915#2029] / [i915#3002] / [i915#3363] / [i915#4312])
   [142]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-skl9/igt@runner@aborted.html
   [143]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-skl7/igt@runner@aborted.html
   [144]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10792/shard-skl3/igt@runner@aborted.html
   [145]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-skl2/igt@runner@aborted.html
   [146]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-skl3/igt@runner@aborted.html
   [147]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/shard-skl1/igt@runner@aborted.html

  
  [fdo#108145]: https://bugs.freedesktop.org/show_bug.cgi?id=108145
  [fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
  [fdo#109441]: https://bugs.freedesktop.org/show_bug.cgi?id=109441
  [fdo#111827]: https://bugs.freedesktop.org/show_bug.cgi?id=111827
  [i915#118]: https://gitlab.freedesktop.org/drm/intel/issues/118
  [i915#1188]: https://gitlab.freedesktop.org/drm/intel/issues/1188
  [i915#1319]: https://gitlab.freedesktop.org/drm/intel/issues/1319
  [i915#1436]: https://gitlab.freedesktop.org/drm/intel/issues/1436
  [i915#1542]: https://gitlab.freedesktop.org/drm/intel/issues/1542
  [i915#1722]: https://gitlab.freedesktop.org/drm/intel/issues/1722
  [i915#180]: https://gitlab.freedesktop.org/drm/intel/issues/180
  [i915#1804]: https://gitlab.freedesktop.org/drm/intel/issues/1804
  [i915#1814]: https://gitlab.freedesktop.org/drm/intel/issues/1814
  [i915#198]: https://gitlab.freedesktop.org/drm/intel/issues/198
  [i915#1982]: https://gitlab.freedesktop.org/drm/intel/issues/1982
  [i915#2029]: https://gitlab.freedesktop.org/drm/intel/issues/2029
  [i915#2122]: https://gitlab.freedesktop.org/drm/intel/issues/2122
  [i915#2346]: https://gitlab.freedesktop.org/drm/intel/issues/2346
  [i915#2411]: https://gitlab.freedesktop.org/drm/intel/issues/2411
  [i915#2437]: https://gitlab.freedesktop.org/drm/intel/issues/2437
  [i915#2485]: https://gitlab.freedesktop.org/drm/intel/issues/2485
  [i915#265]: https://gitlab.freedesktop.org/drm/intel/issues/265
  [i915#2684]: https://gitlab.freedesktop.org/drm/intel/issues/2684
  [i915#2842]: https://gitlab.freedesktop.org/drm/intel/issues/2842
  [i915#2846]: https://gitlab.freedesktop.org/drm/intel/issues/2846
  [i915#2920]: https://gitlab.freedesktop.org/drm/intel/issues/2920
  [i915#2994]: https://gitlab.freedesktop.org/drm/intel/issues/2994
  [i915#3002]: https://gitlab.freedesktop.org/drm/intel/issues/3002
  [i915#3363]: https://gitlab.freedesktop.org/drm/intel/issues/3363
  [i915#3468]: https://gitlab.freedesktop.org/drm/intel/issues/3468
  [i915#3701]: https://gitlab.freedesktop.org/drm/intel/issues/3701
  [i915#3743]: https://gitlab.freedesktop.org/drm/intel/issues/3743
  [i915#3777]: https://gitlab.freedesktop.org/drm/intel/issues/3777
  [i915#3886]: https://gitlab.freedesktop.org/drm/intel/issues/3886
  [i915#4312]: https://gitlab.freedesktop.org/drm/intel/issues/4312
  [i915#456]: https://gitlab.freedesktop.org/drm/intel/issues/456
  [i915#533]: https://gitlab.freedesktop.org/drm/intel/issues/533
  [i915#602]: https://gitlab.freedesktop.org/drm/intel/issues/602
  [i915#636]: https://gitlab.freedesktop.org/drm/intel/issues/636
  [i915#644]: https://gitlab.freedesktop.org/drm/intel/issues/644
  [i915#658]: https://gitlab.freedesktop.org/drm/intel/issues/658
  [i915#79]: https://gitlab.freedesktop.org/drm/intel/issues/79
  [i915#92]: https://gitlab.freedesktop.org/drm/intel/issues/92


Build changes
-------------

  * Linux: CI_DRM_10792 -> Patchwork_21455

  CI-20190529: 20190529
  CI_DRM_10792: 299777ddcc06c9a0ea7b95a0823ccaca268d16b8 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_6262: d1c793b26e31cc6ae3f9fa3239805a9bbcc749fb @ https://gitlab.freedesktop.org/drm/igt-gpu-tools.git
  Patchwork_21455: 8d0300f5d14fc7a068b618ba28b6eabfb45b294b @ git://anongit.freedesktop.org/gfx-ci/linux
  piglit_4509: fdc

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21455/index.html

[-- Attachment #2: Type: text/html, Size: 38139 bytes --]

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 1/2] drm/i915/pmu: Add a name to the execlists stats
  2021-10-27  0:48 ` [Intel-gfx] " Umesh Nerlige Ramappa
@ 2021-10-27 18:56   ` Matthew Brost
  -1 siblings, 0 replies; 16+ messages in thread
From: Matthew Brost @ 2021-10-27 18:56 UTC (permalink / raw)
  To: Umesh Nerlige Ramappa
  Cc: intel-gfx, dri-devel, john.c.harrison, Tvrtko Ursulin, daniel.vetter

On Tue, Oct 26, 2021 at 05:48:20PM -0700, Umesh Nerlige Ramappa wrote:
> In preparation for GuC pmu stats, add a name to the execlists stats
> structure so that it can be differentiated from the GuC stats.
> 
> Signed-off-by: Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>
> ---
>  drivers/gpu/drm/i915/gt/intel_engine_cs.c    | 14 +++---
>  drivers/gpu/drm/i915/gt/intel_engine_stats.h | 33 +++++++------
>  drivers/gpu/drm/i915/gt/intel_engine_types.h | 52 +++++++++++---------
>  3 files changed, 53 insertions(+), 46 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
> index ff6753ccb129..2de396e34d83 100644
> --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
> +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
> @@ -363,7 +363,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id,
>  		DRIVER_CAPS(i915)->has_logical_contexts = true;
>  
>  	ewma__engine_latency_init(&engine->latency);
> -	seqcount_init(&engine->stats.lock);
> +	seqcount_init(&engine->stats.execlists.lock);
>  
>  	ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
>  
> @@ -1918,15 +1918,16 @@ void intel_engine_dump(struct intel_engine_cs *engine,
>  static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine,
>  					    ktime_t *now)
>  {
> -	ktime_t total = engine->stats.total;
> +	struct intel_engine_execlists_stats *stats = &engine->stats.execlists;
> +	ktime_t total = stats->total;
>  
>  	/*
>  	 * If the engine is executing something at the moment
>  	 * add it to the total.
>  	 */
>  	*now = ktime_get();
> -	if (READ_ONCE(engine->stats.active))
> -		total = ktime_add(total, ktime_sub(*now, engine->stats.start));
> +	if (READ_ONCE(stats->active))
> +		total = ktime_add(total, ktime_sub(*now, stats->start));
>  
>  	return total;
>  }
> @@ -1940,13 +1941,14 @@ static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine,
>   */
>  ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now)
>  {
> +	struct intel_engine_execlists_stats *stats = &engine->stats.execlists;
>  	unsigned int seq;
>  	ktime_t total;
>  
>  	do {
> -		seq = read_seqcount_begin(&engine->stats.lock);
> +		seq = read_seqcount_begin(&stats->lock);
>  		total = __intel_engine_get_busy_time(engine, now);
> -	} while (read_seqcount_retry(&engine->stats.lock, seq));
> +	} while (read_seqcount_retry(&stats->lock, seq));
>  
>  	return total;
>  }
> diff --git a/drivers/gpu/drm/i915/gt/intel_engine_stats.h b/drivers/gpu/drm/i915/gt/intel_engine_stats.h
> index 24fbdd94351a..8e762d683e50 100644
> --- a/drivers/gpu/drm/i915/gt/intel_engine_stats.h
> +++ b/drivers/gpu/drm/i915/gt/intel_engine_stats.h
> @@ -15,45 +15,46 @@
>  
>  static inline void intel_engine_context_in(struct intel_engine_cs *engine)
>  {
> +	struct intel_engine_execlists_stats *stats = &engine->stats.execlists;
>  	unsigned long flags;
>  
> -	if (engine->stats.active) {
> -		engine->stats.active++;
> +	if (stats->active) {
> +		stats->active++;
>  		return;
>  	}
>  
>  	/* The writer is serialised; but the pmu reader may be from hardirq */
>  	local_irq_save(flags);
> -	write_seqcount_begin(&engine->stats.lock);
> +	write_seqcount_begin(&stats->lock);
>  
> -	engine->stats.start = ktime_get();
> -	engine->stats.active++;
> +	stats->start = ktime_get();
> +	stats->active++;
>  
> -	write_seqcount_end(&engine->stats.lock);
> +	write_seqcount_end(&stats->lock);
>  	local_irq_restore(flags);
>  
> -	GEM_BUG_ON(!engine->stats.active);
> +	GEM_BUG_ON(!stats->active);
>  }
>  
>  static inline void intel_engine_context_out(struct intel_engine_cs *engine)
>  {
> +	struct intel_engine_execlists_stats *stats = &engine->stats.execlists;
>  	unsigned long flags;
>  
> -	GEM_BUG_ON(!engine->stats.active);
> -	if (engine->stats.active > 1) {
> -		engine->stats.active--;
> +	GEM_BUG_ON(!stats->active);
> +	if (stats->active > 1) {
> +		stats->active--;
>  		return;
>  	}
>  
>  	local_irq_save(flags);
> -	write_seqcount_begin(&engine->stats.lock);
> +	write_seqcount_begin(&stats->lock);
>  
> -	engine->stats.active--;
> -	engine->stats.total =
> -		ktime_add(engine->stats.total,
> -			  ktime_sub(ktime_get(), engine->stats.start));
> +	stats->active--;
> +	stats->total = ktime_add(stats->total,
> +				 ktime_sub(ktime_get(), stats->start));
>  
> -	write_seqcount_end(&engine->stats.lock);
> +	write_seqcount_end(&stats->lock);
>  	local_irq_restore(flags);
>  }
>  
> diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
> index e0f773585c29..24fa7fb0e7de 100644
> --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
> +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
> @@ -257,6 +257,33 @@ struct intel_engine_execlists {
>  
>  #define INTEL_ENGINE_CS_MAX_NAME 8
>  
> +struct intel_engine_execlists_stats {
> +	/**
> +	 * @active: Number of contexts currently scheduled in.
> +	 */
> +	unsigned int active;
> +
> +	/**
> +	 * @lock: Lock protecting the below fields.
> +	 */
> +	seqcount_t lock;
> +
> +	/**
> +	 * @total: Total time this engine was busy.
> +	 *
> +	 * Accumulated time not counting the most recent block in cases where
> +	 * engine is currently busy (active > 0).
> +	 */
> +	ktime_t total;
> +
> +	/**
> +	 * @start: Timestamp of the last idle to active transition.
> +	 *
> +	 * Idle is defined as active == 0, active is active > 0.
> +	 */
> +	ktime_t start;
> +};
> +
>  struct intel_engine_cs {
>  	struct drm_i915_private *i915;
>  	struct intel_gt *gt;
> @@ -488,30 +515,7 @@ struct intel_engine_cs {
>  	u32 (*get_cmd_length_mask)(u32 cmd_header);
>  
>  	struct {
> -		/**
> -		 * @active: Number of contexts currently scheduled in.
> -		 */
> -		unsigned int active;
> -
> -		/**
> -		 * @lock: Lock protecting the below fields.
> -		 */
> -		seqcount_t lock;
> -
> -		/**
> -		 * @total: Total time this engine was busy.
> -		 *
> -		 * Accumulated time not counting the most recent block in cases
> -		 * where engine is currently busy (active > 0).
> -		 */
> -		ktime_t total;
> -
> -		/**
> -		 * @start: Timestamp of the last idle to active transition.
> -		 *
> -		 * Idle is defined as active == 0, active is active > 0.
> -		 */
> -		ktime_t start;
> +		struct intel_engine_execlists_stats execlists;

I'd probably just define this sub-structure inline rather than defining
it above, e.g.:

struct {
	struct {
		fields...
	} execlists;
} stats;

Then the follow up patch:

union {
	struct {
		fields...
	} execlists;
	struct {
		fields...
	} guc;
} stats;

Or if there are common fields:

struct {
	common fields...
	struct {
		fields...
	} execlists;
	struct {
		fields...
	} guc;
} stats;

That being said, if you prefer to leave it as is, that's fine too.
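
FWIW, filled in with the field names from the hunk above (plus the GuC
fields from the follow-up patch), the union variant could look roughly
like this (just a sketch, not a requirement):

struct {
	union {
		struct {
			unsigned int active;	/* contexts currently scheduled in */
			seqcount_t lock;	/* protects the fields below */
			ktime_t total;		/* accumulated busy time */
			ktime_t start;		/* last idle -> active transition */
		} execlists;
		struct {
			bool running;
			u32 prev_total;
			u64 total_gt_clks;
			u64 start_gt_clk;
		} guc;
	};

	/* common fields (e.g. the rps sample) stay outside the union */
	...
} stats;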

With that:
Reviewed-by: Matthew Brost <matthew.brost@intel.com>

>  
>  		/**
>  		 * @rps: Utilisation at last RPS sampling.
> -- 
> 2.20.1
> 

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 2/2] drm/i915/pmu: Connect engine busyness stats from GuC to pmu
  2021-10-27  0:48   ` [Intel-gfx] " Umesh Nerlige Ramappa
@ 2021-10-27 20:02     ` Matthew Brost
  -1 siblings, 0 replies; 16+ messages in thread
From: Matthew Brost @ 2021-10-27 20:02 UTC (permalink / raw)
  To: Umesh Nerlige Ramappa
  Cc: intel-gfx, dri-devel, john.c.harrison, Tvrtko Ursulin, daniel.vetter

On Tue, Oct 26, 2021 at 05:48:21PM -0700, Umesh Nerlige Ramappa wrote:
> With GuC handling scheduling, i915 is not aware of the time that a
> context is scheduled in and out of the engine. Since i915 pmu relies on
> this info to provide engine busyness to the user, GuC shares this info
> with i915 for all engines using shared memory. For each engine, this
> info contains:
> 
> - total busyness: total time that the context was running (total)
> - id: id of the running context (id)
> - start timestamp: timestamp when the context started running (start)
> 
> At the time (now) of sampling the engine busyness, if the id is valid
> (!= ~0), and start is non-zero, then the context is considered to be
> active and the engine busyness is calculated using the below equation
> 
> 	engine busyness = total + (now - start)
> 
> All times are obtained from the gt clock base. For inactive contexts,
> engine busyness is just equal to the total.
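
(Illustrative only: mapping the prose names above onto the
guc_engine_usage_record fields used later in this patch, the sampling
boils down to something like)

	if (rec->current_context_index != ~0U && rec->last_switch_in_stamp)
		busyness = rec->total_runtime + (now - rec->last_switch_in_stamp);
	else
		busyness = rec->total_runtime;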
> 
> The start and total values provided by GuC are 32 bits and wrap around
> in a few minutes. Since perf pmu provides busyness as 64 bit
> monotonically increasing values, there is a need for this implementation
> to account for overflows and extend the time to 64 bits before returning
> busyness to the user. In order to do that, a worker runs periodically at
> frequency = 1/8th the time it takes for the timestamp to wrap. As an
> example, that would be once in 27 seconds for a gt clock frequency of
> 19.2 MHz.
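
(As a quick sanity check of the 27 second figure, using the 19.2 MHz
example above; illustrative arithmetic, not driver code:)

	u64 clk_hz = 19200000;				/* example gt clock frequency */
	u64 wrap_s = ((u64)U32_MAX + 1) / clk_hz;	/* ~223 s until the 32 bit stamp wraps */
	u64 ping_s = wrap_s / 8;			/* ~27 s worker period */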
> 
> Note:
> There might be an over-accounting of busyness due to the fact that GuC
> may be updating the total and start values while kmd is reading them.
> (i.e kmd may read the updated total and the stale start). In such a
> case, user may see higher busyness value followed by smaller ones which
> would eventually catch up to the higher value.
> 
> v2: (Tvrtko)
> - Include details in commit message
> - Move intel engine busyness function into execlist code
> - Use union inside engine->stats
> - Use natural type for ping delay jiffies
> - Drop active_work condition checks
> - Use for_each_engine if iterating all engines
> - Drop seq locking, use spinlock at GuC level to update engine stats
> - Document worker specific details
> 
> v3: (Tvrtko/Umesh)
> - Demarcate GuC and execlist stat objects with comments
> - Document known over-accounting issue in commit
> - Provide a consistent view of GuC state
> - Add hooks to gt park/unpark for GuC busyness
> - Stop/start worker in gt park/unpark path
> - Drop inline
> - Move spinlock and worker inits to GuC initialization
> - Drop helpers that are called only once
> 
> v4: (Tvrtko/Matt/Umesh)
> - Drop addressed opens from commit message
> - Get runtime pm in ping, remove from the park path
> - Use cancel_delayed_work_sync in disable_submission path
> - Update stats during reset prepare
> - Skip ping if reset in progress
> - Explicitly name execlists and GuC stats objects
> - Since disable_submission is called from many places, move resetting
>   stats to intel_guc_submission_reset_prepare
> 
> v5: (Tvrtko)
> - Add a trylock helper that does not sleep and synchronize PMU event
>   callbacks and worker with gt reset
> 
> v6: (CI BAT failures)
> - DUTs using execlist submission failed to boot since __gt_unpark is
>   called during i915 load. This ends up calling the GuC busyness unpark
>   hook and results in kick-starting an uninitialized worker. Let
>   park/unpark hooks check if GuC submission has been initialized.
> - drop cant_sleep() from trylock helper since rcu_read_lock takes care
>   of that.
> 
> v7: (CI) Fix igt@i915_selftest@live@gt_engines
> - For GuC mode of submission the engine busyness is derived from gt time
>   domain. Use gt time elapsed as reference in the selftest.
> - Increase busyness calculation to 10ms duration to ensure batch runs
>   longer and falls within the busyness tolerances in selftest.
> 
> v8:
> - Use ktime_get in selftest as before
> - intel_reset_trylock_no_wait results in a lockdep splat that is not
>   trivial to fix since the PMU callback runs in irq context and the
>   reset paths are tightly knit into the driver. The test that uncovers
>   this is igt@perf_pmu@faulting-read. Drop intel_reset_trylock_no_wait,
>   instead use the reset_count to synchronize with gt reset during pmu
>   callback. For the ping, continue to use intel_reset_trylock since ping
>   is not run in irq context.
> 
> - GuC PM timestamp does not tick when GuC is idle. This can potentially
>   result in wrong busyness values when a context is active on the
>   engine, but GuC is idle. Use the RING TIMESTAMP as GPU timestamp to
>   process the GuC busyness stats. This works since both GuC timestamp and
>   RING timestamp are synced with the same clock.
> 
> - The busyness stats may get updated after the batch starts running.
>   This delay causes the busyness reported for 100us duration to fall
>   below 95% in the selftest. The only option at this time is to wait for
>   GuC busyness to change from idle to active before we sample busyness
>   over a 100us period.
> 
> Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
> Signed-off-by: Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>
> Acked-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> ---
>  drivers/gpu/drm/i915/gt/intel_engine_cs.c     |  28 +-
>  drivers/gpu/drm/i915/gt/intel_engine_types.h  |  33 ++-
>  .../drm/i915/gt/intel_execlists_submission.c  |  34 +++
>  drivers/gpu/drm/i915/gt/intel_gt_pm.c         |   2 +
>  drivers/gpu/drm/i915/gt/selftest_engine_pm.c  |  33 +++
>  .../gpu/drm/i915/gt/uc/abi/guc_actions_abi.h  |   1 +
>  drivers/gpu/drm/i915/gt/uc/intel_guc.h        |  30 ++
>  drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c    |  21 ++
>  drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h    |   5 +
>  drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h   |  13 +
>  .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 277 ++++++++++++++++++
>  .../gpu/drm/i915/gt/uc/intel_guc_submission.h |   2 +
>  drivers/gpu/drm/i915/i915_reg.h               |   2 +
>  13 files changed, 453 insertions(+), 28 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
> index 2de396e34d83..332756036007 100644
> --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
> +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
> @@ -1915,23 +1915,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
>  	intel_engine_print_breadcrumbs(engine, m);
>  }
>  
> -static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine,
> -					    ktime_t *now)
> -{
> -	struct intel_engine_execlists_stats *stats = &engine->stats.execlists;
> -	ktime_t total = stats->total;
> -
> -	/*
> -	 * If the engine is executing something at the moment
> -	 * add it to the total.
> -	 */
> -	*now = ktime_get();
> -	if (READ_ONCE(stats->active))
> -		total = ktime_add(total, ktime_sub(*now, stats->start));
> -
> -	return total;
> -}
> -
>  /**
>   * intel_engine_get_busy_time() - Return current accumulated engine busyness
>   * @engine: engine to report on
> @@ -1941,16 +1924,7 @@ static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine,
>   */
>  ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now)
>  {
> -	struct intel_engine_execlists_stats *stats = &engine->stats.execlists;
> -	unsigned int seq;
> -	ktime_t total;
> -
> -	do {
> -		seq = read_seqcount_begin(&stats->lock);
> -		total = __intel_engine_get_busy_time(engine, now);
> -	} while (read_seqcount_retry(&stats->lock, seq));
> -
> -	return total;
> +	return engine->busyness(engine, now);
>  }
>  
>  struct intel_context *
> diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
> index 24fa7fb0e7de..5732e0d71513 100644
> --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
> +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
> @@ -284,6 +284,28 @@ struct intel_engine_execlists_stats {
>  	ktime_t start;
>  };
>  
> +struct intel_engine_guc_stats {
> +	/**
> +	 * @running: Active state of the engine when busyness was last sampled.
> +	 */
> +	bool running;
> +
> +	/**
> +	 * @prev_total: Previous value of total runtime clock cycles.
> +	 */
> +	u32 prev_total;
> +
> +	/**
> +	 * @total_gt_clks: Total gt clock cycles this engine was busy.
> +	 */
> +	u64 total_gt_clks;
> +
> +	/**
> +	 * @start_gt_clk: GT clock time of last idle to active transition.
> +	 */
> +	u64 start_gt_clk;
> +};
> +
>  struct intel_engine_cs {
>  	struct drm_i915_private *i915;
>  	struct intel_gt *gt;
> @@ -466,6 +488,12 @@ struct intel_engine_cs {
>  	void		(*add_active_request)(struct i915_request *rq);
>  	void		(*remove_active_request)(struct i915_request *rq);
>  
> +	/*
> +	 * Get engine busyness and the time at which the busyness was sampled.
> +	 */
> +	ktime_t		(*busyness)(struct intel_engine_cs *engine,
> +				    ktime_t *now);
> +
>  	struct intel_engine_execlists execlists;
>  
>  	/*
> @@ -515,7 +543,10 @@ struct intel_engine_cs {
>  	u32 (*get_cmd_length_mask)(u32 cmd_header);
>  
>  	struct {
> -		struct intel_engine_execlists_stats execlists;
> +		union {
> +			struct intel_engine_execlists_stats execlists;
> +			struct intel_engine_guc_stats guc;
> +		};
>  
>  		/**
>  		 * @rps: Utilisation at last RPS sampling.
> diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
> index bedb80057046..ca03880fa7e4 100644
> --- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
> +++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
> @@ -3293,6 +3293,38 @@ static void execlists_release(struct intel_engine_cs *engine)
>  	lrc_fini_wa_ctx(engine);
>  }
>  
> +static ktime_t __execlists_engine_busyness(struct intel_engine_cs *engine,
> +					   ktime_t *now)
> +{
> +	struct intel_engine_execlists_stats *stats = &engine->stats.execlists;
> +	ktime_t total = stats->total;
> +
> +	/*
> +	 * If the engine is executing something at the moment
> +	 * add it to the total.
> +	 */
> +	*now = ktime_get();
> +	if (READ_ONCE(stats->active))
> +		total = ktime_add(total, ktime_sub(*now, stats->start));
> +
> +	return total;
> +}
> +
> +static ktime_t execlists_engine_busyness(struct intel_engine_cs *engine,
> +					 ktime_t *now)
> +{
> +	struct intel_engine_execlists_stats *stats = &engine->stats.execlists;
> +	unsigned int seq;
> +	ktime_t total;
> +
> +	do {
> +		seq = read_seqcount_begin(&stats->lock);
> +		total = __execlists_engine_busyness(engine, now);
> +	} while (read_seqcount_retry(&stats->lock, seq));
> +
> +	return total;
> +}
> +
>  static void
>  logical_ring_default_vfuncs(struct intel_engine_cs *engine)
>  {
> @@ -3349,6 +3381,8 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
>  		engine->emit_bb_start = gen8_emit_bb_start;
>  	else
>  		engine->emit_bb_start = gen8_emit_bb_start_noarb;
> +
> +	engine->busyness = execlists_engine_busyness;
>  }
>  
>  static void logical_ring_default_irqs(struct intel_engine_cs *engine)
> diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
> index 524eaf678790..b4a8594bc46c 100644
> --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
> +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
> @@ -86,6 +86,7 @@ static int __gt_unpark(struct intel_wakeref *wf)
>  	intel_rc6_unpark(&gt->rc6);
>  	intel_rps_unpark(&gt->rps);
>  	i915_pmu_gt_unparked(i915);
> +	intel_guc_busyness_unpark(gt);
>  
>  	intel_gt_unpark_requests(gt);
>  	runtime_begin(gt);
> @@ -104,6 +105,7 @@ static int __gt_park(struct intel_wakeref *wf)
>  	runtime_end(gt);
>  	intel_gt_park_requests(gt);
>  
> +	intel_guc_busyness_park(gt);
>  	i915_vma_parked(gt);
>  	i915_pmu_gt_parked(i915);
>  	intel_rps_park(&gt->rps);
> diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
> index 75569666105d..0bfd738dbf3a 100644
> --- a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
> +++ b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
> @@ -214,6 +214,31 @@ static int live_engine_timestamps(void *arg)
>  	return 0;
>  }
>  
> +static int __spin_until_busier(struct intel_engine_cs *engine, ktime_t busyness)
> +{
> +	ktime_t start, unused, dt;
> +
> +	if (!intel_engine_uses_guc(engine))
> +		return 0;
> +
> +	/*
> +	 * In GuC mode of submission, the busyness stats may get updated after
> +	 * the batch starts running. Poll for a change in busyness and timeout
> +	 * after 500 us.
> +	 */
> +	start = ktime_get();
> +	while (intel_engine_get_busy_time(engine, &unused) == busyness) {
> +		dt = ktime_get() - start;
> +		if (dt > 500000) {
> +			pr_err("active wait timed out %lld\n", dt);
> +			ENGINE_TRACE(engine, "active wait time out %lld\n", dt);
> +			return -ETIME;
> +		}
> +	}
> +
> +	return 0;
> +}
> +
>  static int live_engine_busy_stats(void *arg)
>  {
>  	struct intel_gt *gt = arg;
> @@ -232,6 +257,7 @@ static int live_engine_busy_stats(void *arg)
>  	GEM_BUG_ON(intel_gt_pm_is_awake(gt));
>  	for_each_engine(engine, gt, id) {
>  		struct i915_request *rq;
> +		ktime_t busyness, dummy;
>  		ktime_t de, dt;
>  		ktime_t t[2];
>  
> @@ -274,12 +300,19 @@ static int live_engine_busy_stats(void *arg)
>  		}
>  		i915_request_add(rq);
>  
> +		busyness = intel_engine_get_busy_time(engine, &dummy);
>  		if (!igt_wait_for_spinner(&spin, rq)) {
>  			intel_gt_set_wedged(engine->gt);
>  			err = -ETIME;
>  			goto end;
>  		}
>  
> +		err = __spin_until_busier(engine, busyness);
> +		if (err) {
> +			GEM_TRACE_DUMP();
> +			goto end;
> +		}
> +
>  		ENGINE_TRACE(engine, "measuring busy time\n");
>  		preempt_disable();
>  		de = intel_engine_get_busy_time(engine, &t[0]);
> diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
> index ba10bd374cee..fe5d7d261797 100644
> --- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
> +++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
> @@ -144,6 +144,7 @@ enum intel_guc_action {
>  	INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE = 0x4600,
>  	INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC = 0x4601,
>  	INTEL_GUC_ACTION_RESET_CLIENT = 0x5507,
> +	INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF = 0x550A,
>  	INTEL_GUC_ACTION_LIMIT
>  };
>  
> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
> index 31cf9fb48c7e..1cb46098030d 100644
> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
> @@ -138,6 +138,8 @@ struct intel_guc {
>  	u32 ads_regset_size;
>  	/** @ads_golden_ctxt_size: size of the golden contexts in the ADS */
>  	u32 ads_golden_ctxt_size;
> +	/** @ads_engine_usage_size: size of engine usage in the ADS */
> +	u32 ads_engine_usage_size;
>  
>  	/** @lrc_desc_pool: object allocated to hold the GuC LRC descriptor pool */
>  	struct i915_vma *lrc_desc_pool;
> @@ -172,6 +174,34 @@ struct intel_guc {
>  
>  	/** @send_mutex: used to serialize the intel_guc_send actions */
>  	struct mutex send_mutex;
> +
> +	/**
> +	 * @timestamp: GT timestamp object that stores a copy of the timestamp
> +	 * and adjusts it for overflow using a worker.
> +	 */
> +	struct {
> +		/**
> +		 * @lock: Lock protecting the below fields and the engine stats.
> +		 */
> +		spinlock_t lock;
> +
> +		/**
> +		 * @gt_stamp: 64 bit extended value of the GT timestamp.
> +		 */
> +		u64 gt_stamp;
> +
> +		/**
> +		 * @ping_delay: Period for polling the GT timestamp for
> +		 * overflow.
> +		 */
> +		unsigned long ping_delay;
> +
> +		/**
> +		 * @work: Periodic work to adjust GT timestamp, engine and
> +		 * context usage for overflows.
> +		 */
> +		struct delayed_work work;
> +	} timestamp;
>  };
>  
>  static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
> index 621c893a009f..1a1edae67e4e 100644
> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
> @@ -26,6 +26,8 @@
>   *      | guc_policies                          |
>   *      +---------------------------------------+
>   *      | guc_gt_system_info                    |
> + *      +---------------------------------------+
> + *      | guc_engine_usage                      |
>   *      +---------------------------------------+ <== static
>   *      | guc_mmio_reg[countA] (engine 0.0)     |
>   *      | guc_mmio_reg[countB] (engine 0.1)     |
> @@ -47,6 +49,7 @@ struct __guc_ads_blob {
>  	struct guc_ads ads;
>  	struct guc_policies policies;
>  	struct guc_gt_system_info system_info;
> +	struct guc_engine_usage engine_usage;
>  	/* From here on, location is dynamic! Refer to above diagram. */
>  	struct guc_mmio_reg regset[0];
>  } __packed;
> @@ -628,3 +631,21 @@ void intel_guc_ads_reset(struct intel_guc *guc)
>  
>  	guc_ads_private_data_reset(guc);
>  }
> +
> +u32 intel_guc_engine_usage_offset(struct intel_guc *guc)
> +{
> +	struct __guc_ads_blob *blob = guc->ads_blob;
> +	u32 base = intel_guc_ggtt_offset(guc, guc->ads_vma);
> +	u32 offset = base + ptr_offset(blob, engine_usage);
> +
> +	return offset;
> +}
> +
> +struct guc_engine_usage_record *intel_guc_engine_usage(struct intel_engine_cs *engine)
> +{
> +	struct intel_guc *guc = &engine->gt->uc.guc;
> +	struct __guc_ads_blob *blob = guc->ads_blob;
> +	u8 guc_class = engine_class_to_guc_class(engine->class);
> +
> +	return &blob->engine_usage.engines[guc_class][ilog2(engine->logical_mask)];
> +}
> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h
> index 3d85051d57e4..e74c110facff 100644
> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h
> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h
> @@ -6,8 +6,11 @@
>  #ifndef _INTEL_GUC_ADS_H_
>  #define _INTEL_GUC_ADS_H_
>  
> +#include <linux/types.h>
> +
>  struct intel_guc;
>  struct drm_printer;
> +struct intel_engine_cs;
>  
>  int intel_guc_ads_create(struct intel_guc *guc);
>  void intel_guc_ads_destroy(struct intel_guc *guc);
> @@ -15,5 +18,7 @@ void intel_guc_ads_init_late(struct intel_guc *guc);
>  void intel_guc_ads_reset(struct intel_guc *guc);
>  void intel_guc_ads_print_policy_info(struct intel_guc *guc,
>  				     struct drm_printer *p);
> +struct guc_engine_usage_record *intel_guc_engine_usage(struct intel_engine_cs *engine);
> +u32 intel_guc_engine_usage_offset(struct intel_guc *guc);
>  
>  #endif
> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
> index 722933e26347..7072e30e99f4 100644
> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
> @@ -294,6 +294,19 @@ struct guc_ads {
>  	u32 reserved[15];
>  } __packed;
>  
> +/* Engine usage stats */
> +struct guc_engine_usage_record {
> +	u32 current_context_index;
> +	u32 last_switch_in_stamp;
> +	u32 reserved0;
> +	u32 total_runtime;
> +	u32 reserved1[4];
> +} __packed;
> +
> +struct guc_engine_usage {
> +	struct guc_engine_usage_record engines[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];

Again, as I mentioned in the previous patch, I'd define this
sub-structure inline. But that is just my opinion and doesn't really
matter. I believe I understand everything else this patch is doing and
it looks good to me.
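
For illustration, the inline variant would be roughly the below, reusing
the names from the guc_ads.c hunk above (sketch only):

struct __guc_ads_blob {
	struct guc_ads ads;
	struct guc_policies policies;
	struct guc_gt_system_info system_info;
	/* engine usage folded in directly instead of a named struct */
	struct {
		struct guc_engine_usage_record engines[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];
	} engine_usage;
	/* From here on, location is dynamic! */
	struct guc_mmio_reg regset[0];
} __packed;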

With that:
Reviewed-by: Matthew Brost <matthew.brost@intel.com> 

> +} __packed;
> +
>  /* GuC logging structures */
>  
>  enum guc_log_buffer_type {
> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> index 38b47e73e35d..5cc49c0b3889 100644
> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> @@ -13,6 +13,7 @@
>  #include "gt/intel_engine_heartbeat.h"
>  #include "gt/intel_gpu_commands.h"
>  #include "gt/intel_gt.h"
> +#include "gt/intel_gt_clock_utils.h"
>  #include "gt/intel_gt_irq.h"
>  #include "gt/intel_gt_pm.h"
>  #include "gt/intel_gt_requests.h"
> @@ -21,6 +22,7 @@
>  #include "gt/intel_mocs.h"
>  #include "gt/intel_ring.h"
>  
> +#include "intel_guc_ads.h"
>  #include "intel_guc_submission.h"
>  
>  #include "i915_drv.h"
> @@ -1077,6 +1079,272 @@ static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc)
>  	xa_unlock_irqrestore(&guc->context_lookup, flags);
>  }
>  
> +/*
> + * GuC stores busyness stats for each engine at context in/out boundaries. A
> + * context 'in' logs execution start time, 'out' adds in -> out delta to total.
> + * i915/kmd accesses 'start', 'total' and 'context id' from memory shared with
> + * GuC.
> + *
> + * __i915_pmu_event_read samples engine busyness. When sampling, if context id
> + * is valid (!= ~0) and start is non-zero, the engine is considered to be
> + * active. For an active engine total busyness = total + (now - start), where
> + * 'now' is the time at which the busyness is sampled. For inactive engine,
> + * total busyness = total.
> + *
> + * All times are captured from GUCPMTIMESTAMP reg and are in gt clock domain.
> + *
> + * The start and total values provided by GuC are 32 bits and wrap around in a
> + * few minutes. Since perf pmu provides busyness as 64 bit monotonically
> + * increasing ns values, there is a need for this implementation to account for
> + * overflows and extend the GuC provided values to 64 bits before returning
> + * busyness to the user. In order to do that, a worker runs periodically at
> + * frequency = 1/8th the time it takes for the timestamp to wrap (i.e. once in
> + * 27 seconds for a gt clock frequency of 19.2 MHz).
> + */
> +
> +#define WRAP_TIME_CLKS U32_MAX
> +#define POLL_TIME_CLKS (WRAP_TIME_CLKS >> 3)
> +
> +static void
> +__extend_last_switch(struct intel_guc *guc, u64 *prev_start, u32 new_start)
> +{
> +	u32 gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp);
> +	u32 gt_stamp_last = lower_32_bits(guc->timestamp.gt_stamp);
> +
> +	if (new_start == lower_32_bits(*prev_start))
> +		return;
> +
> +	if (new_start < gt_stamp_last &&
> +	    (new_start - gt_stamp_last) <= POLL_TIME_CLKS)
> +		gt_stamp_hi++;
> +
> +	if (new_start > gt_stamp_last &&
> +	    (gt_stamp_last - new_start) <= POLL_TIME_CLKS && gt_stamp_hi)
> +		gt_stamp_hi--;
> +
> +	*prev_start = ((u64)gt_stamp_hi << 32) | new_start;
> +}
> +
> +static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
> +{
> +	struct guc_engine_usage_record *rec = intel_guc_engine_usage(engine);
> +	struct intel_engine_guc_stats *stats = &engine->stats.guc;
> +	struct intel_guc *guc = &engine->gt->uc.guc;
> +	u32 last_switch = rec->last_switch_in_stamp;
> +	u32 ctx_id = rec->current_context_index;
> +	u32 total = rec->total_runtime;
> +
> +	lockdep_assert_held(&guc->timestamp.lock);
> +
> +	stats->running = ctx_id != ~0U && last_switch;
> +	if (stats->running)
> +		__extend_last_switch(guc, &stats->start_gt_clk, last_switch);
> +
> +	/*
> +	 * Instead of adjusting the total for overflow, just add the
> +	 * difference from previous sample stats->total_gt_clks
> +	 */
> +	if (total && total != ~0U) {
> +		stats->total_gt_clks += (u32)(total - stats->prev_total);
> +		stats->prev_total = total;
> +	}
> +}
> +
> +static void guc_update_pm_timestamp(struct intel_guc *guc,
> +				    struct intel_engine_cs *engine,
> +				    ktime_t *now)
> +{
> +	u32 gt_stamp_now, gt_stamp_hi;
> +
> +	lockdep_assert_held(&guc->timestamp.lock);
> +
> +	gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp);
> +	gt_stamp_now = intel_uncore_read(engine->uncore,
> +					 RING_TIMESTAMP(engine->mmio_base));
> +	*now = ktime_get();
> +
> +	if (gt_stamp_now < lower_32_bits(guc->timestamp.gt_stamp))
> +		gt_stamp_hi++;
> +
> +	guc->timestamp.gt_stamp = ((u64)gt_stamp_hi << 32) | gt_stamp_now;
> +}
> +
> +/*
> + * Unlike the execlist mode of submission total and active times are in terms of
> + * gt clocks. The *now parameter is retained to return the cpu time at which the
> + * busyness was sampled.
> + */
> +static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
> +{
> +	struct intel_engine_guc_stats stats_saved, *stats = &engine->stats.guc;
> +	struct i915_gpu_error *gpu_error = &engine->i915->gpu_error;
> +	struct intel_gt *gt = engine->gt;
> +	struct intel_guc *guc = &gt->uc.guc;
> +	u64 total, gt_stamp_saved;
> +	unsigned long flags;
> +	u32 reset_count;
> +
> +	spin_lock_irqsave(&guc->timestamp.lock, flags);
> +
> +	/*
> +	 * If a reset happened, we risk reading partially updated
> +	 * engine busyness from GuC, so we just use the driver stored
> +	 * copy of busyness. Synchronize with gt reset using reset_count.
> +	 */
> +	reset_count = i915_reset_count(gpu_error);
> +
> +	*now = ktime_get();
> +
> +	/*
> +	 * The active busyness depends on start_gt_clk and gt_stamp.
> +	 * gt_stamp is updated by i915 only when gt is awake and the
> +	 * start_gt_clk is derived from GuC state. To get a consistent
> +	 * view of activity, we query the GuC state only if gt is awake.
> +	 */
> +	stats_saved = *stats;
> +	gt_stamp_saved = guc->timestamp.gt_stamp;
> +	if (intel_gt_pm_get_if_awake(gt)) {
> +		guc_update_engine_gt_clks(engine);
> +		guc_update_pm_timestamp(guc, engine, now);
> +		intel_gt_pm_put_async(gt);
> +		if (i915_reset_count(gpu_error) != reset_count) {
> +			*stats = stats_saved;
> +			guc->timestamp.gt_stamp = gt_stamp_saved;
> +		}
> +	}
> +
> +	total = intel_gt_clock_interval_to_ns(gt, stats->total_gt_clks);
> +	if (stats->running) {
> +		u64 clk = guc->timestamp.gt_stamp - stats->start_gt_clk;
> +
> +		total += intel_gt_clock_interval_to_ns(gt, clk);
> +	}
> +
> +	spin_unlock_irqrestore(&guc->timestamp.lock, flags);
> +
> +	return ns_to_ktime(total);
> +}
> +
> +static void __reset_guc_busyness_stats(struct intel_guc *guc)
> +{
> +	struct intel_gt *gt = guc_to_gt(guc);
> +	struct intel_engine_cs *engine;
> +	enum intel_engine_id id;
> +	unsigned long flags;
> +	ktime_t unused;
> +
> +	cancel_delayed_work_sync(&guc->timestamp.work);
> +
> +	spin_lock_irqsave(&guc->timestamp.lock, flags);
> +
> +	for_each_engine(engine, gt, id) {
> +		guc_update_pm_timestamp(guc, engine, &unused);
> +		guc_update_engine_gt_clks(engine);
> +		engine->stats.guc.prev_total = 0;
> +	}
> +
> +	spin_unlock_irqrestore(&guc->timestamp.lock, flags);
> +}
> +
> +static void __update_guc_busyness_stats(struct intel_guc *guc)
> +{
> +	struct intel_gt *gt = guc_to_gt(guc);
> +	struct intel_engine_cs *engine;
> +	enum intel_engine_id id;
> +	ktime_t unused;
> +
> +	for_each_engine(engine, gt, id) {
> +		guc_update_pm_timestamp(guc, engine, &unused);
> +		guc_update_engine_gt_clks(engine);
> +	}
> +}
> +
> +static void guc_timestamp_ping(struct work_struct *wrk)
> +{
> +	struct intel_guc *guc = container_of(wrk, typeof(*guc),
> +					     timestamp.work.work);
> +	struct intel_uc *uc = container_of(guc, typeof(*uc), guc);
> +	struct intel_gt *gt = guc_to_gt(guc);
> +	intel_wakeref_t wakeref;
> +	unsigned long flags;
> +	int srcu, ret;
> +
> +	/*
> +	 * Synchronize with gt reset to make sure the worker does not
> +	 * corrupt the engine/guc stats.
> +	 */
> +	ret = intel_gt_reset_trylock(gt, &srcu);
> +	if (ret)
> +		return;
> +
> +	spin_lock_irqsave(&guc->timestamp.lock, flags);
> +
> +	with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
> +		__update_guc_busyness_stats(guc);
> +
> +	spin_unlock_irqrestore(&guc->timestamp.lock, flags);
> +
> +	intel_gt_reset_unlock(gt, srcu);
> +
> +	mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
> +			 guc->timestamp.ping_delay);
> +}
> +
> +static int guc_action_enable_usage_stats(struct intel_guc *guc)
> +{
> +	u32 offset = intel_guc_engine_usage_offset(guc);
> +	u32 action[] = {
> +		INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF,
> +		offset,
> +		0,
> +	};
> +
> +	return intel_guc_send(guc, action, ARRAY_SIZE(action));
> +}
> +
> +static void guc_init_engine_stats(struct intel_guc *guc)
> +{
> +	struct intel_gt *gt = guc_to_gt(guc);
> +	intel_wakeref_t wakeref;
> +
> +	mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
> +			 guc->timestamp.ping_delay);
> +
> +	with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref) {
> +		int ret = guc_action_enable_usage_stats(guc);
> +
> +		if (ret)
> +			drm_err(&gt->i915->drm,
> +				"Failed to enable usage stats: %d!\n", ret);
> +	}
> +}
> +
> +void intel_guc_busyness_park(struct intel_gt *gt)
> +{
> +	struct intel_guc *guc = &gt->uc.guc;
> +	unsigned long flags;
> +
> +	if (!guc_submission_initialized(guc))
> +		return;
> +
> +	cancel_delayed_work(&guc->timestamp.work);
> +
> +	spin_lock_irqsave(&guc->timestamp.lock, flags);
> +	__update_guc_busyness_stats(guc);
> +	spin_unlock_irqrestore(&guc->timestamp.lock, flags);
> +}
> +
> +void intel_guc_busyness_unpark(struct intel_gt *gt)
> +{
> +	struct intel_guc *guc = &gt->uc.guc;
> +
> +	if (!guc_submission_initialized(guc))
> +		return;
> +
> +	mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
> +			 guc->timestamp.ping_delay);
> +}
> +
>  static inline bool
>  submission_disabled(struct intel_guc *guc)
>  {
> @@ -1138,6 +1406,7 @@ void intel_guc_submission_reset_prepare(struct intel_guc *guc)
>  	intel_gt_park_heartbeats(guc_to_gt(guc));
>  	disable_submission(guc);
>  	guc->interrupts.disable(guc);
> +	__reset_guc_busyness_stats(guc);
>  
>  	/* Flush IRQ handler */
>  	spin_lock_irq(&guc_to_gt(guc)->irq_lock);
> @@ -1484,6 +1753,7 @@ static void destroyed_worker_func(struct work_struct *w);
>   */
>  int intel_guc_submission_init(struct intel_guc *guc)
>  {
> +	struct intel_gt *gt = guc_to_gt(guc);
>  	int ret;
>  
>  	if (guc->lrc_desc_pool)
> @@ -1512,6 +1782,10 @@ int intel_guc_submission_init(struct intel_guc *guc)
>  	if (!guc->submission_state.guc_ids_bitmap)
>  		return -ENOMEM;
>  
> +	spin_lock_init(&guc->timestamp.lock);
> +	INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping);
> +	guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
> +
>  	return 0;
>  }
>  
> @@ -3369,7 +3643,9 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine)
>  		engine->emit_flush = gen12_emit_flush_xcs;
>  	}
>  	engine->set_default_submission = guc_set_default_submission;
> +	engine->busyness = guc_engine_busyness;
>  
> +	engine->flags |= I915_ENGINE_SUPPORTS_STATS;
>  	engine->flags |= I915_ENGINE_HAS_PREEMPTION;
>  	engine->flags |= I915_ENGINE_HAS_TIMESLICES;
>  
> @@ -3468,6 +3744,7 @@ int intel_guc_submission_setup(struct intel_engine_cs *engine)
>  void intel_guc_submission_enable(struct intel_guc *guc)
>  {
>  	guc_init_lrc_mapping(guc);
> +	guc_init_engine_stats(guc);
>  }
>  
>  void intel_guc_submission_disable(struct intel_guc *guc)
> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
> index c7ef44fa0c36..5a95a9f0a8e3 100644
> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
> @@ -28,6 +28,8 @@ void intel_guc_submission_print_context_info(struct intel_guc *guc,
>  void intel_guc_dump_active_requests(struct intel_engine_cs *engine,
>  				    struct i915_request *hung_rq,
>  				    struct drm_printer *m);
> +void intel_guc_busyness_park(struct intel_gt *gt);
> +void intel_guc_busyness_unpark(struct intel_gt *gt);
>  
>  bool intel_guc_virtual_engine_has_heartbeat(const struct intel_engine_cs *ve);
>  
> diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
> index d9f7a729333f..f7927f6dac6e 100644
> --- a/drivers/gpu/drm/i915/i915_reg.h
> +++ b/drivers/gpu/drm/i915/i915_reg.h
> @@ -2662,6 +2662,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
>  #define   RING_WAIT		(1 << 11) /* gen3+, PRBx_CTL */
>  #define   RING_WAIT_SEMAPHORE	(1 << 10) /* gen6+ */
>  
> +#define GUCPMTIMESTAMP          _MMIO(0xC3E8)
> +
>  /* There are 16 64-bit CS General Purpose Registers per-engine on Gen8+ */
>  #define GEN8_RING_CS_GPR(base, n)	_MMIO((base) + 0x600 + (n) * 8)
>  #define GEN8_RING_CS_GPR_UDW(base, n)	_MMIO((base) + 0x600 + (n) * 8 + 4)
> -- 
> 2.20.1
> 

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [Intel-gfx] [PATCH 2/2] drm/i915/pmu: Connect engine busyness stats from GuC to pmu
@ 2021-10-27 20:02     ` Matthew Brost
  0 siblings, 0 replies; 16+ messages in thread
From: Matthew Brost @ 2021-10-27 20:02 UTC (permalink / raw)
  To: Umesh Nerlige Ramappa
  Cc: intel-gfx, dri-devel, john.c.harrison, Tvrtko Ursulin, daniel.vetter

On Tue, Oct 26, 2021 at 05:48:21PM -0700, Umesh Nerlige Ramappa wrote:
> With GuC handling scheduling, i915 is not aware of the time that a
> context is scheduled in and out of the engine. Since i915 pmu relies on
> this info to provide engine busyness to the user, GuC shares this info
> with i915 for all engines using shared memory. For each engine, this
> info contains:
> 
> - total busyness: total time that the context was running (total)
> - id: id of the running context (id)
> - start timestamp: timestamp when the context started running (start)
> 
> At the time (now) of sampling the engine busyness, if the id is valid
> (!= ~0), and start is non-zero, then the context is considered to be
> active and the engine busyness is calculated using the below equation
> 
> 	engine busyness = total + (now - start)
> 
> All times are obtained from the gt clock base. For inactive contexts,
> engine busyness is just equal to the total.
> 
> The start and total values provided by GuC are 32 bits and wrap around
> in a few minutes. Since perf pmu provides busyness as 64 bit
> monotonically increasing values, there is a need for this implementation
> to account for overflows and extend the time to 64 bits before returning
> busyness to the user. In order to do that, a worker runs periodically at
> frequency = 1/8th the time it takes for the timestamp to wrap. As an
> example, that would be once in 27 seconds for a gt clock frequency of
> 19.2 MHz.
> 
> Note:
> There might be an over-accounting of busyness due to the fact that GuC
> may be updating the total and start values while kmd is reading them.
> (i.e kmd may read the updated total and the stale start). In such a
> case, user may see higher busyness value followed by smaller ones which
> would eventually catch up to the higher value.
> 
> v2: (Tvrtko)
> - Include details in commit message
> - Move intel engine busyness function into execlist code
> - Use union inside engine->stats
> - Use natural type for ping delay jiffies
> - Drop active_work condition checks
> - Use for_each_engine if iterating all engines
> - Drop seq locking, use spinlock at GuC level to update engine stats
> - Document worker specific details
> 
> v3: (Tvrtko/Umesh)
> - Demarcate GuC and execlist stat objects with comments
> - Document known over-accounting issue in commit
> - Provide a consistent view of GuC state
> - Add hooks to gt park/unpark for GuC busyness
> - Stop/start worker in gt park/unpark path
> - Drop inline
> - Move spinlock and worker inits to GuC initialization
> - Drop helpers that are called only once
> 
> v4: (Tvrtko/Matt/Umesh)
> - Drop addressed opens from commit message
> - Get runtime pm in ping, remove from the park path
> - Use cancel_delayed_work_sync in disable_submission path
> - Update stats during reset prepare
> - Skip ping if reset in progress
> - Explicitly name execlists and GuC stats objects
> - Since disable_submission is called from many places, move resetting
>   stats to intel_guc_submission_reset_prepare
> 
> v5: (Tvrtko)
> - Add a trylock helper that does not sleep and synchronize PMU event
>   callbacks and worker with gt reset
> 
> v6: (CI BAT failures)
> - DUTs using execlist submission failed to boot since __gt_unpark is
>   called during i915 load. This ends up calling the GuC busyness unpark
>   hook and results in kick-starting an uninitialized worker. Let
>   park/unpark hooks check if GuC submission has been initialized.
> - drop cant_sleep() from trylock helper since rcu_read_lock takes care
>   of that.
> 
> v7: (CI) Fix igt@i915_selftest@live@gt_engines
> - For GuC mode of submission the engine busyness is derived from gt time
>   domain. Use gt time elapsed as reference in the selftest.
> - Increase busyness calculation to 10ms duration to ensure batch runs
>   longer and falls within the busyness tolerances in selftest.
> 
> v8:
> - Use ktime_get in selftest as before
> - intel_reset_trylock_no_wait results in a lockdep splat that is not
>   trivial to fix since the PMU callback runs in irq context and the
>   reset paths are tightly knit into the driver. The test that uncovers
>   this is igt@perf_pmu@faulting-read. Drop intel_reset_trylock_no_wait,
>   instead use the reset_count to synchronize with gt reset during pmu
>   callback. For the ping, continue to use intel_reset_trylock since ping
>   is not run in irq context.
> 
> - GuC PM timestamp does not tick when GuC is idle. This can potentially
>   result in wrong busyness values when a context is active on the
>   engine, but GuC is idle. Use the RING TIMESTAMP as GPU timestamp to
>   process the GuC busyness stats. This works since both GuC timestamp and
>   RING timestamp are synced with the same clock.
> 
> - The busyness stats may get updated after the batch starts running.
>   This delay causes the busyness reported for 100us duration to fall
>   below 95% in the selftest. The only option at this time is to wait for
>   GuC busyness to change from idle to active before we sample busyness
>   over a 100us period.
> 
> Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
> Signed-off-by: Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>
> Acked-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> ---
>  drivers/gpu/drm/i915/gt/intel_engine_cs.c     |  28 +-
>  drivers/gpu/drm/i915/gt/intel_engine_types.h  |  33 ++-
>  .../drm/i915/gt/intel_execlists_submission.c  |  34 +++
>  drivers/gpu/drm/i915/gt/intel_gt_pm.c         |   2 +
>  drivers/gpu/drm/i915/gt/selftest_engine_pm.c  |  33 +++
>  .../gpu/drm/i915/gt/uc/abi/guc_actions_abi.h  |   1 +
>  drivers/gpu/drm/i915/gt/uc/intel_guc.h        |  30 ++
>  drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c    |  21 ++
>  drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h    |   5 +
>  drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h   |  13 +
>  .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 277 ++++++++++++++++++
>  .../gpu/drm/i915/gt/uc/intel_guc_submission.h |   2 +
>  drivers/gpu/drm/i915/i915_reg.h               |   2 +
>  13 files changed, 453 insertions(+), 28 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
> index 2de396e34d83..332756036007 100644
> --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
> +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
> @@ -1915,23 +1915,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
>  	intel_engine_print_breadcrumbs(engine, m);
>  }
>  
> -static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine,
> -					    ktime_t *now)
> -{
> -	struct intel_engine_execlists_stats *stats = &engine->stats.execlists;
> -	ktime_t total = stats->total;
> -
> -	/*
> -	 * If the engine is executing something at the moment
> -	 * add it to the total.
> -	 */
> -	*now = ktime_get();
> -	if (READ_ONCE(stats->active))
> -		total = ktime_add(total, ktime_sub(*now, stats->start));
> -
> -	return total;
> -}
> -
>  /**
>   * intel_engine_get_busy_time() - Return current accumulated engine busyness
>   * @engine: engine to report on
> @@ -1941,16 +1924,7 @@ static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine,
>   */
>  ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now)
>  {
> -	struct intel_engine_execlists_stats *stats = &engine->stats.execlists;
> -	unsigned int seq;
> -	ktime_t total;
> -
> -	do {
> -		seq = read_seqcount_begin(&stats->lock);
> -		total = __intel_engine_get_busy_time(engine, now);
> -	} while (read_seqcount_retry(&stats->lock, seq));
> -
> -	return total;
> +	return engine->busyness(engine, now);
>  }
>  
>  struct intel_context *
> diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
> index 24fa7fb0e7de..5732e0d71513 100644
> --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
> +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
> @@ -284,6 +284,28 @@ struct intel_engine_execlists_stats {
>  	ktime_t start;
>  };
>  
> +struct intel_engine_guc_stats {
> +	/**
> +	 * @running: Active state of the engine when busyness was last sampled.
> +	 */
> +	bool running;
> +
> +	/**
> +	 * @prev_total: Previous value of total runtime clock cycles.
> +	 */
> +	u32 prev_total;
> +
> +	/**
> +	 * @total_gt_clks: Total gt clock cycles this engine was busy.
> +	 */
> +	u64 total_gt_clks;
> +
> +	/**
> +	 * @start_gt_clk: GT clock time of last idle to active transition.
> +	 */
> +	u64 start_gt_clk;
> +};
> +
>  struct intel_engine_cs {
>  	struct drm_i915_private *i915;
>  	struct intel_gt *gt;
> @@ -466,6 +488,12 @@ struct intel_engine_cs {
>  	void		(*add_active_request)(struct i915_request *rq);
>  	void		(*remove_active_request)(struct i915_request *rq);
>  
> +	/*
> +	 * Get engine busyness and the time at which the busyness was sampled.
> +	 */
> +	ktime_t		(*busyness)(struct intel_engine_cs *engine,
> +				    ktime_t *now);
> +
>  	struct intel_engine_execlists execlists;
>  
>  	/*
> @@ -515,7 +543,10 @@ struct intel_engine_cs {
>  	u32 (*get_cmd_length_mask)(u32 cmd_header);
>  
>  	struct {
> -		struct intel_engine_execlists_stats execlists;
> +		union {
> +			struct intel_engine_execlists_stats execlists;
> +			struct intel_engine_guc_stats guc;
> +		};
>  
>  		/**
>  		 * @rps: Utilisation at last RPS sampling.
> diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
> index bedb80057046..ca03880fa7e4 100644
> --- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
> +++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
> @@ -3293,6 +3293,38 @@ static void execlists_release(struct intel_engine_cs *engine)
>  	lrc_fini_wa_ctx(engine);
>  }
>  
> +static ktime_t __execlists_engine_busyness(struct intel_engine_cs *engine,
> +					   ktime_t *now)
> +{
> +	struct intel_engine_execlists_stats *stats = &engine->stats.execlists;
> +	ktime_t total = stats->total;
> +
> +	/*
> +	 * If the engine is executing something at the moment
> +	 * add it to the total.
> +	 */
> +	*now = ktime_get();
> +	if (READ_ONCE(stats->active))
> +		total = ktime_add(total, ktime_sub(*now, stats->start));
> +
> +	return total;
> +}
> +
> +static ktime_t execlists_engine_busyness(struct intel_engine_cs *engine,
> +					 ktime_t *now)
> +{
> +	struct intel_engine_execlists_stats *stats = &engine->stats.execlists;
> +	unsigned int seq;
> +	ktime_t total;
> +
> +	do {
> +		seq = read_seqcount_begin(&stats->lock);
> +		total = __execlists_engine_busyness(engine, now);
> +	} while (read_seqcount_retry(&stats->lock, seq));
> +
> +	return total;
> +}
> +
>  static void
>  logical_ring_default_vfuncs(struct intel_engine_cs *engine)
>  {
> @@ -3349,6 +3381,8 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
>  		engine->emit_bb_start = gen8_emit_bb_start;
>  	else
>  		engine->emit_bb_start = gen8_emit_bb_start_noarb;
> +
> +	engine->busyness = execlists_engine_busyness;
>  }
>  
>  static void logical_ring_default_irqs(struct intel_engine_cs *engine)
> diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
> index 524eaf678790..b4a8594bc46c 100644
> --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
> +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
> @@ -86,6 +86,7 @@ static int __gt_unpark(struct intel_wakeref *wf)
>  	intel_rc6_unpark(&gt->rc6);
>  	intel_rps_unpark(&gt->rps);
>  	i915_pmu_gt_unparked(i915);
> +	intel_guc_busyness_unpark(gt);
>  
>  	intel_gt_unpark_requests(gt);
>  	runtime_begin(gt);
> @@ -104,6 +105,7 @@ static int __gt_park(struct intel_wakeref *wf)
>  	runtime_end(gt);
>  	intel_gt_park_requests(gt);
>  
> +	intel_guc_busyness_park(gt);
>  	i915_vma_parked(gt);
>  	i915_pmu_gt_parked(i915);
>  	intel_rps_park(&gt->rps);
> diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
> index 75569666105d..0bfd738dbf3a 100644
> --- a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
> +++ b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
> @@ -214,6 +214,31 @@ static int live_engine_timestamps(void *arg)
>  	return 0;
>  }
>  
> +static int __spin_until_busier(struct intel_engine_cs *engine, ktime_t busyness)
> +{
> +	ktime_t start, unused, dt;
> +
> +	if (!intel_engine_uses_guc(engine))
> +		return 0;
> +
> +	/*
> +	 * In GuC mode of submission, the busyness stats may get updated after
> +	 * the batch starts running. Poll for a change in busyness and timeout
> +	 * after 500 us.
> +	 */
> +	start = ktime_get();
> +	while (intel_engine_get_busy_time(engine, &unused) == busyness) {
> +		dt = ktime_get() - start;
> +		if (dt > 500000) {
> +			pr_err("active wait timed out %lld\n", dt);
> +			ENGINE_TRACE(engine, "active wait time out %lld\n", dt);
> +			return -ETIME;
> +		}
> +	}
> +
> +	return 0;
> +}
> +
>  static int live_engine_busy_stats(void *arg)
>  {
>  	struct intel_gt *gt = arg;
> @@ -232,6 +257,7 @@ static int live_engine_busy_stats(void *arg)
>  	GEM_BUG_ON(intel_gt_pm_is_awake(gt));
>  	for_each_engine(engine, gt, id) {
>  		struct i915_request *rq;
> +		ktime_t busyness, dummy;
>  		ktime_t de, dt;
>  		ktime_t t[2];
>  
> @@ -274,12 +300,19 @@ static int live_engine_busy_stats(void *arg)
>  		}
>  		i915_request_add(rq);
>  
> +		busyness = intel_engine_get_busy_time(engine, &dummy);
>  		if (!igt_wait_for_spinner(&spin, rq)) {
>  			intel_gt_set_wedged(engine->gt);
>  			err = -ETIME;
>  			goto end;
>  		}
>  
> +		err = __spin_until_busier(engine, busyness);
> +		if (err) {
> +			GEM_TRACE_DUMP();
> +			goto end;
> +		}
> +
>  		ENGINE_TRACE(engine, "measuring busy time\n");
>  		preempt_disable();
>  		de = intel_engine_get_busy_time(engine, &t[0]);
> diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
> index ba10bd374cee..fe5d7d261797 100644
> --- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
> +++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
> @@ -144,6 +144,7 @@ enum intel_guc_action {
>  	INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE = 0x4600,
>  	INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC = 0x4601,
>  	INTEL_GUC_ACTION_RESET_CLIENT = 0x5507,
> +	INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF = 0x550A,
>  	INTEL_GUC_ACTION_LIMIT
>  };
>  
> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
> index 31cf9fb48c7e..1cb46098030d 100644
> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
> @@ -138,6 +138,8 @@ struct intel_guc {
>  	u32 ads_regset_size;
>  	/** @ads_golden_ctxt_size: size of the golden contexts in the ADS */
>  	u32 ads_golden_ctxt_size;
> +	/** @ads_engine_usage_size: size of engine usage in the ADS */
> +	u32 ads_engine_usage_size;
>  
>  	/** @lrc_desc_pool: object allocated to hold the GuC LRC descriptor pool */
>  	struct i915_vma *lrc_desc_pool;
> @@ -172,6 +174,34 @@ struct intel_guc {
>  
>  	/** @send_mutex: used to serialize the intel_guc_send actions */
>  	struct mutex send_mutex;
> +
> +	/**
> +	 * @timestamp: GT timestamp object that stores a copy of the timestamp
> +	 * and adjusts it for overflow using a worker.
> +	 */
> +	struct {
> +		/**
> +		 * @lock: Lock protecting the below fields and the engine stats.
> +		 */
> +		spinlock_t lock;
> +
> +		/**
> +		 * @gt_stamp: 64 bit extended value of the GT timestamp.
> +		 */
> +		u64 gt_stamp;
> +
> +		/**
> +		 * @ping_delay: Period for polling the GT timestamp for
> +		 * overflow.
> +		 */
> +		unsigned long ping_delay;
> +
> +		/**
> +		 * @work: Periodic work to adjust GT timestamp, engine and
> +		 * context usage for overflows.
> +		 */
> +		struct delayed_work work;
> +	} timestamp;
>  };
>  
>  static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
> index 621c893a009f..1a1edae67e4e 100644
> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
> @@ -26,6 +26,8 @@
>   *      | guc_policies                          |
>   *      +---------------------------------------+
>   *      | guc_gt_system_info                    |
> + *      +---------------------------------------+
> + *      | guc_engine_usage                      |
>   *      +---------------------------------------+ <== static
>   *      | guc_mmio_reg[countA] (engine 0.0)     |
>   *      | guc_mmio_reg[countB] (engine 0.1)     |
> @@ -47,6 +49,7 @@ struct __guc_ads_blob {
>  	struct guc_ads ads;
>  	struct guc_policies policies;
>  	struct guc_gt_system_info system_info;
> +	struct guc_engine_usage engine_usage;
>  	/* From here on, location is dynamic! Refer to above diagram. */
>  	struct guc_mmio_reg regset[0];
>  } __packed;
> @@ -628,3 +631,21 @@ void intel_guc_ads_reset(struct intel_guc *guc)
>  
>  	guc_ads_private_data_reset(guc);
>  }
> +
> +u32 intel_guc_engine_usage_offset(struct intel_guc *guc)
> +{
> +	struct __guc_ads_blob *blob = guc->ads_blob;
> +	u32 base = intel_guc_ggtt_offset(guc, guc->ads_vma);
> +	u32 offset = base + ptr_offset(blob, engine_usage);
> +
> +	return offset;
> +}
> +
> +struct guc_engine_usage_record *intel_guc_engine_usage(struct intel_engine_cs *engine)
> +{
> +	struct intel_guc *guc = &engine->gt->uc.guc;
> +	struct __guc_ads_blob *blob = guc->ads_blob;
> +	u8 guc_class = engine_class_to_guc_class(engine->class);
> +
> +	return &blob->engine_usage.engines[guc_class][ilog2(engine->logical_mask)];
> +}
> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h
> index 3d85051d57e4..e74c110facff 100644
> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h
> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h
> @@ -6,8 +6,11 @@
>  #ifndef _INTEL_GUC_ADS_H_
>  #define _INTEL_GUC_ADS_H_
>  
> +#include <linux/types.h>
> +
>  struct intel_guc;
>  struct drm_printer;
> +struct intel_engine_cs;
>  
>  int intel_guc_ads_create(struct intel_guc *guc);
>  void intel_guc_ads_destroy(struct intel_guc *guc);
> @@ -15,5 +18,7 @@ void intel_guc_ads_init_late(struct intel_guc *guc);
>  void intel_guc_ads_reset(struct intel_guc *guc);
>  void intel_guc_ads_print_policy_info(struct intel_guc *guc,
>  				     struct drm_printer *p);
> +struct guc_engine_usage_record *intel_guc_engine_usage(struct intel_engine_cs *engine);
> +u32 intel_guc_engine_usage_offset(struct intel_guc *guc);
>  
>  #endif
> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
> index 722933e26347..7072e30e99f4 100644
> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
> @@ -294,6 +294,19 @@ struct guc_ads {
>  	u32 reserved[15];
>  } __packed;
>  
> +/* Engine usage stats */
> +struct guc_engine_usage_record {
> +	u32 current_context_index;
> +	u32 last_switch_in_stamp;
> +	u32 reserved0;
> +	u32 total_runtime;
> +	u32 reserved1[4];
> +} __packed;
> +
> +struct guc_engine_usage {
> +	struct guc_engine_usage_record engines[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];

As I mentioned in the previous patch, I'd define this sub-structure
inline here too. But that is just my opinion and doesn't really matter.
I believe I understand everything else this patch is doing and it looks
good to me.
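
Just to illustrate what I mean, something along these lines (untested
sketch, reusing the field names from the patch as-is):

	struct guc_engine_usage {
		/* one record per GuC class / logical instance pair */
		struct {
			u32 current_context_index;
			u32 last_switch_in_stamp;
			u32 reserved0;
			u32 total_runtime;
			u32 reserved1[4];
		} engines[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];
	} __packed;

(intel_guc_engine_usage() would then no longer return a named record
pointer, so it is not obviously nicer either.)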

With that:
Reviewed-by: Matthew Brost <matthew.brost@intel.com> 

> +} __packed;
> +
>  /* GuC logging structures */
>  
>  enum guc_log_buffer_type {
> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> index 38b47e73e35d..5cc49c0b3889 100644
> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> @@ -13,6 +13,7 @@
>  #include "gt/intel_engine_heartbeat.h"
>  #include "gt/intel_gpu_commands.h"
>  #include "gt/intel_gt.h"
> +#include "gt/intel_gt_clock_utils.h"
>  #include "gt/intel_gt_irq.h"
>  #include "gt/intel_gt_pm.h"
>  #include "gt/intel_gt_requests.h"
> @@ -21,6 +22,7 @@
>  #include "gt/intel_mocs.h"
>  #include "gt/intel_ring.h"
>  
> +#include "intel_guc_ads.h"
>  #include "intel_guc_submission.h"
>  
>  #include "i915_drv.h"
> @@ -1077,6 +1079,272 @@ static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc)
>  	xa_unlock_irqrestore(&guc->context_lookup, flags);
>  }
>  
> +/*
> + * GuC stores busyness stats for each engine at context in/out boundaries. A
> + * context 'in' logs execution start time, 'out' adds in -> out delta to total.
> + * i915/kmd accesses 'start', 'total' and 'context id' from memory shared with
> + * GuC.
> + *
> + * __i915_pmu_event_read samples engine busyness. When sampling, if context id
> + * is valid (!= ~0) and start is non-zero, the engine is considered to be
> + * active. For an active engine total busyness = total + (now - start), where
> + * 'now' is the time at which the busyness is sampled. For inactive engine,
> + * total busyness = total.
> + *
> + * All times are captured from GUCPMTIMESTAMP reg and are in gt clock domain.
> + *
> + * The start and total values provided by GuC are 32 bits and wrap around in a
> + * few minutes. Since perf pmu provides busyness as 64 bit monotonically
> + * increasing ns values, there is a need for this implementation to account for
> + * overflows and extend the GuC provided values to 64 bits before returning
> + * busyness to the user. In order to do that, a worker runs periodically at
> + * frequency = 1/8th the time it takes for the timestamp to wrap (i.e. once in
> + * 27 seconds for a gt clock frequency of 19.2 MHz).
> + */
> +
> +#define WRAP_TIME_CLKS U32_MAX
> +#define POLL_TIME_CLKS (WRAP_TIME_CLKS >> 3)
> +
> +static void
> +__extend_last_switch(struct intel_guc *guc, u64 *prev_start, u32 new_start)
> +{
> +	u32 gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp);
> +	u32 gt_stamp_last = lower_32_bits(guc->timestamp.gt_stamp);
> +
> +	if (new_start == lower_32_bits(*prev_start))
> +		return;
> +
> +	if (new_start < gt_stamp_last &&
> +	    (new_start - gt_stamp_last) <= POLL_TIME_CLKS)
> +		gt_stamp_hi++;
> +
> +	if (new_start > gt_stamp_last &&
> +	    (gt_stamp_last - new_start) <= POLL_TIME_CLKS && gt_stamp_hi)
> +		gt_stamp_hi--;
> +
> +	*prev_start = ((u64)gt_stamp_hi << 32) | new_start;
> +}
> +
> +static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
> +{
> +	struct guc_engine_usage_record *rec = intel_guc_engine_usage(engine);
> +	struct intel_engine_guc_stats *stats = &engine->stats.guc;
> +	struct intel_guc *guc = &engine->gt->uc.guc;
> +	u32 last_switch = rec->last_switch_in_stamp;
> +	u32 ctx_id = rec->current_context_index;
> +	u32 total = rec->total_runtime;
> +
> +	lockdep_assert_held(&guc->timestamp.lock);
> +
> +	stats->running = ctx_id != ~0U && last_switch;
> +	if (stats->running)
> +		__extend_last_switch(guc, &stats->start_gt_clk, last_switch);
> +
> +	/*
> +	 * Instead of adjusting the total for overflow, just add the
> +	 * difference from previous sample stats->total_gt_clks
> +	 */
> +	if (total && total != ~0U) {
> +		stats->total_gt_clks += (u32)(total - stats->prev_total);
> +		stats->prev_total = total;
> +	}
> +}
> +
> +static void guc_update_pm_timestamp(struct intel_guc *guc,
> +				    struct intel_engine_cs *engine,
> +				    ktime_t *now)
> +{
> +	u32 gt_stamp_now, gt_stamp_hi;
> +
> +	lockdep_assert_held(&guc->timestamp.lock);
> +
> +	gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp);
> +	gt_stamp_now = intel_uncore_read(engine->uncore,
> +					 RING_TIMESTAMP(engine->mmio_base));
> +	*now = ktime_get();
> +
> +	if (gt_stamp_now < lower_32_bits(guc->timestamp.gt_stamp))
> +		gt_stamp_hi++;
> +
> +	guc->timestamp.gt_stamp = ((u64)gt_stamp_hi << 32) | gt_stamp_now;
> +}
> +
> +/*
> + * Unlike the execlist mode of submission total and active times are in terms of
> + * gt clocks. The *now parameter is retained to return the cpu time at which the
> + * busyness was sampled.
> + */
> +static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
> +{
> +	struct intel_engine_guc_stats stats_saved, *stats = &engine->stats.guc;
> +	struct i915_gpu_error *gpu_error = &engine->i915->gpu_error;
> +	struct intel_gt *gt = engine->gt;
> +	struct intel_guc *guc = &gt->uc.guc;
> +	u64 total, gt_stamp_saved;
> +	unsigned long flags;
> +	u32 reset_count;
> +
> +	spin_lock_irqsave(&guc->timestamp.lock, flags);
> +
> +	/*
> +	 * If a reset happened, we risk reading partially updated
> +	 * engine busyness from GuC, so we just use the driver stored
> +	 * copy of busyness. Synchronize with gt reset using reset_count.
> +	 */
> +	reset_count = i915_reset_count(gpu_error);
> +
> +	*now = ktime_get();
> +
> +	/*
> +	 * The active busyness depends on start_gt_clk and gt_stamp.
> +	 * gt_stamp is updated by i915 only when gt is awake and the
> +	 * start_gt_clk is derived from GuC state. To get a consistent
> +	 * view of activity, we query the GuC state only if gt is awake.
> +	 */
> +	stats_saved = *stats;
> +	gt_stamp_saved = guc->timestamp.gt_stamp;
> +	if (intel_gt_pm_get_if_awake(gt)) {
> +		guc_update_engine_gt_clks(engine);
> +		guc_update_pm_timestamp(guc, engine, now);
> +		intel_gt_pm_put_async(gt);
> +		if (i915_reset_count(gpu_error) != reset_count) {
> +			*stats = stats_saved;
> +			guc->timestamp.gt_stamp = gt_stamp_saved;
> +		}
> +	}
> +
> +	total = intel_gt_clock_interval_to_ns(gt, stats->total_gt_clks);
> +	if (stats->running) {
> +		u64 clk = guc->timestamp.gt_stamp - stats->start_gt_clk;
> +
> +		total += intel_gt_clock_interval_to_ns(gt, clk);
> +	}
> +
> +	spin_unlock_irqrestore(&guc->timestamp.lock, flags);
> +
> +	return ns_to_ktime(total);
> +}
> +
> +static void __reset_guc_busyness_stats(struct intel_guc *guc)
> +{
> +	struct intel_gt *gt = guc_to_gt(guc);
> +	struct intel_engine_cs *engine;
> +	enum intel_engine_id id;
> +	unsigned long flags;
> +	ktime_t unused;
> +
> +	cancel_delayed_work_sync(&guc->timestamp.work);
> +
> +	spin_lock_irqsave(&guc->timestamp.lock, flags);
> +
> +	for_each_engine(engine, gt, id) {
> +		guc_update_pm_timestamp(guc, engine, &unused);
> +		guc_update_engine_gt_clks(engine);
> +		engine->stats.guc.prev_total = 0;
> +	}
> +
> +	spin_unlock_irqrestore(&guc->timestamp.lock, flags);
> +}
> +
> +static void __update_guc_busyness_stats(struct intel_guc *guc)
> +{
> +	struct intel_gt *gt = guc_to_gt(guc);
> +	struct intel_engine_cs *engine;
> +	enum intel_engine_id id;
> +	ktime_t unused;
> +
> +	for_each_engine(engine, gt, id) {
> +		guc_update_pm_timestamp(guc, engine, &unused);
> +		guc_update_engine_gt_clks(engine);
> +	}
> +}
> +
> +static void guc_timestamp_ping(struct work_struct *wrk)
> +{
> +	struct intel_guc *guc = container_of(wrk, typeof(*guc),
> +					     timestamp.work.work);
> +	struct intel_uc *uc = container_of(guc, typeof(*uc), guc);
> +	struct intel_gt *gt = guc_to_gt(guc);
> +	intel_wakeref_t wakeref;
> +	unsigned long flags;
> +	int srcu, ret;
> +
> +	/*
> +	 * Synchronize with gt reset to make sure the worker does not
> +	 * corrupt the engine/guc stats.
> +	 */
> +	ret = intel_gt_reset_trylock(gt, &srcu);
> +	if (ret)
> +		return;
> +
> +	spin_lock_irqsave(&guc->timestamp.lock, flags);
> +
> +	with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
> +		__update_guc_busyness_stats(guc);
> +
> +	spin_unlock_irqrestore(&guc->timestamp.lock, flags);
> +
> +	intel_gt_reset_unlock(gt, srcu);
> +
> +	mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
> +			 guc->timestamp.ping_delay);
> +}
> +
> +static int guc_action_enable_usage_stats(struct intel_guc *guc)
> +{
> +	u32 offset = intel_guc_engine_usage_offset(guc);
> +	u32 action[] = {
> +		INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF,
> +		offset,
> +		0,
> +	};
> +
> +	return intel_guc_send(guc, action, ARRAY_SIZE(action));
> +}
> +
> +static void guc_init_engine_stats(struct intel_guc *guc)
> +{
> +	struct intel_gt *gt = guc_to_gt(guc);
> +	intel_wakeref_t wakeref;
> +
> +	mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
> +			 guc->timestamp.ping_delay);
> +
> +	with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref) {
> +		int ret = guc_action_enable_usage_stats(guc);
> +
> +		if (ret)
> +			drm_err(&gt->i915->drm,
> +				"Failed to enable usage stats: %d!\n", ret);
> +	}
> +}
> +
> +void intel_guc_busyness_park(struct intel_gt *gt)
> +{
> +	struct intel_guc *guc = &gt->uc.guc;
> +	unsigned long flags;
> +
> +	if (!guc_submission_initialized(guc))
> +		return;
> +
> +	cancel_delayed_work(&guc->timestamp.work);
> +
> +	spin_lock_irqsave(&guc->timestamp.lock, flags);
> +	__update_guc_busyness_stats(guc);
> +	spin_unlock_irqrestore(&guc->timestamp.lock, flags);
> +}
> +
> +void intel_guc_busyness_unpark(struct intel_gt *gt)
> +{
> +	struct intel_guc *guc = &gt->uc.guc;
> +
> +	if (!guc_submission_initialized(guc))
> +		return;
> +
> +	mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
> +			 guc->timestamp.ping_delay);
> +}
> +
>  static inline bool
>  submission_disabled(struct intel_guc *guc)
>  {
> @@ -1138,6 +1406,7 @@ void intel_guc_submission_reset_prepare(struct intel_guc *guc)
>  	intel_gt_park_heartbeats(guc_to_gt(guc));
>  	disable_submission(guc);
>  	guc->interrupts.disable(guc);
> +	__reset_guc_busyness_stats(guc);
>  
>  	/* Flush IRQ handler */
>  	spin_lock_irq(&guc_to_gt(guc)->irq_lock);
> @@ -1484,6 +1753,7 @@ static void destroyed_worker_func(struct work_struct *w);
>   */
>  int intel_guc_submission_init(struct intel_guc *guc)
>  {
> +	struct intel_gt *gt = guc_to_gt(guc);
>  	int ret;
>  
>  	if (guc->lrc_desc_pool)
> @@ -1512,6 +1782,10 @@ int intel_guc_submission_init(struct intel_guc *guc)
>  	if (!guc->submission_state.guc_ids_bitmap)
>  		return -ENOMEM;
>  
> +	spin_lock_init(&guc->timestamp.lock);
> +	INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping);
> +	guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
> +
>  	return 0;
>  }
>  
> @@ -3369,7 +3643,9 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine)
>  		engine->emit_flush = gen12_emit_flush_xcs;
>  	}
>  	engine->set_default_submission = guc_set_default_submission;
> +	engine->busyness = guc_engine_busyness;
>  
> +	engine->flags |= I915_ENGINE_SUPPORTS_STATS;
>  	engine->flags |= I915_ENGINE_HAS_PREEMPTION;
>  	engine->flags |= I915_ENGINE_HAS_TIMESLICES;
>  
> @@ -3468,6 +3744,7 @@ int intel_guc_submission_setup(struct intel_engine_cs *engine)
>  void intel_guc_submission_enable(struct intel_guc *guc)
>  {
>  	guc_init_lrc_mapping(guc);
> +	guc_init_engine_stats(guc);
>  }
>  
>  void intel_guc_submission_disable(struct intel_guc *guc)
> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
> index c7ef44fa0c36..5a95a9f0a8e3 100644
> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
> @@ -28,6 +28,8 @@ void intel_guc_submission_print_context_info(struct intel_guc *guc,
>  void intel_guc_dump_active_requests(struct intel_engine_cs *engine,
>  				    struct i915_request *hung_rq,
>  				    struct drm_printer *m);
> +void intel_guc_busyness_park(struct intel_gt *gt);
> +void intel_guc_busyness_unpark(struct intel_gt *gt);
>  
>  bool intel_guc_virtual_engine_has_heartbeat(const struct intel_engine_cs *ve);
>  
> diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
> index d9f7a729333f..f7927f6dac6e 100644
> --- a/drivers/gpu/drm/i915/i915_reg.h
> +++ b/drivers/gpu/drm/i915/i915_reg.h
> @@ -2662,6 +2662,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
>  #define   RING_WAIT		(1 << 11) /* gen3+, PRBx_CTL */
>  #define   RING_WAIT_SEMAPHORE	(1 << 10) /* gen6+ */
>  
> +#define GUCPMTIMESTAMP          _MMIO(0xC3E8)
> +
>  /* There are 16 64-bit CS General Purpose Registers per-engine on Gen8+ */
>  #define GEN8_RING_CS_GPR(base, n)	_MMIO((base) + 0x600 + (n) * 8)
>  #define GEN8_RING_CS_GPR_UDW(base, n)	_MMIO((base) + 0x600 + (n) * 8 + 4)
> -- 
> 2.20.1
> 

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [Intel-gfx] [PATCH 2/2] drm/i915/pmu: Connect engine busyness stats from GuC to pmu
  2021-10-27  0:48   ` [Intel-gfx] " Umesh Nerlige Ramappa
  (?)
  (?)
@ 2021-10-30  0:40   ` Umesh Nerlige Ramappa
  -1 siblings, 0 replies; 16+ messages in thread
From: Umesh Nerlige Ramappa @ 2021-10-30  0:40 UTC (permalink / raw)
  To: intel-gfx, dri-devel
  Cc: john.c.harrison, Tvrtko Ursulin, daniel.vetter, Matthew Brost

On Tue, Oct 26, 2021 at 05:48:21PM -0700, Umesh Nerlige Ramappa wrote:
>With GuC handling scheduling, i915 is not aware of the time that a
>context is scheduled in and out of the engine. Since i915 pmu relies on
>this info to provide engine busyness to the user, GuC shares this info
>with i915 for all engines using shared memory. For each engine, this
>info contains:
>
>- total busyness: total time that the context was running (total)
>- id: id of the running context (id)
>- start timestamp: timestamp when the context started running (start)
>
>At the time (now) of sampling the engine busyness, if the id is valid
>(!= ~0), and start is non-zero, then the context is considered to be
>active and the engine busyness is calculated using the below equation
>
>	engine busyness = total + (now - start)
>
>All times are obtained from the gt clock base. For inactive contexts,
>engine busyness is just equal to the total.
>
>The start and total values provided by GuC are 32 bits and wrap around
>in a few minutes. Since perf pmu provides busyness as 64 bit
>monotonically increasing values, there is a need for this implementation
>to account for overflows and extend the time to 64 bits before returning
>busyness to the user. In order to do that, a worker runs periodically at
>frequency = 1/8th the time it takes for the timestamp to wrap. As an
>example, that would be once in 27 seconds for a gt clock frequency of
>19.2 MHz.
>
>Note:
>There might be an over-accounting of busyness due to the fact that GuC
>may be updating the total and start values while kmd is reading them.
>(i.e kmd may read the updated total and the stale start). In such a
>case, user may see higher busyness value followed by smaller ones which
>would eventually catch up to the higher value.
>
>v2: (Tvrtko)
>- Include details in commit message
>- Move intel engine busyness function into execlist code
>- Use union inside engine->stats
>- Use natural type for ping delay jiffies
>- Drop active_work condition checks
>- Use for_each_engine if iterating all engines
>- Drop seq locking, use spinlock at GuC level to update engine stats
>- Document worker specific details
>
>v3: (Tvrtko/Umesh)
>- Demarcate GuC and execlist stat objects with comments
>- Document known over-accounting issue in commit
>- Provide a consistent view of GuC state
>- Add hooks to gt park/unpark for GuC busyness
>- Stop/start worker in gt park/unpark path
>- Drop inline
>- Move spinlock and worker inits to GuC initialization
>- Drop helpers that are called only once
>
>v4: (Tvrtko/Matt/Umesh)
>- Drop addressed opens from commit message
>- Get runtime pm in ping, remove from the park path
>- Use cancel_delayed_work_sync in disable_submission path
>- Update stats during reset prepare
>- Skip ping if reset in progress
>- Explicitly name execlists and GuC stats objects
>- Since disable_submission is called from many places, move resetting
>  stats to intel_guc_submission_reset_prepare
>
>v5: (Tvrtko)
>- Add a trylock helper that does not sleep and synchronize PMU event
>  callbacks and worker with gt reset
>
>v6: (CI BAT failures)
>- DUTs using execlist submission failed to boot since __gt_unpark is
>  called during i915 load. This ends up calling the GuC busyness unpark
>  hook and results in kick-starting an uninitialized worker. Let
>  park/unpark hooks check if GuC submission has been initialized.
>- drop cant_sleep() from trylock helper since rcu_read_lock takes care
>  of that.
>
>v7: (CI) Fix igt@i915_selftest@live@gt_engines
>- For GuC mode of submission the engine busyness is derived from gt time
>  domain. Use gt time elapsed as reference in the selftest.
>- Increase busyness calculation to 10ms duration to ensure batch runs
>  longer and falls within the busyness tolerances in selftest.
>
>v8:
>- Use ktime_get in selftest as before
>- intel_reset_trylock_no_wait results in a lockdep splat that is not
>  trivial to fix since the PMU callback runs in irq context and the
>  reset paths are tightly knit into the driver. The test that uncovers
>  this is igt@perf_pmu@faulting-read. Drop intel_reset_trylock_no_wait,
>  instead use the reset_count to synchronize with gt reset during pmu
>  callback. For the ping, continue to use intel_reset_trylock since ping
>  is not run in irq context.
>
>- GuC PM timestamp does not tick when GuC is idle. This can potentially
>  result in wrong busyness values when a context is active on the
>  engine, but GuC is idle. Use the RING TIMESTAMP as GPU timestamp to
>  process the GuC busyness stats. This works since both GuC timestamp and
>  RING timestamp are synced with the same clock.
>
>- The busyness stats may get updated after the batch starts running.
>  This delay causes the busyness reported for 100us duration to fall
>  below 95% in the selftest. The only option at this time is to wait for
>  GuC busyness to change from idle to active before we sample busyness
>  over a 100us period.
>
>Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
>Signed-off-by: Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>
>Acked-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
>---
> drivers/gpu/drm/i915/gt/intel_engine_cs.c     |  28 +-
> drivers/gpu/drm/i915/gt/intel_engine_types.h  |  33 ++-
> .../drm/i915/gt/intel_execlists_submission.c  |  34 +++
> drivers/gpu/drm/i915/gt/intel_gt_pm.c         |   2 +
> drivers/gpu/drm/i915/gt/selftest_engine_pm.c  |  33 +++
> .../gpu/drm/i915/gt/uc/abi/guc_actions_abi.h  |   1 +
> drivers/gpu/drm/i915/gt/uc/intel_guc.h        |  30 ++
> drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c    |  21 ++
> drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h    |   5 +
> drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h   |  13 +
> .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 277 ++++++++++++++++++
> .../gpu/drm/i915/gt/uc/intel_guc_submission.h |   2 +
> drivers/gpu/drm/i915/i915_reg.h               |   2 +
> 13 files changed, 453 insertions(+), 28 deletions(-)
>
>diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
>index 2de396e34d83..332756036007 100644
>--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
>+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
>@@ -1915,23 +1915,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
> 	intel_engine_print_breadcrumbs(engine, m);
> }
>
>-static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine,
>-					    ktime_t *now)
>-{
>-	struct intel_engine_execlists_stats *stats = &engine->stats.execlists;
>-	ktime_t total = stats->total;
>-
>-	/*
>-	 * If the engine is executing something at the moment
>-	 * add it to the total.
>-	 */
>-	*now = ktime_get();
>-	if (READ_ONCE(stats->active))
>-		total = ktime_add(total, ktime_sub(*now, stats->start));
>-
>-	return total;
>-}
>-
> /**
>  * intel_engine_get_busy_time() - Return current accumulated engine busyness
>  * @engine: engine to report on
>@@ -1941,16 +1924,7 @@ static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine,
>  */
> ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now)
> {
>-	struct intel_engine_execlists_stats *stats = &engine->stats.execlists;
>-	unsigned int seq;
>-	ktime_t total;
>-
>-	do {
>-		seq = read_seqcount_begin(&stats->lock);
>-		total = __intel_engine_get_busy_time(engine, now);
>-	} while (read_seqcount_retry(&stats->lock, seq));
>-
>-	return total;
>+	return engine->busyness(engine, now);
> }
>
> struct intel_context *
>diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
>index 24fa7fb0e7de..5732e0d71513 100644
>--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
>+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
>@@ -284,6 +284,28 @@ struct intel_engine_execlists_stats {
> 	ktime_t start;
> };
>
>+struct intel_engine_guc_stats {
>+	/**
>+	 * @running: Active state of the engine when busyness was last sampled.
>+	 */
>+	bool running;
>+
>+	/**
>+	 * @prev_total: Previous value of total runtime clock cycles.
>+	 */
>+	u32 prev_total;
>+
>+	/**
>+	 * @total_gt_clks: Total gt clock cycles this engine was busy.
>+	 */
>+	u64 total_gt_clks;
>+
>+	/**
>+	 * @start_gt_clk: GT clock time of last idle to active transition.
>+	 */
>+	u64 start_gt_clk;
>+};
>+
> struct intel_engine_cs {
> 	struct drm_i915_private *i915;
> 	struct intel_gt *gt;
>@@ -466,6 +488,12 @@ struct intel_engine_cs {
> 	void		(*add_active_request)(struct i915_request *rq);
> 	void		(*remove_active_request)(struct i915_request *rq);
>
>+	/*
>+	 * Get engine busyness and the time at which the busyness was sampled.
>+	 */
>+	ktime_t		(*busyness)(struct intel_engine_cs *engine,
>+				    ktime_t *now);
>+
> 	struct intel_engine_execlists execlists;
>
> 	/*
>@@ -515,7 +543,10 @@ struct intel_engine_cs {
> 	u32 (*get_cmd_length_mask)(u32 cmd_header);
>
> 	struct {
>-		struct intel_engine_execlists_stats execlists;
>+		union {
>+			struct intel_engine_execlists_stats execlists;
>+			struct intel_engine_guc_stats guc;
>+		};
>
> 		/**
> 		 * @rps: Utilisation at last RPS sampling.
>diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
>index bedb80057046..ca03880fa7e4 100644
>--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
>+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
>@@ -3293,6 +3293,38 @@ static void execlists_release(struct intel_engine_cs *engine)
> 	lrc_fini_wa_ctx(engine);
> }
>
>+static ktime_t __execlists_engine_busyness(struct intel_engine_cs *engine,
>+					   ktime_t *now)
>+{
>+	struct intel_engine_execlists_stats *stats = &engine->stats.execlists;
>+	ktime_t total = stats->total;
>+
>+	/*
>+	 * If the engine is executing something at the moment
>+	 * add it to the total.
>+	 */
>+	*now = ktime_get();
>+	if (READ_ONCE(stats->active))
>+		total = ktime_add(total, ktime_sub(*now, stats->start));
>+
>+	return total;
>+}
>+
>+static ktime_t execlists_engine_busyness(struct intel_engine_cs *engine,
>+					 ktime_t *now)
>+{
>+	struct intel_engine_execlists_stats *stats = &engine->stats.execlists;
>+	unsigned int seq;
>+	ktime_t total;
>+
>+	do {
>+		seq = read_seqcount_begin(&stats->lock);
>+		total = __execlists_engine_busyness(engine, now);
>+	} while (read_seqcount_retry(&stats->lock, seq));
>+
>+	return total;
>+}
>+
> static void
> logical_ring_default_vfuncs(struct intel_engine_cs *engine)
> {
>@@ -3349,6 +3381,8 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
> 		engine->emit_bb_start = gen8_emit_bb_start;
> 	else
> 		engine->emit_bb_start = gen8_emit_bb_start_noarb;
>+
>+	engine->busyness = execlists_engine_busyness;
> }
>
> static void logical_ring_default_irqs(struct intel_engine_cs *engine)
>diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
>index 524eaf678790..b4a8594bc46c 100644
>--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
>+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
>@@ -86,6 +86,7 @@ static int __gt_unpark(struct intel_wakeref *wf)
> 	intel_rc6_unpark(&gt->rc6);
> 	intel_rps_unpark(&gt->rps);
> 	i915_pmu_gt_unparked(i915);
>+	intel_guc_busyness_unpark(gt);
>
> 	intel_gt_unpark_requests(gt);
> 	runtime_begin(gt);
>@@ -104,6 +105,7 @@ static int __gt_park(struct intel_wakeref *wf)
> 	runtime_end(gt);
> 	intel_gt_park_requests(gt);
>
>+	intel_guc_busyness_park(gt);
> 	i915_vma_parked(gt);
> 	i915_pmu_gt_parked(i915);
> 	intel_rps_park(&gt->rps);
>diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
>index 75569666105d..0bfd738dbf3a 100644
>--- a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
>+++ b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
>@@ -214,6 +214,31 @@ static int live_engine_timestamps(void *arg)
> 	return 0;
> }
>
>+static int __spin_until_busier(struct intel_engine_cs *engine, ktime_t busyness)
>+{
>+	ktime_t start, unused, dt;
>+
>+	if (!intel_engine_uses_guc(engine))
>+		return 0;
>+
>+	/*
>+	 * In GuC mode of submission, the busyness stats may get updated after
>+	 * the batch starts running. Poll for a change in busyness and timeout
>+	 * after 500 us.
>+	 */
>+	start = ktime_get();
>+	while (intel_engine_get_busy_time(engine, &unused) == busyness) {
>+		dt = ktime_get() - start;
>+		if (dt > 500000) {
>+			pr_err("active wait timed out %lld\n", dt);
>+			ENGINE_TRACE(engine, "active wait time out %lld\n", dt);
>+			return -ETIME;
>+		}
>+	}
>+
>+	return 0;
>+}
>+
> static int live_engine_busy_stats(void *arg)
> {
> 	struct intel_gt *gt = arg;
>@@ -232,6 +257,7 @@ static int live_engine_busy_stats(void *arg)
> 	GEM_BUG_ON(intel_gt_pm_is_awake(gt));
> 	for_each_engine(engine, gt, id) {
> 		struct i915_request *rq;
>+		ktime_t busyness, dummy;
> 		ktime_t de, dt;
> 		ktime_t t[2];
>
>@@ -274,12 +300,19 @@ static int live_engine_busy_stats(void *arg)
> 		}
> 		i915_request_add(rq);
>
>+		busyness = intel_engine_get_busy_time(engine, &dummy);
> 		if (!igt_wait_for_spinner(&spin, rq)) {
> 			intel_gt_set_wedged(engine->gt);
> 			err = -ETIME;
> 			goto end;
> 		}
>
>+		err = __spin_until_busier(engine, busyness);
>+		if (err) {
>+			GEM_TRACE_DUMP();
>+			goto end;
>+		}
>+
> 		ENGINE_TRACE(engine, "measuring busy time\n");
> 		preempt_disable();
> 		de = intel_engine_get_busy_time(engine, &t[0]);
>diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
>index ba10bd374cee..fe5d7d261797 100644
>--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
>+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
>@@ -144,6 +144,7 @@ enum intel_guc_action {
> 	INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE = 0x4600,
> 	INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC = 0x4601,
> 	INTEL_GUC_ACTION_RESET_CLIENT = 0x5507,
>+	INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF = 0x550A,
> 	INTEL_GUC_ACTION_LIMIT
> };
>
>diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
>index 31cf9fb48c7e..1cb46098030d 100644
>--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
>+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
>@@ -138,6 +138,8 @@ struct intel_guc {
> 	u32 ads_regset_size;
> 	/** @ads_golden_ctxt_size: size of the golden contexts in the ADS */
> 	u32 ads_golden_ctxt_size;
>+	/** @ads_engine_usage_size: size of engine usage in the ADS */
>+	u32 ads_engine_usage_size;
>
> 	/** @lrc_desc_pool: object allocated to hold the GuC LRC descriptor pool */
> 	struct i915_vma *lrc_desc_pool;
>@@ -172,6 +174,34 @@ struct intel_guc {
>
> 	/** @send_mutex: used to serialize the intel_guc_send actions */
> 	struct mutex send_mutex;
>+
>+	/**
>+	 * @timestamp: GT timestamp object that stores a copy of the timestamp
>+	 * and adjusts it for overflow using a worker.
>+	 */
>+	struct {
>+		/**
>+		 * @lock: Lock protecting the below fields and the engine stats.
>+		 */
>+		spinlock_t lock;
>+
>+		/**
>+		 * @gt_stamp: 64 bit extended value of the GT timestamp.
>+		 */
>+		u64 gt_stamp;
>+
>+		/**
>+		 * @ping_delay: Period for polling the GT timestamp for
>+		 * overflow.
>+		 */
>+		unsigned long ping_delay;
>+
>+		/**
>+		 * @work: Periodic work to adjust GT timestamp, engine and
>+		 * context usage for overflows.
>+		 */
>+		struct delayed_work work;
>+	} timestamp;
> };
>
> static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
>diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
>index 621c893a009f..1a1edae67e4e 100644
>--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
>+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
>@@ -26,6 +26,8 @@
>  *      | guc_policies                          |
>  *      +---------------------------------------+
>  *      | guc_gt_system_info                    |
>+ *      +---------------------------------------+
>+ *      | guc_engine_usage                      |
>  *      +---------------------------------------+ <== static
>  *      | guc_mmio_reg[countA] (engine 0.0)     |
>  *      | guc_mmio_reg[countB] (engine 0.1)     |
>@@ -47,6 +49,7 @@ struct __guc_ads_blob {
> 	struct guc_ads ads;
> 	struct guc_policies policies;
> 	struct guc_gt_system_info system_info;
>+	struct guc_engine_usage engine_usage;
> 	/* From here on, location is dynamic! Refer to above diagram. */
> 	struct guc_mmio_reg regset[0];
> } __packed;
>@@ -628,3 +631,21 @@ void intel_guc_ads_reset(struct intel_guc *guc)
>
> 	guc_ads_private_data_reset(guc);
> }
>+
>+u32 intel_guc_engine_usage_offset(struct intel_guc *guc)
>+{
>+	struct __guc_ads_blob *blob = guc->ads_blob;
>+	u32 base = intel_guc_ggtt_offset(guc, guc->ads_vma);
>+	u32 offset = base + ptr_offset(blob, engine_usage);
>+
>+	return offset;
>+}
>+
>+struct guc_engine_usage_record *intel_guc_engine_usage(struct intel_engine_cs *engine)
>+{
>+	struct intel_guc *guc = &engine->gt->uc.guc;
>+	struct __guc_ads_blob *blob = guc->ads_blob;
>+	u8 guc_class = engine_class_to_guc_class(engine->class);
>+
>+	return &blob->engine_usage.engines[guc_class][ilog2(engine->logical_mask)];
>+}
>diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h
>index 3d85051d57e4..e74c110facff 100644
>--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h
>+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h
>@@ -6,8 +6,11 @@
> #ifndef _INTEL_GUC_ADS_H_
> #define _INTEL_GUC_ADS_H_
>
>+#include <linux/types.h>
>+
> struct intel_guc;
> struct drm_printer;
>+struct intel_engine_cs;
>
> int intel_guc_ads_create(struct intel_guc *guc);
> void intel_guc_ads_destroy(struct intel_guc *guc);
>@@ -15,5 +18,7 @@ void intel_guc_ads_init_late(struct intel_guc *guc);
> void intel_guc_ads_reset(struct intel_guc *guc);
> void intel_guc_ads_print_policy_info(struct intel_guc *guc,
> 				     struct drm_printer *p);
>+struct guc_engine_usage_record *intel_guc_engine_usage(struct intel_engine_cs *engine);
>+u32 intel_guc_engine_usage_offset(struct intel_guc *guc);
>
> #endif
>diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
>index 722933e26347..7072e30e99f4 100644
>--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
>+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
>@@ -294,6 +294,19 @@ struct guc_ads {
> 	u32 reserved[15];
> } __packed;
>
>+/* Engine usage stats */
>+struct guc_engine_usage_record {
>+	u32 current_context_index;
>+	u32 last_switch_in_stamp;
>+	u32 reserved0;
>+	u32 total_runtime;
>+	u32 reserved1[4];
>+} __packed;
>+
>+struct guc_engine_usage {
>+	struct guc_engine_usage_record engines[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];
>+} __packed;
>+
> /* GuC logging structures */
>
> enum guc_log_buffer_type {
>diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
>index 38b47e73e35d..5cc49c0b3889 100644
>--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
>+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
>@@ -13,6 +13,7 @@
> #include "gt/intel_engine_heartbeat.h"
> #include "gt/intel_gpu_commands.h"
> #include "gt/intel_gt.h"
>+#include "gt/intel_gt_clock_utils.h"
> #include "gt/intel_gt_irq.h"
> #include "gt/intel_gt_pm.h"
> #include "gt/intel_gt_requests.h"
>@@ -21,6 +22,7 @@
> #include "gt/intel_mocs.h"
> #include "gt/intel_ring.h"
>
>+#include "intel_guc_ads.h"
> #include "intel_guc_submission.h"
>
> #include "i915_drv.h"
>@@ -1077,6 +1079,272 @@ static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc)
> 	xa_unlock_irqrestore(&guc->context_lookup, flags);
> }
>
>+/*
>+ * GuC stores busyness stats for each engine at context in/out boundaries. A
>+ * context 'in' logs execution start time, 'out' adds in -> out delta to total.
>+ * i915/kmd accesses 'start', 'total' and 'context id' from memory shared with
>+ * GuC.
>+ *
>+ * __i915_pmu_event_read samples engine busyness. When sampling, if context id
>+ * is valid (!= ~0) and start is non-zero, the engine is considered to be
>+ * active. For an active engine total busyness = total + (now - start), where
>+ * 'now' is the time at which the busyness is sampled. For inactive engine,
>+ * total busyness = total.
>+ *
>+ * All times are captured from GUCPMTIMESTAMP reg and are in gt clock domain.
>+ *
>+ * The start and total values provided by GuC are 32 bits and wrap around in a
>+ * few minutes. Since perf pmu provides busyness as 64 bit monotonically
>+ * increasing ns values, there is a need for this implementation to account for
>+ * overflows and extend the GuC provided values to 64 bits before returning
>+ * busyness to the user. In order to do that, a worker runs periodically at
>+ * frequency = 1/8th the time it takes for the timestamp to wrap (i.e. once in
>+ * 27 seconds for a gt clock frequency of 19.2 MHz).
>+ */
>+
>+#define WRAP_TIME_CLKS U32_MAX
>+#define POLL_TIME_CLKS (WRAP_TIME_CLKS >> 3)
>+
>+static void
>+__extend_last_switch(struct intel_guc *guc, u64 *prev_start, u32 new_start)
>+{
>+	u32 gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp);
>+	u32 gt_stamp_last = lower_32_bits(guc->timestamp.gt_stamp);
>+
>+	if (new_start == lower_32_bits(*prev_start))
>+		return;
>+
>+	if (new_start < gt_stamp_last &&
>+	    (new_start - gt_stamp_last) <= POLL_TIME_CLKS)
>+		gt_stamp_hi++;
>+
>+	if (new_start > gt_stamp_last &&
>+	    (gt_stamp_last - new_start) <= POLL_TIME_CLKS && gt_stamp_hi)
>+		gt_stamp_hi--;
>+
>+	*prev_start = ((u64)gt_stamp_hi << 32) | new_start;
>+}
>+
>+static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
>+{
>+	struct guc_engine_usage_record *rec = intel_guc_engine_usage(engine);
>+	struct intel_engine_guc_stats *stats = &engine->stats.guc;
>+	struct intel_guc *guc = &engine->gt->uc.guc;
>+	u32 last_switch = rec->last_switch_in_stamp;
>+	u32 ctx_id = rec->current_context_index;
>+	u32 total = rec->total_runtime;
>+
>+	lockdep_assert_held(&guc->timestamp.lock);
>+
>+	stats->running = ctx_id != ~0U && last_switch;
>+	if (stats->running)
>+		__extend_last_switch(guc, &stats->start_gt_clk, last_switch);
>+
>+	/*
>+	 * Instead of adjusting the total for overflow, just add the
>+	 * difference from previous sample stats->total_gt_clks
>+	 */
>+	if (total && total != ~0U) {
>+		stats->total_gt_clks += (u32)(total - stats->prev_total);
>+		stats->prev_total = total;
>+	}
>+}
>+
>+static void guc_update_pm_timestamp(struct intel_guc *guc,
>+				    struct intel_engine_cs *engine,
>+				    ktime_t *now)
>+{
>+	u32 gt_stamp_now, gt_stamp_hi;
>+
>+	lockdep_assert_held(&guc->timestamp.lock);
>+
>+	gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp);
>+	gt_stamp_now = intel_uncore_read(engine->uncore,
>+					 RING_TIMESTAMP(engine->mmio_base));
>+	*now = ktime_get();
>+
>+	if (gt_stamp_now < lower_32_bits(guc->timestamp.gt_stamp))
>+		gt_stamp_hi++;
>+
>+	guc->timestamp.gt_stamp = ((u64)gt_stamp_hi << 32) | gt_stamp_now;
>+}
>+
>+/*
>+ * Unlike the execlist mode of submission total and active times are in terms of
>+ * gt clocks. The *now parameter is retained to return the cpu time at which the
>+ * busyness was sampled.
>+ */
>+static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
>+{
>+	struct intel_engine_guc_stats stats_saved, *stats = &engine->stats.guc;
>+	struct i915_gpu_error *gpu_error = &engine->i915->gpu_error;
>+	struct intel_gt *gt = engine->gt;
>+	struct intel_guc *guc = &gt->uc.guc;
>+	u64 total, gt_stamp_saved;
>+	unsigned long flags;
>+	u32 reset_count;
>+
>+	spin_lock_irqsave(&guc->timestamp.lock, flags);
>+
>+	/*
>+	 * If a reset happened, we risk reading partially updated
>+	 * engine busyness from GuC, so we just use the driver stored
>+	 * copy of busyness. Synchronize with gt reset using reset_count.
>+	 */

I think I missed an additional check for I915_RESET_BACKOFF here before 
reading the reset count. If the BACKOFF bit is set, then we should just 
use the saved copy of stats. If it's not, then processing the reset 
count makes sense. Will post something to fix that.
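
Roughly something like this in guc_engine_busyness() (untested sketch;
in_reset would be a new bool local, and I am assuming the BACKOFF bit is
checked via gt->reset.flags as elsewhere in the driver):

	/*
	 * Sketch only: if a reset is in progress, skip sampling the GuC
	 * state and rely on the saved driver copy of the stats instead.
	 */
	in_reset = test_bit(I915_RESET_BACKOFF, &gt->reset.flags);
	reset_count = i915_reset_count(gpu_error);

	*now = ktime_get();

	stats_saved = *stats;
	gt_stamp_saved = guc->timestamp.gt_stamp;
	if (!in_reset && intel_gt_pm_get_if_awake(gt)) {
		guc_update_engine_gt_clks(engine);
		guc_update_pm_timestamp(guc, engine, now);
		intel_gt_pm_put_async(gt);
		if (i915_reset_count(gpu_error) != reset_count) {
			*stats = stats_saved;
			guc->timestamp.gt_stamp = gt_stamp_saved;
		}
	}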

Regards,
Umesh

>+	reset_count = i915_reset_count(gpu_error);
>+
>+	*now = ktime_get();
>+
>+	/*
>+	 * The active busyness depends on start_gt_clk and gt_stamp.
>+	 * gt_stamp is updated by i915 only when gt is awake and the
>+	 * start_gt_clk is derived from GuC state. To get a consistent
>+	 * view of activity, we query the GuC state only if gt is awake.
>+	 */
>+	stats_saved = *stats;
>+	gt_stamp_saved = guc->timestamp.gt_stamp;
>+	if (intel_gt_pm_get_if_awake(gt)) {
>+		guc_update_engine_gt_clks(engine);
>+		guc_update_pm_timestamp(guc, engine, now);
>+		intel_gt_pm_put_async(gt);
>+		if (i915_reset_count(gpu_error) != reset_count) {
>+			*stats = stats_saved;
>+			guc->timestamp.gt_stamp = gt_stamp_saved;
>+		}
>+	}
>+
>+	total = intel_gt_clock_interval_to_ns(gt, stats->total_gt_clks);
>+	if (stats->running) {
>+		u64 clk = guc->timestamp.gt_stamp - stats->start_gt_clk;
>+
>+		total += intel_gt_clock_interval_to_ns(gt, clk);
>+	}
>+
>+	spin_unlock_irqrestore(&guc->timestamp.lock, flags);
>+
>+	return ns_to_ktime(total);
>+}
>+
>+static void __reset_guc_busyness_stats(struct intel_guc *guc)
>+{
>+	struct intel_gt *gt = guc_to_gt(guc);
>+	struct intel_engine_cs *engine;
>+	enum intel_engine_id id;
>+	unsigned long flags;
>+	ktime_t unused;
>+
>+	cancel_delayed_work_sync(&guc->timestamp.work);
>+
>+	spin_lock_irqsave(&guc->timestamp.lock, flags);
>+
>+	for_each_engine(engine, gt, id) {
>+		guc_update_pm_timestamp(guc, engine, &unused);
>+		guc_update_engine_gt_clks(engine);
>+		engine->stats.guc.prev_total = 0;
>+	}
>+
>+	spin_unlock_irqrestore(&guc->timestamp.lock, flags);
>+}
>+
>+static void __update_guc_busyness_stats(struct intel_guc *guc)
>+{
>+	struct intel_gt *gt = guc_to_gt(guc);
>+	struct intel_engine_cs *engine;
>+	enum intel_engine_id id;
>+	ktime_t unused;
>+
>+	for_each_engine(engine, gt, id) {
>+		guc_update_pm_timestamp(guc, engine, &unused);
>+		guc_update_engine_gt_clks(engine);
>+	}
>+}
>+
>+static void guc_timestamp_ping(struct work_struct *wrk)
>+{
>+	struct intel_guc *guc = container_of(wrk, typeof(*guc),
>+					     timestamp.work.work);
>+	struct intel_uc *uc = container_of(guc, typeof(*uc), guc);
>+	struct intel_gt *gt = guc_to_gt(guc);
>+	intel_wakeref_t wakeref;
>+	unsigned long flags;
>+	int srcu, ret;
>+
>+	/*
>+	 * Synchronize with gt reset to make sure the worker does not
>+	 * corrupt the engine/guc stats.
>+	 */
>+	ret = intel_gt_reset_trylock(gt, &srcu);
>+	if (ret)
>+		return;
>+
>+	spin_lock_irqsave(&guc->timestamp.lock, flags);
>+
>+	with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
>+		__update_guc_busyness_stats(guc);
>+
>+	spin_unlock_irqrestore(&guc->timestamp.lock, flags);
>+
>+	intel_gt_reset_unlock(gt, srcu);
>+
>+	mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
>+			 guc->timestamp.ping_delay);
>+}
>+
>+static int guc_action_enable_usage_stats(struct intel_guc *guc)
>+{
>+	u32 offset = intel_guc_engine_usage_offset(guc);
>+	u32 action[] = {
>+		INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF,
>+		offset,
>+		0,
>+	};
>+
>+	return intel_guc_send(guc, action, ARRAY_SIZE(action));
>+}
>+
>+static void guc_init_engine_stats(struct intel_guc *guc)
>+{
>+	struct intel_gt *gt = guc_to_gt(guc);
>+	intel_wakeref_t wakeref;
>+
>+	mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
>+			 guc->timestamp.ping_delay);
>+
>+	with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref) {
>+		int ret = guc_action_enable_usage_stats(guc);
>+
>+		if (ret)
>+			drm_err(&gt->i915->drm,
>+				"Failed to enable usage stats: %d!\n", ret);
>+	}
>+}
>+
>+void intel_guc_busyness_park(struct intel_gt *gt)
>+{
>+	struct intel_guc *guc = &gt->uc.guc;
>+	unsigned long flags;
>+
>+	if (!guc_submission_initialized(guc))
>+		return;
>+
>+	cancel_delayed_work(&guc->timestamp.work);
>+
>+	spin_lock_irqsave(&guc->timestamp.lock, flags);
>+	__update_guc_busyness_stats(guc);
>+	spin_unlock_irqrestore(&guc->timestamp.lock, flags);
>+}
>+
>+void intel_guc_busyness_unpark(struct intel_gt *gt)
>+{
>+	struct intel_guc *guc = &gt->uc.guc;
>+
>+	if (!guc_submission_initialized(guc))
>+		return;
>+
>+	mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
>+			 guc->timestamp.ping_delay);
>+}
>+
> static inline bool
> submission_disabled(struct intel_guc *guc)
> {
>@@ -1138,6 +1406,7 @@ void intel_guc_submission_reset_prepare(struct intel_guc *guc)
> 	intel_gt_park_heartbeats(guc_to_gt(guc));
> 	disable_submission(guc);
> 	guc->interrupts.disable(guc);
>+	__reset_guc_busyness_stats(guc);
>
> 	/* Flush IRQ handler */
> 	spin_lock_irq(&guc_to_gt(guc)->irq_lock);
>@@ -1484,6 +1753,7 @@ static void destroyed_worker_func(struct work_struct *w);
>  */
> int intel_guc_submission_init(struct intel_guc *guc)
> {
>+	struct intel_gt *gt = guc_to_gt(guc);
> 	int ret;
>
> 	if (guc->lrc_desc_pool)
>@@ -1512,6 +1782,10 @@ int intel_guc_submission_init(struct intel_guc *guc)
> 	if (!guc->submission_state.guc_ids_bitmap)
> 		return -ENOMEM;
>
>+	spin_lock_init(&guc->timestamp.lock);
>+	INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping);
>+	guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
>+
> 	return 0;
> }
>
>@@ -3369,7 +3643,9 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine)
> 		engine->emit_flush = gen12_emit_flush_xcs;
> 	}
> 	engine->set_default_submission = guc_set_default_submission;
>+	engine->busyness = guc_engine_busyness;
>
>+	engine->flags |= I915_ENGINE_SUPPORTS_STATS;
> 	engine->flags |= I915_ENGINE_HAS_PREEMPTION;
> 	engine->flags |= I915_ENGINE_HAS_TIMESLICES;
>
>@@ -3468,6 +3744,7 @@ int intel_guc_submission_setup(struct intel_engine_cs *engine)
> void intel_guc_submission_enable(struct intel_guc *guc)
> {
> 	guc_init_lrc_mapping(guc);
>+	guc_init_engine_stats(guc);
> }
>
> void intel_guc_submission_disable(struct intel_guc *guc)
>diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
>index c7ef44fa0c36..5a95a9f0a8e3 100644
>--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
>+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
>@@ -28,6 +28,8 @@ void intel_guc_submission_print_context_info(struct intel_guc *guc,
> void intel_guc_dump_active_requests(struct intel_engine_cs *engine,
> 				    struct i915_request *hung_rq,
> 				    struct drm_printer *m);
>+void intel_guc_busyness_park(struct intel_gt *gt);
>+void intel_guc_busyness_unpark(struct intel_gt *gt);
>
> bool intel_guc_virtual_engine_has_heartbeat(const struct intel_engine_cs *ve);
>
>diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
>index d9f7a729333f..f7927f6dac6e 100644
>--- a/drivers/gpu/drm/i915/i915_reg.h
>+++ b/drivers/gpu/drm/i915/i915_reg.h
>@@ -2662,6 +2662,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
> #define   RING_WAIT		(1 << 11) /* gen3+, PRBx_CTL */
> #define   RING_WAIT_SEMAPHORE	(1 << 10) /* gen6+ */
>
>+#define GUCPMTIMESTAMP          _MMIO(0xC3E8)
>+
> /* There are 16 64-bit CS General Purpose Registers per-engine on Gen8+ */
> #define GEN8_RING_CS_GPR(base, n)	_MMIO((base) + 0x600 + (n) * 8)
> #define GEN8_RING_CS_GPR_UDW(base, n)	_MMIO((base) + 0x600 + (n) * 8 + 4)
>-- 
>2.20.1
>

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 2/2] drm/i915/pmu: Connect engine busyness stats from GuC to pmu
  2021-10-27  0:48   ` [Intel-gfx] " Umesh Nerlige Ramappa
@ 2022-10-21  8:42     ` Tvrtko Ursulin
  -1 siblings, 0 replies; 16+ messages in thread
From: Tvrtko Ursulin @ 2022-10-21  8:42 UTC (permalink / raw)
  To: Umesh Nerlige Ramappa, intel-gfx, dri-devel
  Cc: Matthew Brost, daniel.vetter, john.c.harrison


On 27/10/2021 01:48, Umesh Nerlige Ramappa wrote:

[snip]

> +static void guc_timestamp_ping(struct work_struct *wrk)
> +{
> +	struct intel_guc *guc = container_of(wrk, typeof(*guc),
> +					     timestamp.work.work);
> +	struct intel_uc *uc = container_of(guc, typeof(*uc), guc);
> +	struct intel_gt *gt = guc_to_gt(guc);
> +	intel_wakeref_t wakeref;
> +	unsigned long flags;
> +	int srcu, ret;
> +
> +	/*
> +	 * Synchronize with gt reset to make sure the worker does not
> +	 * corrupt the engine/guc stats.
> +	 */
> +	ret = intel_gt_reset_trylock(gt, &srcu);
> +	if (ret)
> +		return;
> +
> +	spin_lock_irqsave(&guc->timestamp.lock, flags);
> +
> +	with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
> +		__update_guc_busyness_stats(guc);

Spotted one splat today: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12268/bat-adlp-4/igt@i915_pm_rpm@basic-pci-d3-state.html

Could be that the reset lock needs to be inside the rpm get. I haven't really thought about it much, could you please check?

<4> [300.214744]
<4> [300.214753] ======================================================
<4> [300.214755] WARNING: possible circular locking dependency detected
<4> [300.214758] 6.1.0-rc1-CI_DRM_12268-g86e8558e3283+ #1 Not tainted
<4> [300.214761] ------------------------------------------------------
<4> [300.214762] kworker/10:1H/265 is trying to acquire lock:
<4> [300.214765] ffffffff8275e560 (fs_reclaim){+.+.}-{0:0}, at: __kmem_cache_alloc_node+0x27/0x170
<4> [300.214780]
but task is already holding lock:
<4> [300.214782] ffffc900013e7e78 ((work_completion)(&(&guc->timestamp.work)->work)){+.+.}-{0:0}, at: process_one_work+0x1eb/0x5b0
<4> [300.214793]
which lock already depends on the new lock.
<4> [300.214794]
the existing dependency chain (in reverse order) is:
<4> [300.214796]
-> #2 ((work_completion)(&(&guc->timestamp.work)->work)){+.+.}-{0:0}:
<4> [300.214801]        lock_acquire+0xd3/0x310
<4> [300.214806]        __flush_work+0x77/0x4e0
<4> [300.214811]        __cancel_work_timer+0x14e/0x1f0
<4> [300.214815]        intel_guc_submission_reset_prepare+0x7a/0x420 [i915]
<4> [300.215119]        intel_uc_reset_prepare+0x44/0x50 [i915]
<4> [300.215360]        reset_prepare+0x21/0x80 [i915]
<4> [300.215561]        intel_gt_reset+0x143/0x340 [i915]
<4> [300.215757]        intel_gt_reset_global+0xeb/0x160 [i915]
<4> [300.215946]        intel_gt_handle_error+0x2c2/0x410 [i915]
<4> [300.216137]        intel_gt_debugfs_reset_store+0x59/0xc0 [i915]
<4> [300.216333]        i915_wedged_set+0xc/0x20 [i915]
<4> [300.216513]        simple_attr_write+0xda/0x100
<4> [300.216520]        full_proxy_write+0x4e/0x80
<4> [300.216525]        vfs_write+0xe3/0x4e0
<4> [300.216531]        ksys_write+0x57/0xd0
<4> [300.216535]        do_syscall_64+0x37/0x90
<4> [300.216542]        entry_SYSCALL_64_after_hwframe+0x63/0xcd
<4> [300.216549]
-> #1 (&gt->reset.mutex){+.+.}-{3:3}:
<4> [300.216556]        lock_acquire+0xd3/0x310
<4> [300.216559]        i915_gem_shrinker_taints_mutex+0x2d/0x50 [i915]
<4> [300.216799]        intel_gt_init_reset+0x61/0x80 [i915]
<4> [300.217018]        intel_gt_common_init_early+0x10c/0x190 [i915]
<4> [300.217227]        intel_root_gt_init_early+0x44/0x60 [i915]
<4> [300.217434]        i915_driver_probe+0x9ab/0xf30 [i915]
<4> [300.217615]        i915_pci_probe+0xa5/0x240 [i915]
<4> [300.217796]        pci_device_probe+0x95/0x110
<4> [300.217803]        really_probe+0xd6/0x350
<4> [300.217811]        __driver_probe_device+0x73/0x170
<4> [300.217816]        driver_probe_device+0x1a/0x90
<4> [300.217821]        __driver_attach+0xbc/0x190
<4> [300.217826]        bus_for_each_dev+0x72/0xc0
<4> [300.217831]        bus_add_driver+0x1bb/0x210
<4> [300.217835]        driver_register+0x66/0xc0
<4> [300.217841]        0xffffffffa093001f
<4> [300.217844]        do_one_initcall+0x53/0x2f0
<4> [300.217849]        do_init_module+0x45/0x1c0
<4> [300.217855]        load_module+0x1d5e/0x1e90
<4> [300.217859]        __do_sys_finit_module+0xaf/0x120
<4> [300.217864]        do_syscall_64+0x37/0x90
<4> [300.217869]        entry_SYSCALL_64_after_hwframe+0x63/0xcd
<4> [300.217875]
-> #0 (fs_reclaim){+.+.}-{0:0}:
<4> [300.217880]        validate_chain+0xb3d/0x2000
<4> [300.217884]        __lock_acquire+0x5a4/0xb70
<4> [300.217888]        lock_acquire+0xd3/0x310
<4> [300.217891]        fs_reclaim_acquire+0xa1/0xd0
<4> [300.217896]        __kmem_cache_alloc_node+0x27/0x170
<4> [300.217899]        __kmalloc+0x43/0x1a0
<4> [300.217903]        acpi_ns_internalize_name+0x44/0x9f
<4> [300.217909]        acpi_ns_get_node_unlocked+0x6b/0xd7
<4> [300.217914]        acpi_ns_get_node+0x3b/0x54
<4> [300.217918]        acpi_get_handle+0x89/0xb7
<4> [300.217922]        acpi_has_method+0x1c/0x40
<4> [300.217928]        acpi_pci_set_power_state+0x42/0xf0
<4> [300.217935]        pci_power_up+0x20/0x1a0
<4> [300.217940]        pci_pm_default_resume_early+0x9/0x30
<4> [300.217945]        pci_pm_runtime_resume+0x29/0x90
<4> [300.217948]        __rpm_callback+0x3d/0x110
<4> [300.217954]        rpm_callback+0x58/0x60
<4> [300.217959]        rpm_resume+0x548/0x760
<4> [300.217963]        __pm_runtime_resume+0x42/0x80
<4> [300.217968]        __intel_runtime_pm_get+0x19/0x80 [i915]
<4> [300.218170]        guc_timestamp_ping+0x63/0xc0 [i915]
<4> [300.218467]        process_one_work+0x272/0x5b0
<4> [300.218472]        worker_thread+0x37/0x370
<4> [300.218477]        kthread+0xed/0x120
<4> [300.218481]        ret_from_fork+0x1f/0x30
<4> [300.218485]
other info that might help us debug this:
<4> [300.218487] Chain exists of:
   fs_reclaim --> &gt->reset.mutex --> (work_completion)(&(&guc->timestamp.work)->work)
<4> [300.218495]  Possible unsafe locking scenario:
<4> [300.218497]        CPU0                    CPU1
<4> [300.218499]        ----                    ----
<4> [300.218501]   lock((work_completion)(&(&guc->timestamp.work)->work));
<4> [300.218505]                                lock(&gt->reset.mutex);
<4> [300.218509]                                lock((work_completion)(&(&guc->timestamp.work)->work));
<4> [300.218512]   lock(fs_reclaim);
<4> [300.218515]
  *** DEADLOCK ***

Regards,

Tvrtko

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 2/2] drm/i915/pmu: Connect engine busyness stats from GuC to pmu
  2022-10-21  8:42     ` [Intel-gfx] " Tvrtko Ursulin
@ 2022-10-22  0:21       ` Umesh Nerlige Ramappa
  -1 siblings, 0 replies; 16+ messages in thread
From: Umesh Nerlige Ramappa @ 2022-10-22  0:21 UTC (permalink / raw)
  To: Tvrtko Ursulin
  Cc: Matthew Brost, daniel.vetter, intel-gfx, john.c.harrison, dri-devel

On Fri, Oct 21, 2022 at 09:42:53AM +0100, Tvrtko Ursulin wrote:
>
>On 27/10/2021 01:48, Umesh Nerlige Ramappa wrote:
>
>[snip]
>
>>+static void guc_timestamp_ping(struct work_struct *wrk)
>>+{
>>+	struct intel_guc *guc = container_of(wrk, typeof(*guc),
>>+					     timestamp.work.work);
>>+	struct intel_uc *uc = container_of(guc, typeof(*uc), guc);
>>+	struct intel_gt *gt = guc_to_gt(guc);
>>+	intel_wakeref_t wakeref;
>>+	unsigned long flags;
>>+	int srcu, ret;
>>+
>>+	/*
>>+	 * Synchronize with gt reset to make sure the worker does not
>>+	 * corrupt the engine/guc stats.
>>+	 */
>>+	ret = intel_gt_reset_trylock(gt, &srcu);
>>+	if (ret)
>>+		return;
>>+
>>+	spin_lock_irqsave(&guc->timestamp.lock, flags);
>>+
>>+	with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
>>+		__update_guc_busyness_stats(guc);
>
>Spotted one splat today: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12268/bat-adlp-4/igt@i915_pm_rpm@basic-pci-d3-state.html
>
>Could be that reset lock needs to be inside the rpm get. Haven't really thought about it much, could you please check?
>
><4> [300.214744]
><4> [300.214753] ======================================================
><4> [300.214755] WARNING: possible circular locking dependency detected
><4> [300.214758] 6.1.0-rc1-CI_DRM_12268-g86e8558e3283+ #1 Not tainted
><4> [300.214761] ------------------------------------------------------
><4> [300.214762] kworker/10:1H/265 is trying to acquire lock:
><4> [300.214765] ffffffff8275e560 (fs_reclaim){+.+.}-{0:0}, at: __kmem_cache_alloc_node+0x27/0x170
><4> [300.214780]
>but task is already holding lock:
><4> [300.214782] ffffc900013e7e78 ((work_completion)(&(&guc->timestamp.work)->work)){+.+.}-{0:0}, at: process_one_work+0x1eb/0x5b0
><4> [300.214793]
>which lock already depends on the new lock.
><4> [300.214794]
>the existing dependency chain (in reverse order) is:
><4> [300.214796]
>-> #2 ((work_completion)(&(&guc->timestamp.work)->work)){+.+.}-{0:0}:
><4> [300.214801]        lock_acquire+0xd3/0x310
><4> [300.214806]        __flush_work+0x77/0x4e0
><4> [300.214811]        __cancel_work_timer+0x14e/0x1f0
><4> [300.214815]        intel_guc_submission_reset_prepare+0x7a/0x420 [i915]
><4> [300.215119]        intel_uc_reset_prepare+0x44/0x50 [i915]
><4> [300.215360]        reset_prepare+0x21/0x80 [i915]
><4> [300.215561]        intel_gt_reset+0x143/0x340 [i915]
><4> [300.215757]        intel_gt_reset_global+0xeb/0x160 [i915]
><4> [300.215946]        intel_gt_handle_error+0x2c2/0x410 [i915]
><4> [300.216137]        intel_gt_debugfs_reset_store+0x59/0xc0 [i915]
><4> [300.216333]        i915_wedged_set+0xc/0x20 [i915]
><4> [300.216513]        simple_attr_write+0xda/0x100
><4> [300.216520]        full_proxy_write+0x4e/0x80
><4> [300.216525]        vfs_write+0xe3/0x4e0
><4> [300.216531]        ksys_write+0x57/0xd0
><4> [300.216535]        do_syscall_64+0x37/0x90
><4> [300.216542]        entry_SYSCALL_64_after_hwframe+0x63/0xcd
><4> [300.216549]
>-> #1 (&gt->reset.mutex){+.+.}-{3:3}:
><4> [300.216556]        lock_acquire+0xd3/0x310
><4> [300.216559]        i915_gem_shrinker_taints_mutex+0x2d/0x50 [i915]

i915_gem_shrinker_taints_mutex seems to have something to do with
fs_reclaim, and so does stack #0. Any idea what this early init is
doing? Can this code also result in a gt_wedged case? That might
explain stack #2, which is a reset.
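
If I remember right, i915_gem_shrinker_taints_mutex is only a lockdep
annotation applied once at init: it fake-acquires the mutex while
pretending to be in reclaim, so that lockdep learns the fs_reclaim
dependency up front. A rough sketch from memory, not the exact upstream
source (the real helper also takes the i915 pointer):

	/*
	 * Sketch only: record a lockdep dependency between fs_reclaim and
	 * the given mutex at init time, without ever really taking the
	 * mutex.
	 */
	static void sketch_shrinker_taints_mutex(struct mutex *mutex)
	{
		if (!IS_ENABLED(CONFIG_LOCKDEP))
			return;

		fs_reclaim_acquire(GFP_KERNEL);
		mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_);
		mutex_release(&mutex->dep_map, _RET_IP_);
		fs_reclaim_release(GFP_KERNEL);
	}

If that is still how it works, the #1 edge above is recorded at driver
probe time and does not need a real reset or a gt_wedged path to exist.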

><4> [300.216799]        intel_gt_init_reset+0x61/0x80 [i915]
><4> [300.217018]        intel_gt_common_init_early+0x10c/0x190 [i915]
><4> [300.217227]        intel_root_gt_init_early+0x44/0x60 [i915]
><4> [300.217434]        i915_driver_probe+0x9ab/0xf30 [i915]
><4> [300.217615]        i915_pci_probe+0xa5/0x240 [i915]
><4> [300.217796]        pci_device_probe+0x95/0x110
><4> [300.217803]        really_probe+0xd6/0x350
><4> [300.217811]        __driver_probe_device+0x73/0x170
><4> [300.217816]        driver_probe_device+0x1a/0x90
><4> [300.217821]        __driver_attach+0xbc/0x190
><4> [300.217826]        bus_for_each_dev+0x72/0xc0
><4> [300.217831]        bus_add_driver+0x1bb/0x210
><4> [300.217835]        driver_register+0x66/0xc0
><4> [300.217841]        0xffffffffa093001f
><4> [300.217844]        do_one_initcall+0x53/0x2f0
><4> [300.217849]        do_init_module+0x45/0x1c0
><4> [300.217855]        load_module+0x1d5e/0x1e90
><4> [300.217859]        __do_sys_finit_module+0xaf/0x120
><4> [300.217864]        do_syscall_64+0x37/0x90
><4> [300.217869]        entry_SYSCALL_64_after_hwframe+0x63/0xcd
><4> [300.217875]
>-> #0 (fs_reclaim){+.+.}-{0:0}:
><4> [300.217880]        validate_chain+0xb3d/0x2000
><4> [300.217884]        __lock_acquire+0x5a4/0xb70
><4> [300.217888]        lock_acquire+0xd3/0x310
><4> [300.217891]        fs_reclaim_acquire+0xa1/0xd0

fs_reclaim ^

><4> [300.217896]        __kmem_cache_alloc_node+0x27/0x170
><4> [300.217899]        __kmalloc+0x43/0x1a0
><4> [300.217903]        acpi_ns_internalize_name+0x44/0x9f
><4> [300.217909]        acpi_ns_get_node_unlocked+0x6b/0xd7
><4> [300.217914]        acpi_ns_get_node+0x3b/0x54
><4> [300.217918]        acpi_get_handle+0x89/0xb7
><4> [300.217922]        acpi_has_method+0x1c/0x40
><4> [300.217928]        acpi_pci_set_power_state+0x42/0xf0
><4> [300.217935]        pci_power_up+0x20/0x1a0
><4> [300.217940]        pci_pm_default_resume_early+0x9/0x30
><4> [300.217945]        pci_pm_runtime_resume+0x29/0x90
><4> [300.217948]        __rpm_callback+0x3d/0x110
><4> [300.217954]        rpm_callback+0x58/0x60
><4> [300.217959]        rpm_resume+0x548/0x760
><4> [300.217963]        __pm_runtime_resume+0x42/0x80
><4> [300.217968]        __intel_runtime_pm_get+0x19/0x80 [i915]
><4> [300.218170]        guc_timestamp_ping+0x63/0xc0 [i915]
><4> [300.218467]        process_one_work+0x272/0x5b0
><4> [300.218472]        worker_thread+0x37/0x370
><4> [300.218477]        kthread+0xed/0x120
><4> [300.218481]        ret_from_fork+0x1f/0x30
><4> [300.218485]

If the suspend has completed, I am not sure why guc_timestamp_ping is
getting called and resulting in pci_power_up in this stack. The park
should have synchronously canceled the ping worker. Strange.
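
For what it is worth, "synchronously canceled" here means something
along the lines of the sketch below at park time (the function name is
made up for illustration; only the cancel itself is shown):

	/*
	 * Sketch only: make sure the busyness ping cannot run once the GT
	 * has parked. Must not be called from the worker itself, or it
	 * would wait on its own completion.
	 */
	static void sketch_guc_busyness_park(struct intel_guc *guc)
	{
		cancel_delayed_work_sync(&guc->timestamp.work);
	}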

>other info that might help us debug this:
><4> [300.218487] Chain exists of:
>  fs_reclaim --> &gt->reset.mutex --> (work_completion)(&(&guc->timestamp.work)->work)
><4> [300.218495]  Possible unsafe locking scenario:
><4> [300.218497]        CPU0                    CPU1
><4> [300.218499]        ----                    ----
><4> [300.218501]   lock((work_completion)(&(&guc->timestamp.work)->work));
><4> [300.218505]                                lock(&gt->reset.mutex);
><4> [300.218509]                                lock((work_completion)(&(&guc->timestamp.work)->work));
><4> [300.218512]   lock(fs_reclaim);
><4> [300.218515]
> *** DEADLOCK ***

Still looking into it, could use some help with the above questions.
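
If the reset lock does indeed need to sit inside the rpm get as you
suggest, the worker would look roughly like this. This is only a sketch
of the locking order (requeuing and error handling omitted, untested),
reusing the names from the quoted patch:

	static void guc_timestamp_ping(struct work_struct *wrk)
	{
		struct intel_guc *guc = container_of(wrk, typeof(*guc),
						     timestamp.work.work);
		struct intel_gt *gt = guc_to_gt(guc);
		intel_wakeref_t wakeref;
		unsigned long flags;
		int srcu, ret;

		/*
		 * Take the wakeref first so that any runtime resume work
		 * (and its allocations) happens outside the reset srcu lock.
		 */
		wakeref = intel_runtime_pm_get(&gt->i915->runtime_pm);

		ret = intel_gt_reset_trylock(gt, &srcu);
		if (ret)
			goto out_rpm;

		spin_lock_irqsave(&guc->timestamp.lock, flags);
		__update_guc_busyness_stats(guc);
		spin_unlock_irqrestore(&guc->timestamp.lock, flags);

		intel_gt_reset_unlock(gt, srcu);
	out_rpm:
		intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
	}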

Thanks,
Umesh
>
>Regards,
>
>Tvrtko

^ permalink raw reply	[flat|nested] 16+ messages in thread

end of thread, other threads:[~2022-10-22  0:21 UTC | newest]

Thread overview: 16+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-10-27  0:48 [PATCH 1/2] drm/i915/pmu: Add a name to the execlists stats Umesh Nerlige Ramappa
2021-10-27  0:48 ` [Intel-gfx] " Umesh Nerlige Ramappa
2021-10-27  0:48 ` [PATCH 2/2] drm/i915/pmu: Connect engine busyness stats from GuC to pmu Umesh Nerlige Ramappa
2021-10-27  0:48   ` [Intel-gfx] " Umesh Nerlige Ramappa
2021-10-27 20:02   ` Matthew Brost
2021-10-27 20:02     ` [Intel-gfx] " Matthew Brost
2021-10-30  0:40   ` Umesh Nerlige Ramappa
2022-10-21  8:42   ` Tvrtko Ursulin
2022-10-21  8:42     ` [Intel-gfx] " Tvrtko Ursulin
2022-10-22  0:21     ` Umesh Nerlige Ramappa
2022-10-22  0:21       ` [Intel-gfx] " Umesh Nerlige Ramappa
2021-10-27  0:58 ` [Intel-gfx] ✗ Fi.CI.SPARSE: warning for series starting with [1/2] drm/i915/pmu: Add a name to the execlists stats Patchwork
2021-10-27  1:27 ` [Intel-gfx] ✓ Fi.CI.BAT: success " Patchwork
2021-10-27  2:49 ` [Intel-gfx] ✗ Fi.CI.IGT: failure " Patchwork
2021-10-27 18:56 ` [PATCH 1/2] " Matthew Brost
2021-10-27 18:56   ` [Intel-gfx] " Matthew Brost
