All of lore.kernel.org
 help / color / mirror / Atom feed
From: Sagar Arun Kamble <sagar.a.kamble@intel.com>
To: intel-gfx@lists.freedesktop.org
Cc: Sourab Gupta <sourab.gupta@intel.com>
Subject: [PATCH 12/14] drm/i915: Extract raw GPU timestamps from OA reports to forward in perf samples
Date: Thu,  7 Sep 2017 15:36:12 +0530	[thread overview]
Message-ID: <1504778774-18117-13-git-send-email-sagar.a.kamble@intel.com> (raw)
In-Reply-To: <1504778774-18117-1-git-send-email-sagar.a.kamble@intel.com>

From: Sourab Gupta <sourab.gupta@intel.com>

The OA reports contain the least significant 32 bits of the gpu timestamp.
This patch enables retrieval of the timestamp field from OA reports, so it
can be forwarded as a 64-bit raw gpu timestamp in the perf samples.

Signed-off-by: Sourab Gupta <sourab.gupta@intel.com>
Signed-off-by: Sagar Arun Kamble <sagar.a.kamble@intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h  |  1 +
 drivers/gpu/drm/i915/i915_perf.c | 48 ++++++++++++++++++++++++++++++----------
 drivers/gpu/drm/i915/i915_reg.h  |  4 ++++
 3 files changed, 41 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2d5f20a..d9f12a5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2739,6 +2739,7 @@ struct drm_i915_private {
 			u32 ctx_flexeu0_offset;
 			u32 n_pending_periodic_samples;
 			u32 pending_periodic_ts;
+			u64 last_gpu_ts;
 
 			/**
 			 * The RPT_ID/reason field for Gen8+ includes a bit
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 8243246..3a72705 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1121,6 +1121,26 @@ static int append_perf_sample(struct i915_perf_stream *stream,
 }
 
 /**
+ * get_gpu_ts_from_oa_report - Retrieve absolute gpu timestamp from OA report
+ *
+ * Note: We are assuming that we're updating last_gpu_ts frequently enough so
+ * that it's never possible to see multiple overflows before we compare
+ * sample_ts to last_gpu_ts. Since this is a significantly long duration
+ * (~6 min for an 80 ns timestamp base), we can safely assume so.
+ */
+static u64 get_gpu_ts_from_oa_report(struct drm_i915_private *dev_priv,
+					const u8 *report)
+{
+	u32 sample_ts = *(u32 *)(report + 4);
+	u32 delta;
+
+	delta = sample_ts - (u32)dev_priv->perf.oa.last_gpu_ts;
+	dev_priv->perf.oa.last_gpu_ts += delta;
+
+	return dev_priv->perf.oa.last_gpu_ts;
+}
+
+/**
  * append_oa_buffer_sample - Copies single periodic OA report into userspace
  * read() buffer.
  * @stream: An i915-perf stream opened for OA metrics
@@ -1152,11 +1172,8 @@ static int append_oa_buffer_sample(struct i915_perf_stream *stream,
 	if (sample_flags & SAMPLE_TAG)
 		data.tag = stream->last_tag;
 
-	/* TODO: Derive timestamp from OA report,
-	 * after scaling with the ts base
-	 */
 	if (sample_flags & SAMPLE_TS)
-		data.ts = 0;
+		data.ts = get_gpu_ts_from_oa_report(dev_priv, report);
 
 	if (sample_flags & SAMPLE_OA_REPORT)
 		data.report = report;
@@ -1730,6 +1747,7 @@ static int append_cs_buffer_sample(struct i915_perf_stream *stream,
 	struct drm_i915_private *dev_priv = stream->dev_priv;
 	struct i915_perf_sample_data data = { 0 };
 	u32 sample_flags = stream->sample_flags;
+	u64 gpu_ts = 0;
 	int ret = 0;
 
 	if (sample_flags & SAMPLE_OA_REPORT) {
@@ -1745,6 +1763,9 @@ static int append_cs_buffer_sample(struct i915_perf_stream *stream,
 						 sample_ts, U32_MAX);
 		if (ret)
 			return ret;
+
+		if (sample_flags & SAMPLE_TS)
+			gpu_ts = get_gpu_ts_from_oa_report(dev_priv, report);
 	}
 
 	if (sample_flags & SAMPLE_OA_SOURCE)
@@ -1783,16 +1804,13 @@ static int append_cs_buffer_sample(struct i915_perf_stream *stream,
 	}
 
 	if (sample_flags & SAMPLE_TS) {
-		/* For RCS, if OA samples are also being collected, derive the
-		 * timestamp from OA report, after scaling with the TS base.
+		/* If OA sampling is enabled, derive the ts from OA report.
 		 * Else, forward the timestamp collected via command stream.
 		 */
-		/* TODO: derive the timestamp from OA report */
-		if (sample_flags & SAMPLE_OA_REPORT)
-			data.ts = 0;
-		else
-			data.ts = *(u64 *) (stream->cs_buffer.vaddr +
+		if (!(sample_flags & SAMPLE_OA_REPORT))
+			gpu_ts = *(u64 *) (stream->cs_buffer.vaddr +
 					   node->ts_offset);
+		data.ts = gpu_ts;
 	}
 
 	return append_perf_sample(stream, buf, count, offset, &data);
@@ -2959,9 +2977,15 @@ static void i915_perf_stream_enable(struct i915_perf_stream *stream)
 {
 	struct drm_i915_private *dev_priv = stream->dev_priv;
 
-	if (stream->sample_flags & SAMPLE_OA_REPORT)
+	if (stream->sample_flags & SAMPLE_OA_REPORT) {
 		dev_priv->perf.oa.ops.oa_enable(dev_priv);
 
+		if (stream->sample_flags & SAMPLE_TS)
+			dev_priv->perf.oa.last_gpu_ts =
+				I915_READ64_2x32(GT_TIMESTAMP_COUNT,
+					GT_TIMESTAMP_COUNT_UDW);
+	}
+
 	if (stream->cs_mode || dev_priv->perf.oa.periodic)
 		hrtimer_start(&dev_priv->perf.poll_check_timer,
 			      ns_to_ktime(POLL_PERIOD),
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index a24d391..7958a15 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -730,6 +730,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define PS_DEPTH_COUNT                  _MMIO(0x2350)
 #define PS_DEPTH_COUNT_UDW		_MMIO(0x2350 + 4)
 
+/* Timestamp count register */
+#define GT_TIMESTAMP_COUNT		_MMIO(0x2358)
+#define GT_TIMESTAMP_COUNT_UDW		_MMIO(0x2358 + 4)
+
 /* There are the 4 64-bit counter registers, one for each stream output */
 #define GEN7_SO_NUM_PRIMS_WRITTEN(n)		_MMIO(0x5200 + (n) * 8)
 #define GEN7_SO_NUM_PRIMS_WRITTEN_UDW(n)	_MMIO(0x5200 + (n) * 8 + 4)
-- 
1.9.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

  parent reply	other threads:[~2017-09-07 10:03 UTC|newest]

Thread overview: 18+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2017-09-07 10:06 [PATCH 00/14] i915 perf support for command stream based OA, GPU and workload metrics capture Sagar Arun Kamble
2017-09-07 10:06 ` [PATCH 01/14] drm/i915: Add ctx getparam ioctl parameter to retrieve ctx unique id Sagar Arun Kamble
2017-09-07 10:06 ` [PATCH 02/14] drm/i915: Expose OA sample source to userspace Sagar Arun Kamble
2017-09-07 10:06 ` [PATCH 03/14] drm/i915: Framework for capturing command stream based OA reports Sagar Arun Kamble
2017-09-07 10:06 ` [PATCH 04/14] drm/i915: Define CTX_ID property for perf sampling Sagar Arun Kamble
2017-09-07 10:06 ` [PATCH 05/14] drm/i915: Flush periodic samples, in case of no pending CS sample requests Sagar Arun Kamble
2017-09-07 10:06 ` [PATCH 06/14] drm/i915: Inform userspace about command stream OA buf overflow Sagar Arun Kamble
2017-09-07 10:06 ` [PATCH 07/14] drm/i915: Populate ctx ID for periodic OA reports Sagar Arun Kamble
2017-09-07 10:06 ` [PATCH 08/14] drm/i915: Add support for having pid output with OA report Sagar Arun Kamble
2017-09-07 10:06 ` [PATCH 09/14] drm/i915: Add support for emitting execbuffer tags through OA counter reports Sagar Arun Kamble
2017-09-07 10:06 ` [PATCH 10/14] drm/i915: Link perf stream structures with Engines Sagar Arun Kamble
2017-09-07 10:06 ` [PATCH 11/14] drm/i915: Add support for collecting timestamps on all gpu engines Sagar Arun Kamble
2017-09-07 10:06 ` Sagar Arun Kamble [this message]
2017-09-07 10:06 ` [PATCH 13/14] drm/i915: Async check for streams data availability with hrtimer rescheduling Sagar Arun Kamble
2017-09-07 10:06 ` [PATCH 14/14] drm/i915: Support for capturing MMIO register values Sagar Arun Kamble
2017-09-07 11:01 ` ✓ Fi.CI.BAT: success for i915 perf support for command stream based OA, GPU and workload metrics capture (rev3) Patchwork
2017-09-07 14:16 ` ✗ Fi.CI.IGT: failure " Patchwork
  -- strict thread matches above, loose matches on Subject: below --
2017-08-28  9:52 [PATCH 00/14] i915 perf support for command stream based OA, GPU and workload metrics capture Sagar Arun Kamble
2017-08-28  9:52 ` [PATCH 12/14] drm/i915: Extract raw GPU timestamps from OA reports to forward in perf samples Sagar Arun Kamble

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1504778774-18117-13-git-send-email-sagar.a.kamble@intel.com \
    --to=sagar.a.kamble@intel.com \
    --cc=intel-gfx@lists.freedesktop.org \
    --cc=sourab.gupta@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.