From: Chris Wilson <chris@chris-wilson.co.uk>
To: intel-gfx@lists.freedesktop.org
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Subject: [Intel-gfx] [PATCH 36/39] drm/i915/selftests: Confirm RING_TIMESTAMP / CTX_TIMESTAMP share a clock
Date: Wed, 26 Aug 2020 14:28:08 +0100
Message-ID: <20200826132811.17577-36-chris@chris-wilson.co.uk>
In-Reply-To: <20200826132811.17577-1-chris@chris-wilson.co.uk>

We assume that both timestamps are driven off the same clock [reported
to userspace as I915_PARAM_CS_TIMESTAMP_FREQUENCY]. Verify that this is
so by reading the timestamp registers around a busywait (on an otherwise
idle engine so there should be no preemptions).

v2: Icelake (but neither ehl nor tgl) appears to use a fixed 80ns interval
for CTX_TIMESTAMP, and only for CTX_TIMESTAMP. As far as I can tell, this
behaviour is undocumented.
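
As a rough worked example (assuming a nominal 12 MHz CS clock): the
100us busywait below should advance both RING_TIMESTAMP and
CTX_TIMESTAMP by roughly 1200 ticks, and the selftest only complains if
the two deltas (or the ring delta versus walltime) disagree by more
than a 3:4 ratio, which should comfortably absorb sampling jitter.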

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
---
 drivers/gpu/drm/i915/gt/selftest_engine_pm.c | 202 +++++++++++++++++++
 1 file changed, 202 insertions(+)

diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
index b08fc5390e8a..5fcbadc8d4f1 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
@@ -4,13 +4,214 @@
  * Copyright © 2018 Intel Corporation
  */
 
+#include <linux/sort.h>
+
 #include "i915_selftest.h"
+#include "intel_gt_clock_utils.h"
 #include "selftest_engine.h"
 #include "selftest_engine_heartbeat.h"
 #include "selftests/igt_atomic.h"
 #include "selftests/igt_flush_test.h"
 #include "selftests/igt_spinner.h"
 
+#define COUNT 5
+
+static int cmp_u64(const void *A, const void *B)
+{
+	const u64 *a = A, *b = B;
+
+	return *a < *b ? -1 : *a > *b;
+}
+
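+/*
+ * "Trifilter": discard the minimum and maximum of the five samples and
+ * return a weighted average biased towards the median, suppressing
+ * measurement jitter.
+ */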
+static u64 trifilter(u64 *a)
+{
+	sort(a, COUNT, sizeof(*a), cmp_u64, NULL);
+	return (a[1] + 2 * a[2] + a[3]) >> 2;
+}
+
+static u32 *emit_wait(u32 *cs, u32 offset, int op, u32 value)
+{
+	*cs++ = MI_SEMAPHORE_WAIT |
+		MI_SEMAPHORE_GLOBAL_GTT |
+		MI_SEMAPHORE_POLL |
+		op;
+	*cs++ = value;
+	*cs++ = offset;
+	*cs++ = 0;
+
+	return cs;
+}
+
+static u32 *emit_store(u32 *cs, u32 offset, u32 value)
+{
+	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+	*cs++ = offset;
+	*cs++ = 0;
+	*cs++ = value;
+
+	return cs;
+}
+
+static u32 *emit_srm(u32 *cs, i915_reg_t reg, u32 offset)
+{
+	*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+	*cs++ = i915_mmio_reg_offset(reg);
+	*cs++ = offset;
+	*cs++ = 0;
+
+	return cs;
+}
+
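+/* Update a GPU-visible semaphore dword, with a barrier to order the write */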
+static void write_semaphore(u32 *x, u32 value)
+{
+	WRITE_ONCE(*x, value);
+	wmb();
+}
+
+static int __measure_timestamps(struct intel_context *ce,
+				u64 *dt, u64 *d_ring, u64 *d_ctx)
+{
+	struct intel_engine_cs *engine = ce->engine;
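+	/*
+	 * Scratch layout: 5 dwords in the status page, starting at dword
+	 * 1000 (byte offset 4000 in the GGTT). [0]/[4] hold the before/after
+	 * RING_TIMESTAMP samples, [1]/[3] the before/after CTX_TIMESTAMP
+	 * samples, and [2] is the CPU<->GPU handshake semaphore.
+	 */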
+	u32 *sema = memset32(engine->status_page.addr + 1000, 0, 5);
+	u32 offset = i915_ggtt_offset(engine->status_page.vma);
+	struct i915_request *rq;
+	u32 *cs;
+
+	rq = intel_context_create_request(ce);
+	if (IS_ERR(rq))
+		return PTR_ERR(rq);
+
+	cs = intel_ring_begin(rq, 28);
+	if (IS_ERR(cs)) {
+		i915_request_add(rq);
+		return PTR_ERR(cs);
+	}
+
+	/* Signal & wait for start */
+	cs = emit_store(cs, offset + 4008, 1);
+	cs = emit_wait(cs, offset + 4008, MI_SEMAPHORE_SAD_NEQ_SDD, 1);
+
+	cs = emit_srm(cs, RING_TIMESTAMP(engine->mmio_base), offset + 4000);
+	cs = emit_srm(cs, RING_CTX_TIMESTAMP(engine->mmio_base), offset + 4004);
+
+	/* Busy wait */
+	cs = emit_wait(cs, offset + 4008, MI_SEMAPHORE_SAD_EQ_SDD, 1);
+
+	cs = emit_srm(cs, RING_TIMESTAMP(engine->mmio_base), offset + 4016);
+	cs = emit_srm(cs, RING_CTX_TIMESTAMP(engine->mmio_base), offset + 4012);
+
+	intel_ring_advance(rq, cs);
+	i915_request_get(rq);
+	i915_request_add(rq);
+	intel_engine_flush_submission(engine);
+
+	/* Wait for the request to start executing; it will then wait for us */
+	while (READ_ONCE(sema[2]) == 0)
+		cpu_relax();
+
+	/* Run the request for 100us, sampling timestamps before/after */
+	preempt_disable();
+	*dt = ktime_get_raw_fast_ns();
+	write_semaphore(&sema[2], 0);
+	udelay(100);
+	write_semaphore(&sema[2], 1);
+	*dt = ktime_get_raw_fast_ns() - *dt;
+	preempt_enable();
+
+	if (i915_request_wait(rq, 0, HZ / 2) < 0) {
+		i915_request_put(rq);
+		return -ETIME;
+	}
+	i915_request_put(rq);
+
+	pr_debug("%s CTX_TIMESTAMP: [%x, %x], RING_TIMESTAMP: [%x, %x]\n",
+		 engine->name, sema[1], sema[3], sema[0], sema[4]);
+
+	*d_ctx = sema[3] - sema[1];
+	*d_ring = sema[4] - sema[0];
+	return 0;
+}
+
+static int __live_engine_timestamps(struct intel_engine_cs *engine)
+{
+	u64 s_ring[COUNT], s_ctx[COUNT], st[COUNT], d_ring, d_ctx, dt;
+	struct intel_context *ce;
+	int i, err = 0;
+
+	ce = intel_context_create(engine);
+	if (IS_ERR(ce))
+		return PTR_ERR(ce);
+
+	for (i = 0; i < COUNT; i++) {
+		err = __measure_timestamps(ce, &st[i], &s_ring[i], &s_ctx[i]);
+		if (err)
+			break;
+	}
+	intel_context_put(ce);
+	if (err)
+		return err;
+
+	dt = trifilter(st);
+	d_ring = trifilter(s_ring);
+	d_ctx = trifilter(s_ctx);
+
+	pr_info("%s elapsed:%lldns, CTX_TIMESTAMP:%dns, RING_TIMESTAMP:%dns\n",
+		engine->name, dt,
+		intel_gt_clock_interval_to_ns(engine->gt, d_ctx),
+		intel_gt_clock_interval_to_ns(engine->gt, d_ring));
+
+	d_ring = intel_gt_clock_interval_to_ns(engine->gt, d_ring);
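+	/* Insist the ring delta and walltime agree to within a 3:4 ratio */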
+	if (3 * dt > 4 * d_ring || 4 * dt < 3 * d_ring) {
+		pr_err("%s Mismatch between ring timestamp and walltime!\n",
+		       engine->name);
+		return -EINVAL;
+	}
+
+	d_ring = trifilter(s_ring);
+	d_ctx = trifilter(s_ctx);
+
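+	/*
+	 * Cross-multiply: scale each delta by the other timer's tick
+	 * frequency so the two products share a common unit before
+	 * comparing them.
+	 */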
+	d_ctx *= RUNTIME_INFO(engine->i915)->cs_timestamp_frequency_hz;
+	if (IS_ICELAKE(engine->i915))
+		d_ring *= 12500000; /* Fixed 80ns for icl ctx timestamp? */
+	else
+		d_ring *= RUNTIME_INFO(engine->i915)->cs_timestamp_frequency_hz;
+
+	if (3 * d_ctx > 4 * d_ring || 4 * d_ctx < 3 * d_ring) {
+		pr_err("%s Mismatch between ring and context timestamps!\n",
+		       engine->name);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int live_engine_timestamps(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+
+	/*
+	 * Check that CS_TIMESTAMP / CTX_TIMESTAMP are in sync, i.e. share
+	 * the same CS clock.
+	 */
+
+	if (INTEL_GEN(gt->i915) < 8)
+		return 0;
+
+	for_each_engine(engine, gt, id) {
+		int err;
+
+		st_engine_heartbeat_disable(engine);
+		err = __live_engine_timestamps(engine);
+		st_engine_heartbeat_enable(engine);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
 static int live_engine_busy_stats(void *arg)
 {
 	struct intel_gt *gt = arg;
@@ -177,6 +378,7 @@ static int live_engine_pm(void *arg)
 int live_engine_pm_selftests(struct intel_gt *gt)
 {
 	static const struct i915_subtest tests[] = {
+		SUBTEST(live_engine_timestamps),
 		SUBTEST(live_engine_busy_stats),
 		SUBTEST(live_engine_pm),
 	};
-- 
2.20.1
