* [PATCH] drm/i915: Hold rpm wakeref for printing the engine's register state
@ 2018-02-12 10:24 Chris Wilson
2018-02-12 10:47 ` ✓ Fi.CI.BAT: success for " Patchwork
` (2 more replies)
0 siblings, 3 replies; 5+ messages in thread
From: Chris Wilson @ 2018-02-12 10:24 UTC (permalink / raw)
To: intel-gfx; +Cc: Mika Kuoppala
When dumping the engine, we print out the current register values. This
requires the rpm wakeref. If the device is asleep, we can assume the
engine is asleep (and the register state is uninteresting) so skip and
only acquire the rpm wakeref if the device is already awake.
Reported-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
---
drivers/gpu/drm/i915/intel_engine_cs.c | 162 ++++++++++++++++++--------------
drivers/gpu/drm/i915/intel_ringbuffer.h | 6 +-
2 files changed, 94 insertions(+), 74 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 3efc589a7f37..2df9a2d038ee 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -691,7 +691,7 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
engine->context_unpin(engine, engine->i915->kernel_context);
}
-u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
+u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
u64 acthd;
@@ -707,7 +707,7 @@ u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
return acthd;
}
-u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine)
+u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
u64 bbaddr;
@@ -1705,73 +1705,20 @@ static void hexdump(struct drm_printer *m, const void *buf, size_t len)
}
}
-void intel_engine_dump(struct intel_engine_cs *engine,
- struct drm_printer *m,
- const char *header, ...)
+static void intel_engine_print_registers(const struct intel_engine_cs *engine,
+ struct drm_printer *m)
{
- struct intel_breadcrumbs * const b = &engine->breadcrumbs;
- const struct intel_engine_execlists * const execlists = &engine->execlists;
- struct i915_gpu_error * const error = &engine->i915->gpu_error;
struct drm_i915_private *dev_priv = engine->i915;
- struct drm_i915_gem_request *rq;
- struct rb_node *rb;
- char hdr[80];
+ const struct intel_engine_execlists * const execlists =
+ &engine->execlists;
u64 addr;
- if (header) {
- va_list ap;
-
- va_start(ap, header);
- drm_vprintf(m, header, &ap);
- va_end(ap);
- }
-
- if (i915_terminally_wedged(&engine->i915->gpu_error))
- drm_printf(m, "*** WEDGED ***\n");
-
- drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n",
- intel_engine_get_seqno(engine),
- intel_engine_last_submit(engine),
- engine->hangcheck.seqno,
- jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
- engine->timeline->inflight_seqnos);
- drm_printf(m, "\tReset count: %d (global %d)\n",
- i915_reset_engine_count(error, engine),
- i915_reset_count(error));
-
- rcu_read_lock();
-
- drm_printf(m, "\tRequests:\n");
-
- rq = list_first_entry(&engine->timeline->requests,
- struct drm_i915_gem_request, link);
- if (&rq->link != &engine->timeline->requests)
- print_request(m, rq, "\t\tfirst ");
-
- rq = list_last_entry(&engine->timeline->requests,
- struct drm_i915_gem_request, link);
- if (&rq->link != &engine->timeline->requests)
- print_request(m, rq, "\t\tlast ");
-
- rq = i915_gem_find_active_request(engine);
- if (rq) {
- print_request(m, rq, "\t\tactive ");
- drm_printf(m,
- "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
- rq->head, rq->postfix, rq->tail,
- rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
- rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
- }
-
- drm_printf(m, "\tRING_START: 0x%08x [0x%08x]\n",
- I915_READ(RING_START(engine->mmio_base)),
- rq ? i915_ggtt_offset(rq->ring->vma) : 0);
- drm_printf(m, "\tRING_HEAD: 0x%08x [0x%08x]\n",
- I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR,
- rq ? rq->ring->head : 0);
- drm_printf(m, "\tRING_TAIL: 0x%08x [0x%08x]\n",
- I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR,
- rq ? rq->ring->tail : 0);
+ drm_printf(m, "\tRING_START: 0x%08x\n",
+ I915_READ(RING_START(engine->mmio_base)));
+ drm_printf(m, "\tRING_HEAD: 0x%08x\n",
+ I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR);
+ drm_printf(m, "\tRING_TAIL: 0x%08x\n",
+ I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR);
drm_printf(m, "\tRING_CTL: 0x%08x%s\n",
I915_READ(RING_CTL(engine->mmio_base)),
I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
@@ -1780,6 +1727,11 @@ void intel_engine_dump(struct intel_engine_cs *engine,
I915_READ(RING_MI_MODE(engine->mmio_base)),
I915_READ(RING_MI_MODE(engine->mmio_base)) & (MODE_IDLE) ? " [idle]" : "");
}
+
+ if (INTEL_GEN(dev_priv) >= 6) {
+ drm_printf(m, "\tRING_IMR: %08x\n", I915_READ_IMR(engine));
+ }
+
if (HAS_LEGACY_SEMAPHORES(dev_priv)) {
drm_printf(m, "\tSYNC_0: 0x%08x\n",
I915_READ(RING_SYNC_0(engine->mmio_base)));
@@ -1790,8 +1742,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
I915_READ(RING_SYNC_2(engine->mmio_base)));
}
- rcu_read_unlock();
-
addr = intel_engine_get_active_head(engine);
drm_printf(m, "\tACTHD: 0x%08x_%08x\n",
upper_32_bits(addr), lower_32_bits(addr));
@@ -1853,10 +1803,13 @@ void intel_engine_dump(struct intel_engine_cs *engine,
rcu_read_lock();
for (idx = 0; idx < execlists_num_ports(execlists); idx++) {
+ struct drm_i915_gem_request *rq;
unsigned int count;
rq = port_unpack(&execlists->port[idx], &count);
if (rq) {
+ char hdr[80];
+
snprintf(hdr, sizeof(hdr),
"\t\tELSP[%d] count=%d, rq: ",
idx, count);
@@ -1875,6 +1828,77 @@ void intel_engine_dump(struct intel_engine_cs *engine,
drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
I915_READ(RING_PP_DIR_DCLV(engine)));
}
+}
+
+void intel_engine_dump(struct intel_engine_cs *engine,
+ struct drm_printer *m,
+ const char *header, ...)
+{
+ struct intel_breadcrumbs * const b = &engine->breadcrumbs;
+ const struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_gpu_error * const error = &engine->i915->gpu_error;
+ struct drm_i915_gem_request *rq;
+ struct rb_node *rb;
+
+ if (header) {
+ va_list ap;
+
+ va_start(ap, header);
+ drm_vprintf(m, header, &ap);
+ va_end(ap);
+ }
+
+ if (i915_terminally_wedged(&engine->i915->gpu_error))
+ drm_printf(m, "*** WEDGED ***\n");
+
+ drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n",
+ intel_engine_get_seqno(engine),
+ intel_engine_last_submit(engine),
+ engine->hangcheck.seqno,
+ jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
+ engine->timeline->inflight_seqnos);
+ drm_printf(m, "\tReset count: %d (global %d)\n",
+ i915_reset_engine_count(error, engine),
+ i915_reset_count(error));
+
+ rcu_read_lock();
+
+ drm_printf(m, "\tRequests:\n");
+
+ rq = list_first_entry(&engine->timeline->requests,
+ struct drm_i915_gem_request, link);
+ if (&rq->link != &engine->timeline->requests)
+ print_request(m, rq, "\t\tfirst ");
+
+ rq = list_last_entry(&engine->timeline->requests,
+ struct drm_i915_gem_request, link);
+ if (&rq->link != &engine->timeline->requests)
+ print_request(m, rq, "\t\tlast ");
+
+ rq = i915_gem_find_active_request(engine);
+ if (rq) {
+ print_request(m, rq, "\t\tactive ");
+ drm_printf(m,
+ "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
+ rq->head, rq->postfix, rq->tail,
+ rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
+ rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
+ drm_printf(m, "\t\tring->start: 0x%08x\n",
+ i915_ggtt_offset(rq->ring->vma));
+ drm_printf(m, "\t\tring->head: 0x%08x\n",
+ rq->ring->head);
+ drm_printf(m, "\t\tring->tail: 0x%08x\n",
+ rq->ring->tail);
+ }
+
+ rcu_read_unlock();
+
+ if (intel_runtime_pm_get_if_in_use(engine->i915)) {
+ intel_engine_print_registers(engine, m);
+ intel_runtime_pm_put(engine->i915);
+ } else {
+ drm_printf(m, "Device is asleep; skipping register dump\n");
+ }
spin_lock_irq(&engine->timeline->lock);
list_for_each_entry(rq, &engine->timeline->requests, link)
@@ -1897,10 +1921,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
}
spin_unlock_irq(&b->rb_lock);
- if (INTEL_GEN(dev_priv) >= 6) {
- drm_printf(m, "\tRING_IMR: %08x\n", I915_READ_IMR(engine));
- }
-
drm_printf(m, "IRQ? 0x%lx (breadcrumbs? %s) (execlists? %s)\n",
engine->irq_posted,
yesno(test_bit(ENGINE_IRQ_BREADCRUMB,
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 8f1a4badf812..51523ad049de 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -659,7 +659,7 @@ intel_engine_flag(const struct intel_engine_cs *engine)
}
static inline u32
-intel_read_status_page(struct intel_engine_cs *engine, int reg)
+intel_read_status_page(const struct intel_engine_cs *engine, int reg)
{
/* Ensure that the compiler doesn't optimize away the load. */
return READ_ONCE(engine->status_page.page_addr[reg]);
@@ -817,8 +817,8 @@ int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
-u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
-u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine);
+u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
+u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);
static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
{
--
2.16.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 5+ messages in thread
* ✓ Fi.CI.BAT: success for drm/i915: Hold rpm wakeref for printing the engine's register state
2018-02-12 10:24 [PATCH] drm/i915: Hold rpm wakeref for printing the engine's register state Chris Wilson
@ 2018-02-12 10:47 ` Patchwork
2018-02-12 12:45 ` ✓ Fi.CI.IGT: " Patchwork
2018-02-12 13:28 ` [PATCH] " Mika Kuoppala
2 siblings, 0 replies; 5+ messages in thread
From: Patchwork @ 2018-02-12 10:47 UTC (permalink / raw)
To: Chris Wilson; +Cc: intel-gfx
== Series Details ==
Series: drm/i915: Hold rpm wakeref for printing the engine's register state
URL : https://patchwork.freedesktop.org/series/38077/
State : success
== Summary ==
Series 38077v1 drm/i915: Hold rpm wakeref for printing the engine's register state
https://patchwork.freedesktop.org/api/1.0/series/38077/revisions/1/mbox/
Test debugfs_test:
Subgroup read_all_entries:
incomplete -> PASS (fi-snb-2520m) fdo#103713
Test kms_pipe_crc_basic:
Subgroup suspend-read-crc-pipe-a:
dmesg-warn -> PASS (fi-cnl-y3) fdo#103191
fdo#103713 https://bugs.freedesktop.org/show_bug.cgi?id=103713
fdo#103191 https://bugs.freedesktop.org/show_bug.cgi?id=103191
fi-bdw-5557u total:288 pass:267 dwarn:0 dfail:0 fail:0 skip:21 time:419s
fi-bdw-gvtdvm total:288 pass:264 dwarn:0 dfail:0 fail:0 skip:24 time:426s
fi-blb-e6850 total:288 pass:223 dwarn:1 dfail:0 fail:0 skip:64 time:383s
fi-bsw-n3050 total:288 pass:242 dwarn:0 dfail:0 fail:0 skip:46 time:495s
fi-bwr-2160 total:288 pass:183 dwarn:0 dfail:0 fail:0 skip:105 time:289s
fi-bxt-dsi total:288 pass:258 dwarn:0 dfail:0 fail:0 skip:30 time:487s
fi-bxt-j4205 total:288 pass:259 dwarn:0 dfail:0 fail:0 skip:29 time:492s
fi-byt-j1900 total:288 pass:253 dwarn:0 dfail:0 fail:0 skip:35 time:474s
fi-byt-n2820 total:288 pass:249 dwarn:0 dfail:0 fail:0 skip:39 time:468s
fi-cfl-s2 total:288 pass:262 dwarn:0 dfail:0 fail:0 skip:26 time:562s
fi-cnl-y3 total:288 pass:262 dwarn:0 dfail:0 fail:0 skip:26 time:580s
fi-elk-e7500 total:288 pass:229 dwarn:0 dfail:0 fail:0 skip:59 time:420s
fi-gdg-551 total:288 pass:179 dwarn:0 dfail:0 fail:1 skip:108 time:284s
fi-glk-1 total:288 pass:260 dwarn:0 dfail:0 fail:0 skip:28 time:514s
fi-hsw-4770 total:288 pass:261 dwarn:0 dfail:0 fail:0 skip:27 time:390s
fi-ilk-650 total:288 pass:228 dwarn:0 dfail:0 fail:0 skip:60 time:413s
fi-ivb-3520m total:288 pass:259 dwarn:0 dfail:0 fail:0 skip:29 time:466s
fi-ivb-3770 total:288 pass:255 dwarn:0 dfail:0 fail:0 skip:33 time:414s
fi-kbl-7500u total:288 pass:263 dwarn:1 dfail:0 fail:0 skip:24 time:463s
fi-kbl-7560u total:288 pass:269 dwarn:0 dfail:0 fail:0 skip:19 time:495s
fi-kbl-r total:288 pass:261 dwarn:0 dfail:0 fail:0 skip:27 time:501s
fi-pnv-d510 total:288 pass:222 dwarn:1 dfail:0 fail:0 skip:65 time:596s
fi-skl-6260u total:288 pass:268 dwarn:0 dfail:0 fail:0 skip:20 time:434s
fi-skl-6600u total:288 pass:261 dwarn:0 dfail:0 fail:0 skip:27 time:512s
fi-skl-6700hq total:288 pass:262 dwarn:0 dfail:0 fail:0 skip:26 time:528s
fi-skl-6700k2 total:288 pass:264 dwarn:0 dfail:0 fail:0 skip:24 time:485s
fi-skl-6770hq total:288 pass:268 dwarn:0 dfail:0 fail:0 skip:20 time:474s
fi-skl-guc total:288 pass:260 dwarn:0 dfail:0 fail:0 skip:28 time:416s
fi-skl-gvtdvm total:288 pass:265 dwarn:0 dfail:0 fail:0 skip:23 time:433s
fi-snb-2520m total:288 pass:248 dwarn:0 dfail:0 fail:0 skip:40 time:530s
fi-snb-2600 total:288 pass:248 dwarn:0 dfail:0 fail:0 skip:40 time:403s
Blacklisted hosts:
fi-glk-dsi total:215 pass:116 dwarn:0 dfail:2 fail:0 skip:96
28dc2a502747536dbf76fa5582d682e37f089b93 drm-tip: 2018y-02m-12d-09h-02m-17s UTC integration manifest
80ec379dac01 drm/i915: Hold rpm wakeref for printing the engine's register state
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_7978/issues.html
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 5+ messages in thread
* ✓ Fi.CI.IGT: success for drm/i915: Hold rpm wakeref for printing the engine's register state
2018-02-12 10:24 [PATCH] drm/i915: Hold rpm wakeref for printing the engine's register state Chris Wilson
2018-02-12 10:47 ` ✓ Fi.CI.BAT: success for " Patchwork
@ 2018-02-12 12:45 ` Patchwork
2018-02-12 13:28 ` [PATCH] " Mika Kuoppala
2 siblings, 0 replies; 5+ messages in thread
From: Patchwork @ 2018-02-12 12:45 UTC (permalink / raw)
To: Chris Wilson; +Cc: intel-gfx
== Series Details ==
Series: drm/i915: Hold rpm wakeref for printing the engine's register state
URL : https://patchwork.freedesktop.org/series/38077/
State : success
== Summary ==
Test perf:
Subgroup oa-exponents:
pass -> FAIL (shard-apl) fdo#102254
Test gem_softpin:
Subgroup noreloc-s3:
dmesg-warn -> PASS (shard-snb) fdo#103375 +1
Test perf_pmu:
Subgroup rc6:
skip -> PASS (shard-hsw)
Test kms_cursor_legacy:
Subgroup cursor-vs-flip-atomic-transitions:
fail -> PASS (shard-apl) fdo#103355
Subgroup flip-vs-cursor-atomic:
fail -> PASS (shard-hsw) fdo#102670
Test kms_frontbuffer_tracking:
Subgroup fbc-rgb565-draw-blt:
fail -> PASS (shard-apl) fdo#101623
Test pm_rps:
Subgroup min-max-config-loaded:
fail -> PASS (shard-apl) fdo#104060
fdo#102254 https://bugs.freedesktop.org/show_bug.cgi?id=102254
fdo#103375 https://bugs.freedesktop.org/show_bug.cgi?id=103375
fdo#103355 https://bugs.freedesktop.org/show_bug.cgi?id=103355
fdo#102670 https://bugs.freedesktop.org/show_bug.cgi?id=102670
fdo#101623 https://bugs.freedesktop.org/show_bug.cgi?id=101623
fdo#104060 https://bugs.freedesktop.org/show_bug.cgi?id=104060
shard-apl total:3420 pass:1774 dwarn:1 dfail:0 fail:21 skip:1623 time:12470s
shard-hsw total:3444 pass:1760 dwarn:1 dfail:0 fail:11 skip:1671 time:11876s
shard-snb total:3444 pass:1351 dwarn:1 dfail:0 fail:9 skip:2083 time:6638s
Blacklisted hosts:
shard-kbl total:3444 pass:1913 dwarn:1 dfail:0 fail:21 skip:1509 time:9640s
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_7978/shards.html
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 5+ messages in thread
* Re: [PATCH] drm/i915: Hold rpm wakeref for printing the engine's register state
2018-02-12 10:24 [PATCH] drm/i915: Hold rpm wakeref for printing the engine's register state Chris Wilson
2018-02-12 10:47 ` ✓ Fi.CI.BAT: success for " Patchwork
2018-02-12 12:45 ` ✓ Fi.CI.IGT: " Patchwork
@ 2018-02-12 13:28 ` Mika Kuoppala
2018-02-12 13:34 ` Chris Wilson
2 siblings, 1 reply; 5+ messages in thread
From: Mika Kuoppala @ 2018-02-12 13:28 UTC (permalink / raw)
To: Chris Wilson, intel-gfx
Chris Wilson <chris@chris-wilson.co.uk> writes:
> When dumping the engine, we print out the current register values. This
> requires the rpm wakeref. If the device is alseep, we can assume the
> engine is asleep (and the register state is uninteresting) so skip and
> only acquire the rpm wakeref if the device is already awake.
>
> Reported-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> Cc: Mika Kuoppala <mika.kuoppala@intel.com>
> ---
> drivers/gpu/drm/i915/intel_engine_cs.c | 162 ++++++++++++++++++--------------
> drivers/gpu/drm/i915/intel_ringbuffer.h | 6 +-
> 2 files changed, 94 insertions(+), 74 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
> index 3efc589a7f37..2df9a2d038ee 100644
> --- a/drivers/gpu/drm/i915/intel_engine_cs.c
> +++ b/drivers/gpu/drm/i915/intel_engine_cs.c
> @@ -691,7 +691,7 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
> engine->context_unpin(engine, engine->i915->kernel_context);
> }
>
> -u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
> +u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
> {
> struct drm_i915_private *dev_priv = engine->i915;
> u64 acthd;
> @@ -707,7 +707,7 @@ u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
> return acthd;
> }
>
> -u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine)
> +u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
> {
> struct drm_i915_private *dev_priv = engine->i915;
> u64 bbaddr;
> @@ -1705,73 +1705,20 @@ static void hexdump(struct drm_printer *m, const void *buf, size_t len)
> }
> }
>
> -void intel_engine_dump(struct intel_engine_cs *engine,
> - struct drm_printer *m,
> - const char *header, ...)
> +static void intel_engine_print_registers(const struct intel_engine_cs *engine,
> + struct drm_printer *m)
> {
> - struct intel_breadcrumbs * const b = &engine->breadcrumbs;
> - const struct intel_engine_execlists * const execlists = &engine->execlists;
> - struct i915_gpu_error * const error = &engine->i915->gpu_error;
> struct drm_i915_private *dev_priv = engine->i915;
> - struct drm_i915_gem_request *rq;
> - struct rb_node *rb;
> - char hdr[80];
> + const struct intel_engine_execlists * const execlists =
> + &engine->execlists;
> u64 addr;
>
> - if (header) {
> - va_list ap;
> -
> - va_start(ap, header);
> - drm_vprintf(m, header, &ap);
> - va_end(ap);
> - }
> -
> - if (i915_terminally_wedged(&engine->i915->gpu_error))
> - drm_printf(m, "*** WEDGED ***\n");
> -
> - drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n",
> - intel_engine_get_seqno(engine),
> - intel_engine_last_submit(engine),
> - engine->hangcheck.seqno,
> - jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
> - engine->timeline->inflight_seqnos);
> - drm_printf(m, "\tReset count: %d (global %d)\n",
> - i915_reset_engine_count(error, engine),
> - i915_reset_count(error));
> -
> - rcu_read_lock();
> -
> - drm_printf(m, "\tRequests:\n");
> -
> - rq = list_first_entry(&engine->timeline->requests,
> - struct drm_i915_gem_request, link);
> - if (&rq->link != &engine->timeline->requests)
> - print_request(m, rq, "\t\tfirst ");
> -
> - rq = list_last_entry(&engine->timeline->requests,
> - struct drm_i915_gem_request, link);
> - if (&rq->link != &engine->timeline->requests)
> - print_request(m, rq, "\t\tlast ");
> -
> - rq = i915_gem_find_active_request(engine);
> - if (rq) {
> - print_request(m, rq, "\t\tactive ");
> - drm_printf(m,
> - "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
> - rq->head, rq->postfix, rq->tail,
> - rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
> - rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
> - }
> -
> - drm_printf(m, "\tRING_START: 0x%08x [0x%08x]\n",
> - I915_READ(RING_START(engine->mmio_base)),
> - rq ? i915_ggtt_offset(rq->ring->vma) : 0);
> - drm_printf(m, "\tRING_HEAD: 0x%08x [0x%08x]\n",
> - I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR,
> - rq ? rq->ring->head : 0);
> - drm_printf(m, "\tRING_TAIL: 0x%08x [0x%08x]\n",
> - I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR,
> - rq ? rq->ring->tail : 0);
> + drm_printf(m, "\tRING_START: 0x%08x\n",
> + I915_READ(RING_START(engine->mmio_base)));
> + drm_printf(m, "\tRING_HEAD: 0x%08x\n",
> + I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR);
> + drm_printf(m, "\tRING_TAIL: 0x%08x\n",
> + I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR);
> drm_printf(m, "\tRING_CTL: 0x%08x%s\n",
> I915_READ(RING_CTL(engine->mmio_base)),
> I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
> @@ -1780,6 +1727,11 @@ void intel_engine_dump(struct intel_engine_cs *engine,
> I915_READ(RING_MI_MODE(engine->mmio_base)),
> I915_READ(RING_MI_MODE(engine->mmio_base)) & (MODE_IDLE) ? " [idle]" : "");
> }
> +
> + if (INTEL_GEN(dev_priv) >= 6) {
> + drm_printf(m, "\tRING_IMR: %08x\n", I915_READ_IMR(engine));
> + }
> +
> if (HAS_LEGACY_SEMAPHORES(dev_priv)) {
> drm_printf(m, "\tSYNC_0: 0x%08x\n",
> I915_READ(RING_SYNC_0(engine->mmio_base)));
> @@ -1790,8 +1742,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
> I915_READ(RING_SYNC_2(engine->mmio_base)));
> }
>
> - rcu_read_unlock();
> -
> addr = intel_engine_get_active_head(engine);
> drm_printf(m, "\tACTHD: 0x%08x_%08x\n",
> upper_32_bits(addr), lower_32_bits(addr));
> @@ -1853,10 +1803,13 @@ void intel_engine_dump(struct intel_engine_cs *engine,
>
> rcu_read_lock();
> for (idx = 0; idx < execlists_num_ports(execlists); idx++) {
> + struct drm_i915_gem_request *rq;
> unsigned int count;
>
> rq = port_unpack(&execlists->port[idx], &count);
> if (rq) {
> + char hdr[80];
> +
> snprintf(hdr, sizeof(hdr),
> "\t\tELSP[%d] count=%d, rq: ",
> idx, count);
> @@ -1875,6 +1828,77 @@ void intel_engine_dump(struct intel_engine_cs *engine,
> drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
> I915_READ(RING_PP_DIR_DCLV(engine)));
> }
> +}
> +
> +void intel_engine_dump(struct intel_engine_cs *engine,
> + struct drm_printer *m,
> + const char *header, ...)
> +{
> + struct intel_breadcrumbs * const b = &engine->breadcrumbs;
> + const struct intel_engine_execlists * const execlists = &engine->execlists;
> + struct i915_gpu_error * const error = &engine->i915->gpu_error;
> + struct drm_i915_gem_request *rq;
> + struct rb_node *rb;
> +
> + if (header) {
> + va_list ap;
> +
> + va_start(ap, header);
> + drm_vprintf(m, header, &ap);
> + va_end(ap);
> + }
> +
> + if (i915_terminally_wedged(&engine->i915->gpu_error))
> + drm_printf(m, "*** WEDGED ***\n");
> +
> + drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n",
> + intel_engine_get_seqno(engine),
> + intel_engine_last_submit(engine),
> + engine->hangcheck.seqno,
> + jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
> + engine->timeline->inflight_seqnos);
> + drm_printf(m, "\tReset count: %d (global %d)\n",
> + i915_reset_engine_count(error, engine),
> + i915_reset_count(error));
> +
> + rcu_read_lock();
> +
> + drm_printf(m, "\tRequests:\n");
> +
> + rq = list_first_entry(&engine->timeline->requests,
> + struct drm_i915_gem_request, link);
> + if (&rq->link != &engine->timeline->requests)
> + print_request(m, rq, "\t\tfirst ");
> +
> + rq = list_last_entry(&engine->timeline->requests,
> + struct drm_i915_gem_request, link);
> + if (&rq->link != &engine->timeline->requests)
> + print_request(m, rq, "\t\tlast ");
> +
> + rq = i915_gem_find_active_request(engine);
> + if (rq) {
> + print_request(m, rq, "\t\tactive ");
> + drm_printf(m,
> + "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
> + rq->head, rq->postfix, rq->tail,
> + rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
> + rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
> + drm_printf(m, "\t\tring->start: 0x%08x\n",
> + i915_ggtt_offset(rq->ring->vma));
> + drm_printf(m, "\t\tring->head: 0x%08x\n",
> + rq->ring->head);
> + drm_printf(m, "\t\tring->tail: 0x%08x\n",
> + rq->ring->tail);
> + }
> +
> + rcu_read_unlock();
> +
> + if (intel_runtime_pm_get_if_in_use(engine->i915)) {
> + intel_engine_print_registers(engine, m);
> + intel_runtime_pm_put(engine->i915);
> + } else {
> + drm_printf(m, "Device is alseep; skipping register dump\n");
> + }
s/alseep/asleep
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
>
> spin_lock_irq(&engine->timeline->lock);
> list_for_each_entry(rq, &engine->timeline->requests, link)
> @@ -1897,10 +1921,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
> }
> spin_unlock_irq(&b->rb_lock);
>
> - if (INTEL_GEN(dev_priv) >= 6) {
> - drm_printf(m, "\tRING_IMR: %08x\n", I915_READ_IMR(engine));
> - }
> -
> drm_printf(m, "IRQ? 0x%lx (breadcrumbs? %s) (execlists? %s)\n",
> engine->irq_posted,
> yesno(test_bit(ENGINE_IRQ_BREADCRUMB,
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index 8f1a4badf812..51523ad049de 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -659,7 +659,7 @@ intel_engine_flag(const struct intel_engine_cs *engine)
> }
>
> static inline u32
> -intel_read_status_page(struct intel_engine_cs *engine, int reg)
> +intel_read_status_page(const struct intel_engine_cs *engine, int reg)
> {
> /* Ensure that the compiler doesn't optimize away the load. */
> return READ_ONCE(engine->status_page.page_addr[reg]);
> @@ -817,8 +817,8 @@ int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
> int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
> int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
>
> -u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
> -u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine);
> +u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
> +u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);
>
> static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
> {
> --
> 2.16.1
>
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/intel-gfx
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 5+ messages in thread
* Re: [PATCH] drm/i915: Hold rpm wakeref for printing the engine's register state
2018-02-12 13:28 ` [PATCH] " Mika Kuoppala
@ 2018-02-12 13:34 ` Chris Wilson
0 siblings, 0 replies; 5+ messages in thread
From: Chris Wilson @ 2018-02-12 13:34 UTC (permalink / raw)
To: Mika Kuoppala, intel-gfx
Quoting Mika Kuoppala (2018-02-12 13:28:54)
> Chris Wilson <chris@chris-wilson.co.uk> writes:
>
> > When dumping the engine, we print out the current register values. This
> > requires the rpm wakeref. If the device is alseep, we can assume the
> > engine is asleep (and the register state is uninteresting) so skip and
> > only acquire the rpm wakeref if the device is already awake.
> >
> > Reported-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> > Cc: Mika Kuoppala <mika.kuoppala@intel.com>
> > ---
> > + if (intel_runtime_pm_get_if_in_use(engine->i915)) {
> > + intel_engine_print_registers(engine, m);
> > + intel_runtime_pm_put(engine->i915);
> > + } else {
> > + drm_printf(m, "Device is alseep; skipping register dump\n");
> > + }
>
> s/alseep/asleep
>
> Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Done, and both pushed. Now to try Tvrtko's warning patch on the new
baseline. Thanks,
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 5+ messages in thread
end of thread, other threads:[~2018-02-12 13:34 UTC | newest]
Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-02-12 10:24 [PATCH] drm/i915: Hold rpm wakeref for printing the engine's register state Chris Wilson
2018-02-12 10:47 ` ✓ Fi.CI.BAT: success for " Patchwork
2018-02-12 12:45 ` ✓ Fi.CI.IGT: " Patchwork
2018-02-12 13:28 ` [PATCH] " Mika Kuoppala
2018-02-12 13:34 ` Chris Wilson
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.