* [PATCH igt] igt/perf: Busywait for MI_REPORT_PERF_COUNT results
@ 2017-12-08 14:31 Chris Wilson
2017-12-08 15:06 ` ✓ Fi.CI.BAT: success for " Patchwork
` (5 more replies)
0 siblings, 6 replies; 9+ messages in thread
From: Chris Wilson @ 2017-12-08 14:31 UTC (permalink / raw)
To: intel-gfx; +Cc: Matthew Auld
On Haswell, at least, MI_REPORT_PERF_COUNT is not flushed by the
PIPECONTROL surrounding the batch. (In theory, before the breadcrumb is
updated the CPU's view of memory is coherent with the GPU, i.e. all
writes have landed and are visible to userspace. This does not appear to
be the case for MI_REPORT_PERF_COUNT.)
As MI_RPC does not appear to be synchronized with the batch, busyspin for
its completion.
(This has far deeper implications; since it means the GPU can still be
writing to memory after release.)
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
---
tests/perf.c | 93 +++++++++++++++++++++++++++++++++---------------------------
1 file changed, 52 insertions(+), 41 deletions(-)
diff --git a/tests/perf.c b/tests/perf.c
index a161c45d7..8c20fbe09 100644
--- a/tests/perf.c
+++ b/tests/perf.c
@@ -706,47 +706,59 @@ emit_report_perf_count(struct intel_batchbuffer *batch,
}
static uint32_t
-i915_get_one_gpu_timestamp(uint32_t *context_id)
+i915_get_one_gpu_timestamp(void)
{
- drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(drm_fd, 4096);
- drm_intel_context *mi_rpc_ctx = drm_intel_gem_context_create(bufmgr);
- drm_intel_bo *mi_rpc_bo = drm_intel_bo_alloc(bufmgr, "mi_rpc dest bo", 4096, 64);
- struct intel_batchbuffer *mi_rpc_batch = intel_batchbuffer_alloc(bufmgr, devid);
- int ret;
- uint32_t timestamp;
-
- drm_intel_bufmgr_gem_enable_reuse(bufmgr);
-
- if (context_id) {
- ret = drm_intel_gem_context_get_id(mi_rpc_ctx, context_id);
- igt_assert_eq(ret, 0);
- }
-
- igt_assert(mi_rpc_ctx);
- igt_assert(mi_rpc_bo);
- igt_assert(mi_rpc_batch);
-
- ret = drm_intel_bo_map(mi_rpc_bo, true);
- igt_assert_eq(ret, 0);
- memset(mi_rpc_bo->virtual, 0x80, 4096);
- drm_intel_bo_unmap(mi_rpc_bo);
-
- emit_report_perf_count(mi_rpc_batch,
- mi_rpc_bo, /* dst */
- 0, /* dst offset in bytes */
- 0xdeadbeef); /* report ID */
+ struct drm_i915_gem_execbuffer2 execbuf;
+ struct drm_i915_gem_exec_object2 obj[2];
+ struct drm_i915_gem_relocation_entry reloc;
+ uint32_t *ptr, timestamp;
+ struct timespec tv = {};
+ int i;
- intel_batchbuffer_flush_with_context(mi_rpc_batch, mi_rpc_ctx);
+ memset(obj, 0, sizeof(obj));
+ obj[0].handle = gem_create(drm_fd, 4096);
+ ptr = gem_mmap__cpu(drm_fd, obj[0].handle, 0, 4096, PROT_WRITE);
+ memset(ptr, 0x80, 4096);
+ munmap(ptr, 4096);
+
+ obj[1].handle = gem_create(drm_fd, 4096);
+ obj[1].relocs_ptr = to_user_pointer(&reloc);
+ obj[1].relocation_count = 1;
+ ptr = gem_mmap__cpu(drm_fd, obj[1].handle, 0, 4096, PROT_WRITE);
+
+ memset(&reloc, 0, sizeof(reloc));
+ reloc.target_handle = obj[0].handle;
+ reloc.offset = sizeof(uint32_t);
+ reloc.read_domains = I915_GEM_DOMAIN_RENDER;
+ reloc.write_domain = I915_GEM_DOMAIN_RENDER;
+
+ i = 2;
+ ptr[0] = GEN6_MI_REPORT_PERF_COUNT;
+ if (intel_gen(devid) >= 8)
+ ptr[0]++, i++; /* 64b reloc */
+ ptr[i++] = 0xdeadbeef;
+ ptr[i] = MI_BATCH_BUFFER_END;
+ munmap(ptr, 4096);
+
+ memset(&execbuf, 0, sizeof(execbuf));
+ execbuf.buffers_ptr = to_user_pointer(obj);
+ execbuf.buffer_count = 2;
+ execbuf.batch_len = 4096;
+ gem_execbuf(drm_fd, &execbuf);
+ gem_close(drm_fd, obj[1].handle);
- ret = drm_intel_bo_map(mi_rpc_bo, false /* write enable */);
- igt_assert_eq(ret, 0);
- timestamp = ((uint32_t *)mi_rpc_bo->virtual)[1];
- drm_intel_bo_unmap(mi_rpc_bo);
+ /*
+ * MI_REPORT_PERF_COUNT is unserialised, i.e. not flushed by
+ * the PIPECONTROLs surrounding batch execution. Ergo, we must
+ * manually wait.
+ */
+ do {
+ gem_read(drm_fd, obj[0].handle, sizeof(uint32_t),
+ &timestamp, sizeof(timestamp));
+ } while (timestamp == 0x80808080 && !igt_seconds_elapsed(&tv));
+ gem_close(drm_fd, obj[0].handle);
- drm_intel_bo_unreference(mi_rpc_bo);
- intel_batchbuffer_free(mi_rpc_batch);
- drm_intel_gem_context_destroy(mi_rpc_ctx);
- drm_intel_bufmgr_destroy(bufmgr);
+ igt_assert_neq(timestamp, 0x80808080);
return timestamp;
}
@@ -1915,7 +1927,6 @@ test_oa_exponents(void)
uint32_t n_reports = 0;
uint32_t n_idle_reports = 0;
uint32_t n_reads = 0;
- uint32_t context_id;
uint64_t first_timestamp = 0;
bool check_first_timestamp = true;
struct drm_i915_perf_record_header *header;
@@ -1944,7 +1955,7 @@ test_oa_exponents(void)
* first timestamp as way to filter previously
* scheduled work that would have configured
* the OA unit at a different period. */
- first_timestamp = i915_get_one_gpu_timestamp(&context_id);
+ first_timestamp = i915_get_one_gpu_timestamp();
while (n_reads < ARRAY_SIZE(reads) &&
n_reports < ARRAY_SIZE(reports)) {
@@ -2070,8 +2081,8 @@ test_oa_exponents(void)
uint32_t *rpt = NULL, *last = NULL, *last_periodic = NULL;
igt_debug(" > More than 5%% error: avg_ts_delta = %"PRIu64", delta_delta = %"PRIu64", "
- "expected_delta = %"PRIu64", first_timestamp = %"PRIu64" ctx_id=%"PRIu32"\n",
- average_timestamp_delta, delta_delta, expected_timestamp_delta, first_timestamp, context_id);
+ "expected_delta = %"PRIu64", first_timestamp = %"PRIu64"\n",
+ average_timestamp_delta, delta_delta, expected_timestamp_delta, first_timestamp);
for (int i = 0; i < (n_reports - 1); i++) {
/* XXX: calculating with u32 arithmetic to account for overflow */
uint32_t u32_delta =
--
2.15.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 9+ messages in thread
* ✓ Fi.CI.BAT: success for igt/perf: Busywait for MI_REPORT_PERF_COUNT results
2017-12-08 14:31 [PATCH igt] igt/perf: Busywait for MI_REPORT_PERF_COUNT results Chris Wilson
@ 2017-12-08 15:06 ` Patchwork
2017-12-08 15:08 ` [PATCH igt] " Lionel Landwerlin
` (4 subsequent siblings)
5 siblings, 0 replies; 9+ messages in thread
From: Patchwork @ 2017-12-08 15:06 UTC (permalink / raw)
To: Chris Wilson; +Cc: intel-gfx
== Series Details ==
Series: igt/perf: Busywait for MI_REPORT_PERF_COUNT results
URL : https://patchwork.freedesktop.org/series/35091/
State : success
== Summary ==
IGT patchset tested on top of latest successful build
2fc64acf8a4465d5eab3d6cfec9b3c1b5df30d73 igt/perf_pmu: Tweak wait_for_rc6, yet again
with latest DRM-Tip kernel build CI_DRM_3483
b5f297e08432 drm-tip: 2017y-12m-08d-13h-53m-36s UTC integration manifest
No testlist changes.
Test gem_mmap_gtt:
Subgroup basic-small-bo-tiledx:
pass -> FAIL (fi-gdg-551) fdo#102575
Test kms_cursor_legacy:
Subgroup basic-busy-flip-before-cursor-legacy:
pass -> FAIL (fi-gdg-551) fdo#102618
Test kms_pipe_crc_basic:
Subgroup suspend-read-crc-pipe-b:
pass -> INCOMPLETE (fi-snb-2520m) fdo#103713
fdo#102575 https://bugs.freedesktop.org/show_bug.cgi?id=102575
fdo#102618 https://bugs.freedesktop.org/show_bug.cgi?id=102618
fdo#103713 https://bugs.freedesktop.org/show_bug.cgi?id=103713
fi-bdw-5557u total:288 pass:267 dwarn:0 dfail:0 fail:0 skip:21 time:437s
fi-bdw-gvtdvm total:288 pass:264 dwarn:0 dfail:0 fail:0 skip:24 time:443s
fi-blb-e6850 total:288 pass:223 dwarn:1 dfail:0 fail:0 skip:64 time:382s
fi-bsw-n3050 total:288 pass:242 dwarn:0 dfail:0 fail:0 skip:46 time:526s
fi-bwr-2160 total:288 pass:183 dwarn:0 dfail:0 fail:0 skip:105 time:282s
fi-bxt-dsi total:288 pass:258 dwarn:0 dfail:0 fail:0 skip:30 time:505s
fi-bxt-j4205 total:288 pass:259 dwarn:0 dfail:0 fail:0 skip:29 time:510s
fi-byt-j1900 total:288 pass:253 dwarn:0 dfail:0 fail:0 skip:35 time:494s
fi-byt-n2820 total:288 pass:249 dwarn:0 dfail:0 fail:0 skip:39 time:474s
fi-elk-e7500 total:224 pass:163 dwarn:15 dfail:0 fail:0 skip:45
fi-gdg-551 total:288 pass:177 dwarn:1 dfail:0 fail:2 skip:108 time:277s
fi-glk-1 total:288 pass:260 dwarn:0 dfail:0 fail:0 skip:28 time:540s
fi-hsw-4770 total:288 pass:261 dwarn:0 dfail:0 fail:0 skip:27 time:376s
fi-hsw-4770r total:288 pass:224 dwarn:0 dfail:0 fail:0 skip:64 time:262s
fi-ilk-650 total:288 pass:228 dwarn:0 dfail:0 fail:0 skip:60 time:399s
fi-ivb-3520m total:288 pass:259 dwarn:0 dfail:0 fail:0 skip:29 time:481s
fi-ivb-3770 total:288 pass:259 dwarn:0 dfail:0 fail:0 skip:29 time:453s
fi-kbl-7500u total:288 pass:263 dwarn:1 dfail:0 fail:0 skip:24 time:489s
fi-kbl-7560u total:288 pass:269 dwarn:0 dfail:0 fail:0 skip:19 time:532s
fi-kbl-7567u total:288 pass:268 dwarn:0 dfail:0 fail:0 skip:20 time:478s
fi-kbl-r total:288 pass:260 dwarn:1 dfail:0 fail:0 skip:27 time:536s
fi-pnv-d510 total:288 pass:222 dwarn:1 dfail:0 fail:0 skip:65 time:599s
fi-skl-6260u total:288 pass:268 dwarn:0 dfail:0 fail:0 skip:20 time:449s
fi-skl-6600u total:288 pass:261 dwarn:0 dfail:0 fail:0 skip:27 time:544s
fi-skl-6700hq total:288 pass:262 dwarn:0 dfail:0 fail:0 skip:26 time:571s
fi-skl-6700k total:288 pass:264 dwarn:0 dfail:0 fail:0 skip:24 time:514s
fi-skl-6770hq total:288 pass:268 dwarn:0 dfail:0 fail:0 skip:20 time:500s
fi-skl-gvtdvm total:288 pass:265 dwarn:0 dfail:0 fail:0 skip:23 time:449s
fi-snb-2520m total:245 pass:211 dwarn:0 dfail:0 fail:0 skip:33
fi-snb-2600 total:288 pass:248 dwarn:0 dfail:0 fail:0 skip:40 time:418s
Blacklisted hosts:
fi-cfl-s2 total:288 pass:262 dwarn:0 dfail:0 fail:0 skip:26 time:620s
fi-cnl-y total:288 pass:262 dwarn:0 dfail:0 fail:0 skip:26 time:630s
fi-glk-dsi total:288 pass:257 dwarn:0 dfail:0 fail:1 skip:30 time:496s
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_623/
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH igt] igt/perf: Busywait for MI_REPORT_PERF_COUNT results
2017-12-08 14:31 [PATCH igt] igt/perf: Busywait for MI_REPORT_PERF_COUNT results Chris Wilson
2017-12-08 15:06 ` ✓ Fi.CI.BAT: success for " Patchwork
@ 2017-12-08 15:08 ` Lionel Landwerlin
2017-12-08 15:13 ` [PATCH igt v2] igt/perf: Use igt_sysfs rather than opencoding Chris Wilson
` (3 subsequent siblings)
5 siblings, 0 replies; 9+ messages in thread
From: Lionel Landwerlin @ 2017-12-08 15:08 UTC (permalink / raw)
To: Chris Wilson, intel-gfx; +Cc: Matthew Auld
Hmm that sucks...
I'll bring this up with hardware people.
Maybe replacing this with a MI_STORE_REGISTER_MEM of the RCS timestamp
register (least significant 32bits) is a better approach.
On 08/12/17 14:31, Chris Wilson wrote:
> On Haswell, at least, MI_REPORT_PERF_COUNT is not flushed by the
> PIPECONTROL surrounding the batch. (In theory, before the breadcrumb is
> updated the CPU's view of memory is coherent with the GPU, i.e. all
> writes have landed and are visible to userspace. This does not appear to
> be the case for MI_REPORT_PERF_COUNT.)
>
> As MI_RPC does not appear to be synchronized with the batch, busyspin for
> its completion.
>
> (This has far deeper implications; since it means the GPU can still be
> writing to memory after release.)
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
> Cc: Matthew Auld <matthew.auld@intel.com>
> ---
> tests/perf.c | 93 +++++++++++++++++++++++++++++++++---------------------------
> 1 file changed, 52 insertions(+), 41 deletions(-)
>
> diff --git a/tests/perf.c b/tests/perf.c
> index a161c45d7..8c20fbe09 100644
> --- a/tests/perf.c
> +++ b/tests/perf.c
> @@ -706,47 +706,59 @@ emit_report_perf_count(struct intel_batchbuffer *batch,
> }
>
> static uint32_t
> -i915_get_one_gpu_timestamp(uint32_t *context_id)
> +i915_get_one_gpu_timestamp(void)
> {
> - drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(drm_fd, 4096);
> - drm_intel_context *mi_rpc_ctx = drm_intel_gem_context_create(bufmgr);
> - drm_intel_bo *mi_rpc_bo = drm_intel_bo_alloc(bufmgr, "mi_rpc dest bo", 4096, 64);
> - struct intel_batchbuffer *mi_rpc_batch = intel_batchbuffer_alloc(bufmgr, devid);
> - int ret;
> - uint32_t timestamp;
> -
> - drm_intel_bufmgr_gem_enable_reuse(bufmgr);
> -
> - if (context_id) {
> - ret = drm_intel_gem_context_get_id(mi_rpc_ctx, context_id);
> - igt_assert_eq(ret, 0);
> - }
> -
> - igt_assert(mi_rpc_ctx);
> - igt_assert(mi_rpc_bo);
> - igt_assert(mi_rpc_batch);
> -
> - ret = drm_intel_bo_map(mi_rpc_bo, true);
> - igt_assert_eq(ret, 0);
> - memset(mi_rpc_bo->virtual, 0x80, 4096);
> - drm_intel_bo_unmap(mi_rpc_bo);
> -
> - emit_report_perf_count(mi_rpc_batch,
> - mi_rpc_bo, /* dst */
> - 0, /* dst offset in bytes */
> - 0xdeadbeef); /* report ID */
> + struct drm_i915_gem_execbuffer2 execbuf;
> + struct drm_i915_gem_exec_object2 obj[2];
> + struct drm_i915_gem_relocation_entry reloc;
> + uint32_t *ptr, timestamp;
> + struct timespec tv = {};
> + int i;
>
> - intel_batchbuffer_flush_with_context(mi_rpc_batch, mi_rpc_ctx);
> + memset(obj, 0, sizeof(obj));
> + obj[0].handle = gem_create(drm_fd, 4096);
> + ptr = gem_mmap__cpu(drm_fd, obj[0].handle, 0, 4096, PROT_WRITE);
> + memset(ptr, 0x80, 4096);
> + munmap(ptr, 4096);
> +
> + obj[1].handle = gem_create(drm_fd, 4096);
> + obj[1].relocs_ptr = to_user_pointer(&reloc);
> + obj[1].relocation_count = 1;
> + ptr = gem_mmap__cpu(drm_fd, obj[1].handle, 0, 4096, PROT_WRITE);
> +
> + memset(&reloc, 0, sizeof(reloc));
> + reloc.target_handle = obj[0].handle;
> + reloc.offset = sizeof(uint32_t);
> + reloc.read_domains = I915_GEM_DOMAIN_RENDER;
> + reloc.write_domain = I915_GEM_DOMAIN_RENDER;
> +
> + i = 2;
> + ptr[0] = GEN6_MI_REPORT_PERF_COUNT;
> + if (intel_gen(devid) >= 8)
> + ptr[0]++, i++; /* 64b reloc */
> + ptr[i++] = 0xdeadbeef;
> + ptr[i] = MI_BATCH_BUFFER_END;
> + munmap(ptr, 4096);
> +
> + memset(&execbuf, 0, sizeof(execbuf));
> + execbuf.buffers_ptr = to_user_pointer(obj);
> + execbuf.buffer_count = 2;
> + execbuf.batch_len = 4096;
> + gem_execbuf(drm_fd, &execbuf);
> + gem_close(drm_fd, obj[1].handle);
>
> - ret = drm_intel_bo_map(mi_rpc_bo, false /* write enable */);
> - igt_assert_eq(ret, 0);
> - timestamp = ((uint32_t *)mi_rpc_bo->virtual)[1];
> - drm_intel_bo_unmap(mi_rpc_bo);
> + /*
> + * MI_REPORT_PERF_COUNT is unserialised, i.e. not flushed by
> + * the PIPECONTROLs surrounding batch execution. Ergo, we must
> + * manually wait.
> + */
> + do {
> + gem_read(drm_fd, obj[0].handle, sizeof(uint32_t),
> + &timestamp, sizeof(timestamp));
> + } while (timestamp == 0x80808080 && !igt_seconds_elapsed(&tv));
> + gem_close(drm_fd, obj[0].handle);
>
> - drm_intel_bo_unreference(mi_rpc_bo);
> - intel_batchbuffer_free(mi_rpc_batch);
> - drm_intel_gem_context_destroy(mi_rpc_ctx);
> - drm_intel_bufmgr_destroy(bufmgr);
> + igt_assert_neq(timestamp, 0x80808080);
>
> return timestamp;
> }
> @@ -1915,7 +1927,6 @@ test_oa_exponents(void)
> uint32_t n_reports = 0;
> uint32_t n_idle_reports = 0;
> uint32_t n_reads = 0;
> - uint32_t context_id;
> uint64_t first_timestamp = 0;
> bool check_first_timestamp = true;
> struct drm_i915_perf_record_header *header;
> @@ -1944,7 +1955,7 @@ test_oa_exponents(void)
> * first timestamp as way to filter previously
> * scheduled work that would have configured
> * the OA unit at a different period. */
> - first_timestamp = i915_get_one_gpu_timestamp(&context_id);
> + first_timestamp = i915_get_one_gpu_timestamp();
>
> while (n_reads < ARRAY_SIZE(reads) &&
> n_reports < ARRAY_SIZE(reports)) {
> @@ -2070,8 +2081,8 @@ test_oa_exponents(void)
> uint32_t *rpt = NULL, *last = NULL, *last_periodic = NULL;
>
> igt_debug(" > More than 5%% error: avg_ts_delta = %"PRIu64", delta_delta = %"PRIu64", "
> - "expected_delta = %"PRIu64", first_timestamp = %"PRIu64" ctx_id=%"PRIu32"\n",
> - average_timestamp_delta, delta_delta, expected_timestamp_delta, first_timestamp, context_id);
> + "expected_delta = %"PRIu64", first_timestamp = %"PRIu64"\n",
> + average_timestamp_delta, delta_delta, expected_timestamp_delta, first_timestamp);
> for (int i = 0; i < (n_reports - 1); i++) {
> /* XXX: calculating with u32 arithmetic to account for overflow */
> uint32_t u32_delta =
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 9+ messages in thread
* [PATCH igt v2] igt/perf: Use igt_sysfs rather than opencoding
2017-12-08 14:31 [PATCH igt] igt/perf: Busywait for MI_REPORT_PERF_COUNT results Chris Wilson
2017-12-08 15:06 ` ✓ Fi.CI.BAT: success for " Patchwork
2017-12-08 15:08 ` [PATCH igt] " Lionel Landwerlin
@ 2017-12-08 15:13 ` Chris Wilson
2017-12-08 16:20 ` Lionel Landwerlin
2017-12-08 15:14 ` [PATCH igt v2] igt/perf: Read RCS0 timestamp directly Chris Wilson
` (2 subsequent siblings)
5 siblings, 1 reply; 9+ messages in thread
From: Chris Wilson @ 2017-12-08 15:13 UTC (permalink / raw)
To: intel-gfx; +Cc: Matthew Auld
As igt_sysfs exists to provide convenience routine for parsing files
found in the device's sysfs dir, use it.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
---
tests/perf.c | 98 +++++++++++-------------------------------------------------
1 file changed, 18 insertions(+), 80 deletions(-)
diff --git a/tests/perf.c b/tests/perf.c
index a161c45d7..05ec7a472 100644
--- a/tests/perf.c
+++ b/tests/perf.c
@@ -39,6 +39,7 @@
#include <math.h>
#include "igt.h"
+#include "igt_sysfs.h"
#include "drm.h"
IGT_TEST_DESCRIPTION("Test the i915 perf metrics streaming interface");
@@ -278,6 +279,7 @@ static bool hsw_undefined_a_counters[45] = {
static bool gen8_undefined_a_counters[45];
static int drm_fd = -1;
+static int sysfs = -1;
static int pm_fd = -1;
static int stream_fd = -1;
static uint32_t devid;
@@ -425,67 +427,16 @@ write_u64_file(const char *file, uint64_t val)
close(fd);
}
-static uint64_t
+static unsigned long
sysfs_read(const char *file)
{
- char buf[512];
-
- snprintf(buf, sizeof(buf), "/sys/class/drm/card%d/%s", card, file);
-
- return read_u64_file(buf);
-}
-
-static char *
-read_debugfs_record(int device, const char *file, const char *key)
-{
- FILE *fp;
- int fd;
- char *line = NULL;
- size_t line_buf_size = 0;
- int len = 0;
- int key_len = strlen(key);
- char *value = NULL;
+ unsigned long value;
- fd = igt_debugfs_open(device, file, O_RDONLY);
- fp = fdopen(fd, "r");
- igt_require(fp);
+ igt_assert(igt_sysfs_scanf(sysfs, file, "%lu", &value) == 1);
- while ((len = getline(&line, &line_buf_size, fp)) > 0) {
-
- if (line[len - 1] == '\n')
- line[len - 1] = '\0';
-
- if (strncmp(key, line, key_len) == 0 &&
- line[key_len] == ':' &&
- line[key_len + 1] == ' ')
- {
- value = strdup(line + key_len + 2);
- goto done;
- }
- }
-
- igt_assert(!"reached");
-done:
- free(line);
- fclose(fp);
- close(fd);
return value;
}
-static uint64_t
-read_debugfs_u64_record(int fd, const char *file, const char *key)
-{
- char *str_val = read_debugfs_record(fd, file, key);
- uint64_t val;
-
- igt_require(str_val);
-
- val = strtoull(str_val, NULL, 0);
- free(str_val);
-
- return val;
-}
-
/* XXX: For Haswell this utility is only applicable to the render basic
* metric set.
*
@@ -4021,15 +3972,9 @@ gen8_test_single_ctx_render_target_writes_a_counter(void)
} while (WEXITSTATUS(child_ret) == EAGAIN);
}
-static bool
-rc6_enabled(void)
+static unsigned long rc6_residency_ms(void)
{
- char *rc6_status = read_debugfs_record(drm_fd, "i915_drpc_info",
- "RC6 Enabled");
- bool enabled = strcmp(rc6_status, "yes") == 0;
-
- free(rc6_status);
- return enabled;
+ return sysfs_read("power/rc6_residency_ms");
}
static void
@@ -4049,32 +3994,25 @@ test_rc6_disable(void)
.num_properties = sizeof(properties) / 16,
.properties_ptr = to_user_pointer(properties),
};
- uint64_t n_events_start, n_events_end;
+ unsigned long n_events_start, n_events_end;
+ unsigned long rc6_enabled;
- igt_skip_on(!rc6_enabled());
+ rc6_enabled = 0;
+ igt_sysfs_scanf(sysfs, "power/rc6_enable", "%lu", &rc6_enabled);
+ igt_require(rc6_enabled);
stream_fd = __perf_open(drm_fd, &param, false);
- n_events_start = read_debugfs_u64_record(drm_fd, "i915_drpc_info",
- "RC6 residency since boot");
-
+ n_events_start = rc6_residency_ms();
nanosleep(&(struct timespec){ .tv_sec = 0, .tv_nsec = 500000000 }, NULL);
-
- n_events_end = read_debugfs_u64_record(drm_fd, "i915_drpc_info",
- "RC6 residency since boot");
-
+ n_events_end = rc6_residency_ms();
igt_assert_eq(n_events_end - n_events_start, 0);
__perf_close(stream_fd);
- n_events_start = read_debugfs_u64_record(drm_fd, "i915_drpc_info",
- "RC6 residency since boot");
-
+ n_events_start = rc6_residency_ms();
nanosleep(&(struct timespec){ .tv_sec = 1, .tv_nsec = 0 }, NULL);
-
- n_events_end = read_debugfs_u64_record(drm_fd, "i915_drpc_info",
- "RC6 residency since boot");
-
+ n_events_end = rc6_residency_ms();
igt_assert_neq(n_events_end - n_events_start, 0);
}
@@ -4533,9 +4471,9 @@ igt_main
* should have closed drm_fd...
*/
igt_assert_eq(drm_fd, -1);
- drm_fd = drm_open_driver_render(DRIVER_INTEL);
+ drm_fd = drm_open_driver(DRIVER_INTEL);
devid = intel_get_drm_devid(drm_fd);
- card = drm_get_card();
+ sysfs = igt_sysfs_open(drm_fd, &card);
igt_require(init_sys_info());
--
2.15.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 9+ messages in thread
* [PATCH igt v2] igt/perf: Read RCS0 timestamp directly
2017-12-08 14:31 [PATCH igt] igt/perf: Busywait for MI_REPORT_PERF_COUNT results Chris Wilson
` (2 preceding siblings ...)
2017-12-08 15:13 ` [PATCH igt v2] igt/perf: Use igt_sysfs rather than opencoding Chris Wilson
@ 2017-12-08 15:14 ` Chris Wilson
2017-12-08 15:28 ` Lionel Landwerlin
2017-12-08 15:56 ` ✓ Fi.CI.BAT: success for igt/perf: Busywait for MI_REPORT_PERF_COUNT results (rev3) Patchwork
2017-12-08 17:45 ` ✗ Fi.CI.IGT: warning " Patchwork
5 siblings, 1 reply; 9+ messages in thread
From: Chris Wilson @ 2017-12-08 15:14 UTC (permalink / raw)
To: intel-gfx; +Cc: Matthew Auld
On Haswell, at least, MI_REPORT_PERF_COUNT is not flushed by the
PIPECONTROL surrounding the batch. (In theory, before the breadcrumb is
updated the CPU's view of memory is coherent with the GPU, i.e. all
writes have landed and are visible to userspace. This does not appear to
be the case for MI_REPORT_PERF_COUNT.) This makes it an unreliable
method for querying the timestamp, so use MI_STORE_REGISTER_MEM instead.
Testcase: igt/perf/oa-exponents
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
---
tests/perf.c | 80 +++++++++++++++++++++++++++++-------------------------------
1 file changed, 39 insertions(+), 41 deletions(-)
diff --git a/tests/perf.c b/tests/perf.c
index 05ec7a472..92e32d93c 100644
--- a/tests/perf.c
+++ b/tests/perf.c
@@ -657,47 +657,46 @@ emit_report_perf_count(struct intel_batchbuffer *batch,
}
static uint32_t
-i915_get_one_gpu_timestamp(uint32_t *context_id)
+i915_get_one_gpu_timestamp(void)
{
- drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(drm_fd, 4096);
- drm_intel_context *mi_rpc_ctx = drm_intel_gem_context_create(bufmgr);
- drm_intel_bo *mi_rpc_bo = drm_intel_bo_alloc(bufmgr, "mi_rpc dest bo", 4096, 64);
- struct intel_batchbuffer *mi_rpc_batch = intel_batchbuffer_alloc(bufmgr, devid);
- int ret;
+ const bool r64b = intel_gen(devid) >= 8;
+ struct drm_i915_gem_execbuffer2 execbuf;
+ struct drm_i915_gem_exec_object2 obj[2];
+ struct drm_i915_gem_relocation_entry reloc;
+ uint32_t batch[16];
uint32_t timestamp;
+ int i;
- drm_intel_bufmgr_gem_enable_reuse(bufmgr);
-
- if (context_id) {
- ret = drm_intel_gem_context_get_id(mi_rpc_ctx, context_id);
- igt_assert_eq(ret, 0);
- }
-
- igt_assert(mi_rpc_ctx);
- igt_assert(mi_rpc_bo);
- igt_assert(mi_rpc_batch);
-
- ret = drm_intel_bo_map(mi_rpc_bo, true);
- igt_assert_eq(ret, 0);
- memset(mi_rpc_bo->virtual, 0x80, 4096);
- drm_intel_bo_unmap(mi_rpc_bo);
-
- emit_report_perf_count(mi_rpc_batch,
- mi_rpc_bo, /* dst */
- 0, /* dst offset in bytes */
- 0xdeadbeef); /* report ID */
-
- intel_batchbuffer_flush_with_context(mi_rpc_batch, mi_rpc_ctx);
-
- ret = drm_intel_bo_map(mi_rpc_bo, false /* write enable */);
- igt_assert_eq(ret, 0);
- timestamp = ((uint32_t *)mi_rpc_bo->virtual)[1];
- drm_intel_bo_unmap(mi_rpc_bo);
+ memset(obj, 0, sizeof(obj));
+ obj[0].handle = gem_create(drm_fd, 4096);
+ obj[1].handle = gem_create(drm_fd, 4096);
+ obj[1].relocs_ptr = to_user_pointer(&reloc);
+ obj[1].relocation_count = 1;
- drm_intel_bo_unreference(mi_rpc_bo);
- intel_batchbuffer_free(mi_rpc_batch);
- drm_intel_gem_context_destroy(mi_rpc_ctx);
- drm_intel_bufmgr_destroy(bufmgr);
+ i = 0;
+ batch[i++] = 0x24 << 23 | (1 + r64b); /* SRM */
+ batch[i++] = 0x2358; /* RCS0 timestamp */
+ reloc.target_handle = obj[0].handle;
+ reloc.presumed_offset = obj[0].offset;
+ reloc.offset = i * sizeof(batch[0]);
+ reloc.delta = 0;
+ reloc.read_domains = I915_GEM_DOMAIN_RENDER;
+ reloc.write_domain = I915_GEM_DOMAIN_RENDER;
+ batch[i++] = reloc.delta;
+ if (r64b)
+ batch[i++] = 0;
+ batch[i] = MI_BATCH_BUFFER_END;
+ gem_write(drm_fd, obj[1].handle, 0, batch, sizeof(batch));
+
+ memset(&execbuf, 0, sizeof(execbuf));
+ execbuf.buffers_ptr = to_user_pointer(obj);
+ execbuf.buffer_count = 2;
+ execbuf.batch_len = 4096;
+ gem_execbuf(drm_fd, &execbuf);
+ gem_close(drm_fd, obj[1].handle);
+
+ gem_read(drm_fd, obj[0].handle, 0, &timestamp, sizeof(timestamp));
+ gem_close(drm_fd, obj[0].handle);
return timestamp;
}
@@ -1866,7 +1865,6 @@ test_oa_exponents(void)
uint32_t n_reports = 0;
uint32_t n_idle_reports = 0;
uint32_t n_reads = 0;
- uint32_t context_id;
uint64_t first_timestamp = 0;
bool check_first_timestamp = true;
struct drm_i915_perf_record_header *header;
@@ -1895,7 +1893,7 @@ test_oa_exponents(void)
* first timestamp as way to filter previously
* scheduled work that would have configured
* the OA unit at a different period. */
- first_timestamp = i915_get_one_gpu_timestamp(&context_id);
+ first_timestamp = i915_get_one_gpu_timestamp();
while (n_reads < ARRAY_SIZE(reads) &&
n_reports < ARRAY_SIZE(reports)) {
@@ -2021,8 +2019,8 @@ test_oa_exponents(void)
uint32_t *rpt = NULL, *last = NULL, *last_periodic = NULL;
igt_debug(" > More than 5%% error: avg_ts_delta = %"PRIu64", delta_delta = %"PRIu64", "
- "expected_delta = %"PRIu64", first_timestamp = %"PRIu64" ctx_id=%"PRIu32"\n",
- average_timestamp_delta, delta_delta, expected_timestamp_delta, first_timestamp, context_id);
+ "expected_delta = %"PRIu64", first_timestamp = %"PRIu64"\n",
+ average_timestamp_delta, delta_delta, expected_timestamp_delta, first_timestamp);
for (int i = 0; i < (n_reports - 1); i++) {
/* XXX: calculating with u32 arithmetic to account for overflow */
uint32_t u32_delta =
--
2.15.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 9+ messages in thread
* Re: [PATCH igt v2] igt/perf: Read RCS0 timestamp directly
2017-12-08 15:14 ` [PATCH igt v2] igt/perf: Read RCS0 timestamp directly Chris Wilson
@ 2017-12-08 15:28 ` Lionel Landwerlin
0 siblings, 0 replies; 9+ messages in thread
From: Lionel Landwerlin @ 2017-12-08 15:28 UTC (permalink / raw)
To: Chris Wilson, intel-gfx; +Cc: Matthew Auld
Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
On 08/12/17 15:14, Chris Wilson wrote:
> On Haswell, at least, MI_REPORT_PERF_COUNT is not flushed by the
> PIPECONTROL surrounding the batch. (In theory, before the breadcrumb is
> updated the CPU's view of memory is coherent with the GPU, i.e. all
> writes have landed and are visible to userspace. This does not appear to
> be the case for MI_REPORT_PERF_COUNT.) This makes it an unreliable
> method for querying the timestamp, so use MI_STORE_REGISTER_MEM instead.
>
> Testcase: igt/perf/oa-exponents
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
> Cc: Matthew Auld <matthew.auld@intel.com>
> ---
> tests/perf.c | 80 +++++++++++++++++++++++++++++-------------------------------
> 1 file changed, 39 insertions(+), 41 deletions(-)
>
> diff --git a/tests/perf.c b/tests/perf.c
> index 05ec7a472..92e32d93c 100644
> --- a/tests/perf.c
> +++ b/tests/perf.c
> @@ -657,47 +657,46 @@ emit_report_perf_count(struct intel_batchbuffer *batch,
> }
>
> static uint32_t
> -i915_get_one_gpu_timestamp(uint32_t *context_id)
> +i915_get_one_gpu_timestamp(void)
> {
> - drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(drm_fd, 4096);
> - drm_intel_context *mi_rpc_ctx = drm_intel_gem_context_create(bufmgr);
> - drm_intel_bo *mi_rpc_bo = drm_intel_bo_alloc(bufmgr, "mi_rpc dest bo", 4096, 64);
> - struct intel_batchbuffer *mi_rpc_batch = intel_batchbuffer_alloc(bufmgr, devid);
> - int ret;
> + const bool r64b = intel_gen(devid) >= 8;
> + struct drm_i915_gem_execbuffer2 execbuf;
> + struct drm_i915_gem_exec_object2 obj[2];
> + struct drm_i915_gem_relocation_entry reloc;
> + uint32_t batch[16];
> uint32_t timestamp;
> + int i;
>
> - drm_intel_bufmgr_gem_enable_reuse(bufmgr);
> -
> - if (context_id) {
> - ret = drm_intel_gem_context_get_id(mi_rpc_ctx, context_id);
> - igt_assert_eq(ret, 0);
> - }
> -
> - igt_assert(mi_rpc_ctx);
> - igt_assert(mi_rpc_bo);
> - igt_assert(mi_rpc_batch);
> -
> - ret = drm_intel_bo_map(mi_rpc_bo, true);
> - igt_assert_eq(ret, 0);
> - memset(mi_rpc_bo->virtual, 0x80, 4096);
> - drm_intel_bo_unmap(mi_rpc_bo);
> -
> - emit_report_perf_count(mi_rpc_batch,
> - mi_rpc_bo, /* dst */
> - 0, /* dst offset in bytes */
> - 0xdeadbeef); /* report ID */
> -
> - intel_batchbuffer_flush_with_context(mi_rpc_batch, mi_rpc_ctx);
> -
> - ret = drm_intel_bo_map(mi_rpc_bo, false /* write enable */);
> - igt_assert_eq(ret, 0);
> - timestamp = ((uint32_t *)mi_rpc_bo->virtual)[1];
> - drm_intel_bo_unmap(mi_rpc_bo);
> + memset(obj, 0, sizeof(obj));
> + obj[0].handle = gem_create(drm_fd, 4096);
> + obj[1].handle = gem_create(drm_fd, 4096);
> + obj[1].relocs_ptr = to_user_pointer(&reloc);
> + obj[1].relocation_count = 1;
>
> - drm_intel_bo_unreference(mi_rpc_bo);
> - intel_batchbuffer_free(mi_rpc_batch);
> - drm_intel_gem_context_destroy(mi_rpc_ctx);
> - drm_intel_bufmgr_destroy(bufmgr);
> + i = 0;
> + batch[i++] = 0x24 << 23 | (1 + r64b); /* SRM */
> + batch[i++] = 0x2358; /* RCS0 timestamp */
> + reloc.target_handle = obj[0].handle;
> + reloc.presumed_offset = obj[0].offset;
> + reloc.offset = i * sizeof(batch[0]);
> + reloc.delta = 0;
> + reloc.read_domains = I915_GEM_DOMAIN_RENDER;
> + reloc.write_domain = I915_GEM_DOMAIN_RENDER;
> + batch[i++] = reloc.delta;
> + if (r64b)
> + batch[i++] = 0;
> + batch[i] = MI_BATCH_BUFFER_END;
> + gem_write(drm_fd, obj[1].handle, 0, batch, sizeof(batch));
> +
> + memset(&execbuf, 0, sizeof(execbuf));
> + execbuf.buffers_ptr = to_user_pointer(obj);
> + execbuf.buffer_count = 2;
> + execbuf.batch_len = 4096;
> + gem_execbuf(drm_fd, &execbuf);
> + gem_close(drm_fd, obj[1].handle);
> +
> + gem_read(drm_fd, obj[0].handle, 0, &timestamp, sizeof(timestamp));
> + gem_close(drm_fd, obj[0].handle);
>
> return timestamp;
> }
> @@ -1866,7 +1865,6 @@ test_oa_exponents(void)
> uint32_t n_reports = 0;
> uint32_t n_idle_reports = 0;
> uint32_t n_reads = 0;
> - uint32_t context_id;
> uint64_t first_timestamp = 0;
> bool check_first_timestamp = true;
> struct drm_i915_perf_record_header *header;
> @@ -1895,7 +1893,7 @@ test_oa_exponents(void)
> * first timestamp as way to filter previously
> * scheduled work that would have configured
> * the OA unit at a different period. */
> - first_timestamp = i915_get_one_gpu_timestamp(&context_id);
> + first_timestamp = i915_get_one_gpu_timestamp();
>
> while (n_reads < ARRAY_SIZE(reads) &&
> n_reports < ARRAY_SIZE(reports)) {
> @@ -2021,8 +2019,8 @@ test_oa_exponents(void)
> uint32_t *rpt = NULL, *last = NULL, *last_periodic = NULL;
>
> igt_debug(" > More than 5%% error: avg_ts_delta = %"PRIu64", delta_delta = %"PRIu64", "
> - "expected_delta = %"PRIu64", first_timestamp = %"PRIu64" ctx_id=%"PRIu32"\n",
> - average_timestamp_delta, delta_delta, expected_timestamp_delta, first_timestamp, context_id);
> + "expected_delta = %"PRIu64", first_timestamp = %"PRIu64"\n",
> + average_timestamp_delta, delta_delta, expected_timestamp_delta, first_timestamp);
> for (int i = 0; i < (n_reports - 1); i++) {
> /* XXX: calculating with u32 arithmetic to account for overflow */
> uint32_t u32_delta =
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 9+ messages in thread
* ✓ Fi.CI.BAT: success for igt/perf: Busywait for MI_REPORT_PERF_COUNT results (rev3)
2017-12-08 14:31 [PATCH igt] igt/perf: Busywait for MI_REPORT_PERF_COUNT results Chris Wilson
` (3 preceding siblings ...)
2017-12-08 15:14 ` [PATCH igt v2] igt/perf: Read RCS0 timestamp directly Chris Wilson
@ 2017-12-08 15:56 ` Patchwork
2017-12-08 17:45 ` ✗ Fi.CI.IGT: warning " Patchwork
5 siblings, 0 replies; 9+ messages in thread
From: Patchwork @ 2017-12-08 15:56 UTC (permalink / raw)
To: Chris Wilson; +Cc: intel-gfx
== Series Details ==
Series: igt/perf: Busywait for MI_REPORT_PERF_COUNT results (rev3)
URL : https://patchwork.freedesktop.org/series/35091/
State : success
== Summary ==
IGT patchset tested on top of latest successful build
2fc64acf8a4465d5eab3d6cfec9b3c1b5df30d73 igt/perf_pmu: Tweak wait_for_rc6, yet again
with latest DRM-Tip kernel build CI_DRM_3483
b5f297e08432 drm-tip: 2017y-12m-08d-13h-53m-36s UTC integration manifest
No testlist changes.
Test gem_mmap_gtt:
Subgroup basic-small-bo-tiledx:
pass -> FAIL (fi-gdg-551) fdo#102575
fdo#102575 https://bugs.freedesktop.org/show_bug.cgi?id=102575
fi-bdw-5557u total:288 pass:267 dwarn:0 dfail:0 fail:0 skip:21 time:440s
fi-bdw-gvtdvm total:288 pass:264 dwarn:0 dfail:0 fail:0 skip:24 time:445s
fi-blb-e6850 total:288 pass:223 dwarn:1 dfail:0 fail:0 skip:64 time:389s
fi-bsw-n3050 total:288 pass:242 dwarn:0 dfail:0 fail:0 skip:46 time:528s
fi-bwr-2160 total:288 pass:183 dwarn:0 dfail:0 fail:0 skip:105 time:282s
fi-bxt-dsi total:288 pass:258 dwarn:0 dfail:0 fail:0 skip:30 time:510s
fi-bxt-j4205 total:288 pass:259 dwarn:0 dfail:0 fail:0 skip:29 time:510s
fi-byt-j1900 total:288 pass:253 dwarn:0 dfail:0 fail:0 skip:35 time:492s
fi-byt-n2820 total:288 pass:249 dwarn:0 dfail:0 fail:0 skip:39 time:478s
fi-elk-e7500 total:224 pass:163 dwarn:15 dfail:0 fail:0 skip:45
fi-gdg-551 total:288 pass:178 dwarn:1 dfail:0 fail:1 skip:108 time:270s
fi-glk-1 total:288 pass:260 dwarn:0 dfail:0 fail:0 skip:28 time:545s
fi-hsw-4770 total:288 pass:261 dwarn:0 dfail:0 fail:0 skip:27 time:374s
fi-hsw-4770r total:288 pass:224 dwarn:0 dfail:0 fail:0 skip:64 time:261s
fi-ilk-650 total:288 pass:228 dwarn:0 dfail:0 fail:0 skip:60 time:396s
fi-ivb-3520m total:288 pass:259 dwarn:0 dfail:0 fail:0 skip:29 time:475s
fi-ivb-3770 total:288 pass:259 dwarn:0 dfail:0 fail:0 skip:29 time:455s
fi-kbl-7500u total:288 pass:263 dwarn:1 dfail:0 fail:0 skip:24 time:491s
fi-kbl-7560u total:288 pass:269 dwarn:0 dfail:0 fail:0 skip:19 time:527s
fi-kbl-7567u total:288 pass:268 dwarn:0 dfail:0 fail:0 skip:20 time:479s
fi-kbl-r total:288 pass:260 dwarn:1 dfail:0 fail:0 skip:27 time:536s
fi-pnv-d510 total:288 pass:222 dwarn:1 dfail:0 fail:0 skip:65 time:594s
fi-skl-6260u total:288 pass:268 dwarn:0 dfail:0 fail:0 skip:20 time:450s
fi-skl-6600u total:288 pass:261 dwarn:0 dfail:0 fail:0 skip:27 time:536s
fi-skl-6700hq total:288 pass:262 dwarn:0 dfail:0 fail:0 skip:26 time:566s
fi-skl-6700k total:288 pass:264 dwarn:0 dfail:0 fail:0 skip:24 time:520s
fi-skl-6770hq total:288 pass:268 dwarn:0 dfail:0 fail:0 skip:20 time:503s
fi-skl-gvtdvm total:288 pass:265 dwarn:0 dfail:0 fail:0 skip:23 time:452s
fi-snb-2520m total:288 pass:249 dwarn:0 dfail:0 fail:0 skip:39 time:559s
fi-snb-2600 total:288 pass:248 dwarn:0 dfail:0 fail:0 skip:40 time:422s
Blacklisted hosts:
fi-cfl-s2 total:288 pass:262 dwarn:0 dfail:0 fail:0 skip:26 time:613s
fi-cnl-y total:288 pass:262 dwarn:0 dfail:0 fail:0 skip:26 time:638s
fi-glk-dsi total:288 pass:258 dwarn:0 dfail:0 fail:0 skip:30 time:493s
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_626/
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH igt v2] igt/perf: Use igt_sysfs rather than opencoding
2017-12-08 15:13 ` [PATCH igt v2] igt/perf: Use igt_sysfs rather than opencoding Chris Wilson
@ 2017-12-08 16:20 ` Lionel Landwerlin
0 siblings, 0 replies; 9+ messages in thread
From: Lionel Landwerlin @ 2017-12-08 16:20 UTC (permalink / raw)
To: Chris Wilson, intel-gfx; +Cc: Matthew Auld
Thanks for the cleanup :)
Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
On 08/12/17 15:13, Chris Wilson wrote:
> As igt_sysfs exists to provide convenience routine for parsing files
> found in the device's sysfs dir, use it.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
> Cc: Matthew Auld <matthew.auld@intel.com>
> ---
> tests/perf.c | 98 +++++++++++-------------------------------------------------
> 1 file changed, 18 insertions(+), 80 deletions(-)
>
> diff --git a/tests/perf.c b/tests/perf.c
> index a161c45d7..05ec7a472 100644
> --- a/tests/perf.c
> +++ b/tests/perf.c
> @@ -39,6 +39,7 @@
> #include <math.h>
>
> #include "igt.h"
> +#include "igt_sysfs.h"
> #include "drm.h"
>
> IGT_TEST_DESCRIPTION("Test the i915 perf metrics streaming interface");
> @@ -278,6 +279,7 @@ static bool hsw_undefined_a_counters[45] = {
> static bool gen8_undefined_a_counters[45];
>
> static int drm_fd = -1;
> +static int sysfs = -1;
> static int pm_fd = -1;
> static int stream_fd = -1;
> static uint32_t devid;
> @@ -425,67 +427,16 @@ write_u64_file(const char *file, uint64_t val)
> close(fd);
> }
>
> -static uint64_t
> +static unsigned long
> sysfs_read(const char *file)
> {
> - char buf[512];
> -
> - snprintf(buf, sizeof(buf), "/sys/class/drm/card%d/%s", card, file);
> -
> - return read_u64_file(buf);
> -}
> -
> -static char *
> -read_debugfs_record(int device, const char *file, const char *key)
> -{
> - FILE *fp;
> - int fd;
> - char *line = NULL;
> - size_t line_buf_size = 0;
> - int len = 0;
> - int key_len = strlen(key);
> - char *value = NULL;
> + unsigned long value;
>
> - fd = igt_debugfs_open(device, file, O_RDONLY);
> - fp = fdopen(fd, "r");
> - igt_require(fp);
> + igt_assert(igt_sysfs_scanf(sysfs, file, "%lu", &value) == 1);
>
> - while ((len = getline(&line, &line_buf_size, fp)) > 0) {
> -
> - if (line[len - 1] == '\n')
> - line[len - 1] = '\0';
> -
> - if (strncmp(key, line, key_len) == 0 &&
> - line[key_len] == ':' &&
> - line[key_len + 1] == ' ')
> - {
> - value = strdup(line + key_len + 2);
> - goto done;
> - }
> - }
> -
> - igt_assert(!"reached");
> -done:
> - free(line);
> - fclose(fp);
> - close(fd);
> return value;
> }
>
> -static uint64_t
> -read_debugfs_u64_record(int fd, const char *file, const char *key)
> -{
> - char *str_val = read_debugfs_record(fd, file, key);
> - uint64_t val;
> -
> - igt_require(str_val);
> -
> - val = strtoull(str_val, NULL, 0);
> - free(str_val);
> -
> - return val;
> -}
> -
> /* XXX: For Haswell this utility is only applicable to the render basic
> * metric set.
> *
> @@ -4021,15 +3972,9 @@ gen8_test_single_ctx_render_target_writes_a_counter(void)
> } while (WEXITSTATUS(child_ret) == EAGAIN);
> }
>
> -static bool
> -rc6_enabled(void)
> +static unsigned long rc6_residency_ms(void)
> {
> - char *rc6_status = read_debugfs_record(drm_fd, "i915_drpc_info",
> - "RC6 Enabled");
> - bool enabled = strcmp(rc6_status, "yes") == 0;
> -
> - free(rc6_status);
> - return enabled;
> + return sysfs_read("power/rc6_residency_ms");
> }
>
> static void
> @@ -4049,32 +3994,25 @@ test_rc6_disable(void)
> .num_properties = sizeof(properties) / 16,
> .properties_ptr = to_user_pointer(properties),
> };
> - uint64_t n_events_start, n_events_end;
> + unsigned long n_events_start, n_events_end;
> + unsigned long rc6_enabled;
>
> - igt_skip_on(!rc6_enabled());
> + rc6_enabled = 0;
> + igt_sysfs_scanf(sysfs, "power/rc6_enable", "%lu", &rc6_enabled);
> + igt_require(rc6_enabled);
>
> stream_fd = __perf_open(drm_fd, &param, false);
>
> - n_events_start = read_debugfs_u64_record(drm_fd, "i915_drpc_info",
> - "RC6 residency since boot");
> -
> + n_events_start = rc6_residency_ms();
> nanosleep(&(struct timespec){ .tv_sec = 0, .tv_nsec = 500000000 }, NULL);
> -
> - n_events_end = read_debugfs_u64_record(drm_fd, "i915_drpc_info",
> - "RC6 residency since boot");
> -
> + n_events_end = rc6_residency_ms();
> igt_assert_eq(n_events_end - n_events_start, 0);
>
> __perf_close(stream_fd);
>
> - n_events_start = read_debugfs_u64_record(drm_fd, "i915_drpc_info",
> - "RC6 residency since boot");
> -
> + n_events_start = rc6_residency_ms();
> nanosleep(&(struct timespec){ .tv_sec = 1, .tv_nsec = 0 }, NULL);
> -
> - n_events_end = read_debugfs_u64_record(drm_fd, "i915_drpc_info",
> - "RC6 residency since boot");
> -
> + n_events_end = rc6_residency_ms();
> igt_assert_neq(n_events_end - n_events_start, 0);
> }
>
> @@ -4533,9 +4471,9 @@ igt_main
> * should have closed drm_fd...
> */
> igt_assert_eq(drm_fd, -1);
> - drm_fd = drm_open_driver_render(DRIVER_INTEL);
> + drm_fd = drm_open_driver(DRIVER_INTEL);
> devid = intel_get_drm_devid(drm_fd);
> - card = drm_get_card();
> + sysfs = igt_sysfs_open(drm_fd, &card);
>
> igt_require(init_sys_info());
>
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 9+ messages in thread
* ✗ Fi.CI.IGT: warning for igt/perf: Busywait for MI_REPORT_PERF_COUNT results (rev3)
2017-12-08 14:31 [PATCH igt] igt/perf: Busywait for MI_REPORT_PERF_COUNT results Chris Wilson
` (4 preceding siblings ...)
2017-12-08 15:56 ` ✓ Fi.CI.BAT: success for igt/perf: Busywait for MI_REPORT_PERF_COUNT results (rev3) Patchwork
@ 2017-12-08 17:45 ` Patchwork
5 siblings, 0 replies; 9+ messages in thread
From: Patchwork @ 2017-12-08 17:45 UTC (permalink / raw)
To: Chris Wilson; +Cc: intel-gfx
== Series Details ==
Series: igt/perf: Busywait for MI_REPORT_PERF_COUNT results (rev3)
URL : https://patchwork.freedesktop.org/series/35091/
State : warning
== Summary ==
Test drv_module_reload:
Subgroup basic-reload:
dmesg-warn -> PASS (shard-snb) fdo#102848
dmesg-warn -> PASS (shard-hsw) fdo#102707
Test kms_fbcon_fbt:
Subgroup fbc-suspend:
pass -> SKIP (shard-snb)
Test kms_frontbuffer_tracking:
Subgroup fbc-1p-primscrn-spr-indfb-draw-blt:
pass -> SKIP (shard-snb) fdo#101623 +1
Test drv_suspend:
Subgroup debugfs-reader:
skip -> PASS (shard-hsw)
Subgroup fence-restore-tiled2untiled-hibernate:
skip -> FAIL (shard-hsw) fdo#103375
Subgroup forcewake:
skip -> PASS (shard-hsw)
Test pm_rc6_residency:
Subgroup rc6-accuracy:
skip -> PASS (shard-snb)
fdo#102848 https://bugs.freedesktop.org/show_bug.cgi?id=102848
fdo#102707 https://bugs.freedesktop.org/show_bug.cgi?id=102707
fdo#101623 https://bugs.freedesktop.org/show_bug.cgi?id=101623
fdo#103375 https://bugs.freedesktop.org/show_bug.cgi?id=103375
shard-hsw total:2679 pass:1535 dwarn:1 dfail:0 fail:10 skip:1133 time:9494s
shard-snb total:2679 pass:1307 dwarn:1 dfail:0 fail:11 skip:1360 time:8059s
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_626/shards.html
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 9+ messages in thread
end of thread, other threads:[~2017-12-08 17:45 UTC | newest]
Thread overview: 9+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-12-08 14:31 [PATCH igt] igt/perf: Busywait for MI_REPORT_PERF_COUNT results Chris Wilson
2017-12-08 15:06 ` ✓ Fi.CI.BAT: success for " Patchwork
2017-12-08 15:08 ` [PATCH igt] " Lionel Landwerlin
2017-12-08 15:13 ` [PATCH igt v2] igt/perf: Use igt_sysfs rather than opencoding Chris Wilson
2017-12-08 16:20 ` Lionel Landwerlin
2017-12-08 15:14 ` [PATCH igt v2] igt/perf: Read RCS0 timestamp directly Chris Wilson
2017-12-08 15:28 ` Lionel Landwerlin
2017-12-08 15:56 ` ✓ Fi.CI.BAT: success for igt/perf: Busywait for MI_REPORT_PERF_COUNT results (rev3) Patchwork
2017-12-08 17:45 ` ✗ Fi.CI.IGT: warning " Patchwork
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.