From: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
To: Intel-gfx@lists.freedesktop.org
Subject: [Intel-gfx] [PATCH 7/9] drm/i915: Expose per-engine client busyness
Date: Wed, 18 Mar 2020 11:01:44 +0000
Message-ID: <20200318110146.22339-8-tvrtko.ursulin@linux.intel.com>
In-Reply-To: <20200318110146.22339-1-tvrtko.ursulin@linux.intel.com>

From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Expose per-client and per-engine busyness under the previously added sysfs
client root.

The new files, one per engine class, are located under the 'busy'
directory. Each contains a monotonically increasing, nanosecond
resolution count of the time the client's jobs have been executing on
the GPU.
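
A minimal sketch of how userspace could read one of these counters
follows. The sysfs location of the client root and the helper itself
are purely illustrative assumptions, not part of this patch:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Read one per-class busy counter, in nanoseconds; returns 0 on error. */
static uint64_t read_busy_ns(const char *card, const char *client,
			     unsigned int engine_class)
{
	char path[256];
	uint64_t ns = 0;
	FILE *f;

	/* Assumed layout: /sys/class/drm/<card>/clients/<id>/busy/<class> */
	snprintf(path, sizeof(path),
		 "/sys/class/drm/%s/clients/%s/busy/%u",
		 card, client, engine_class);

	f = fopen(path, "r");
	if (!f)
		return 0;

	if (fscanf(f, "%" SCNu64, &ns) != 1)
		ns = 0;
	fclose(f);

	return ns;
}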

This enables userspace to create a top-like tool for GPU utilization:

==========================================================================
intel-gpu-top -  935/ 935 MHz;    0% RC6; 14.73 Watts;     1097 irqs/s

      IMC reads:     1401 MiB/s
     IMC writes:        4 MiB/s

          ENGINE      BUSY                                 MI_SEMA MI_WAIT
     Render/3D/0   63.73% |███████████████████           |      3%      0%
       Blitter/0    9.53% |██▊                           |      6%      0%
         Video/0   39.32% |███████████▊                  |     16%      0%
         Video/1   15.62% |████▋                         |      0%      0%
  VideoEnhance/0    0.00% |                              |      0%      0%

  PID            NAME     RCS          BCS          VCS         VECS
 4084        gem_wsim |█████▌     ||█          ||           ||           |
 4086        gem_wsim |█▌         ||           ||███        ||           |
==========================================================================
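
The per-client percentages in such a tool follow from sampling each
counter twice and dividing the busy time delta by the wall clock delta
of the sampling window. A rough sketch, reusing the illustrative
read_busy_ns() helper from above:

#include <time.h>
#include <unistd.h>

/* Estimate busyness of one engine class over a period_ms window. */
static double sample_busy_pct(const char *card, const char *client,
			      unsigned int engine_class,
			      unsigned int period_ms)
{
	struct timespec t0, t1;
	uint64_t busy0, busy1;
	double wall_ns;

	clock_gettime(CLOCK_MONOTONIC, &t0);
	busy0 = read_busy_ns(card, client, engine_class);

	usleep(period_ms * 1000);

	clock_gettime(CLOCK_MONOTONIC, &t1);
	busy1 = read_busy_ns(card, client, engine_class);

	wall_ns = (t1.tv_sec - t0.tv_sec) * 1e9 +
		  (t1.tv_nsec - t0.tv_nsec);

	return wall_ns > 0 ? 100.0 * (busy1 - busy0) / wall_ns : 0.0;
}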

v2: Use intel_context_engine_get_busy_time.
v3: New directory structure.
v4: Rebase.
v5: sysfs_attr_init.
v6: Small tidy in i915_gem_add_client.
v7: Rebase to be engine class based.
v8:
 * Always enable stats.
 * Walk all client contexts.
v9:
 * Skip unsupported engine classes. (Chris)
 * Use scheduler caps. (Chris)
v10:
 * Use pphwsp runtime only.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/i915_drm_client.c | 110 ++++++++++++++++++++++++-
 drivers/gpu/drm/i915/i915_drm_client.h |  11 +++
 2 files changed, 120 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/i915_drm_client.c b/drivers/gpu/drm/i915/i915_drm_client.c
index c88d9ff448e0..0a2d933fe83c 100644
--- a/drivers/gpu/drm/i915/i915_drm_client.c
+++ b/drivers/gpu/drm/i915/i915_drm_client.c
@@ -9,8 +9,13 @@
 
 #include <drm/drm_print.h>
 
+#include <uapi/drm/i915_drm.h>
+
 #include "i915_drv.h"
 #include "i915_drm_client.h"
+#include "gem/i915_gem_context.h"
+#include "gt/intel_engine_user.h"
+#include "i915_drv.h"
 #include "i915_gem.h"
 #include "i915_utils.h"
 
@@ -52,6 +57,104 @@ show_client_pid(struct device *kdev, struct device_attribute *attr, char *buf)
 	return ret;
 }
 
+static u64
+pphwsp_busy_add(struct i915_gem_context *ctx, unsigned int class)
+{
+	struct i915_gem_engines *engines = rcu_dereference(ctx->engines);
+	struct i915_gem_engines_iter it;
+	struct intel_context *ce;
+	u64 total = 0;
+
+	for_each_gem_engine(ce, engines, it) {
+		if (ce->engine->uabi_class == class)
+			total += ce->runtime.total;
+	}
+
+	return total;
+}
+
+static ssize_t
+show_client_busy(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	struct i915_engine_busy_attribute *i915_attr =
+		container_of(attr, typeof(*i915_attr), attr);
+	unsigned int class = i915_attr->engine_class;
+	struct i915_drm_client *client = i915_attr->client;
+	u64 total = atomic64_read(&client->past_runtime[class]);
+	struct list_head *list = &client->ctx_list;
+	struct i915_gem_context *ctx;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(ctx, list, client_link) {
+		total += atomic64_read(&ctx->past_runtime[class]);
+		total += pphwsp_busy_add(ctx, class);
+	}
+	rcu_read_unlock();
+
+	total *= RUNTIME_INFO(i915_attr->i915)->cs_timestamp_period_ns;
+
+	return snprintf(buf, PAGE_SIZE, "%llu\n", total);
+}
+
+static const char * const uabi_class_names[] = {
+	[I915_ENGINE_CLASS_RENDER] = "0",
+	[I915_ENGINE_CLASS_COPY] = "1",
+	[I915_ENGINE_CLASS_VIDEO] = "2",
+	[I915_ENGINE_CLASS_VIDEO_ENHANCE] = "3",
+};
+
+static int
+__client_register_sysfs_busy(struct i915_drm_client *client)
+{
+	struct i915_drm_clients *clients = client->clients;
+	struct drm_i915_private *i915 =
+		container_of(clients, typeof(*i915), clients);
+	unsigned int i;
+	int ret = 0;
+
+	if (!HAS_LOGICAL_RING_CONTEXTS(i915))
+		return 0;
+
+	client->busy_root = kobject_create_and_add("busy", client->root);
+	if (!client->busy_root)
+		return -ENOMEM;
+
+	for (i = 0; i < ARRAY_SIZE(uabi_class_names); i++) {
+		struct i915_engine_busy_attribute *i915_attr =
+			&client->attr.busy[i];
+		struct device_attribute *attr = &i915_attr->attr;
+
+		if (!intel_engine_lookup_user(i915, i, 0))
+			continue;
+
+		i915_attr->client = client;
+		i915_attr->i915 = i915;
+		i915_attr->engine_class = i;
+
+		sysfs_attr_init(&attr->attr);
+
+		attr->attr.name = uabi_class_names[i];
+		attr->attr.mode = 0444;
+		attr->show = show_client_busy;
+
+		ret = sysfs_create_file(client->busy_root,
+					(struct attribute *)attr);
+		if (ret)
+			goto err;
+	}
+
+	return 0;
+
+err:
+	kobject_put(client->busy_root);
+	return ret;
+}
+
+static void __client_unregister_sysfs_busy(struct i915_drm_client *client)
+{
+	kobject_put(fetch_and_zero(&client->busy_root));
+}
+
 static int
 __client_register_sysfs(struct i915_drm_client *client)
 {
@@ -88,9 +191,12 @@ __client_register_sysfs(struct i915_drm_client *client)
 
 		ret = sysfs_create_file(client->root, (struct attribute *)attr);
 		if (ret)
-			break;
+			goto out;
 	}
 
+	ret = __client_register_sysfs_busy(client);
+
+out:
 	if (ret)
 		kobject_put(client->root);
 
@@ -99,6 +205,8 @@ __client_register_sysfs(struct i915_drm_client *client)
 
 static void __client_unregister_sysfs(struct i915_drm_client *client)
 {
+	__client_unregister_sysfs_busy(client);
+
 	kobject_put(fetch_and_zero(&client->root));
 }
 
diff --git a/drivers/gpu/drm/i915/i915_drm_client.h b/drivers/gpu/drm/i915/i915_drm_client.h
index 0be27aa9bbda..98c5fc24fcb4 100644
--- a/drivers/gpu/drm/i915/i915_drm_client.h
+++ b/drivers/gpu/drm/i915/i915_drm_client.h
@@ -26,6 +26,15 @@ struct i915_drm_clients {
 	struct kobject *root;
 };
 
+struct i915_drm_client;
+
+struct i915_engine_busy_attribute {
+	struct device_attribute attr;
+	struct drm_i915_private *i915;
+	struct i915_drm_client *client;
+	unsigned int engine_class;
+};
+
 struct i915_drm_client {
 	struct kref kref;
 
@@ -44,9 +53,11 @@ struct i915_drm_client {
 	struct i915_drm_clients *clients;
 
 	struct kobject *root;
+	struct kobject *busy_root;
 	struct {
 		struct device_attribute pid;
 		struct device_attribute name;
+		struct i915_engine_busy_attribute busy[MAX_ENGINE_CLASS + 1];
 	} attr;
 
 	/**
-- 
2.20.1
