From: sourab.gupta@intel.com
To: intel-gfx@lists.freedesktop.org
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>,
	Sourab Gupta <sourab.gupta@intel.com>,
	Matthew Auld <matthew.auld@intel.com>
Subject: [PATCH v2 08/15] drm/i915: Add support for emitting execbuffer tags through OA counter reports
Date: Mon,  7 Nov 2016 16:00:56 +0530
Message-ID: <1478514656-31035-1-git-send-email-sourab.gupta@intel.com>
In-Reply-To: <20161104100450.GJ15981@nuc-i3427.alporthouse.com>

From: Sourab Gupta <sourab.gupta@intel.com>

This patch enables userspace to specify a per-workload tag via the
execbuffer ioctl, which can be added to the OA reports to help associate
the reports with the workloads that generated them.

From a userspace perspective, there may be multiple stages within a
single context, so we need the ability to individually associate OA
reports with their corresponding workloads (execbuffers), which may not
be possible with the ctx_id or pid information alone. This patch enables
such a mechanism.

The upper 32 bits of the rsvd1 field, which were previously unused, are
now used to pass in the tag.
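
For illustration, a minimal userspace sketch of how a tag could be packed
into rsvd1 next to the context id (the pack_exec_tag() helper is
hypothetical and not part of this series; the kernel side extracts the
value with the i915_execbuffer2_get_tag() macro added below):

  #include <stdint.h>
  #include <drm/i915_drm.h>

  /* Place a per-workload tag in the upper 32 bits of rsvd1, keeping the
   * context id in the lower 32 bits as before.
   */
  static void pack_exec_tag(struct drm_i915_gem_execbuffer2 *eb2,
                            uint32_t ctx_id, uint32_t tag)
  {
          eb2->rsvd1 = ((uint64_t)tag << 32) | ctx_id;
  }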

v2: Corrected the tag extraction macro (Chris)

Signed-off-by: Sourab Gupta <sourab.gupta@intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h            |  6 +++--
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  6 +++--
 drivers/gpu/drm/i915/i915_perf.c           | 38 ++++++++++++++++++++++++++----
 include/uapi/drm/i915_drm.h                | 12 ++++++++++
 4 files changed, 53 insertions(+), 9 deletions(-)
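
Note (not part of the patch): a rough sketch of how a consumer might read
the tag back out of the perf samples. It assumes the stream was opened
with DRM_I915_PERF_PROP_SAMPLE_CTX_ID, DRM_I915_PERF_PROP_SAMPLE_PID and
DRM_I915_PERF_PROP_SAMPLE_TAG set to 1 and without
DRM_I915_PERF_PROP_SAMPLE_OA_SOURCE, so each sample carries header,
u32 ctx_id, u32 pid, u32 tag, then the OA report, per the record layout
documented in i915_drm.h below. The dump_tags() helper is hypothetical
and the record header layout is taken from the i915 perf series this
builds on.

  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>
  #include <drm/i915_drm.h>

  /* Walk the records returned by read() on the perf stream fd and print
   * the ctx_id/pid/tag carried in each sample record.
   */
  static void dump_tags(const uint8_t *buf, size_t len)
  {
          const uint8_t *end = buf + len;

          while (buf + sizeof(struct drm_i915_perf_record_header) <= end) {
                  const struct drm_i915_perf_record_header *hdr =
                          (const void *)buf;

                  if (hdr->type == DRM_I915_PERF_RECORD_SAMPLE) {
                          uint32_t ctx_id, pid, tag;

                          memcpy(&ctx_id, buf + sizeof(*hdr), 4);
                          memcpy(&pid, buf + sizeof(*hdr) + 4, 4);
                          memcpy(&tag, buf + sizeof(*hdr) + 8, 4);
                          printf("ctx %u pid %u tag %u\n", ctx_id, pid, tag);
                  }
                  buf += hdr->size;
          }
  }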

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f250e7b..0f171f8 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1814,7 +1814,7 @@ struct i915_perf_stream_ops {
 	 * Routine to emit the commands in the command streamer associated
 	 * with the corresponding gpu engine.
 	 */
-	void (*command_stream_hook)(struct drm_i915_gem_request *req);
+	void (*command_stream_hook)(struct drm_i915_gem_request *req, u32 tag);
 };
 
 enum i915_perf_stream_state {
@@ -1873,6 +1873,7 @@ struct i915_perf_cs_data_node {
 	u32 offset;
 	u32 ctx_id;
 	u32 pid;
+	u32 tag;
 };
 
 struct drm_i915_private {
@@ -2244,6 +2245,7 @@ struct drm_i915_private {
 
 		u32 last_ctx_id;
 		u32 last_pid;
+		u32 last_tag;
 		struct list_head node_list;
 		spinlock_t node_list_lock;
 	} perf;
@@ -3666,7 +3668,7 @@ void i915_oa_legacy_ctx_switch_notify(struct drm_i915_gem_request *req);
 void i915_oa_update_reg_state(struct intel_engine_cs *engine,
 			      struct i915_gem_context *ctx,
 			      uint32_t *reg_state);
-void i915_perf_command_stream_hook(struct drm_i915_gem_request *req);
+void i915_perf_command_stream_hook(struct drm_i915_gem_request *req, u32 tag);
 
 /* i915_gem_evict.c */
 int __must_check i915_gem_evict_something(struct i915_address_space *vm,
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index da502c7..d89787b 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -58,6 +58,7 @@ struct i915_execbuffer_params {
 	struct intel_engine_cs          *engine;
 	struct i915_gem_context         *ctx;
 	struct drm_i915_gem_request     *request;
+	uint32_t			tag;
 };
 
 struct eb_vmas {
@@ -1523,7 +1524,7 @@ execbuf_submit(struct i915_execbuffer_params *params,
 	if (exec_len == 0)
 		exec_len = params->batch->size - params->args_batch_start_offset;
 
-	i915_perf_command_stream_hook(params->request);
+	i915_perf_command_stream_hook(params->request, params->tag);
 
 	ret = params->engine->emit_bb_start(params->request,
 					    exec_start, exec_len,
@@ -1531,7 +1532,7 @@ execbuf_submit(struct i915_execbuffer_params *params,
 	if (ret)
 		return ret;
 
-	i915_perf_command_stream_hook(params->request);
+	i915_perf_command_stream_hook(params->request, params->tag);
 
 	trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
 
@@ -1843,6 +1844,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	params->engine                    = engine;
 	params->dispatch_flags          = dispatch_flags;
 	params->ctx                     = ctx;
+	params->tag			= i915_execbuffer2_get_tag(*args);
 
 	ret = execbuf_submit(params, args, &eb->vmas);
 err_request:
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 0a13672..18489c2 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -255,6 +255,7 @@ struct oa_sample_data {
 	u32 source;
 	u32 ctx_id;
 	u32 pid;
+	u32 tag;
 	const u8 *report;
 };
 
@@ -311,6 +312,7 @@ static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
 #define SAMPLE_OA_SOURCE_INFO	(1<<1)
 #define SAMPLE_CTX_ID		(1<<2)
 #define SAMPLE_PID		(1<<3)
+#define SAMPLE_TAG		(1<<4)
 
 struct perf_open_properties {
 	u32 sample_flags;
@@ -335,7 +337,8 @@ struct perf_open_properties {
  * perf mutex lock.
  */
 
-void i915_perf_command_stream_hook(struct drm_i915_gem_request *request)
+void i915_perf_command_stream_hook(struct drm_i915_gem_request *request,
+					u32 tag)
 {
 	struct intel_engine_cs *engine = request->engine;
 	struct drm_i915_private *dev_priv = engine->i915;
@@ -348,7 +351,7 @@ void i915_perf_command_stream_hook(struct drm_i915_gem_request *request)
 	list_for_each_entry(stream, &dev_priv->perf.streams, link) {
 		if ((stream->state == I915_PERF_STREAM_ENABLED) &&
 					stream->cs_mode)
-			stream->ops->command_stream_hook(request);
+			stream->ops->command_stream_hook(request, tag);
 	}
 	mutex_unlock(&dev_priv->perf.streams_lock);
 }
@@ -462,7 +465,8 @@ out_unlock:
 	return ret;
 }
 
-static void i915_perf_command_stream_hook_oa(struct drm_i915_gem_request *req)
+static void i915_perf_command_stream_hook_oa(struct drm_i915_gem_request *req,
+						u32 tag)
 {
 	struct drm_i915_private *dev_priv = req->i915;
 	struct intel_ring *ring = req->ring;
@@ -487,6 +491,7 @@ static void i915_perf_command_stream_hook_oa(struct drm_i915_gem_request *req)
 
 	entry->ctx_id = ctx->hw_id;
 	entry->pid = current->pid;
+	entry->tag = tag;
 	i915_gem_request_assign(&entry->request, req);
 
 	addr = dev_priv->perf.command_stream_buf.vma->node.start +
@@ -744,6 +749,12 @@ static int append_oa_sample(struct i915_perf_stream *stream,
 		buf += 4;
 	}
 
+	if (sample_flags & SAMPLE_TAG) {
+		if (copy_to_user(buf, &data->tag, 4))
+			return -EFAULT;
+		buf += 4;
+	}
+
 	if (sample_flags & SAMPLE_OA_REPORT) {
 		if (copy_to_user(buf, data->report, report_size))
 			return -EFAULT;
@@ -789,6 +800,9 @@ static int append_oa_buffer_sample(struct i915_perf_stream *stream,
 	if (sample_flags & SAMPLE_PID)
 		data.pid = dev_priv->perf.last_pid;
 
+	if (sample_flags & SAMPLE_TAG)
+		data.tag = dev_priv->perf.last_tag;
+
 	if (sample_flags & SAMPLE_OA_REPORT)
 		data.report = report;
 
@@ -1310,6 +1324,11 @@ static int append_oa_rcs_sample(struct i915_perf_stream *stream,
 		dev_priv->perf.last_pid = node->pid;
 	}
 
+	if (sample_flags & SAMPLE_TAG) {
+		data.tag = node->tag;
+		dev_priv->perf.last_tag = node->tag;
+	}
+
 	if (sample_flags & SAMPLE_OA_REPORT)
 		data.report = report;
 
@@ -2144,7 +2163,8 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
 	struct drm_i915_private *dev_priv = stream->dev_priv;
 	bool require_oa_unit = props->sample_flags & (SAMPLE_OA_REPORT |
 						      SAMPLE_OA_SOURCE_INFO);
-	bool require_cs_mode = props->sample_flags & SAMPLE_PID;
+	bool require_cs_mode = props->sample_flags & (SAMPLE_PID |
+						      SAMPLE_TAG);
 	bool cs_sample_data = props->sample_flags & SAMPLE_OA_REPORT;
 	int ret;
 
@@ -2297,7 +2317,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
 	}
 
 	if (require_cs_mode && !props->cs_mode) {
-		DRM_ERROR("PID sampling requires a ring to be specified");
+		DRM_ERROR("PID or TAG sampling require a ring to be specified");
 		ret = -EINVAL;
 		goto cs_error;
 	}
@@ -2330,6 +2350,11 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
 			stream->sample_size += 4;
 		}
 
+		if (props->sample_flags & SAMPLE_TAG) {
+			stream->sample_flags |= SAMPLE_TAG;
+			stream->sample_size += 4;
+		}
+
 		ret = alloc_command_stream_buf(dev_priv);
 		if (ret)
 			goto cs_error;
@@ -3005,6 +3030,9 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
 		case DRM_I915_PERF_PROP_SAMPLE_PID:
 			props->sample_flags |= SAMPLE_PID;
 			break;
+		case DRM_I915_PERF_PROP_SAMPLE_TAG:
+			props->sample_flags |= SAMPLE_TAG;
+			break;
 		case DRM_I915_PERF_PROP_MAX:
 			BUG();
 		}
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index ead97b7f4..452c497 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -832,6 +832,11 @@ struct drm_i915_gem_execbuffer2 {
 #define i915_execbuffer2_get_context_id(eb2) \
 	((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)
 
+/* Upper 32 bits of the rsvd1 field contain the tag */
+#define I915_EXEC_TAG_MASK		(0xffffffff00000000UL)
+#define i915_execbuffer2_get_tag(eb2) \
+	(((eb2).rsvd1 & I915_EXEC_TAG_MASK) >> 32)
+
 struct drm_i915_gem_pin {
 	/** Handle of the buffer to be pinned. */
 	__u32 handle;
@@ -1313,6 +1318,12 @@ enum drm_i915_perf_property_id {
 	 */
 	DRM_I915_PERF_PROP_SAMPLE_PID,
 
+	/**
+	 * The value of this property set to 1 requests inclusion of tag in the
+	 * perf sample data.
+	 */
+	DRM_I915_PERF_PROP_SAMPLE_TAG,
+
 	DRM_I915_PERF_PROP_MAX /* non-ABI */
 };
 
@@ -1380,6 +1391,7 @@ enum drm_i915_perf_record_type {
 	 *     { u32 source_info; } && DRM_I915_PERF_PROP_SAMPLE_OA_SOURCE
 	 *     { u32 ctx_id; } && DRM_I915_PERF_PROP_SAMPLE_CTX_ID
 	 *     { u32 pid; } && DRM_I915_PERF_PROP_SAMPLE_PID
+	 *     { u32 tag; } && DRM_I915_PERF_PROP_SAMPLE_TAG
 	 *     { u32 oa_report[]; } && DRM_I915_PERF_PROP_SAMPLE_OA
 	 * };
 	 */
-- 
1.9.1
