From: Rodrigo Vivi <rodrigo.vivi@gmail.com>
To: intel-gfx@lists.freedesktop.org
Cc: Ben Widawsky <benjamin.widawsky@intel.com>
Subject: [PATCH 3/4] drm/i915: Per-process stats work better when evaluated per-process
Date: Thu, 13 Mar 2014 11:57:00 -0300
Message-ID: <1394722620-24780-1-git-send-email-rodrigo.vivi@gmail.com>

From: Chris Wilson <chris@chris-wilson.co.uk>

The idea of printing the objects used by each process is to judge how
each process is using them. This means that we need to evaluate whether
an object is bound into that particular process's address space, rather
than just whether it is bound into the global GTT.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Ben Widawsky <benjamin.widawsky@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@gmail.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c     | 34 ++++++++++++++++++++++++++-------
 drivers/gpu/drm/i915/i915_drv.h         |  2 ++
 drivers/gpu/drm/i915/i915_gem_context.c |  1 +
 3 files changed, 30 insertions(+), 7 deletions(-)
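
For review convenience: the accounting decision this patch moves into
per_file_stats() can be modelled in plain userspace C. Everything below
is a simplified sketch with stand-in types invented for illustration,
not the kernel's real structures:

#include <stddef.h>

struct file_priv { int id; };

struct vma_model {
	struct file_priv *owner;	/* NULL models a global-GTT binding */
	int allocated;			/* models drm_mm_node_allocated() */
};

struct gem_object_model {
	size_t size;
	int active;			/* models obj->ring != NULL */
	struct vma_model *vmas;		/* models obj->vma_list */
	int nvmas;
};

struct file_stats_model {
	size_t total, global, active, inactive, unbound;
};

/* Walk every binding of the object and account it against fpriv,
 * mirroring the logic this patch adds to per_file_stats(). */
static void per_file_stats_model(const struct gem_object_model *obj,
				 const struct file_priv *fpriv,
				 struct file_stats_model *stats)
{
	int i;

	stats->total += obj->size;

	for (i = 0; i < obj->nvmas; i++) {
		const struct vma_model *vma = &obj->vmas[i];

		if (!vma->allocated)		/* vma has no backing node */
			continue;

		if (vma->owner == NULL) {	/* bound into the global GTT */
			stats->global += obj->size;
			continue;
		}

		if (vma->owner != fpriv)	/* another process's VM */
			continue;

		if (obj->active)
			stats->active += obj->size;
		else
			stats->inactive += obj->size;
		return;				/* counted for this process */
	}

	stats->unbound += obj->size;		/* nothing bound for this file */
}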

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a90d31c..ed3965f 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -299,28 +299,46 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
 } while (0)
 
 struct file_stats {
+	struct drm_i915_file_private *file_priv;
 	int count;
-	size_t total, active, inactive, unbound;
+	size_t total, global, active, inactive, unbound;
 };
 
 static int per_file_stats(int id, void *ptr, void *data)
 {
 	struct drm_i915_gem_object *obj = ptr;
 	struct file_stats *stats = data;
+	struct i915_vma *vma;
 
 	stats->count++;
 	stats->total += obj->base.size;
 
-	if (i915_gem_obj_ggtt_bound(obj)) {
-		if (!list_empty(&obj->ring_list))
+	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+		struct i915_hw_ppgtt *ppgtt;
+
+		if (!drm_mm_node_allocated(&vma->node))
+			continue;
+
+		ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base);
+		if (ppgtt->ctx == NULL) {
+			stats->global += obj->base.size;
+			continue;
+		}
+
+		if (ppgtt->ctx->file_priv != stats->file_priv)
+			continue;
+
+		if (obj->ring) /* XXX per-vma statistic */
 			stats->active += obj->base.size;
 		else
 			stats->inactive += obj->base.size;
-	} else {
-		if (!list_empty(&obj->global_list))
-			stats->unbound += obj->base.size;
+
+		return 0;
 	}
 
+	if (!list_empty(&obj->global_list))
+		stats->unbound += obj->base.size;
+
 	return 0;
 }
 
@@ -411,6 +429,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 		struct task_struct *task;
 
 		memset(&stats, 0, sizeof(stats));
+		stats.file_priv = file->driver_priv;
 		idr_for_each(&file->object_idr, per_file_stats, &stats);
 		/*
 		 * Although we have a valid reference on file->pid, that does
@@ -420,12 +439,13 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 		 */
 		rcu_read_lock();
 		task = pid_task(file->pid, PIDTYPE_PID);
-		seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu unbound)\n",
+		seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu unbound)\n",
 			   task ? task->comm : "<unknown>",
 			   stats.count,
 			   stats.total,
 			   stats.active,
 			   stats.inactive,
+			   stats.global,
 			   stats.unbound);
 		rcu_read_unlock();
 	}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2a319ba..b76c6de 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -721,6 +721,8 @@ struct i915_hw_ppgtt {
 		dma_addr_t *gen8_pt_dma_addr[4];
 	};
 
+	struct i915_hw_context *ctx;
+
 	int (*enable)(struct i915_hw_ppgtt *ppgtt);
 	int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
 			 struct intel_ring_buffer *ring,
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index ce41cff..1a94b07 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -215,6 +215,7 @@ create_vm_for_ctx(struct drm_device *dev, struct i915_hw_context *ctx)
 		return ERR_PTR(ret);
 	}
 
+	ppgtt->ctx = ctx;
 	return ppgtt;
 }
 
-- 
1.8.5.3
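
The per-process check above hinges on the new ctx back-pointer together
with container_of(): given vma->vm, the embedded i915_address_space, the
enclosing i915_hw_ppgtt is recovered and its ctx->file_priv is compared
against the file being reported. A minimal, kernel-independent
demonstration of that pattern, using simplified stand-in types rather
than the real i915 structures:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct address_space { unsigned long start, total; };

struct hw_context { int file_id; };

struct hw_ppgtt {
	struct address_space base;	/* embedded, as in i915_hw_ppgtt */
	struct hw_context *ctx;		/* the back-pointer this patch adds */
};

int main(void)
{
	struct hw_context ctx = { .file_id = 42 };
	struct hw_ppgtt ppgtt = { .base = { 0, 1 << 20 }, .ctx = &ctx };
	struct address_space *vm = &ppgtt.base;	/* what a vma carries */

	/* Recover the enclosing ppgtt from the embedded address space. */
	struct hw_ppgtt *owner = container_of(vm, struct hw_ppgtt, base);

	printf("owning file id: %d\n", owner->ctx->file_id);
	return 0;
}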

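With the patch applied, each per-client line of the i915_gem_objects
debugfs file follows the amended seq_printf() format string, gaining the
new "global" field. A hypothetical example line (all values invented for
illustration):

  Xorg: 183 objects, 94371840 bytes (8126464 active, 52428800 inactive, 12582912 global, 4096000 unbound)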

Thread overview: 9+ messages
2014-03-13 14:57 Rodrigo Vivi [this message]
2014-03-19  0:05 ` [PATCH 3/4] drm/i915: Per-process stats work better when evaluated per-process Ben Widawsky
2014-03-19  6:55   ` Chris Wilson
2014-03-19 13:45     ` [PATCH 1/2] " Chris Wilson
2014-03-19 13:45       ` [PATCH 2/2] drm/i915: Print how many objects are shared in per-process stats Chris Wilson
2014-03-20  4:42         ` [PATCH 2/2] drm/i915: Print how many objects are shared in per-process stats Ben Widawsky
2014-03-20  7:40           ` Chris Wilson
2014-03-20 14:11             ` Daniel Vetter
2014-03-19 17:30       ` [PATCH 1/2] drm/i915: Per-process stats work better when evaluated per-process Ben Widawsky
