From: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
To: Intel-gfx@lists.freedesktop.org
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Subject: [PATCH 10/12] drm/i915: Introduce dedicated object VMA iterator
Date: Tue,  2 Feb 2016 11:06:28 +0000	[thread overview]
Message-ID: <1454411190-15721-11-git-send-email-tvrtko.ursulin@linux.intel.com> (raw)
In-Reply-To: <1454411190-15721-1-git-send-email-tvrtko.ursulin@linux.intel.com>

From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

The purpose is to catch call sites which iterate the object VMA list
without holding the big lock (struct_mutex).

Implemented by open-coding list_for_each_entry so that the macro stays
a drop-in replacement at existing call sites.

v2: Error capture runs without the mutex, so keep iterating the list directly there.
v3: Replace WARN_ON with lockdep_assert_held. (Chris Wilson, Daniel Vetter)
v4: Moved under the dedicated CONFIG_DRM_I915_DEBUG option and back to WARN_ON_ONCE.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
---
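Note: for readers outside the i915 tree, below is a minimal, self-contained
userspace sketch of the pattern the new macro uses. The names are illustrative
only, not the driver code: the stand-in lock_held flag plays the role of
mutex_is_locked(&obj->base.dev->struct_mutex), and assert() plays the role of
WARN_ON_ONCE. The point is that the check sits in the first clause of the
for() initialiser, joined by the comma operator, so the macro keeps the exact
shape of list_for_each_entry and drops into existing call sites unchanged.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Stand-in for mutex_is_locked(&obj->base.dev->struct_mutex). */
static int lock_held = 1;

/*
 * Open-coded list_for_each_entry with a locking check: the assert()
 * shares the initialiser clause with the first-entry assignment via
 * the comma operator, so callers use it exactly like the plain macro.
 * typeof() is a GNU extension, as in the kernel.
 */
#define for_each_entry_locked(pos, head, member)			\
	for (assert(lock_held),						\
	     pos = container_of((head)->next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = container_of(pos->member.next, typeof(*pos), member))

struct item {
	int val;
	struct list_head link;
};

/* Crude list_add_tail() so the sketch stands alone. */
static void list_add_tail_(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

int main(void)
{
	struct list_head head = { &head, &head };
	struct item a = { .val = 1 }, b = { .val = 2 };
	struct item *it;

	list_add_tail_(&a.link, &head);
	list_add_tail_(&b.link, &head);

	for_each_entry_locked(it, &head, link)
		printf("%d\n", it->val);	/* prints 1 then 2 */

	return 0;
}

In the sketch the assert() aborts when the check fails; in the real macro
WARN_ON_ONCE only warns (once per call site) and the walk proceeds as before.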
 drivers/gpu/drm/i915/i915_debugfs.c      |  8 ++++----
 drivers/gpu/drm/i915/i915_drv.h          | 11 +++++++++++
 drivers/gpu/drm/i915/i915_gem.c          | 24 ++++++++++++------------
 drivers/gpu/drm/i915/i915_gem_gtt.c      |  2 +-
 drivers/gpu/drm/i915/i915_gem_shrinker.c |  2 +-
 5 files changed, 29 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 863012a2602e..ff444f09ea98 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -117,7 +117,7 @@ static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
 	u64 size = 0;
 	struct i915_vma *vma;
 
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+	i915_gem_obj_for_each_vma(vma, obj) {
 		if (i915_is_ggtt(vma->vm) &&
 		    drm_mm_node_allocated(&vma->node))
 			size += vma->node.size;
@@ -155,7 +155,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
 	if (obj->base.name)
 		seq_printf(m, " (name: %d)", obj->base.name);
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+	i915_gem_obj_for_each_vma(vma, obj) {
 		if (vma->pin_count > 0)
 			pin_count++;
 	}
@@ -164,7 +164,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		seq_printf(m, " (display)");
 	if (obj->fence_reg != I915_FENCE_REG_NONE)
 		seq_printf(m, " (fence: %d)", obj->fence_reg);
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+	i915_gem_obj_for_each_vma(vma, obj) {
 		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
 			   i915_is_ggtt(vma->vm) ? "g" : "pp",
 			   vma->node.start, vma->node.size);
@@ -342,7 +342,7 @@ static int per_file_stats(int id, void *ptr, void *data)
 		stats->shared += obj->base.size;
 
 	if (USES_FULL_PPGTT(obj->base.dev)) {
-		list_for_each_entry(vma, &obj->vma_list, vma_link) {
+		i915_gem_obj_for_each_vma(vma, obj) {
 			struct i915_hw_ppgtt *ppgtt;
 
 			if (!drm_mm_node_allocated(&vma->node))
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 905e90f25957..05ef750386df 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2861,6 +2861,17 @@ struct drm_i915_gem_object *i915_gem_object_create_from_data(
 void i915_gem_free_object(struct drm_gem_object *obj);
 void i915_gem_vma_destroy(struct i915_vma *vma);
 
+#ifdef CONFIG_DRM_I915_DEBUG
+  #define i915_gem_obj_for_each_vma(vma, obj) \
+	for (WARN_ON_ONCE(!mutex_is_locked(&(obj)->base.dev->struct_mutex)), \
+	     vma = list_first_entry(&(obj)->vma_list, typeof(*vma), vma_link);\
+	     &vma->vma_link != (&(obj)->vma_list); \
+	     vma = list_next_entry(vma, vma_link))
+#else
+  #define i915_gem_obj_for_each_vma(vma, obj) \
+		list_for_each_entry((vma), &(obj)->vma_list, vma_link)
+#endif
+
 /* Flags used by pin/bind&friends. */
 #define PIN_MAPPABLE	(1<<0)
 #define PIN_NONBLOCK	(1<<1)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c558887b2084..ce9d0544b42c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2454,7 +2454,7 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
 	list_move_tail(&obj->global_list,
 		       &to_i915(obj->base.dev)->mm.bound_list);
 
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+	i915_gem_obj_for_each_vma(vma, obj) {
 		if (!list_empty(&vma->mm_list))
 			list_move_tail(&vma->mm_list, &vma->vm->inactive_list);
 	}
@@ -3873,7 +3873,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 			 */
 		}
 
-		list_for_each_entry(vma, &obj->vma_list, vma_link) {
+		i915_gem_obj_for_each_vma(vma, obj) {
 			if (!drm_mm_node_allocated(&vma->node))
 				continue;
 
@@ -3883,7 +3883,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 		}
 	}
 
-	list_for_each_entry(vma, &obj->vma_list, vma_link)
+	i915_gem_obj_for_each_vma(vma, obj)
 		vma->node.color = cache_level;
 	obj->cache_level = cache_level;
 
@@ -4613,7 +4613,7 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
 				     struct i915_address_space *vm)
 {
 	struct i915_vma *vma;
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+	i915_gem_obj_for_each_vma(vma, obj) {
 		if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
 		    vma->vm == vm)
 			return vma;
@@ -4630,7 +4630,7 @@ struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
 	if (WARN_ONCE(!view, "no view specified"))
 		return ERR_PTR(-EINVAL);
 
-	list_for_each_entry(vma, &obj->vma_list, vma_link)
+	i915_gem_obj_for_each_vma(vma, obj)
 		if (vma->vm == ggtt &&
 		    i915_ggtt_view_equal(&vma->ggtt_view, view))
 			return vma;
@@ -5201,7 +5201,7 @@ u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
 
 	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
 
-	list_for_each_entry(vma, &o->vma_list, vma_link) {
+	i915_gem_obj_for_each_vma(vma, o) {
 		if (i915_is_ggtt(vma->vm) &&
 		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
 			continue;
@@ -5220,7 +5220,7 @@ u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
 	struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
 	struct i915_vma *vma;
 
-	list_for_each_entry(vma, &o->vma_list, vma_link)
+	i915_gem_obj_for_each_vma(vma, o)
 		if (vma->vm == ggtt &&
 		    i915_ggtt_view_equal(&vma->ggtt_view, view))
 			return vma->node.start;
@@ -5234,7 +5234,7 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
 {
 	struct i915_vma *vma;
 
-	list_for_each_entry(vma, &o->vma_list, vma_link) {
+	i915_gem_obj_for_each_vma(vma, o) {
 		if (i915_is_ggtt(vma->vm) &&
 		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
 			continue;
@@ -5251,7 +5251,7 @@ bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
 	struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
 	struct i915_vma *vma;
 
-	list_for_each_entry(vma, &o->vma_list, vma_link)
+	i915_gem_obj_for_each_vma(vma, o)
 		if (vma->vm == ggtt &&
 		    i915_ggtt_view_equal(&vma->ggtt_view, view) &&
 		    drm_mm_node_allocated(&vma->node))
@@ -5264,7 +5264,7 @@ bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
 {
 	struct i915_vma *vma;
 
-	list_for_each_entry(vma, &o->vma_list, vma_link)
+	i915_gem_obj_for_each_vma(vma, o)
 		if (drm_mm_node_allocated(&vma->node))
 			return true;
 
@@ -5281,7 +5281,7 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
 
 	BUG_ON(list_empty(&o->vma_list));
 
-	list_for_each_entry(vma, &o->vma_list, vma_link) {
+	i915_gem_obj_for_each_vma(vma, o) {
 		if (i915_is_ggtt(vma->vm) &&
 		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
 			continue;
@@ -5294,7 +5294,7 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
 bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
 {
 	struct i915_vma *vma;
-	list_for_each_entry(vma, &obj->vma_list, vma_link)
+	i915_gem_obj_for_each_vma(vma, obj)
 		if (vma->pin_count > 0)
 			return true;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 715a771f0b31..ab00b2a3c035 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -3237,7 +3237,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 	vm = &dev_priv->gtt.base;
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
 		flush = false;
-		list_for_each_entry(vma, &obj->vma_list, vma_link) {
+		i915_gem_obj_for_each_vma(vma, obj) {
 			if (vma->vm != vm)
 				continue;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 58c1e592bbdb..4c89bd45dbde 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -52,7 +52,7 @@ static int num_vma_bound(struct drm_i915_gem_object *obj)
 	struct i915_vma *vma;
 	int count = 0;
 
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+	i915_gem_obj_for_each_vma(vma, obj) {
 		if (drm_mm_node_allocated(&vma->node))
 			count++;
 		if (vma->pin_count)
-- 
1.9.1

Thread overview: 44+ messages
2016-02-02 11:06 [PATCH 00/12] Misc locking fixes and GEM debugging Tvrtko Ursulin
2016-02-02 11:06 ` [PATCH 01/12] drm/i915: Add wait_for_us Tvrtko Ursulin
2016-02-02 11:57   ` Chris Wilson
2016-02-02 14:04     ` Tvrtko Ursulin
2016-02-02 15:43       ` Chris Wilson
2016-02-02 13:35   ` Dave Gordon
2016-02-02 13:58     ` Tvrtko Ursulin
2016-02-02 14:44     ` [PATCH v2 " Tvrtko Ursulin
2016-02-02 11:06 ` [PATCH 02/12] drm/i915: Do not wait atomically for display clocks Tvrtko Ursulin
2016-02-02 12:00   ` Chris Wilson
2016-02-02 14:08     ` Dave Gordon
2016-02-02 15:39       ` Chris Wilson
2016-02-02 11:06 ` [PATCH 03/12] drm/i915/guc: Do not wait for firmware load atomically Tvrtko Ursulin
2016-02-02 14:13   ` Dave Gordon
2016-02-02 11:06 ` [PATCH 04/12] drm/i915/lrc: Do not wait atomically when stopping engines Tvrtko Ursulin
2016-02-02 11:06 ` [PATCH 05/12] drm/i915: Kconfig for extra driver debugging Tvrtko Ursulin
2016-02-02 11:06 ` [PATCH 06/12] drm/i915: Do not lie about atomic wait granularity Tvrtko Ursulin
2016-02-02 12:29   ` Chris Wilson
2016-02-02 14:45     ` [PATCH v3 " Tvrtko Ursulin
2016-02-02 11:06 ` [PATCH 07/12] drm/i915: GEM operations need to be done under the big lock Tvrtko Ursulin
2016-02-02 12:05   ` Chris Wilson
2016-02-02 14:46     ` [PATCH v4 " Tvrtko Ursulin
2016-02-02 15:49       ` Chris Wilson
2016-02-11 10:13       ` Chris Wilson
2016-02-15 16:09         ` Daniel Vetter
2016-02-11 10:07   ` [PATCH " Chris Wilson
2016-02-02 11:06 ` [PATCH 08/12] drm/i915: Fix struct mutex vs. RPS lock inversion Tvrtko Ursulin
2016-02-02 13:16   ` Chris Wilson
2016-02-02 14:13     ` Tvrtko Ursulin
2016-02-02 14:48       ` Chris Wilson
2016-02-02 14:46     ` [PATCH v3 " Tvrtko Ursulin
2016-02-11 10:06       ` Chris Wilson
2016-02-02 11:06 ` [PATCH 09/12] drm/i915/ilk: Move register read under spinlock Tvrtko Ursulin
2016-02-02 12:01   ` Chris Wilson
2016-02-02 11:06 ` Tvrtko Ursulin [this message]
2016-02-02 11:36   ` [PATCH 10/12] drm/i915: Introduce dedicated object VMA iterator Chris Wilson
2016-02-02 12:10     ` Tvrtko Ursulin
2016-02-02 12:58       ` Chris Wilson
2016-02-02 13:56         ` Tvrtko Ursulin
2016-02-02 11:06 ` [PATCH 11/12] drm/i915: Introduce dedicated safe " Tvrtko Ursulin
2016-02-02 11:06 ` [PATCH 12/12] drm/i915: Add BKL asserts to get page helpers Tvrtko Ursulin
2016-02-02 11:39   ` Chris Wilson
2016-02-02 12:02     ` Tvrtko Ursulin
2016-02-02 11:22 ` ✓ Fi.CI.BAT: success for Misc locking fixes and GEM debugging Patchwork
