From: Chris Wilson <chris@chris-wilson.co.uk>
To: intel-gfx@lists.freedesktop.org
Subject: [PATCH 08/38] drm/i915: Pull VM lists under the VM mutex.
Date: Fri, 18 Jan 2019 14:00:39 +0000	[thread overview]
Message-ID: <20190118140109.25261-9-chris@chris-wilson.co.uk> (raw)
In-Reply-To: <20190118140109.25261-1-chris@chris-wilson.co.uk>

A starting point to counter the pervasive struct_mutex. Towards the goal
of avoiding global locks during user request submission (or at least not
blocking under them!), a simple but important step is being able to
manage each client's GTT separately. To that end, we want to stop using
struct_mutex as the guard for all things GTT/VM and switch instead to a
specific mutex inside i915_address_space.
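
For illustration only (not part of the diff below), a minimal sketch of
the locking pattern this patch adopts; the helper name
example_move_to_bound() is hypothetical, while the fields and
i915_vma_unbind() are the ones touched by the diff:

	/* List membership in a VM is now guarded by vm->mutex. */
	static void example_move_to_bound(struct i915_vma *vma)
	{
		mutex_lock(&vma->vm->mutex);
		list_move_tail(&vma->vm_link, &vma->vm->bound_list);
		mutex_unlock(&vma->vm->mutex);
	}

	/*
	 * Walkers that call a sleeping/unbinding operation (such as
	 * i915_vma_unbind()) drop vm->mutex around that call and retake
	 * it before continuing, as i915_gem_restore_gtt_mappings() and
	 * the shrinker do below.
	 */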

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem.c                 | 14 ++++++++------
 drivers/gpu/drm/i915/i915_gem_evict.c           |  2 ++
 drivers/gpu/drm/i915/i915_gem_gtt.c             | 15 +++++++++++++--
 drivers/gpu/drm/i915/i915_gem_shrinker.c        |  4 ++++
 drivers/gpu/drm/i915/i915_gem_stolen.c          |  2 ++
 drivers/gpu/drm/i915/i915_vma.c                 | 11 +++++++++++
 drivers/gpu/drm/i915/selftests/i915_gem_evict.c |  3 +++
 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c   |  3 +++
 8 files changed, 46 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f45186ddb236..538fa5404603 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -245,18 +245,19 @@ int
 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 			    struct drm_file *file)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	struct i915_ggtt *ggtt = &to_i915(dev)->ggtt;
 	struct drm_i915_gem_get_aperture *args = data;
 	struct i915_vma *vma;
 	u64 pinned;
 
+	mutex_lock(&ggtt->vm.mutex);
+
 	pinned = ggtt->vm.reserved;
-	mutex_lock(&dev->struct_mutex);
 	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
 		if (i915_vma_is_pinned(vma))
 			pinned += vma->node.size;
-	mutex_unlock(&dev->struct_mutex);
+
+	mutex_unlock(&ggtt->vm.mutex);
 
 	args->aper_size = ggtt->vm.total;
 	args->aper_available_size = args->aper_size - pinned;
@@ -1529,20 +1530,21 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 
 static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *i915;
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct list_head *list;
 	struct i915_vma *vma;
 
 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
 
+	mutex_lock(&i915->ggtt.vm.mutex);
 	for_each_ggtt_vma(vma, obj) {
 		if (!drm_mm_node_allocated(&vma->node))
 			continue;
 
 		list_move_tail(&vma->vm_link, &vma->vm->bound_list);
 	}
+	mutex_unlock(&i915->ggtt.vm.mutex);
 
-	i915 = to_i915(obj->base.dev);
 	spin_lock(&i915->mm.obj_lock);
 	list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
 	list_move_tail(&obj->mm.link, list);
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 5cfe4b75e7d6..dc137701acb8 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -432,6 +432,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm)
 	}
 
 	INIT_LIST_HEAD(&eviction_list);
+	mutex_lock(&vm->mutex);
 	list_for_each_entry(vma, &vm->bound_list, vm_link) {
 		if (i915_vma_is_pinned(vma))
 			continue;
@@ -439,6 +440,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm)
 		__i915_vma_pin(vma);
 		list_add(&vma->evict_link, &eviction_list);
 	}
+	mutex_unlock(&vm->mutex);
 
 	ret = 0;
 	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 2ad9070a54c1..49b00996a15e 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1931,7 +1931,10 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
 	vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */
 
 	INIT_LIST_HEAD(&vma->obj_link);
+
+	mutex_lock(&vma->vm->mutex);
 	list_add(&vma->vm_link, &vma->vm->unbound_list);
+	mutex_unlock(&vma->vm->mutex);
 
 	return vma;
 }
@@ -3504,9 +3507,10 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
 
 	i915_check_and_clear_faults(dev_priv);
 
+	mutex_lock(&ggtt->vm.mutex);
+
 	/* First fill our portion of the GTT with scratch pages */
 	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
-
 	ggtt->vm.closed = true; /* skip rewriting PTE on VMA unbind */
 
 	/* clflush objects bound into the GGTT and rebind them. */
@@ -3516,19 +3520,26 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
 		if (!(vma->flags & I915_VMA_GLOBAL_BIND))
 			continue;
 
+		mutex_unlock(&ggtt->vm.mutex);
+
 		if (!i915_vma_unbind(vma))
-			continue;
+			goto lock;
 
 		WARN_ON(i915_vma_bind(vma,
 				      obj ? obj->cache_level : 0,
 				      PIN_UPDATE));
 		if (obj)
 			WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
+
+lock:
+		mutex_lock(&ggtt->vm.mutex);
 	}
 
 	ggtt->vm.closed = false;
 	i915_ggtt_invalidate(dev_priv);
 
+	mutex_unlock(&ggtt->vm.mutex);
+
 	if (INTEL_GEN(dev_priv) >= 8) {
 		struct intel_ppat *ppat = &dev_priv->ppat;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index a76d6c95c824..6da795c7e62e 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -461,6 +461,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
 					       I915_SHRINK_VMAPS);
 
 	/* We also want to clear any cached iomaps as they wrap vmap */
+	mutex_lock(&i915->ggtt.vm.mutex);
 	list_for_each_entry_safe(vma, next,
 				 &i915->ggtt.vm.bound_list, vm_link) {
 		unsigned long count = vma->node.size >> PAGE_SHIFT;
@@ -468,9 +469,12 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
 		if (!vma->iomap || i915_vma_is_active(vma))
 			continue;
 
+		mutex_unlock(&i915->ggtt.vm.mutex);
 		if (i915_vma_unbind(vma) == 0)
 			freed_pages += count;
+		mutex_lock(&i915->ggtt.vm.mutex);
 	}
+	mutex_unlock(&i915->ggtt.vm.mutex);
 
 out:
 	shrinker_unlock(i915, unlock);
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index a9e365789686..74a9661479ca 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -702,7 +702,9 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
 	vma->flags |= I915_VMA_GLOBAL_BIND;
 	__i915_vma_set_map_and_fenceable(vma);
 
+	mutex_lock(&ggtt->vm.mutex);
 	list_move_tail(&vma->vm_link, &ggtt->vm.bound_list);
+	mutex_unlock(&ggtt->vm.mutex);
 
 	spin_lock(&dev_priv->mm.obj_lock);
 	list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 7de28baffb8f..dcbd0d345c72 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -213,7 +213,10 @@ vma_create(struct drm_i915_gem_object *obj,
 	}
 	rb_link_node(&vma->obj_node, rb, p);
 	rb_insert_color(&vma->obj_node, &obj->vma_tree);
+
+	mutex_lock(&vm->mutex);
 	list_add(&vma->vm_link, &vm->unbound_list);
+	mutex_unlock(&vm->mutex);
 
 	return vma;
 
@@ -656,7 +659,9 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level));
 
+	mutex_lock(&vma->vm->mutex);
 	list_move_tail(&vma->vm_link, &vma->vm->bound_list);
+	mutex_unlock(&vma->vm->mutex);
 
 	if (vma->obj) {
 		struct drm_i915_gem_object *obj = vma->obj;
@@ -689,8 +694,10 @@ i915_vma_remove(struct i915_vma *vma)
 
 	vma->ops->clear_pages(vma);
 
+	mutex_lock(&vma->vm->mutex);
 	drm_mm_remove_node(&vma->node);
 	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
+	mutex_unlock(&vma->vm->mutex);
 
 	/*
 	 * Since the unbound list is global, only move to that list if
@@ -802,7 +809,11 @@ static void __i915_vma_destroy(struct i915_vma *vma)
 	GEM_BUG_ON(i915_gem_active_isset(&vma->last_fence));
 
 	list_del(&vma->obj_link);
+
+	mutex_lock(&vma->vm->mutex);
 	list_del(&vma->vm_link);
+	mutex_unlock(&vma->vm->mutex);
+
 	if (vma->obj)
 		rb_erase(&vma->obj_node, &vma->obj->vma_tree);
 
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 9e6b9304f2bd..c8deb961a020 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -109,10 +109,13 @@ static int populate_ggtt(struct drm_i915_private *i915)
 
 static void unpin_ggtt(struct drm_i915_private *i915)
 {
+	struct i915_ggtt *ggtt = &i915->ggtt;
 	struct i915_vma *vma;
 
+	mutex_lock(&ggtt->vm.mutex);
 	list_for_each_entry(vma, &i915->ggtt.vm.bound_list, vm_link)
 		i915_vma_unpin(vma);
+	mutex_unlock(&ggtt->vm.mutex);
 }
 
 static void cleanup_objects(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 852b06cb50a0..35eb40e5de91 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -1237,7 +1237,10 @@ static void track_vma_bind(struct i915_vma *vma)
 	__i915_gem_object_pin_pages(obj);
 
 	vma->pages = obj->mm.pages;
+
+	mutex_lock(&vma->vm->mutex);
 	list_move_tail(&vma->vm_link, &vma->vm->bound_list);
+	mutex_unlock(&vma->vm->mutex);
 }
 
 static int exercise_mock(struct drm_i915_private *i915,
-- 
2.20.1
