All of lore.kernel.org
 help / color / mirror / Atom feed
From: Jason Gunthorpe <jgg-uk2M96/98Pc@public.gmane.org>
To: Jerome Glisse <jglisse-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>,
	Ralph Campbell
	<rcampbell-DDmLM1+adcrQT0dZR+AlfA@public.gmane.org>,
	John Hubbard <jhubbard-DDmLM1+adcrQT0dZR+AlfA@public.gmane.org>,
	Felix.Kuehling-5C7GfCeVMHo@public.gmane.org
Cc: linux-rdma-u79uwXL29TY76Z2rM5mHXA@public.gmane.org,
	linux-mm-Bw31MaZKKs3YtjvyW6yDsg@public.gmane.org,
	Andrea Arcangeli
	<aarcange-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org,
	dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
Subject: [PATCH v2 12/11] mm/hmm: Fix error flows in hmm_invalidate_range_start
Date: Fri, 7 Jun 2019 13:05:57 -0300	[thread overview]
Message-ID: <20190607160557.GA335@ziepe.ca> (raw)
In-Reply-To: <20190606184438.31646-1-jgg-uk2M96/98Pc@public.gmane.org>

If the trylock on the hmm->mirrors_sem fails, the function will return
without decrementing the notifiers that were previously incremented. Since
the caller will not call invalidate_range_end() on EAGAIN, this will result
in the notifiers count becoming permanently incremented, and deadlock.

If sync_cpu_device_pagetables() required blocking, the function will
not return EAGAIN even though the device continues to touch the
pages. This is a violation of the mmu notifier contract.

Switch the lock to a spin lock, and rename it to ranges_lock, so we can
reliably obtain it without blocking during error unwind.

The error unwind is necessary since the notifiers count must be held
incremented across the call to sync_cpu_device_pagetables() as we cannot
allow the range to become marked valid by a parallel
invalidate_start/end() pair while doing sync_cpu_device_pagetables().

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
---
 include/linux/hmm.h |  2 +-
 mm/hmm.c            | 77 +++++++++++++++++++++++++++------------------
 2 files changed, 48 insertions(+), 31 deletions(-)

I almost lost this patch - it is part of the series, hasn't been
posted before, and wasn't sent with the rest, sorry.

diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index bf013e96525771..0fa8ea34ccef6d 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -86,7 +86,7 @@
 struct hmm {
 	struct mm_struct	*mm;
 	struct kref		kref;
-	struct mutex		lock;
+	spinlock_t		ranges_lock;
 	struct list_head	ranges;
 	struct list_head	mirrors;
 	struct mmu_notifier	mmu_notifier;
diff --git a/mm/hmm.c b/mm/hmm.c
index 4215edf737ef5b..10103a24e9b7b3 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -68,7 +68,7 @@ static struct hmm *hmm_get_or_create(struct mm_struct *mm)
 	init_rwsem(&hmm->mirrors_sem);
 	hmm->mmu_notifier.ops = NULL;
 	INIT_LIST_HEAD(&hmm->ranges);
-	mutex_init(&hmm->lock);
+	spin_lock_init(&hmm->ranges_lock);
 	kref_init(&hmm->kref);
 	hmm->notifiers = 0;
 	hmm->mm = mm;
@@ -114,18 +114,19 @@ static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 {
 	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
 	struct hmm_mirror *mirror;
+	unsigned long flags;
 
 	/* Bail out if hmm is in the process of being freed */
 	if (!kref_get_unless_zero(&hmm->kref))
 		return;
 
-	mutex_lock(&hmm->lock);
+	spin_lock_irqsave(&hmm->ranges_lock, flags);
 	/*
 	 * Since hmm_range_register() holds the mmget() lock hmm_release() is
 	 * prevented as long as a range exists.
 	 */
 	WARN_ON(!list_empty(&hmm->ranges));
-	mutex_unlock(&hmm->lock);
+	spin_unlock_irqrestore(&hmm->ranges_lock, flags);
 
 	down_read(&hmm->mirrors_sem);
 	list_for_each_entry(mirror, &hmm->mirrors, list) {
@@ -141,6 +142,23 @@ static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 	hmm_put(hmm);
 }
 
+static void notifiers_decrement(struct hmm *hmm)
+{
+	lockdep_assert_held(&hmm->ranges_lock);
+
+	hmm->notifiers--;
+	if (!hmm->notifiers) {
+		struct hmm_range *range;
+
+		list_for_each_entry(range, &hmm->ranges, list) {
+			if (range->valid)
+				continue;
+			range->valid = true;
+		}
+		wake_up_all(&hmm->wq);
+	}
+}
+
 static int hmm_invalidate_range_start(struct mmu_notifier *mn,
 			const struct mmu_notifier_range *nrange)
 {
@@ -148,6 +166,7 @@ static int hmm_invalidate_range_start(struct mmu_notifier *mn,
 	struct hmm_mirror *mirror;
 	struct hmm_update update;
 	struct hmm_range *range;
+	unsigned long flags;
 	int ret = 0;
 
 	if (!kref_get_unless_zero(&hmm->kref))
@@ -158,12 +177,7 @@ static int hmm_invalidate_range_start(struct mmu_notifier *mn,
 	update.event = HMM_UPDATE_INVALIDATE;
 	update.blockable = mmu_notifier_range_blockable(nrange);
 
-	if (mmu_notifier_range_blockable(nrange))
-		mutex_lock(&hmm->lock);
-	else if (!mutex_trylock(&hmm->lock)) {
-		ret = -EAGAIN;
-		goto out;
-	}
+	spin_lock_irqsave(&hmm->ranges_lock, flags);
 	hmm->notifiers++;
 	list_for_each_entry(range, &hmm->ranges, list) {
 		if (update.end < range->start || update.start >= range->end)
@@ -171,7 +185,7 @@ static int hmm_invalidate_range_start(struct mmu_notifier *mn,
 
 		range->valid = false;
 	}
-	mutex_unlock(&hmm->lock);
+	spin_unlock_irqrestore(&hmm->ranges_lock, flags);
 
 	if (mmu_notifier_range_blockable(nrange))
 		down_read(&hmm->mirrors_sem);
@@ -179,16 +193,26 @@ static int hmm_invalidate_range_start(struct mmu_notifier *mn,
 		ret = -EAGAIN;
 		goto out;
 	}
+
 	list_for_each_entry(mirror, &hmm->mirrors, list) {
-		int ret;
+		int rc;
 
-		ret = mirror->ops->sync_cpu_device_pagetables(mirror, &update);
-		if (!update.blockable && ret == -EAGAIN)
+		rc = mirror->ops->sync_cpu_device_pagetables(mirror, &update);
+		if (rc) {
+			if (WARN_ON(update.blockable || rc != -EAGAIN))
+				continue;
+			ret = -EAGAIN;
 			break;
+		}
 	}
 	up_read(&hmm->mirrors_sem);
 
 out:
+	if (ret) {
+		spin_lock_irqsave(&hmm->ranges_lock, flags);
+		notifiers_decrement(hmm);
+		spin_unlock_irqrestore(&hmm->ranges_lock, flags);
+	}
 	hmm_put(hmm);
 	return ret;
 }
@@ -197,23 +221,14 @@ static void hmm_invalidate_range_end(struct mmu_notifier *mn,
 			const struct mmu_notifier_range *nrange)
 {
 	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
+	unsigned long flags;
 
 	if (!kref_get_unless_zero(&hmm->kref))
 		return;
 
-	mutex_lock(&hmm->lock);
-	hmm->notifiers--;
-	if (!hmm->notifiers) {
-		struct hmm_range *range;
-
-		list_for_each_entry(range, &hmm->ranges, list) {
-			if (range->valid)
-				continue;
-			range->valid = true;
-		}
-		wake_up_all(&hmm->wq);
-	}
-	mutex_unlock(&hmm->lock);
+	spin_lock_irqsave(&hmm->ranges_lock, flags);
+	notifiers_decrement(hmm);
+	spin_unlock_irqrestore(&hmm->ranges_lock, flags);
 
 	hmm_put(hmm);
 }
@@ -866,6 +881,7 @@ int hmm_range_register(struct hmm_range *range,
 {
 	unsigned long mask = ((1UL << page_shift) - 1UL);
 	struct hmm *hmm = mirror->hmm;
+	unsigned long flags;
 
 	range->valid = false;
 	range->hmm = NULL;
@@ -887,7 +903,7 @@ int hmm_range_register(struct hmm_range *range,
 	kref_get(&hmm->kref);
 
 	/* Initialize range to track CPU page table updates. */
-	mutex_lock(&hmm->lock);
+	spin_lock_irqsave(&hmm->ranges_lock, flags);
 
 	range->hmm = hmm;
 	list_add(&range->list, &hmm->ranges);
@@ -898,7 +914,7 @@ int hmm_range_register(struct hmm_range *range,
 	 */
 	if (!hmm->notifiers)
 		range->valid = true;
-	mutex_unlock(&hmm->lock);
+	spin_unlock_irqrestore(&hmm->ranges_lock, flags);
 
 	return 0;
 }
@@ -914,13 +930,14 @@ EXPORT_SYMBOL(hmm_range_register);
 void hmm_range_unregister(struct hmm_range *range)
 {
 	struct hmm *hmm = range->hmm;
+	unsigned long flags;
 
 	if (WARN_ON(range->end <= range->start))
 		return;
 
-	mutex_lock(&hmm->lock);
+	spin_lock_irqsave(&hmm->ranges_lock, flags);
 	list_del(&range->list);
-	mutex_unlock(&hmm->lock);
+	spin_unlock_irqrestore(&hmm->ranges_lock, flags);
 
 	/* Drop reference taken by hmm_range_register() */
 	range->valid = false;
-- 
2.21.0

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

WARNING: multiple messages have this Message-ID (diff)
From: Jason Gunthorpe <jgg@ziepe.ca>
To: Jerome Glisse <jglisse@redhat.com>,
	Ralph Campbell <rcampbell@nvidia.com>,
	John Hubbard <jhubbard@nvidia.com>,
	Felix.Kuehling@amd.com
Cc: linux-rdma@vger.kernel.org, linux-mm@kvack.org,
	Andrea Arcangeli <aarcange@redhat.com>,
	dri-devel@lists.freedesktop.org, amd-gfx@lists.freedesktop.org
Subject: [PATCH v2 12/11] mm/hmm: Fix error flows in hmm_invalidate_range_start
Date: Fri, 7 Jun 2019 13:05:57 -0300	[thread overview]
Message-ID: <20190607160557.GA335@ziepe.ca> (raw)
In-Reply-To: <20190606184438.31646-1-jgg@ziepe.ca>

If the trylock on the hmm->mirrors_sem fails, the function will return
without decrementing the notifiers that were previously incremented. Since
the caller will not call invalidate_range_end() on EAGAIN, this will result
in the notifiers count becoming permanently incremented, and deadlock.

If sync_cpu_device_pagetables() required blocking, the function will
not return EAGAIN even though the device continues to touch the
pages. This is a violation of the mmu notifier contract.

Switch the lock to a spin lock, and rename it to ranges_lock, so we can
reliably obtain it without blocking during error unwind.

The error unwind is necessary since the notifiers count must be held
incremented across the call to sync_cpu_device_pagetables() as we cannot
allow the range to become marked valid by a parallel
invalidate_start/end() pair while doing sync_cpu_device_pagetables().

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
---
 include/linux/hmm.h |  2 +-
 mm/hmm.c            | 77 +++++++++++++++++++++++++++------------------
 2 files changed, 48 insertions(+), 31 deletions(-)

I almost lost this patch - it is part of the series, hasn't been
posted before, and wasn't sent with the rest, sorry.

diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index bf013e96525771..0fa8ea34ccef6d 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -86,7 +86,7 @@
 struct hmm {
 	struct mm_struct	*mm;
 	struct kref		kref;
-	struct mutex		lock;
+	spinlock_t		ranges_lock;
 	struct list_head	ranges;
 	struct list_head	mirrors;
 	struct mmu_notifier	mmu_notifier;
diff --git a/mm/hmm.c b/mm/hmm.c
index 4215edf737ef5b..10103a24e9b7b3 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -68,7 +68,7 @@ static struct hmm *hmm_get_or_create(struct mm_struct *mm)
 	init_rwsem(&hmm->mirrors_sem);
 	hmm->mmu_notifier.ops = NULL;
 	INIT_LIST_HEAD(&hmm->ranges);
-	mutex_init(&hmm->lock);
+	spin_lock_init(&hmm->ranges_lock);
 	kref_init(&hmm->kref);
 	hmm->notifiers = 0;
 	hmm->mm = mm;
@@ -114,18 +114,19 @@ static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 {
 	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
 	struct hmm_mirror *mirror;
+	unsigned long flags;
 
 	/* Bail out if hmm is in the process of being freed */
 	if (!kref_get_unless_zero(&hmm->kref))
 		return;
 
-	mutex_lock(&hmm->lock);
+	spin_lock_irqsave(&hmm->ranges_lock, flags);
 	/*
 	 * Since hmm_range_register() holds the mmget() lock hmm_release() is
 	 * prevented as long as a range exists.
 	 */
 	WARN_ON(!list_empty(&hmm->ranges));
-	mutex_unlock(&hmm->lock);
+	spin_unlock_irqrestore(&hmm->ranges_lock, flags);
 
 	down_read(&hmm->mirrors_sem);
 	list_for_each_entry(mirror, &hmm->mirrors, list) {
@@ -141,6 +142,23 @@ static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 	hmm_put(hmm);
 }
 
+static void notifiers_decrement(struct hmm *hmm)
+{
+	lockdep_assert_held(&hmm->ranges_lock);
+
+	hmm->notifiers--;
+	if (!hmm->notifiers) {
+		struct hmm_range *range;
+
+		list_for_each_entry(range, &hmm->ranges, list) {
+			if (range->valid)
+				continue;
+			range->valid = true;
+		}
+		wake_up_all(&hmm->wq);
+	}
+}
+
 static int hmm_invalidate_range_start(struct mmu_notifier *mn,
 			const struct mmu_notifier_range *nrange)
 {
@@ -148,6 +166,7 @@ static int hmm_invalidate_range_start(struct mmu_notifier *mn,
 	struct hmm_mirror *mirror;
 	struct hmm_update update;
 	struct hmm_range *range;
+	unsigned long flags;
 	int ret = 0;
 
 	if (!kref_get_unless_zero(&hmm->kref))
@@ -158,12 +177,7 @@ static int hmm_invalidate_range_start(struct mmu_notifier *mn,
 	update.event = HMM_UPDATE_INVALIDATE;
 	update.blockable = mmu_notifier_range_blockable(nrange);
 
-	if (mmu_notifier_range_blockable(nrange))
-		mutex_lock(&hmm->lock);
-	else if (!mutex_trylock(&hmm->lock)) {
-		ret = -EAGAIN;
-		goto out;
-	}
+	spin_lock_irqsave(&hmm->ranges_lock, flags);
 	hmm->notifiers++;
 	list_for_each_entry(range, &hmm->ranges, list) {
 		if (update.end < range->start || update.start >= range->end)
@@ -171,7 +185,7 @@ static int hmm_invalidate_range_start(struct mmu_notifier *mn,
 
 		range->valid = false;
 	}
-	mutex_unlock(&hmm->lock);
+	spin_unlock_irqrestore(&hmm->ranges_lock, flags);
 
 	if (mmu_notifier_range_blockable(nrange))
 		down_read(&hmm->mirrors_sem);
@@ -179,16 +193,26 @@ static int hmm_invalidate_range_start(struct mmu_notifier *mn,
 		ret = -EAGAIN;
 		goto out;
 	}
+
 	list_for_each_entry(mirror, &hmm->mirrors, list) {
-		int ret;
+		int rc;
 
-		ret = mirror->ops->sync_cpu_device_pagetables(mirror, &update);
-		if (!update.blockable && ret == -EAGAIN)
+		rc = mirror->ops->sync_cpu_device_pagetables(mirror, &update);
+		if (rc) {
+			if (WARN_ON(update.blockable || rc != -EAGAIN))
+				continue;
+			ret = -EAGAIN;
 			break;
+		}
 	}
 	up_read(&hmm->mirrors_sem);
 
 out:
+	if (ret) {
+		spin_lock_irqsave(&hmm->ranges_lock, flags);
+		notifiers_decrement(hmm);
+		spin_unlock_irqrestore(&hmm->ranges_lock, flags);
+	}
 	hmm_put(hmm);
 	return ret;
 }
@@ -197,23 +221,14 @@ static void hmm_invalidate_range_end(struct mmu_notifier *mn,
 			const struct mmu_notifier_range *nrange)
 {
 	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
+	unsigned long flags;
 
 	if (!kref_get_unless_zero(&hmm->kref))
 		return;
 
-	mutex_lock(&hmm->lock);
-	hmm->notifiers--;
-	if (!hmm->notifiers) {
-		struct hmm_range *range;
-
-		list_for_each_entry(range, &hmm->ranges, list) {
-			if (range->valid)
-				continue;
-			range->valid = true;
-		}
-		wake_up_all(&hmm->wq);
-	}
-	mutex_unlock(&hmm->lock);
+	spin_lock_irqsave(&hmm->ranges_lock, flags);
+	notifiers_decrement(hmm);
+	spin_unlock_irqrestore(&hmm->ranges_lock, flags);
 
 	hmm_put(hmm);
 }
@@ -866,6 +881,7 @@ int hmm_range_register(struct hmm_range *range,
 {
 	unsigned long mask = ((1UL << page_shift) - 1UL);
 	struct hmm *hmm = mirror->hmm;
+	unsigned long flags;
 
 	range->valid = false;
 	range->hmm = NULL;
@@ -887,7 +903,7 @@ int hmm_range_register(struct hmm_range *range,
 	kref_get(&hmm->kref);
 
 	/* Initialize range to track CPU page table updates. */
-	mutex_lock(&hmm->lock);
+	spin_lock_irqsave(&hmm->ranges_lock, flags);
 
 	range->hmm = hmm;
 	list_add(&range->list, &hmm->ranges);
@@ -898,7 +914,7 @@ int hmm_range_register(struct hmm_range *range,
 	 */
 	if (!hmm->notifiers)
 		range->valid = true;
-	mutex_unlock(&hmm->lock);
+	spin_unlock_irqrestore(&hmm->ranges_lock, flags);
 
 	return 0;
 }
@@ -914,13 +930,14 @@ EXPORT_SYMBOL(hmm_range_register);
 void hmm_range_unregister(struct hmm_range *range)
 {
 	struct hmm *hmm = range->hmm;
+	unsigned long flags;
 
 	if (WARN_ON(range->end <= range->start))
 		return;
 
-	mutex_lock(&hmm->lock);
+	spin_lock_irqsave(&hmm->ranges_lock, flags);
 	list_del(&range->list);
-	mutex_unlock(&hmm->lock);
+	spin_unlock_irqrestore(&hmm->ranges_lock, flags);
 
 	/* Drop reference taken by hmm_range_register() */
 	range->valid = false;
-- 
2.21.0


  parent reply	other threads:[~2019-06-07 16:05 UTC|newest]

Thread overview: 158+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2019-06-06 18:44 [PATCH v2 hmm 00/11] Various revisions from a locking/code review Jason Gunthorpe
2019-06-06 18:44 ` Jason Gunthorpe
     [not found] ` <20190606184438.31646-1-jgg-uk2M96/98Pc@public.gmane.org>
2019-06-06 18:44   ` [PATCH v2 hmm 01/11] mm/hmm: fix use after free with struct hmm in the mmu notifiers Jason Gunthorpe
2019-06-06 18:44     ` Jason Gunthorpe
     [not found]     ` <20190606184438.31646-2-jgg-uk2M96/98Pc@public.gmane.org>
2019-06-07  2:29       ` John Hubbard
2019-06-07  2:29         ` John Hubbard
     [not found]         ` <9c72d18d-2924-cb90-ea44-7cd4b10b5bc2-DDmLM1+adcrQT0dZR+AlfA@public.gmane.org>
2019-06-07 12:34           ` Jason Gunthorpe
2019-06-07 12:34             ` Jason Gunthorpe
     [not found]             ` <20190607123432.GB14802-uk2M96/98Pc@public.gmane.org>
2019-06-07 13:42               ` Jason Gunthorpe
2019-06-07 13:42                 ` Jason Gunthorpe
2019-06-08  1:13             ` John Hubbard
2019-06-08  1:13               ` John Hubbard
2019-06-08  1:37             ` John Hubbard
2019-06-08  1:37               ` John Hubbard
2019-06-07 18:12       ` Ralph Campbell
2019-06-07 18:12         ` Ralph Campbell
2019-06-08  8:49       ` Christoph Hellwig
2019-06-08  8:49         ` Christoph Hellwig
2019-06-08 11:33         ` Jason Gunthorpe
2019-06-08 11:33           ` Jason Gunthorpe
2019-06-06 18:44   ` [PATCH v2 hmm 02/11] mm/hmm: Use hmm_mirror not mm as an argument for hmm_range_register Jason Gunthorpe
2019-06-06 18:44     ` Jason Gunthorpe
2019-06-07 22:33     ` Ira Weiny
2019-06-07 22:33       ` Ira Weiny
     [not found]     ` <20190606184438.31646-3-jgg-uk2M96/98Pc@public.gmane.org>
2019-06-07  2:36       ` John Hubbard
2019-06-07  2:36         ` John Hubbard
2019-06-07 18:24       ` Ralph Campbell
2019-06-07 18:24         ` Ralph Campbell
2019-06-07 22:39         ` Ralph Campbell
2019-06-07 22:39           ` Ralph Campbell
     [not found]           ` <e460ddf5-9ed3-7f3b-98ce-526c12fdb8b1-DDmLM1+adcrQT0dZR+AlfA@public.gmane.org>
2019-06-10 13:09             ` Jason Gunthorpe
2019-06-10 13:09               ` Jason Gunthorpe
2019-06-08  8:54       ` Christoph Hellwig
2019-06-08  8:54         ` Christoph Hellwig
     [not found]         ` <20190608085425.GB32185-wEGCiKHe2LqWVfeAwA7xHQ@public.gmane.org>
2019-06-11 19:44           ` Jason Gunthorpe
2019-06-11 19:44             ` Jason Gunthorpe
     [not found]             ` <20190611194431.GC29375-uk2M96/98Pc@public.gmane.org>
2019-06-12  7:12               ` Christoph Hellwig
2019-06-12  7:12                 ` Christoph Hellwig
     [not found]                 ` <20190612071234.GA20306-wEGCiKHe2LqWVfeAwA7xHQ@public.gmane.org>
2019-06-12 11:41                   ` Jason Gunthorpe
2019-06-12 11:41                     ` Jason Gunthorpe
     [not found]                     ` <20190612114125.GA3876-uk2M96/98Pc@public.gmane.org>
2019-06-12 12:11                       ` Christoph Hellwig
2019-06-12 12:11                         ` Christoph Hellwig
2019-06-06 18:44   ` [PATCH v2 hmm 03/11] mm/hmm: Hold a mmgrab from hmm to mm Jason Gunthorpe
2019-06-06 18:44     ` Jason Gunthorpe
     [not found]     ` <20190606184438.31646-4-jgg-uk2M96/98Pc@public.gmane.org>
2019-06-07  2:44       ` John Hubbard
2019-06-07  2:44         ` John Hubbard
     [not found]         ` <48fcaa19-6ac3-59d0-cd51-455abeca7cdb-DDmLM1+adcrQT0dZR+AlfA@public.gmane.org>
2019-06-07 12:36           ` Jason Gunthorpe
2019-06-07 12:36             ` Jason Gunthorpe
2019-06-07 18:41       ` Ralph Campbell
2019-06-07 18:41         ` Ralph Campbell
     [not found]         ` <605172dc-5c66-123f-61a3-8e6880678aef-DDmLM1+adcrQT0dZR+AlfA@public.gmane.org>
2019-06-07 18:51           ` Jason Gunthorpe
2019-06-07 18:51             ` Jason Gunthorpe
2019-06-07 22:38     ` Ira Weiny
2019-06-07 22:38       ` Ira Weiny
2019-06-06 18:44   ` [PATCH v2 hmm 04/11] mm/hmm: Simplify hmm_get_or_create and make it reliable Jason Gunthorpe
2019-06-06 18:44     ` Jason Gunthorpe
2019-06-07  2:54     ` John Hubbard
2019-06-07  2:54       ` John Hubbard
     [not found]     ` <20190606184438.31646-5-jgg-uk2M96/98Pc@public.gmane.org>
2019-06-07 18:52       ` Ralph Campbell
2019-06-07 18:52         ` Ralph Campbell
2019-06-07 22:44     ` Ira Weiny
2019-06-07 22:44       ` Ira Weiny
2019-06-06 18:44   ` [PATCH v2 hmm 05/11] mm/hmm: Remove duplicate condition test before wait_event_timeout Jason Gunthorpe
2019-06-06 18:44     ` Jason Gunthorpe
2019-06-07  3:06     ` John Hubbard
2019-06-07  3:06       ` John Hubbard
     [not found]       ` <86962e22-88b1-c1bf-d704-d5a5053fa100-DDmLM1+adcrQT0dZR+AlfA@public.gmane.org>
2019-06-07 12:47         ` Jason Gunthorpe
2019-06-07 12:47           ` Jason Gunthorpe
2019-06-07 13:31         ` [PATCH v3 " Jason Gunthorpe
2019-06-07 13:31           ` Jason Gunthorpe
2019-06-07 22:55           ` Ira Weiny
2019-06-07 22:55             ` Ira Weiny
2019-06-08  1:32           ` John Hubbard
2019-06-08  1:32             ` John Hubbard
     [not found]     ` <20190606184438.31646-6-jgg-uk2M96/98Pc@public.gmane.org>
2019-06-07 19:01       ` [PATCH v2 " Ralph Campbell
2019-06-07 19:01         ` Ralph Campbell
     [not found]         ` <6833be96-12a3-1a1c-1514-c148ba2dd87b-DDmLM1+adcrQT0dZR+AlfA@public.gmane.org>
2019-06-07 19:13           ` Jason Gunthorpe
2019-06-07 19:13             ` Jason Gunthorpe
     [not found]             ` <20190607191302.GR14802-uk2M96/98Pc@public.gmane.org>
2019-06-07 20:21               ` Ralph Campbell
2019-06-07 20:21                 ` Ralph Campbell
2019-06-07 20:44                 ` Jason Gunthorpe
2019-06-07 20:44                   ` Jason Gunthorpe
2019-06-07 22:13                   ` Ralph Campbell
2019-06-07 22:13                     ` Ralph Campbell
2019-06-08  1:47                     ` Jason Gunthorpe
2019-06-08  1:47                       ` Jason Gunthorpe
2019-06-06 18:44   ` [PATCH v2 hmm 06/11] mm/hmm: Hold on to the mmget for the lifetime of the range Jason Gunthorpe
2019-06-06 18:44     ` Jason Gunthorpe
2019-06-07  3:15     ` John Hubbard
2019-06-07  3:15       ` John Hubbard
     [not found]     ` <20190606184438.31646-7-jgg-uk2M96/98Pc@public.gmane.org>
2019-06-07 20:29       ` Ralph Campbell
2019-06-07 20:29         ` Ralph Campbell
2019-06-06 18:44   ` [PATCH v2 hmm 07/11] mm/hmm: Use lockdep instead of comments Jason Gunthorpe
2019-06-06 18:44     ` Jason Gunthorpe
2019-06-07  3:19     ` John Hubbard
2019-06-07  3:19       ` John Hubbard
     [not found]     ` <20190606184438.31646-8-jgg-uk2M96/98Pc@public.gmane.org>
2019-06-07 20:31       ` Ralph Campbell
2019-06-07 20:31         ` Ralph Campbell
2019-06-07 22:16     ` Souptick Joarder
2019-06-07 22:16       ` Souptick Joarder
2019-06-06 18:44   ` [PATCH v2 hmm 08/11] mm/hmm: Remove racy protection against double-unregistration Jason Gunthorpe
2019-06-06 18:44     ` Jason Gunthorpe
2019-06-07  3:29     ` John Hubbard
2019-06-07  3:29       ` John Hubbard
     [not found]       ` <88400de9-e1ae-509b-718f-c6b0f726b14c-DDmLM1+adcrQT0dZR+AlfA@public.gmane.org>
2019-06-07 13:57         ` Jason Gunthorpe
2019-06-07 13:57           ` Jason Gunthorpe
     [not found]     ` <20190606184438.31646-9-jgg-uk2M96/98Pc@public.gmane.org>
2019-06-07 20:33       ` Ralph Campbell
2019-06-07 20:33         ` Ralph Campbell
2019-06-06 18:44   ` [PATCH v2 hmm 09/11] mm/hmm: Poison hmm_range during unregister Jason Gunthorpe
2019-06-06 18:44     ` Jason Gunthorpe
2019-06-07  3:37     ` John Hubbard
2019-06-07  3:37       ` John Hubbard
     [not found]       ` <c00da0f2-b4b8-813b-0441-a50d4de9d8be-DDmLM1+adcrQT0dZR+AlfA@public.gmane.org>
2019-06-07 14:03         ` Jason Gunthorpe
2019-06-07 14:03           ` Jason Gunthorpe
2019-06-07 20:46     ` Ralph Campbell
2019-06-07 20:46       ` Ralph Campbell
2019-06-07 20:49       ` Jason Gunthorpe
2019-06-07 20:49         ` Jason Gunthorpe
2019-06-07 23:01     ` Ira Weiny
2019-06-07 23:01       ` Ira Weiny
2019-06-06 18:44   ` [PATCH v2 hmm 10/11] mm/hmm: Do not use list*_rcu() for hmm->ranges Jason Gunthorpe
2019-06-06 18:44     ` Jason Gunthorpe
2019-06-07  3:40     ` John Hubbard
2019-06-07  3:40       ` John Hubbard
2019-06-07 20:49     ` Ralph Campbell
2019-06-07 20:49       ` Ralph Campbell
2019-06-07 22:11     ` Souptick Joarder
2019-06-07 22:11       ` Souptick Joarder
2019-06-07 23:02     ` Ira Weiny
2019-06-07 23:02       ` Ira Weiny
2019-06-06 18:44   ` [PATCH v2 hmm 11/11] mm/hmm: Remove confusing comment and logic from hmm_release Jason Gunthorpe
2019-06-06 18:44     ` Jason Gunthorpe
2019-06-07  3:47     ` John Hubbard
2019-06-07  3:47       ` John Hubbard
     [not found]       ` <3edc47bd-e8f6-0e65-5844-d16901890637-DDmLM1+adcrQT0dZR+AlfA@public.gmane.org>
2019-06-07 12:58         ` Jason Gunthorpe
2019-06-07 12:58           ` Jason Gunthorpe
2019-06-07 21:37     ` Ralph Campbell
2019-06-07 21:37       ` Ralph Campbell
2019-06-08  2:12       ` Jason Gunthorpe
2019-06-08  2:12         ` Jason Gunthorpe
     [not found]       ` <61ea869d-43d2-d1e5-dc00-cf5e3e139169-DDmLM1+adcrQT0dZR+AlfA@public.gmane.org>
2019-06-10 16:02         ` Jason Gunthorpe
2019-06-10 16:02           ` Jason Gunthorpe
2019-06-10 22:03           ` Ralph Campbell
2019-06-10 22:03             ` Ralph Campbell
2019-06-07 16:05   ` Jason Gunthorpe [this message]
2019-06-07 16:05     ` [PATCH v2 12/11] mm/hmm: Fix error flows in hmm_invalidate_range_start Jason Gunthorpe
2019-06-07 23:52     ` Ralph Campbell
2019-06-07 23:52       ` Ralph Campbell
2019-06-08  1:35       ` Jason Gunthorpe
2019-06-08  1:35         ` Jason Gunthorpe
2019-06-11 19:48   ` [PATCH v2 hmm 00/11] Various revisions from a locking/code review Jason Gunthorpe
2019-06-11 19:48     ` Jason Gunthorpe
2019-06-12 17:54     ` Kuehling, Felix
2019-06-12 17:54       ` Kuehling, Felix
     [not found]       ` <5d3b0ae2-3662-cab2-5e6c-82912f32356a-5C7GfCeVMHo@public.gmane.org>
2019-06-12 21:49         ` Yang, Philip
2019-06-12 21:49           ` Yang, Philip
2019-06-13 17:50           ` Jason Gunthorpe
2019-06-13 17:50             ` Jason Gunthorpe

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20190607160557.GA335@ziepe.ca \
    --to=jgg-uk2m96/98pc@public.gmane.org \
    --cc=Felix.Kuehling-5C7GfCeVMHo@public.gmane.org \
    --cc=aarcange-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org \
    --cc=amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org \
    --cc=dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org \
    --cc=jglisse-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org \
    --cc=jhubbard-DDmLM1+adcrQT0dZR+AlfA@public.gmane.org \
    --cc=linux-mm-Bw31MaZKKs3YtjvyW6yDsg@public.gmane.org \
    --cc=linux-rdma-u79uwXL29TY76Z2rM5mHXA@public.gmane.org \
    --cc=rcampbell-DDmLM1+adcrQT0dZR+AlfA@public.gmane.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.