From: Jason Gunthorpe <jgg@ziepe.ca>
To: Jerome Glisse <jglisse@redhat.com>,
	Ralph Campbell <rcampbell@nvidia.com>,
	Felix.Kuehling@amd.com
Cc: linux-mm@kvack.org, John Hubbard <jhubbard@nvidia.com>,
	dri-devel@lists.freedesktop.org, amd-gfx@lists.freedesktop.org,
	Christoph Hellwig <hch@lst.de>, Philip Yang <Philip.Yang@amd.com>,
	Jason Gunthorpe <jgg@mellanox.com>
Subject: [PATCH hmm 7/8] mm/hmm: return -EFAULT when setting HMM_PFN_ERROR on requested valid pages
Date: Wed, 11 Mar 2020 15:35:05 -0300	[thread overview]
Message-ID: <20200311183506.3997-8-jgg@ziepe.ca> (raw)
In-Reply-To: <20200311183506.3997-1-jgg@ziepe.ca>

From: Jason Gunthorpe <jgg@mellanox.com>

hmm_range_fault() should never return 0 when the caller requested a valid
page but the pfns output for that page is set to HMM_PFN_ERROR.

hmm_pte_need_fault() must always be called before setting HMM_PFN_ERROR, to
detect whether the caller actually requires the page to be faulted in.

Fix two cases in hmm_vma_walk_pmd() and reorganize some of the duplicated
code.
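
For context, callers treat a successful return as "every page I required is
valid", so quietly writing HMM_PFN_ERROR while still returning success leaves
them consuming a bogus entry. A minimal sketch of such a caller follows
(illustrative only: example_fault_range() is hypothetical, locking and
mmu_interval_notifier retry logic are omitted, and the two-argument
hmm_range_fault() matches the API at the time of this series, not current
kernels):

	/*
	 * Hypothetical hmm_range_fault() caller, shown only to
	 * illustrate the invariant this patch enforces.
	 */
	static int example_fault_range(struct hmm_range *range)
	{
		long ret;

		ret = hmm_range_fault(range, 0);
		if (ret < 0)
			return ret;	/* e.g. -EFAULT, -EBUSY */

		/*
		 * With this fix, a non-negative return guarantees that
		 * no pfn the caller required valid was filled with
		 * HMM_PFN_ERROR, so range->pfns[] can be consumed
		 * directly.
		 */
		return 0;
	}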

Fixes: d08faca018c4 ("mm/hmm: properly handle migration pmd")
Fixes: da4c3c735ea4 ("mm/hmm/mirror: helper to snapshot CPU page table")
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
---
 mm/hmm.c | 38 +++++++++++++++++++++-----------------
 1 file changed, 21 insertions(+), 17 deletions(-)

diff --git a/mm/hmm.c b/mm/hmm.c
index bf676cfef3e8ee..f61fddf2ef6505 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -363,8 +363,10 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
 {
 	struct hmm_vma_walk *hmm_vma_walk = walk->private;
 	struct hmm_range *range = hmm_vma_walk->range;
-	uint64_t *pfns = range->pfns;
-	unsigned long addr = start, i;
+	uint64_t *pfns = &range->pfns[(start - range->start) >> PAGE_SHIFT];
+	unsigned long npages = (end - start) >> PAGE_SHIFT;
+	unsigned long addr = start;
+	bool fault, write_fault;
 	pte_t *ptep;
 	pmd_t pmd;
 
@@ -374,14 +376,6 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
 		return hmm_vma_walk_hole(start, end, -1, walk);
 
 	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
-		bool fault, write_fault;
-		unsigned long npages;
-		uint64_t *pfns;
-
-		i = (addr - range->start) >> PAGE_SHIFT;
-		npages = (end - addr) >> PAGE_SHIFT;
-		pfns = &range->pfns[i];
-
 		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
 				     0, &fault, &write_fault);
 		if (fault || write_fault) {
@@ -390,8 +384,15 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
 			return -EBUSY;
 		}
 		return hmm_pfns_fill(start, end, range, HMM_PFN_NONE);
-	} else if (!pmd_present(pmd))
+	}
+
+	if (!pmd_present(pmd)) {
+		hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0, &fault,
+				     &write_fault);
+		if (fault || write_fault)
+			return -EFAULT;
 		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
+	}
 
 	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
 		/*
@@ -408,8 +409,7 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
 		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
 			goto again;
 
-		i = (addr - range->start) >> PAGE_SHIFT;
-		return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd);
+		return hmm_vma_handle_pmd(walk, addr, end, pfns, pmd);
 	}
 
 	/*
@@ -418,15 +418,19 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
 	 * entry pointing to pte directory or it is a bad pmd that will not
 	 * recover.
 	 */
-	if (pmd_bad(pmd))
+	if (pmd_bad(pmd)) {
+		hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0, &fault,
+				     &write_fault);
+		if (fault || write_fault)
+			return -EFAULT;
 		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
+	}
 
 	ptep = pte_offset_map(pmdp, addr);
-	i = (addr - range->start) >> PAGE_SHIFT;
-	for (; addr < end; addr += PAGE_SIZE, ptep++, i++) {
+	for (; addr < end; addr += PAGE_SIZE, ptep++, pfns++) {
 		int r;
 
-		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, &pfns[i]);
+		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, pfns);
 		if (r) {
 			/* hmm_vma_handle_pte() did pte_unmap() */
 			hmm_vma_walk->last = addr;
-- 
2.25.1
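
As a worked example of the new pointer setup at the top of hmm_vma_walk_pmd()
(assumed values; 4 KiB pages, so PAGE_SHIFT == 12):

	range->start = 0x100000, start = 0x103000, end = 0x105000
	pfns   = &range->pfns[(0x103000 - 0x100000) >> 12] = &range->pfns[3]
	npages = (0x105000 - 0x103000) >> 12               = 2

Computing the offset and page count once at entry lets every branch reuse
pfns and npages instead of rederiving the index i, which is what allows the
duplicated fault-check blocks in the migration, !pmd_present() and pmd_bad()
paths to collapse.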



  parent reply	other threads:[~2020-03-11 18:35 UTC|newest]

Thread overview: 153+ messages
2020-03-11 18:34 [PATCH hmm 0/8] Various error case bug fixes for hmm_range_fault() Jason Gunthorpe
2020-03-11 18:34 ` [PATCH hmm 1/8] mm/hmm: add missing unmaps of the ptep during hmm_vma_handle_pte() Jason Gunthorpe
2020-03-12  1:28   ` Ralph Campbell
2020-03-12 14:24     ` Jason Gunthorpe
2020-03-16  8:58   ` Christoph Hellwig
2020-03-11 18:35 ` [PATCH hmm 2/8] mm/hmm: don't free the cached pgmap while scanning Jason Gunthorpe
2020-03-12  1:29   ` Ralph Campbell
2020-03-16  9:02   ` Christoph Hellwig
2020-03-16 18:07     ` Jason Gunthorpe
2020-03-16 18:13       ` Christoph Hellwig
2020-03-16 19:23         ` Jason Gunthorpe
2020-03-11 18:35 ` [PATCH hmm 3/8] mm/hmm: do not call hmm_vma_walk_hole() while holding a spinlock Jason Gunthorpe
2020-03-12  1:31   ` Ralph Campbell
2020-03-12  8:54   ` Steven Price
2020-03-12 10:28     ` [PATCH] mm/hmm: Simplify hmm_vma_walk_pud slightly Steven Price
2020-03-12 14:27       ` Jason Gunthorpe
2020-03-12 14:40         ` Steven Price
2020-03-12 15:11           ` Jason Gunthorpe
2020-03-12 16:16             ` Steven Price
2020-03-12 16:37               ` Jason Gunthorpe
2020-03-12 17:02                 ` Steven Price
2020-03-12 17:17                   ` Jason Gunthorpe
2020-03-13 19:55                   ` Jason Gunthorpe
2020-03-13 21:04                     ` Matthew Wilcox
2020-03-13 22:51                       ` Jason Gunthorpe
2020-03-16  9:05   ` [PATCH hmm 3/8] mm/hmm: do not call hmm_vma_walk_hole() while holding a spinlock Christoph Hellwig
2020-03-16 12:56     ` Jason Gunthorpe
2020-03-11 18:35 ` [PATCH hmm 4/8] mm/hmm: add missing pfns set to hmm_vma_walk_pmd() Jason Gunthorpe
2020-03-12  1:33   ` Ralph Campbell
2020-03-16  9:06   ` Christoph Hellwig
2020-03-11 18:35 ` [PATCH hmm 5/8] mm/hmm: add missing call to hmm_range_need_fault() before returning EFAULT Jason Gunthorpe
2020-03-12  1:34   ` Ralph Campbell
2020-03-16  9:07   ` Christoph Hellwig
2020-03-11 18:35 ` [PATCH hmm 6/8] mm/hmm: reorganize how !pte_present is handled in hmm_vma_handle_pte() Jason Gunthorpe
2020-03-12  1:36   ` Ralph Campbell
2020-03-16  9:11   ` Christoph Hellwig
2020-03-11 18:35 ` [PATCH hmm 7/8] mm/hmm: return -EFAULT when setting HMM_PFN_ERROR on requested valid pages Jason Gunthorpe [this message]
2020-03-12  1:36   ` Ralph Campbell
2020-03-12 14:35     ` Jason Gunthorpe
2020-03-16  9:12   ` Christoph Hellwig
2020-03-11 18:35 ` [PATCH hmm 8/8] mm/hmm: add missing call to hmm_pte_need_fault in HMM_PFN_SPECIAL handling Jason Gunthorpe
2020-03-12  1:38   ` Ralph Campbell
2020-03-16  9:13   ` Christoph Hellwig
2020-03-16 12:10     ` Jason Gunthorpe
2020-03-16 12:49       ` Christoph Hellwig
2020-03-16 13:04         ` Jason Gunthorpe
2020-03-16 13:12           ` Christoph Hellwig
2020-03-17 12:32             ` Christoph Hellwig
2020-03-17 12:53               ` Jason Gunthorpe
2020-03-17 13:06                 ` Christoph Hellwig
2020-03-17 13:25                   ` Jason Gunthorpe
2020-03-16 12:51   ` Christoph Hellwig
2020-03-12 19:33 ` [PATCH hmm 9/8] mm/hmm: do not check pmd_protnone twice in hmm_vma_handle_pmd() Jason Gunthorpe
2020-03-12 23:50   ` Ralph Campbell
2020-03-16  9:14   ` Christoph Hellwig
2020-03-16 18:25 ` [PATCH hmm 0/8] Various error case bug fixes for hmm_range_fault() Jason Gunthorpe
