All of lore.kernel.org
 help / color / mirror / Atom feed
From: Steven Price <steven.price@arm.com>
To: Jason Gunthorpe <jgg@ziepe.ca>,
	Jerome Glisse <jglisse@redhat.com>,
	Ralph Campbell <rcampbell@nvidia.com>,
	Felix.Kuehling@amd.com
Cc: Philip Yang <Philip.Yang@amd.com>,
	John Hubbard <jhubbard@nvidia.com>,
	amd-gfx@lists.freedesktop.org, linux-mm@kvack.org,
	Jason Gunthorpe <jgg@mellanox.com>,
	dri-devel@lists.freedesktop.org, Christoph Hellwig <hch@lst.de>,
	Steven Price <steven.price@arm.com>
Subject: [PATCH] mm/hmm: Simplify hmm_vma_walk_pud slightly
Date: Thu, 12 Mar 2020 10:28:13 +0000	[thread overview]
Message-ID: <20200312102813.56699-1-steven.price@arm.com> (raw)
In-Reply-To: <5bd778fa-51e5-3e0c-d9bb-b38539b03c8d@arm.com>

By refactoring to deal with the !pud_huge(pud) || !pud_devmap(pud)
condition early it's possible to remove the 'ret' variable and remove a
level of indentation from half the function, making the code easier to
read.

No functional change.

Signed-off-by: Steven Price <steven.price@arm.com>
---
Thanks to Jason's changes there were only two code paths left using
the out_unlock label so it seemed like a good opportunity to refactor.
---
 mm/hmm.c | 69 ++++++++++++++++++++++++++------------------------------
 1 file changed, 32 insertions(+), 37 deletions(-)

diff --git a/mm/hmm.c b/mm/hmm.c
index ca33d086bdc1..0117c86426d1 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -466,8 +466,10 @@ static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
 	struct hmm_range *range = hmm_vma_walk->range;
 	unsigned long addr = start;
 	pud_t pud;
-	int ret = 0;
 	spinlock_t *ptl = pud_trans_huge_lock(pudp, walk->vma);
+	unsigned long i, npages, pfn;
+	uint64_t *pfns, cpu_flags;
+	bool fault, write_fault;
 
 	if (!ptl)
 		return 0;
@@ -481,50 +483,43 @@ static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
 		return hmm_vma_walk_hole(start, end, -1, walk);
 	}
 
-	if (pud_huge(pud) && pud_devmap(pud)) {
-		unsigned long i, npages, pfn;
-		uint64_t *pfns, cpu_flags;
-		bool fault, write_fault;
+	if (!pud_huge(pud) || !pud_devmap(pud)) {
+		/* Ask for the PUD to be split */
+		walk->action = ACTION_SUBTREE;
+		spin_unlock(ptl);
+		return 0;
+	}
 
-		if (!pud_present(pud)) {
-			spin_unlock(ptl);
-			return hmm_vma_walk_hole(start, end, -1, walk);
-		}
+	if (!pud_present(pud)) {
+		spin_unlock(ptl);
+		return hmm_vma_walk_hole(start, end, -1, walk);
+	}
 
-		i = (addr - range->start) >> PAGE_SHIFT;
-		npages = (end - addr) >> PAGE_SHIFT;
-		pfns = &range->pfns[i];
+	i = (addr - range->start) >> PAGE_SHIFT;
+	npages = (end - addr) >> PAGE_SHIFT;
+	pfns = &range->pfns[i];
 
-		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
-		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
-				     cpu_flags, &fault, &write_fault);
-		if (fault || write_fault) {
-			spin_unlock(ptl);
-			return hmm_vma_walk_hole_(addr, end, fault, write_fault,
-						  walk);
-		}
+	cpu_flags = pud_to_hmm_pfn_flags(range, pud);
+	hmm_range_need_fault(hmm_vma_walk, pfns, npages,
+			     cpu_flags, &fault, &write_fault);
+	if (fault || write_fault) {
+		spin_unlock(ptl);
+		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
+	}
 
-		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
-		for (i = 0; i < npages; ++i, ++pfn) {
-			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
-					      hmm_vma_walk->pgmap);
-			if (unlikely(!hmm_vma_walk->pgmap)) {
-				ret = -EBUSY;
-				goto out_unlock;
-			}
-			pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
-				  cpu_flags;
+	pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
+	for (i = 0; i < npages; ++i, ++pfn) {
+		hmm_vma_walk->pgmap = get_dev_pagemap(pfn, hmm_vma_walk->pgmap);
+		if (unlikely(!hmm_vma_walk->pgmap)) {
+			spin_unlock(ptl);
+			return -EBUSY;
 		}
-		hmm_vma_walk->last = end;
-		goto out_unlock;
+		pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
 	}
+	hmm_vma_walk->last = end;
 
-	/* Ask for the PUD to be split */
-	walk->action = ACTION_SUBTREE;
-
-out_unlock:
 	spin_unlock(ptl);
-	return ret;
+	return 0;
 }
 #else
 #define hmm_vma_walk_pud	NULL
-- 
2.20.1



WARNING: multiple messages have this Message-ID (diff)
From: Steven Price <steven.price@arm.com>
To: Jason Gunthorpe <jgg@ziepe.ca>,
	Jerome Glisse <jglisse@redhat.com>,
	Ralph Campbell <rcampbell@nvidia.com>,
	Felix.Kuehling@amd.com
Cc: Philip Yang <Philip.Yang@amd.com>,
	John Hubbard <jhubbard@nvidia.com>,
	dri-devel@lists.freedesktop.org,
	Steven Price <steven.price@arm.com>,
	linux-mm@kvack.org, Jason Gunthorpe <jgg@mellanox.com>,
	amd-gfx@lists.freedesktop.org, Christoph Hellwig <hch@lst.de>
Subject: [PATCH] mm/hmm: Simplify hmm_vma_walk_pud slightly
Date: Thu, 12 Mar 2020 10:28:13 +0000	[thread overview]
Message-ID: <20200312102813.56699-1-steven.price@arm.com> (raw)
In-Reply-To: <5bd778fa-51e5-3e0c-d9bb-b38539b03c8d@arm.com>

By refactoring to deal with the !pud_huge(pud) || !pud_devmap(pud)
condition early it's possible to remove the 'ret' variable and remove a
level of indentation from half the function, making the code easier to
read.

No functional change.

Signed-off-by: Steven Price <steven.price@arm.com>
---
Thanks to Jason's changes there were only two code paths left using
the out_unlock label so it seemed like a good opportunity to refactor.
---
 mm/hmm.c | 69 ++++++++++++++++++++++++++------------------------------
 1 file changed, 32 insertions(+), 37 deletions(-)

diff --git a/mm/hmm.c b/mm/hmm.c
index ca33d086bdc1..0117c86426d1 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -466,8 +466,10 @@ static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
 	struct hmm_range *range = hmm_vma_walk->range;
 	unsigned long addr = start;
 	pud_t pud;
-	int ret = 0;
 	spinlock_t *ptl = pud_trans_huge_lock(pudp, walk->vma);
+	unsigned long i, npages, pfn;
+	uint64_t *pfns, cpu_flags;
+	bool fault, write_fault;
 
 	if (!ptl)
 		return 0;
@@ -481,50 +483,43 @@ static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
 		return hmm_vma_walk_hole(start, end, -1, walk);
 	}
 
-	if (pud_huge(pud) && pud_devmap(pud)) {
-		unsigned long i, npages, pfn;
-		uint64_t *pfns, cpu_flags;
-		bool fault, write_fault;
+	if (!pud_huge(pud) || !pud_devmap(pud)) {
+		/* Ask for the PUD to be split */
+		walk->action = ACTION_SUBTREE;
+		spin_unlock(ptl);
+		return 0;
+	}
 
-		if (!pud_present(pud)) {
-			spin_unlock(ptl);
-			return hmm_vma_walk_hole(start, end, -1, walk);
-		}
+	if (!pud_present(pud)) {
+		spin_unlock(ptl);
+		return hmm_vma_walk_hole(start, end, -1, walk);
+	}
 
-		i = (addr - range->start) >> PAGE_SHIFT;
-		npages = (end - addr) >> PAGE_SHIFT;
-		pfns = &range->pfns[i];
+	i = (addr - range->start) >> PAGE_SHIFT;
+	npages = (end - addr) >> PAGE_SHIFT;
+	pfns = &range->pfns[i];
 
-		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
-		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
-				     cpu_flags, &fault, &write_fault);
-		if (fault || write_fault) {
-			spin_unlock(ptl);
-			return hmm_vma_walk_hole_(addr, end, fault, write_fault,
-						  walk);
-		}
+	cpu_flags = pud_to_hmm_pfn_flags(range, pud);
+	hmm_range_need_fault(hmm_vma_walk, pfns, npages,
+			     cpu_flags, &fault, &write_fault);
+	if (fault || write_fault) {
+		spin_unlock(ptl);
+		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
+	}
 
-		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
-		for (i = 0; i < npages; ++i, ++pfn) {
-			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
-					      hmm_vma_walk->pgmap);
-			if (unlikely(!hmm_vma_walk->pgmap)) {
-				ret = -EBUSY;
-				goto out_unlock;
-			}
-			pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
-				  cpu_flags;
+	pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
+	for (i = 0; i < npages; ++i, ++pfn) {
+		hmm_vma_walk->pgmap = get_dev_pagemap(pfn, hmm_vma_walk->pgmap);
+		if (unlikely(!hmm_vma_walk->pgmap)) {
+			spin_unlock(ptl);
+			return -EBUSY;
 		}
-		hmm_vma_walk->last = end;
-		goto out_unlock;
+		pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
 	}
+	hmm_vma_walk->last = end;
 
-	/* Ask for the PUD to be split */
-	walk->action = ACTION_SUBTREE;
-
-out_unlock:
 	spin_unlock(ptl);
-	return ret;
+	return 0;
 }
 #else
 #define hmm_vma_walk_pud	NULL
-- 
2.20.1

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

WARNING: multiple messages have this Message-ID (diff)
From: Steven Price <steven.price@arm.com>
To: Jason Gunthorpe <jgg@ziepe.ca>,
	Jerome Glisse <jglisse@redhat.com>,
	Ralph Campbell <rcampbell@nvidia.com>,
	Felix.Kuehling@amd.com
Cc: Philip Yang <Philip.Yang@amd.com>,
	John Hubbard <jhubbard@nvidia.com>,
	dri-devel@lists.freedesktop.org,
	Steven Price <steven.price@arm.com>,
	linux-mm@kvack.org, Jason Gunthorpe <jgg@mellanox.com>,
	amd-gfx@lists.freedesktop.org, Christoph Hellwig <hch@lst.de>
Subject: [PATCH] mm/hmm: Simplify hmm_vma_walk_pud slightly
Date: Thu, 12 Mar 2020 10:28:13 +0000	[thread overview]
Message-ID: <20200312102813.56699-1-steven.price@arm.com> (raw)
In-Reply-To: <5bd778fa-51e5-3e0c-d9bb-b38539b03c8d@arm.com>

By refactoring to deal with the !pud_huge(pud) || !pud_devmap(pud)
condition early it's possible to remove the 'ret' variable and remove a
level of indentation from half the function, making the code easier to
read.

No functional change.

Signed-off-by: Steven Price <steven.price@arm.com>
---
Thanks to Jason's changes there were only two code paths left using
the out_unlock label so it seemed like a good opportunity to refactor.
---
 mm/hmm.c | 69 ++++++++++++++++++++++++++------------------------------
 1 file changed, 32 insertions(+), 37 deletions(-)

diff --git a/mm/hmm.c b/mm/hmm.c
index ca33d086bdc1..0117c86426d1 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -466,8 +466,10 @@ static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
 	struct hmm_range *range = hmm_vma_walk->range;
 	unsigned long addr = start;
 	pud_t pud;
-	int ret = 0;
 	spinlock_t *ptl = pud_trans_huge_lock(pudp, walk->vma);
+	unsigned long i, npages, pfn;
+	uint64_t *pfns, cpu_flags;
+	bool fault, write_fault;
 
 	if (!ptl)
 		return 0;
@@ -481,50 +483,43 @@ static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
 		return hmm_vma_walk_hole(start, end, -1, walk);
 	}
 
-	if (pud_huge(pud) && pud_devmap(pud)) {
-		unsigned long i, npages, pfn;
-		uint64_t *pfns, cpu_flags;
-		bool fault, write_fault;
+	if (!pud_huge(pud) || !pud_devmap(pud)) {
+		/* Ask for the PUD to be split */
+		walk->action = ACTION_SUBTREE;
+		spin_unlock(ptl);
+		return 0;
+	}
 
-		if (!pud_present(pud)) {
-			spin_unlock(ptl);
-			return hmm_vma_walk_hole(start, end, -1, walk);
-		}
+	if (!pud_present(pud)) {
+		spin_unlock(ptl);
+		return hmm_vma_walk_hole(start, end, -1, walk);
+	}
 
-		i = (addr - range->start) >> PAGE_SHIFT;
-		npages = (end - addr) >> PAGE_SHIFT;
-		pfns = &range->pfns[i];
+	i = (addr - range->start) >> PAGE_SHIFT;
+	npages = (end - addr) >> PAGE_SHIFT;
+	pfns = &range->pfns[i];
 
-		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
-		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
-				     cpu_flags, &fault, &write_fault);
-		if (fault || write_fault) {
-			spin_unlock(ptl);
-			return hmm_vma_walk_hole_(addr, end, fault, write_fault,
-						  walk);
-		}
+	cpu_flags = pud_to_hmm_pfn_flags(range, pud);
+	hmm_range_need_fault(hmm_vma_walk, pfns, npages,
+			     cpu_flags, &fault, &write_fault);
+	if (fault || write_fault) {
+		spin_unlock(ptl);
+		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
+	}
 
-		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
-		for (i = 0; i < npages; ++i, ++pfn) {
-			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
-					      hmm_vma_walk->pgmap);
-			if (unlikely(!hmm_vma_walk->pgmap)) {
-				ret = -EBUSY;
-				goto out_unlock;
-			}
-			pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
-				  cpu_flags;
+	pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
+	for (i = 0; i < npages; ++i, ++pfn) {
+		hmm_vma_walk->pgmap = get_dev_pagemap(pfn, hmm_vma_walk->pgmap);
+		if (unlikely(!hmm_vma_walk->pgmap)) {
+			spin_unlock(ptl);
+			return -EBUSY;
 		}
-		hmm_vma_walk->last = end;
-		goto out_unlock;
+		pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
 	}
+	hmm_vma_walk->last = end;
 
-	/* Ask for the PUD to be split */
-	walk->action = ACTION_SUBTREE;
-
-out_unlock:
 	spin_unlock(ptl);
-	return ret;
+	return 0;
 }
 #else
 #define hmm_vma_walk_pud	NULL
-- 
2.20.1

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

  reply	other threads:[~2020-03-12 10:29 UTC|newest]

Thread overview: 153+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2020-03-11 18:34 [PATCH hmm 0/8] Various error case bug fixes for hmm_range_fault() Jason Gunthorpe
2020-03-11 18:34 ` Jason Gunthorpe
2020-03-11 18:34 ` Jason Gunthorpe
2020-03-11 18:34 ` [PATCH hmm 1/8] mm/hmm: add missing unmaps of the ptep during hmm_vma_handle_pte() Jason Gunthorpe
2020-03-11 18:34   ` Jason Gunthorpe
2020-03-11 18:34   ` Jason Gunthorpe
2020-03-12  1:28   ` Ralph Campbell
2020-03-12  1:28     ` Ralph Campbell
2020-03-12  1:28     ` Ralph Campbell
2020-03-12 14:24     ` Jason Gunthorpe
2020-03-12 14:24       ` Jason Gunthorpe
2020-03-12 14:24       ` Jason Gunthorpe
2020-03-16  8:58   ` Christoph Hellwig
2020-03-16  8:58     ` Christoph Hellwig
2020-03-11 18:35 ` [PATCH hmm 2/8] mm/hmm: don't free the cached pgmap while scanning Jason Gunthorpe
2020-03-11 18:35   ` Jason Gunthorpe
2020-03-11 18:35   ` Jason Gunthorpe
2020-03-12  1:29   ` Ralph Campbell
2020-03-12  1:29     ` Ralph Campbell
2020-03-12  1:29     ` Ralph Campbell
2020-03-16  9:02   ` Christoph Hellwig
2020-03-16  9:02     ` Christoph Hellwig
2020-03-16 18:07     ` Jason Gunthorpe
2020-03-16 18:07       ` Jason Gunthorpe
2020-03-16 18:07       ` Jason Gunthorpe
2020-03-16 18:13       ` Christoph Hellwig
2020-03-16 18:13         ` Christoph Hellwig
2020-03-16 19:23         ` Jason Gunthorpe
2020-03-16 19:23           ` Jason Gunthorpe
2020-03-16 19:23           ` Jason Gunthorpe
2020-03-11 18:35 ` [PATCH hmm 3/8] mm/hmm: do not call hmm_vma_walk_hole() while holding a spinlock Jason Gunthorpe
2020-03-11 18:35   ` Jason Gunthorpe
2020-03-11 18:35   ` Jason Gunthorpe
2020-03-12  1:31   ` Ralph Campbell
2020-03-12  1:31     ` Ralph Campbell
2020-03-12  1:31     ` Ralph Campbell
2020-03-12  8:54   ` Steven Price
2020-03-12  8:54     ` Steven Price
2020-03-12  8:54     ` Steven Price
2020-03-12 10:28     ` Steven Price [this message]
2020-03-12 10:28       ` [PATCH] mm/hmm: Simplify hmm_vma_walk_pud slightly Steven Price
2020-03-12 10:28       ` Steven Price
2020-03-12 14:27       ` Jason Gunthorpe
2020-03-12 14:27         ` Jason Gunthorpe
2020-03-12 14:27         ` Jason Gunthorpe
2020-03-12 14:40         ` Steven Price
2020-03-12 14:40           ` Steven Price
2020-03-12 14:40           ` Steven Price
2020-03-12 15:11           ` Jason Gunthorpe
2020-03-12 15:11             ` Jason Gunthorpe
2020-03-12 15:11             ` Jason Gunthorpe
2020-03-12 16:16             ` Steven Price
2020-03-12 16:16               ` Steven Price
2020-03-12 16:16               ` Steven Price
2020-03-12 16:37               ` Jason Gunthorpe
2020-03-12 16:37                 ` Jason Gunthorpe
2020-03-12 16:37                 ` Jason Gunthorpe
2020-03-12 17:02                 ` Steven Price
2020-03-12 17:02                   ` Steven Price
2020-03-12 17:02                   ` Steven Price
2020-03-12 17:17                   ` Jason Gunthorpe
2020-03-12 17:17                     ` Jason Gunthorpe
2020-03-12 17:17                     ` Jason Gunthorpe
2020-03-13 19:55                   ` Jason Gunthorpe
2020-03-13 19:55                     ` Jason Gunthorpe
2020-03-13 19:55                     ` Jason Gunthorpe
2020-03-13 21:04                     ` Matthew Wilcox
2020-03-13 21:04                       ` Matthew Wilcox
2020-03-13 21:04                       ` Matthew Wilcox
2020-03-13 22:51                       ` Jason Gunthorpe
2020-03-13 22:51                         ` Jason Gunthorpe
2020-03-13 22:51                         ` Jason Gunthorpe
2020-03-16  9:05   ` [PATCH hmm 3/8] mm/hmm: do not call hmm_vma_walk_hole() while holding a spinlock Christoph Hellwig
2020-03-16  9:05     ` Christoph Hellwig
2020-03-16 12:56     ` Jason Gunthorpe
2020-03-16 12:56       ` Jason Gunthorpe
2020-03-16 12:56       ` Jason Gunthorpe
2020-03-11 18:35 ` [PATCH hmm 4/8] mm/hmm: add missing pfns set to hmm_vma_walk_pmd() Jason Gunthorpe
2020-03-11 18:35   ` Jason Gunthorpe
2020-03-11 18:35   ` Jason Gunthorpe
2020-03-12  1:33   ` Ralph Campbell
2020-03-12  1:33     ` Ralph Campbell
2020-03-12  1:33     ` Ralph Campbell
2020-03-16  9:06   ` Christoph Hellwig
2020-03-16  9:06     ` Christoph Hellwig
2020-03-11 18:35 ` [PATCH hmm 5/8] mm/hmm: add missing call to hmm_range_need_fault() before returning EFAULT Jason Gunthorpe
2020-03-11 18:35   ` Jason Gunthorpe
2020-03-11 18:35   ` Jason Gunthorpe
2020-03-12  1:34   ` Ralph Campbell
2020-03-12  1:34     ` Ralph Campbell
2020-03-12  1:34     ` Ralph Campbell
2020-03-16  9:07   ` Christoph Hellwig
2020-03-16  9:07     ` Christoph Hellwig
2020-03-11 18:35 ` [PATCH hmm 6/8] mm/hmm: reorganize how !pte_present is handled in hmm_vma_handle_pte() Jason Gunthorpe
2020-03-11 18:35   ` Jason Gunthorpe
2020-03-11 18:35   ` Jason Gunthorpe
2020-03-12  1:36   ` Ralph Campbell
2020-03-12  1:36     ` Ralph Campbell
2020-03-12  1:36     ` Ralph Campbell
2020-03-16  9:11   ` Christoph Hellwig
2020-03-16  9:11     ` Christoph Hellwig
2020-03-11 18:35 ` [PATCH hmm 7/8] mm/hmm: return -EFAULT when setting HMM_PFN_ERROR on requested valid pages Jason Gunthorpe
2020-03-11 18:35   ` Jason Gunthorpe
2020-03-11 18:35   ` Jason Gunthorpe
2020-03-12  1:36   ` Ralph Campbell
2020-03-12  1:36     ` Ralph Campbell
2020-03-12  1:36     ` Ralph Campbell
2020-03-12 14:35     ` Jason Gunthorpe
2020-03-12 14:35       ` Jason Gunthorpe
2020-03-12 14:35       ` Jason Gunthorpe
2020-03-16  9:12   ` Christoph Hellwig
2020-03-16  9:12     ` Christoph Hellwig
2020-03-11 18:35 ` [PATCH hmm 8/8] mm/hmm: add missing call to hmm_pte_need_fault in HMM_PFN_SPECIAL handling Jason Gunthorpe
2020-03-11 18:35   ` Jason Gunthorpe
2020-03-11 18:35   ` Jason Gunthorpe
2020-03-12  1:38   ` Ralph Campbell
2020-03-12  1:38     ` Ralph Campbell
2020-03-12  1:38     ` Ralph Campbell
2020-03-16  9:13   ` Christoph Hellwig
2020-03-16  9:13     ` Christoph Hellwig
2020-03-16 12:10     ` Jason Gunthorpe
2020-03-16 12:10       ` Jason Gunthorpe
2020-03-16 12:10       ` Jason Gunthorpe
2020-03-16 12:49       ` Christoph Hellwig
2020-03-16 12:49         ` Christoph Hellwig
2020-03-16 13:04         ` Jason Gunthorpe
2020-03-16 13:04           ` Jason Gunthorpe
2020-03-16 13:04           ` Jason Gunthorpe
2020-03-16 13:12           ` Christoph Hellwig
2020-03-16 13:12             ` Christoph Hellwig
2020-03-17 12:32             ` Christoph Hellwig
2020-03-17 12:32               ` Christoph Hellwig
2020-03-17 12:53               ` Jason Gunthorpe
2020-03-17 12:53                 ` Jason Gunthorpe
2020-03-17 12:53                 ` Jason Gunthorpe
2020-03-17 13:06                 ` Christoph Hellwig
2020-03-17 13:06                   ` Christoph Hellwig
2020-03-17 13:25                   ` Jason Gunthorpe
2020-03-17 13:25                     ` Jason Gunthorpe
2020-03-17 13:25                     ` Jason Gunthorpe
2020-03-16 12:51   ` Christoph Hellwig
2020-03-16 12:51     ` Christoph Hellwig
2020-03-12 19:33 ` [PATCH hmm 9/8] mm/hmm: do not check pmd_protnone twice in hmm_vma_handle_pmd() Jason Gunthorpe
2020-03-12 19:33   ` Jason Gunthorpe
2020-03-12 19:33   ` Jason Gunthorpe
2020-03-12 23:50   ` Ralph Campbell
2020-03-12 23:50     ` Ralph Campbell
2020-03-12 23:50     ` Ralph Campbell
2020-03-16  9:14   ` Christoph Hellwig
2020-03-16  9:14     ` Christoph Hellwig
2020-03-16 18:25 ` [PATCH hmm 0/8] Various error case bug fixes for hmm_range_fault() Jason Gunthorpe
2020-03-16 18:25   ` Jason Gunthorpe
2020-03-16 18:25   ` Jason Gunthorpe

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20200312102813.56699-1-steven.price@arm.com \
    --to=steven.price@arm.com \
    --cc=Felix.Kuehling@amd.com \
    --cc=Philip.Yang@amd.com \
    --cc=amd-gfx@lists.freedesktop.org \
    --cc=dri-devel@lists.freedesktop.org \
    --cc=hch@lst.de \
    --cc=jgg@mellanox.com \
    --cc=jgg@ziepe.ca \
    --cc=jglisse@redhat.com \
    --cc=jhubbard@nvidia.com \
    --cc=linux-mm@kvack.org \
    --cc=rcampbell@nvidia.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.