All of lore.kernel.org
 help / color / mirror / Atom feed
* remove follow_pfn
@ 2024-03-24 23:45 Christoph Hellwig
  2024-03-24 23:45 ` [PATCH 1/3] virt: acrn: stop using follow_pfn Christoph Hellwig
                   ` (2 more replies)
  0 siblings, 3 replies; 11+ messages in thread
From: Christoph Hellwig @ 2024-03-24 23:45 UTC (permalink / raw)
  To: Andrew Morton, Dave Hansen, Andy Lutomirski, Peter Zijlstra, Fei Li
  Cc: x86, linux-mm

Hi all,

this series open codes follow_pfn in the only remaining caller, although
the code there remains questionable.  It then also moves follow_phys into
the only user and simplifies it a bit.

Diffstat:
 arch/x86/mm/pat/memtype.c |   22 ++++++++++++++-
 drivers/virt/acrn/mm.c    |   10 +++++--
 include/linux/mm.h        |    4 --
 mm/memory.c               |   64 +---------------------------------------------
 mm/nommu.c                |   21 ---------------
 5 files changed, 30 insertions(+), 91 deletions(-)


^ permalink raw reply	[flat|nested] 11+ messages in thread

* [PATCH 1/3] virt: acrn: stop using follow_pfn
  2024-03-24 23:45 remove follow_pfn Christoph Hellwig
@ 2024-03-24 23:45 ` Christoph Hellwig
  2024-03-25 10:33   ` David Hildenbrand
  2024-03-24 23:45 ` [PATCH 2/3] mm: remove follow_pfn Christoph Hellwig
  2024-03-24 23:45 ` [PATCH 3/3] mm: move follow_phys to arch/x86/mm/pat/memtype.c Christoph Hellwig
  2 siblings, 1 reply; 11+ messages in thread
From: Christoph Hellwig @ 2024-03-24 23:45 UTC (permalink / raw)
  To: Andrew Morton, Dave Hansen, Andy Lutomirski, Peter Zijlstra, Fei Li
  Cc: x86, linux-mm

Switch from follow_pfn to follow_pte so that we can get rid of
follow_pfn.  Note that this doesn't fix any of the pre-existing
raciness and lack of permission checking in the code.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/virt/acrn/mm.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/drivers/virt/acrn/mm.c b/drivers/virt/acrn/mm.c
index fa5d9ca6be5706..69c3f619f88196 100644
--- a/drivers/virt/acrn/mm.c
+++ b/drivers/virt/acrn/mm.c
@@ -171,18 +171,24 @@ int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
 	mmap_read_lock(current->mm);
 	vma = vma_lookup(current->mm, memmap->vma_base);
 	if (vma && ((vma->vm_flags & VM_PFNMAP) != 0)) {
+		spinlock_t *ptl;
+		pte_t *ptep;
+
 		if ((memmap->vma_base + memmap->len) > vma->vm_end) {
 			mmap_read_unlock(current->mm);
 			return -EINVAL;
 		}
 
-		ret = follow_pfn(vma, memmap->vma_base, &pfn);
-		mmap_read_unlock(current->mm);
+		ret = follow_pte(vma->vm_mm, memmap->vma_base, &ptep, &ptl);
 		if (ret < 0) {
+			mmap_read_unlock(current->mm);
 			dev_dbg(acrn_dev.this_device,
 				"Failed to lookup PFN at VMA:%pK.\n", (void *)memmap->vma_base);
 			return ret;
 		}
+		pfn = pte_pfn(ptep_get(ptep));
+		pte_unmap_unlock(ptep, ptl);
+		mmap_read_unlock(current->mm);
 
 		return acrn_mm_region_add(vm, memmap->user_vm_pa,
 			 PFN_PHYS(pfn), memmap->len,
-- 
2.39.2



^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH 2/3] mm: remove follow_pfn
  2024-03-24 23:45 remove follow_pfn Christoph Hellwig
  2024-03-24 23:45 ` [PATCH 1/3] virt: acrn: stop using follow_pfn Christoph Hellwig
@ 2024-03-24 23:45 ` Christoph Hellwig
  2024-03-25 10:33   ` David Hildenbrand
  2024-03-24 23:45 ` [PATCH 3/3] mm: move follow_phys to arch/x86/mm/pat/memtype.c Christoph Hellwig
  2 siblings, 1 reply; 11+ messages in thread
From: Christoph Hellwig @ 2024-03-24 23:45 UTC (permalink / raw)
  To: Andrew Morton, Dave Hansen, Andy Lutomirski, Peter Zijlstra, Fei Li
  Cc: x86, linux-mm

Remove follow_pfn now that the last user is gone.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 include/linux/mm.h |  2 --
 mm/memory.c        | 36 ++----------------------------------
 mm/nommu.c         | 21 ---------------------
 3 files changed, 2 insertions(+), 57 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0436b919f1c7fc..9cd2c69f913601 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2393,8 +2393,6 @@ int
 copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
 int follow_pte(struct mm_struct *mm, unsigned long address,
 	       pte_t **ptepp, spinlock_t **ptlp);
-int follow_pfn(struct vm_area_struct *vma, unsigned long address,
-	unsigned long *pfn);
 int follow_phys(struct vm_area_struct *vma, unsigned long address,
 		unsigned int flags, unsigned long *prot, resource_size_t *phys);
 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
diff --git a/mm/memory.c b/mm/memory.c
index f2bc6dd15eb830..d7f09d5aae6e53 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5880,8 +5880,8 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
  * Only IO mappings and raw PFN mappings are allowed.  The mmap semaphore
  * should be taken for read.
  *
- * KVM uses this function.  While it is arguably less bad than ``follow_pfn``,
- * it is not a good general-purpose API.
+ * KVM uses this function.  While it is arguably less bad than the historic
+ * ``follow_pfn``, it is not a good general-purpose API.
  *
  * Return: zero on success, -ve otherwise.
  */
@@ -5923,38 +5923,6 @@ int follow_pte(struct mm_struct *mm, unsigned long address,
 }
 EXPORT_SYMBOL_GPL(follow_pte);
 
-/**
- * follow_pfn - look up PFN at a user virtual address
- * @vma: memory mapping
- * @address: user virtual address
- * @pfn: location to store found PFN
- *
- * Only IO mappings and raw PFN mappings are allowed.
- *
- * This function does not allow the caller to read the permissions
- * of the PTE.  Do not use it.
- *
- * Return: zero and the pfn at @pfn on success, -ve otherwise.
- */
-int follow_pfn(struct vm_area_struct *vma, unsigned long address,
-	unsigned long *pfn)
-{
-	int ret = -EINVAL;
-	spinlock_t *ptl;
-	pte_t *ptep;
-
-	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
-		return ret;
-
-	ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
-	if (ret)
-		return ret;
-	*pfn = pte_pfn(ptep_get(ptep));
-	pte_unmap_unlock(ptep, ptl);
-	return 0;
-}
-EXPORT_SYMBOL(follow_pfn);
-
 #ifdef CONFIG_HAVE_IOREMAP_PROT
 int follow_phys(struct vm_area_struct *vma,
 		unsigned long address, unsigned int flags,
diff --git a/mm/nommu.c b/mm/nommu.c
index 5ec8f44e7ce976..38d3ecc30c3ba9 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -110,27 +110,6 @@ unsigned int kobjsize(const void *objp)
 	return page_size(page);
 }
 
-/**
- * follow_pfn - look up PFN at a user virtual address
- * @vma: memory mapping
- * @address: user virtual address
- * @pfn: location to store found PFN
- *
- * Only IO mappings and raw PFN mappings are allowed.
- *
- * Returns zero and the pfn at @pfn on success, -ve otherwise.
- */
-int follow_pfn(struct vm_area_struct *vma, unsigned long address,
-	unsigned long *pfn)
-{
-	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
-		return -EINVAL;
-
-	*pfn = address >> PAGE_SHIFT;
-	return 0;
-}
-EXPORT_SYMBOL(follow_pfn);
-
 void vfree(const void *addr)
 {
 	kfree(addr);
-- 
2.39.2



^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH 3/3] mm: move follow_phys to arch/x86/mm/pat/memtype.c
  2024-03-24 23:45 remove follow_pfn Christoph Hellwig
  2024-03-24 23:45 ` [PATCH 1/3] virt: acrn: stop using follow_pfn Christoph Hellwig
  2024-03-24 23:45 ` [PATCH 2/3] mm: remove follow_pfn Christoph Hellwig
@ 2024-03-24 23:45 ` Christoph Hellwig
  2024-03-25 10:28   ` Ingo Molnar
  2024-03-25 10:36   ` David Hildenbrand
  2 siblings, 2 replies; 11+ messages in thread
From: Christoph Hellwig @ 2024-03-24 23:45 UTC (permalink / raw)
  To: Andrew Morton, Dave Hansen, Andy Lutomirski, Peter Zijlstra, Fei Li
  Cc: x86, linux-mm

follow_phys is only used by two allers in arch/x86/mm/pat/memtype.c.
Move it there and hardcode the two arguments that get the same values
passed by both caller.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 arch/x86/mm/pat/memtype.c | 22 ++++++++++++++++++++--
 include/linux/mm.h        |  2 --
 mm/memory.c               | 28 ----------------------------
 3 files changed, 20 insertions(+), 32 deletions(-)

diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c
index 0d72183b5dd028..bad99eb5c95b0d 100644
--- a/arch/x86/mm/pat/memtype.c
+++ b/arch/x86/mm/pat/memtype.c
@@ -947,6 +947,24 @@ static void free_pfn_range(u64 paddr, unsigned long size)
 		memtype_free(paddr, paddr + size);
 }
 
+static int follow_phys(struct vm_area_struct *vma, unsigned long *prot,
+		resource_size_t *phys)
+{
+	pte_t *ptep, pte;
+	spinlock_t *ptl;
+
+	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+		return -EINVAL;
+
+	if (follow_pte(vma->vm_mm, vma->vm_start, &ptep, &ptl))
+		return -EINVAL;
+	pte = ptep_get(ptep);
+	*prot = pgprot_val(pte_pgprot(pte));
+	*phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
+	pte_unmap_unlock(ptep, ptl);
+	return 0;
+}
+
 /*
  * track_pfn_copy is called when vma that is covering the pfnmap gets
  * copied through copy_page_range().
@@ -966,7 +984,7 @@ int track_pfn_copy(struct vm_area_struct *vma)
 		 * reserve the whole chunk covered by vma. We need the
 		 * starting address and protection from pte.
 		 */
-		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
+		if (follow_phys(vma, &prot, &paddr)) {
 			WARN_ON_ONCE(1);
 			return -EINVAL;
 		}
@@ -1053,7 +1071,7 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
 	/* free the chunk starting from pfn or the whole chunk */
 	paddr = (resource_size_t)pfn << PAGE_SHIFT;
 	if (!paddr && !size) {
-		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
+		if (follow_phys(vma, &prot, &paddr)) {
 			WARN_ON_ONCE(1);
 			return;
 		}
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 9cd2c69f913601..51cfc8267da755 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2393,8 +2393,6 @@ int
 copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
 int follow_pte(struct mm_struct *mm, unsigned long address,
 	       pte_t **ptepp, spinlock_t **ptlp);
-int follow_phys(struct vm_area_struct *vma, unsigned long address,
-		unsigned int flags, unsigned long *prot, resource_size_t *phys);
 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
 			void *buf, int len, int write);
 
diff --git a/mm/memory.c b/mm/memory.c
index d7f09d5aae6e53..d18f6d15f56e6c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5924,34 +5924,6 @@ int follow_pte(struct mm_struct *mm, unsigned long address,
 EXPORT_SYMBOL_GPL(follow_pte);
 
 #ifdef CONFIG_HAVE_IOREMAP_PROT
-int follow_phys(struct vm_area_struct *vma,
-		unsigned long address, unsigned int flags,
-		unsigned long *prot, resource_size_t *phys)
-{
-	int ret = -EINVAL;
-	pte_t *ptep, pte;
-	spinlock_t *ptl;
-
-	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
-		goto out;
-
-	if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
-		goto out;
-	pte = ptep_get(ptep);
-
-	if ((flags & FOLL_WRITE) && !pte_write(pte))
-		goto unlock;
-
-	*prot = pgprot_val(pte_pgprot(pte));
-	*phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
-
-	ret = 0;
-unlock:
-	pte_unmap_unlock(ptep, ptl);
-out:
-	return ret;
-}
-
 /**
  * generic_access_phys - generic implementation for iomem mmap access
  * @vma: the vma to access
-- 
2.39.2



^ permalink raw reply related	[flat|nested] 11+ messages in thread

* Re: [PATCH 3/3] mm: move follow_phys to arch/x86/mm/pat/memtype.c
  2024-03-24 23:45 ` [PATCH 3/3] mm: move follow_phys to arch/x86/mm/pat/memtype.c Christoph Hellwig
@ 2024-03-25 10:28   ` Ingo Molnar
  2024-03-25 10:36   ` David Hildenbrand
  1 sibling, 0 replies; 11+ messages in thread
From: Ingo Molnar @ 2024-03-25 10:28 UTC (permalink / raw)
  To: Christoph Hellwig
  Cc: Andrew Morton, Dave Hansen, Andy Lutomirski, Peter Zijlstra,
	Fei Li, x86, linux-mm


* Christoph Hellwig <hch@lst.de> wrote:

> follow_phys is only used by two allers in arch/x86/mm/pat/memtype.c.
> Move it there and hardcode the two arguments that get the same values
> passed by both caller.

s/aller
 /caller

s/both caller.
 /both callers.

> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>  arch/x86/mm/pat/memtype.c | 22 ++++++++++++++++++++--
>  include/linux/mm.h        |  2 --
>  mm/memory.c               | 28 ----------------------------
>  3 files changed, 20 insertions(+), 32 deletions(-)
> 
> diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c
> index 0d72183b5dd028..bad99eb5c95b0d 100644
> --- a/arch/x86/mm/pat/memtype.c
> +++ b/arch/x86/mm/pat/memtype.c
> @@ -947,6 +947,24 @@ static void free_pfn_range(u64 paddr, unsigned long size)
>  		memtype_free(paddr, paddr + size);
>  }
>  
> +static int follow_phys(struct vm_area_struct *vma, unsigned long *prot,
> +		resource_size_t *phys)
> +{
> +	pte_t *ptep, pte;
> +	spinlock_t *ptl;
> +
> +	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
> +		return -EINVAL;
> +
> +	if (follow_pte(vma->vm_mm, vma->vm_start, &ptep, &ptl))
> +		return -EINVAL;
> +	pte = ptep_get(ptep);
> +	*prot = pgprot_val(pte_pgprot(pte));
> +	*phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
> +	pte_unmap_unlock(ptep, ptl);
> +	return 0;
> +}

Please keep the readability newlines as the original had, ie.:

 +	if (follow_pte(vma->vm_mm, vma->vm_start, &ptep, &ptl))
 +		return -EINVAL;
 +
 +	pte = ptep_get(ptep);
 +	*prot = pgprot_val(pte_pgprot(pte));
 +	*phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
 +	pte_unmap_unlock(ptep, ptl);
 +
 +	return 0;

Thanks,

	Ingo


^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH 1/3] virt: acrn: stop using follow_pfn
  2024-03-24 23:45 ` [PATCH 1/3] virt: acrn: stop using follow_pfn Christoph Hellwig
@ 2024-03-25 10:33   ` David Hildenbrand
  2024-03-26  6:04     ` Christoph Hellwig
  0 siblings, 1 reply; 11+ messages in thread
From: David Hildenbrand @ 2024-03-25 10:33 UTC (permalink / raw)
  To: Christoph Hellwig, Andrew Morton, Dave Hansen, Andy Lutomirski,
	Peter Zijlstra, Fei Li
  Cc: x86, linux-mm

On 25.03.24 00:45, Christoph Hellwig wrote:
> Switch from follow_pfn to follow_pte so that we can get rid of
> follow_pfn.  Note that this doesn't fix any of the pre-existing
> raciness and lack of permission checking in the code.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>   drivers/virt/acrn/mm.c | 10 ++++++++--
>   1 file changed, 8 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/virt/acrn/mm.c b/drivers/virt/acrn/mm.c
> index fa5d9ca6be5706..69c3f619f88196 100644
> --- a/drivers/virt/acrn/mm.c
> +++ b/drivers/virt/acrn/mm.c
> @@ -171,18 +171,24 @@ int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
>   	mmap_read_lock(current->mm);
>   	vma = vma_lookup(current->mm, memmap->vma_base);
>   	if (vma && ((vma->vm_flags & VM_PFNMAP) != 0)) {
> +		spinlock_t *ptl;
> +		pte_t *ptep;
> +
>   		if ((memmap->vma_base + memmap->len) > vma->vm_end) {
>   			mmap_read_unlock(current->mm);
>   			return -EINVAL;
>   		}
>   
> -		ret = follow_pfn(vma, memmap->vma_base, &pfn);
> -		mmap_read_unlock(current->mm);
> +		ret = follow_pte(vma->vm_mm, memmap->vma_base, &ptep, &ptl);
>   		if (ret < 0) {
> +			mmap_read_unlock(current->mm);
>   			dev_dbg(acrn_dev.this_device,
>   				"Failed to lookup PFN at VMA:%pK.\n", (void *)memmap->vma_base);
>   			return ret;
>   		}
> +		pfn = pte_pfn(ptep_get(ptep));
> +		pte_unmap_unlock(ptep, ptl);
> +		mmap_read_unlock(current->mm);
>   
>   		return acrn_mm_region_add(vm, memmap->user_vm_pa,
>   			 PFN_PHYS(pfn), memmap->len,

... I have similar patches lying around here (see below). I added some
actual access permission checks.

(I also realized, that if we get an anon folio in a COW mapping via follow_pte()
here, I suspect one might be able to do some nasty things. Just imagine if we
munmap(), free the anon folio, and then it gets used in other context ... At
least KVM/vfio handle that using references+MMU notifiers.)

Reviewed-by: David Hildenbrand <david@redhat.com>


commit 812e577dea97327bcc68d34504e7387dff2ffd8f
Author: David Hildenbrand <david@redhat.com>
Date:   Fri Mar 8 13:53:04 2024 +0100

     virt/acrn/mm: use follow_pte() instead of follow_pfn()
     
     follow_pfn() should not be used. Instead, use follow_pte() and do some
     best-guess PTE permission checks.
     
     Should we simply always require pte_write()? Maybe. Performing no
     checks clearly looks wrong, and pin_user_pages_fast() is unconditionally
     called with FOLL_WRITE.
     
     Signed-off-by: David Hildenbrand <david@redhat.com>

diff --git a/drivers/virt/acrn/mm.c b/drivers/virt/acrn/mm.c
index fa5d9ca6be57..563c545adb2c 100644
--- a/drivers/virt/acrn/mm.c
+++ b/drivers/virt/acrn/mm.c
@@ -171,12 +171,22 @@ int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
         mmap_read_lock(current->mm);
         vma = vma_lookup(current->mm, memmap->vma_base);
         if (vma && ((vma->vm_flags & VM_PFNMAP) != 0)) {
+               spinlock_t *ptl;
+               pte_t *ptep;
+
                 if ((memmap->vma_base + memmap->len) > vma->vm_end) {
                         mmap_read_unlock(current->mm);
                         return -EINVAL;
                 }
  
-               ret = follow_pfn(vma, memmap->vma_base, &pfn);
+               ret = follow_pte(vma, memmap->vma_base, &ptep, &ptl);
+               if (!ret) {
+                       pfn = pte_pfn(ptep_get(ptep));
+                       if (!pte_write(ptep_get(ptep)) &&
+                           (memmap->attr & ACRN_MEM_ACCESS_WRITE))
+                               ret = -EFAULT;
+                       pte_unmap_unlock(ptep, ptl);
+               }
                 mmap_read_unlock(current->mm);
                 if (ret < 0) {


-- 
Cheers,

David / dhildenb



^ permalink raw reply related	[flat|nested] 11+ messages in thread

* Re: [PATCH 2/3] mm: remove follow_pfn
  2024-03-24 23:45 ` [PATCH 2/3] mm: remove follow_pfn Christoph Hellwig
@ 2024-03-25 10:33   ` David Hildenbrand
  0 siblings, 0 replies; 11+ messages in thread
From: David Hildenbrand @ 2024-03-25 10:33 UTC (permalink / raw)
  To: Christoph Hellwig, Andrew Morton, Dave Hansen, Andy Lutomirski,
	Peter Zijlstra, Fei Li
  Cc: x86, linux-mm

On 25.03.24 00:45, Christoph Hellwig wrote:
> Remove follow_pfn now that the last user is gone.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---

Reviewed-by: David Hildenbrand <david@redhat.com>

-- 
Cheers,

David / dhildenb



^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH 3/3] mm: move follow_phys to arch/x86/mm/pat/memtype.c
  2024-03-24 23:45 ` [PATCH 3/3] mm: move follow_phys to arch/x86/mm/pat/memtype.c Christoph Hellwig
  2024-03-25 10:28   ` Ingo Molnar
@ 2024-03-25 10:36   ` David Hildenbrand
  1 sibling, 0 replies; 11+ messages in thread
From: David Hildenbrand @ 2024-03-25 10:36 UTC (permalink / raw)
  To: Christoph Hellwig, Andrew Morton, Dave Hansen, Andy Lutomirski,
	Peter Zijlstra, Fei Li
  Cc: x86, linux-mm

On 25.03.24 00:45, Christoph Hellwig wrote:
> follow_phys is only used by two allers in arch/x86/mm/pat/memtype.c.
> Move it there and hardcode the two arguments that get the same values
> passed by both caller.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>   arch/x86/mm/pat/memtype.c | 22 ++++++++++++++++++++--
>   include/linux/mm.h        |  2 --
>   mm/memory.c               | 28 ----------------------------
>   3 files changed, 20 insertions(+), 32 deletions(-)
> 
> diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c
> index 0d72183b5dd028..bad99eb5c95b0d 100644
> --- a/arch/x86/mm/pat/memtype.c
> +++ b/arch/x86/mm/pat/memtype.c
> @@ -947,6 +947,24 @@ static void free_pfn_range(u64 paddr, unsigned long size)
>   		memtype_free(paddr, paddr + size);
>   }
>   
> +static int follow_phys(struct vm_area_struct *vma, unsigned long *prot,
> +		resource_size_t *phys)
> +{
> +	pte_t *ptep, pte;
> +	spinlock_t *ptl;
> +
> +	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
> +		return -EINVAL;
> +
> +	if (follow_pte(vma->vm_mm, vma->vm_start, &ptep, &ptl))
> +		return -EINVAL;

I have a patch here that (a) makes follow_pte() consume a VMA instead of 
a MM, and (b) moves the VMA check into follow_pte().

While you're at it, and if you're resending this, you might want to 
include similar changes; otherwise I'll can send this myself later on top.

-- 
Cheers,

David / dhildenb



^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH 1/3] virt: acrn: stop using follow_pfn
  2024-03-25 10:33   ` David Hildenbrand
@ 2024-03-26  6:04     ` Christoph Hellwig
  2024-03-26 17:06       ` David Hildenbrand
  0 siblings, 1 reply; 11+ messages in thread
From: Christoph Hellwig @ 2024-03-26  6:04 UTC (permalink / raw)
  To: David Hildenbrand
  Cc: Christoph Hellwig, Andrew Morton, Dave Hansen, Andy Lutomirski,
	Peter Zijlstra, Fei Li, x86, linux-mm

On Mon, Mar 25, 2024 at 11:33:31AM +0100, David Hildenbrand wrote:
> ... I have similar patches lying around here (see below). I added some
> actual access permission checks.
>
> (I also realized, that if we get an anon folio in a COW mapping via follow_pte()
> here, I suspect one might be able to do some nasty things. Just imagine if we
> munmap(), free the anon folio, and then it gets used in other context ... At
> least KVM/vfio handle that using references+MMU notifiers.)

How about you just send out your series that seems to go further and
I retract mine?


^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH 1/3] virt: acrn: stop using follow_pfn
  2024-03-26  6:04     ` Christoph Hellwig
@ 2024-03-26 17:06       ` David Hildenbrand
  0 siblings, 0 replies; 11+ messages in thread
From: David Hildenbrand @ 2024-03-26 17:06 UTC (permalink / raw)
  To: Christoph Hellwig
  Cc: Andrew Morton, Dave Hansen, Andy Lutomirski, Peter Zijlstra,
	Fei Li, x86, linux-mm

On 26.03.24 07:04, Christoph Hellwig wrote:
> On Mon, Mar 25, 2024 at 11:33:31AM +0100, David Hildenbrand wrote:
>> ... I have similar patches lying around here (see below). I added some
>> actual access permission checks.
>>
>> (I also realized, that if we get an anon folio in a COW mapping via follow_pte()
>> here, I suspect one might be able to do some nasty things. Just imagine if we
>> munmap(), free the anon folio, and then it gets used in other context ... At
>> least KVM/vfio handle that using references+MMU notifiers.)
> 
> How about you just send out your series that seems to go further and
> I retract mine?

Let's go with yours first and I'll rebase.

Regarding above issue, I still have not made up my mind: likely we 
should reject any PFN in acrn that has a valid "struct page", and that 
page does not have PG_reserved set. That's what VFIO effectively does IIRC.

-- 
Cheers,

David / dhildenb



^ permalink raw reply	[flat|nested] 11+ messages in thread

* [PATCH 1/3] virt: acrn: stop using follow_pfn
  2024-03-28  8:46 remove follow_pfn v2 Christoph Hellwig
@ 2024-03-28  8:46 ` Christoph Hellwig
  0 siblings, 0 replies; 11+ messages in thread
From: Christoph Hellwig @ 2024-03-28  8:46 UTC (permalink / raw)
  To: Andrew Morton, Dave Hansen, Andy Lutomirski, Peter Zijlstra, Fei Li
  Cc: x86, linux-mm, David Hildenbrand

Switch from follow_pfn to follow_pte so that we can get rid of
follow_pfn.  Note that this doesn't fix any of the pre-existing
raciness and lack of permission checking in the code.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Hildenbrand <david@redhat.com>
---
 drivers/virt/acrn/mm.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/drivers/virt/acrn/mm.c b/drivers/virt/acrn/mm.c
index fa5d9ca6be5706..69c3f619f88196 100644
--- a/drivers/virt/acrn/mm.c
+++ b/drivers/virt/acrn/mm.c
@@ -171,18 +171,24 @@ int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
 	mmap_read_lock(current->mm);
 	vma = vma_lookup(current->mm, memmap->vma_base);
 	if (vma && ((vma->vm_flags & VM_PFNMAP) != 0)) {
+		spinlock_t *ptl;
+		pte_t *ptep;
+
 		if ((memmap->vma_base + memmap->len) > vma->vm_end) {
 			mmap_read_unlock(current->mm);
 			return -EINVAL;
 		}
 
-		ret = follow_pfn(vma, memmap->vma_base, &pfn);
-		mmap_read_unlock(current->mm);
+		ret = follow_pte(vma->vm_mm, memmap->vma_base, &ptep, &ptl);
 		if (ret < 0) {
+			mmap_read_unlock(current->mm);
 			dev_dbg(acrn_dev.this_device,
 				"Failed to lookup PFN at VMA:%pK.\n", (void *)memmap->vma_base);
 			return ret;
 		}
+		pfn = pte_pfn(ptep_get(ptep));
+		pte_unmap_unlock(ptep, ptl);
+		mmap_read_unlock(current->mm);
 
 		return acrn_mm_region_add(vm, memmap->user_vm_pa,
 			 PFN_PHYS(pfn), memmap->len,
-- 
2.39.2



^ permalink raw reply related	[flat|nested] 11+ messages in thread

end of thread, other threads:[~2024-03-28  8:46 UTC | newest]

Thread overview: 11+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2024-03-24 23:45 remove follow_pfn Christoph Hellwig
2024-03-24 23:45 ` [PATCH 1/3] virt: acrn: stop using follow_pfn Christoph Hellwig
2024-03-25 10:33   ` David Hildenbrand
2024-03-26  6:04     ` Christoph Hellwig
2024-03-26 17:06       ` David Hildenbrand
2024-03-24 23:45 ` [PATCH 2/3] mm: remove follow_pfn Christoph Hellwig
2024-03-25 10:33   ` David Hildenbrand
2024-03-24 23:45 ` [PATCH 3/3] mm: move follow_phys to arch/x86/mm/pat/memtype.c Christoph Hellwig
2024-03-25 10:28   ` Ingo Molnar
2024-03-25 10:36   ` David Hildenbrand
2024-03-28  8:46 remove follow_pfn v2 Christoph Hellwig
2024-03-28  8:46 ` [PATCH 1/3] virt: acrn: stop using follow_pfn Christoph Hellwig

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.