* [PATCH v2] mm/mremap: Don't account pages in vma_to_resize()
@ 2021-07-21 13:13 Dmitry Safonov
From: Dmitry Safonov @ 2021-07-21 13:13 UTC
  To: linux-kernel
  Cc: Dmitry Safonov, Dmitry Safonov, Alexander Viro, Andrew Morton,
	Andy Lutomirski, Brian Geffon, Catalin Marinas, Chen Wandun,
	Dan Carpenter, Dan Williams, Dave Jiang, Hugh Dickins,
	Ingo Molnar, Jason Gunthorpe, John Hubbard, Kefeng Wang,
	Kirill A. Shutemov, Mike Kravetz, Minchan Kim, Ralph Campbell,
	Russell King, Thomas Bogendoerfer, Thomas Gleixner, Vishal Verma,
	Vlastimil Babka, Wei Yongjun, Will Deacon

All this vm_unacct_memory(charged) dance seems to complicate the code
without a good reason. Furthermore, it is not always done correctly on
the error paths in mremap_to().
Worse than that, the `charged' difference is sometimes double-accounted
for growing MREMAP_DONTUNMAP mremap()s in move_vma():
: if (security_vm_enough_memory_mm(mm, new_len >> PAGE_SHIFT))

Let's not do this.
Account memory in the mremap() fast path for growing VMAs, or in
move_vma() for actually moving things. This is the same, simpler, way
it is done by vm_stat_account(), but with the difference that
security_vm_enough_memory_mm() is called before copying/adjusting the
VMA.

Originally noticed by Chen Wandun:
https://lkml.kernel.org/r/20210717101942.120607-1-chenwandun@huawei.com
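
A minimal userspace sketch of the affected fast path (illustrative
only; it assumes 4K pages and a private anonymous mapping, which gets
VM_ACCOUNT):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t old_len = 4096, new_len = 8192;
	char *p;

	/* Private anonymous memory is charged against the commit limit. */
	p = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Growing charges only new_len - old_len (one page here); on
	 * failure nothing is left over-accounted. */
	p = mremap(p, old_len, new_len, MREMAP_MAYMOVE);
	if (p == MAP_FAILED) {
		perror("mremap");
		return 1;
	}

	p[new_len - 1] = 1;	/* touch the newly accounted page */
	return munmap(p, new_len);
}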

Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Brian Geffon <bgeffon@google.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chen Wandun <chenwandun@huawei.com>
Cc: Dan Carpenter <dan.carpenter@oracle.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vishal Verma <vishal.l.verma@intel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wei Yongjun <weiyongjun1@huawei.com>
Cc: Will Deacon <will@kernel.org>
Fixes: e346b3813067 ("mm/mremap: add MREMAP_DONTUNMAP to mremap()")
Signed-off-by: Dmitry Safonov <dima@arista.com>
---
 mm/mremap.c | 50 ++++++++++++++++++++++----------------------------
 1 file changed, 22 insertions(+), 28 deletions(-)

diff --git a/mm/mremap.c b/mm/mremap.c
index 5989d3990020..b90c8e732e29 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -565,6 +565,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 		bool *locked, unsigned long flags,
 		struct vm_userfaultfd_ctx *uf, struct list_head *uf_unmap)
 {
+	long to_account = new_len - old_len;
 	struct mm_struct *mm = vma->vm_mm;
 	struct vm_area_struct *new_vma;
 	unsigned long vm_flags = vma->vm_flags;
@@ -583,6 +584,9 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 	if (mm->map_count >= sysctl_max_map_count - 3)
 		return -ENOMEM;
 
+	if (unlikely(flags & MREMAP_DONTUNMAP))
+		to_account = new_len;
+
 	if (vma->vm_ops && vma->vm_ops->may_split) {
 		if (vma->vm_start != old_addr)
 			err = vma->vm_ops->may_split(vma, old_addr);
@@ -604,8 +608,8 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 	if (err)
 		return err;
 
-	if (unlikely(flags & MREMAP_DONTUNMAP && vm_flags & VM_ACCOUNT)) {
-		if (security_vm_enough_memory_mm(mm, new_len >> PAGE_SHIFT))
+	if (vm_flags & VM_ACCOUNT) {
+		if (security_vm_enough_memory_mm(mm, to_account >> PAGE_SHIFT))
 			return -ENOMEM;
 	}
 
@@ -613,8 +617,8 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
 			   &need_rmap_locks);
 	if (!new_vma) {
-		if (unlikely(flags & MREMAP_DONTUNMAP && vm_flags & VM_ACCOUNT))
-			vm_unacct_memory(new_len >> PAGE_SHIFT);
+		if (vm_flags & VM_ACCOUNT)
+			vm_unacct_memory(to_account >> PAGE_SHIFT);
 		return -ENOMEM;
 	}
 
@@ -708,8 +712,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 }
 
 static struct vm_area_struct *vma_to_resize(unsigned long addr,
-	unsigned long old_len, unsigned long new_len, unsigned long flags,
-	unsigned long *p)
+	unsigned long old_len, unsigned long new_len, unsigned long flags)
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
@@ -768,13 +771,6 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
 				(new_len - old_len) >> PAGE_SHIFT))
 		return ERR_PTR(-ENOMEM);
 
-	if (vma->vm_flags & VM_ACCOUNT) {
-		unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
-		if (security_vm_enough_memory_mm(mm, charged))
-			return ERR_PTR(-ENOMEM);
-		*p = charged;
-	}
-
 	return vma;
 }
 
@@ -787,7 +783,6 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 	unsigned long ret = -EINVAL;
-	unsigned long charged = 0;
 	unsigned long map_flags = 0;
 
 	if (offset_in_page(new_addr))
@@ -830,7 +825,7 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
 		old_len = new_len;
 	}
 
-	vma = vma_to_resize(addr, old_len, new_len, flags, &charged);
+	vma = vma_to_resize(addr, old_len, new_len, flags);
 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);
 		goto out;
@@ -853,7 +848,7 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
 				((addr - vma->vm_start) >> PAGE_SHIFT),
 				map_flags);
 	if (IS_ERR_VALUE(ret))
-		goto out1;
+		goto out;
 
 	/* We got a new mapping */
 	if (!(flags & MREMAP_FIXED))
@@ -862,12 +857,6 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
 	ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, flags, uf,
 		       uf_unmap);
 
-	if (!(offset_in_page(ret)))
-		goto out;
-
-out1:
-	vm_unacct_memory(charged);
-
 out:
 	return ret;
 }
@@ -899,7 +888,6 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 	unsigned long ret = -EINVAL;
-	unsigned long charged = 0;
 	bool locked = false;
 	bool downgraded = false;
 	struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
@@ -981,7 +969,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
 	/*
 	 * Ok, we need to grow..
 	 */
-	vma = vma_to_resize(addr, old_len, new_len, flags, &charged);
+	vma = vma_to_resize(addr, old_len, new_len, flags);
 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);
 		goto out;
@@ -992,10 +980,18 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
 	if (old_len == vma->vm_end - addr) {
 		/* can we just expand the current mapping? */
 		if (vma_expandable(vma, new_len - old_len)) {
-			int pages = (new_len - old_len) >> PAGE_SHIFT;
+			long pages = (new_len - old_len) >> PAGE_SHIFT;
+
+			if (vma->vm_flags & VM_ACCOUNT) {
+				if (security_vm_enough_memory_mm(mm, pages)) {
+					ret = -ENOMEM;
+					goto out;
+				}
+			}
 
 			if (vma_adjust(vma, vma->vm_start, addr + new_len,
 				       vma->vm_pgoff, NULL)) {
+				vm_unacct_memory(pages);
 				ret = -ENOMEM;
 				goto out;
 			}
@@ -1034,10 +1030,8 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
 			       &locked, flags, &uf, &uf_unmap);
 	}
 out:
-	if (offset_in_page(ret)) {
-		vm_unacct_memory(charged);
+	if (offset_in_page(ret))
 		locked = false;
-	}
 	if (downgraded)
 		mmap_read_unlock(current->mm);
 	else
-- 
2.32.0
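
With MREMAP_DONTUNMAP the old mapping stays behind, so after a
successful call both ranges are live; that is why move_vma() above has
to charge the full new_len rather than the difference. A minimal
sketch (illustrative only; the fallback define covers libc headers
older than the v5.7 UAPI):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

#ifndef MREMAP_DONTUNMAP
#define MREMAP_DONTUNMAP 4
#endif

int main(void)
{
	size_t len = 4096;
	void *old, *new;

	old = mmap(NULL, len, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (old == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Moves the pages but keeps the old VMA mapped (it refaults as
	 * zero-fill), so the process now owns two accounted ranges. */
	new = mremap(old, len, len, MREMAP_MAYMOVE | MREMAP_DONTUNMAP);
	if (new == MAP_FAILED) {
		perror("mremap");
		return 1;
	}

	munmap(old, len);
	munmap(new, len);
	return 0;
}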



* Re: [PATCH v2] mm/mremap: Don't account pages in vma_to_resize()
From: Dmitry Safonov @ 2021-07-21 13:21 UTC
  To: Dmitry Safonov, linux-kernel
  Cc: Alexander Viro, Andrew Morton, Andy Lutomirski, Brian Geffon,
	Catalin Marinas, Chen Wandun, Dan Carpenter, Dan Williams,
	Dave Jiang, Hugh Dickins, Ingo Molnar, Jason Gunthorpe,
	John Hubbard, Kefeng Wang, Kirill A. Shutemov, Mike Kravetz,
	Minchan Kim, Ralph Campbell, Russell King, Thomas Bogendoerfer,
	Thomas Gleixner, Vishal Verma, Vlastimil Babka, Wei Yongjun,
	Will Deacon

On 7/21/21 2:13 PM, Dmitry Safonov wrote:
> All this vm_unacct_memory(charged) dance seems to complicate the life
> without a good reason. Furthermore, it seems not always done right on
> error-pathes in mremap_to().
> And worse than that: this `charged' difference is sometimes
> double-accounted for growing MREMAP_DONTUNMAP mremap()s in move_vma():
> : if (security_vm_enough_memory_mm(mm, new_len >> PAGE_SHIFT))
> 
> Let's not do this.
> Account memory in mremap() fast-path for growing VMAs or in move_vma()
> for actually moving things. The same simpler way as it's done by
> vm_stat_account(), but with a difference to call
> security_vm_enough_memory_mm() before copying/adjusting VMA.
> 
> Originally noticed by Chen Wandun:
> https://lkml.kernel.org/r/20210717101942.120607-1-chenwandun@huawei.com

The patch by Chen Wandun still makes sense with this v2.
Heh :-)



-- 
Dima


* Re: [PATCH v2] mm/mremap: Don't account pages in vma_to_resize()
From: Andrew Morton @ 2021-07-21 20:05 UTC
  To: Dmitry Safonov
  Cc: Dmitry Safonov, linux-kernel, Alexander Viro, Andy Lutomirski,
	Brian Geffon, Catalin Marinas, Chen Wandun, Dan Carpenter,
	Dan Williams, Dave Jiang, Hugh Dickins, Ingo Molnar,
	Jason Gunthorpe, John Hubbard, Kefeng Wang, Kirill A. Shutemov,
	Mike Kravetz, Minchan Kim, Ralph Campbell, Russell King,
	Thomas Bogendoerfer, Thomas Gleixner, Vishal Verma,
	Vlastimil Babka, Wei Yongjun, Will Deacon

On Wed, 21 Jul 2021 14:21:54 +0100 Dmitry Safonov <0x7f454c46@gmail.com> wrote:

> > Let's not do this.
> > Account memory in mremap() fast-path for growing VMAs or in move_vma()
> > for actually moving things. The same simpler way as it's done by
> > vm_stat_account(), but with a difference to call
> > security_vm_enough_memory_mm() before copying/adjusting VMA.
> > 
> > Originally noticed by Chen Wandun:
> > https://lkml.kernel.org/r/20210717101942.120607-1-chenwandun@huawei.com
> 
> The patch by Chen Wandun still makes sense with this v2.
> Heh :-)

Should
https://lkml.kernel.org/r/20210717101942.120607-1-chenwandun@huawei.com
still be applied then?  Did you ack it?


* Re: [PATCH v2] mm/mremap: Don't account pages in vma_to_resize()
From: Dmitry Safonov @ 2021-07-21 20:08 UTC
  To: Andrew Morton, Dmitry Safonov
  Cc: linux-kernel, Alexander Viro, Andy Lutomirski, Brian Geffon,
	Catalin Marinas, Chen Wandun, Dan Carpenter, Dan Williams,
	Dave Jiang, Hugh Dickins, Ingo Molnar, Jason Gunthorpe,
	John Hubbard, Kefeng Wang, Kirill A. Shutemov, Mike Kravetz,
	Minchan Kim, Ralph Campbell, Russell King, Thomas Bogendoerfer,
	Thomas Gleixner, Vishal Verma, Vlastimil Babka, Wei Yongjun,
	Will Deacon

On 7/21/21 9:05 PM, Andrew Morton wrote:
> On Wed, 21 Jul 2021 14:21:54 +0100 Dmitry Safonov <0x7f454c46@gmail.com> wrote:
> 
>>> Let's not do this.
>>> Account memory in mremap() fast-path for growing VMAs or in move_vma()
>>> for actually moving things. The same simpler way as it's done by
>>> vm_stat_account(), but with a difference to call
>>> security_vm_enough_memory_mm() before copying/adjusting VMA.
>>>
>>> Originally noticed by Chen Wandun:
>>> https://lkml.kernel.org/r/20210717101942.120607-1-chenwandun@huawei.com
>>
>> The patch by Chen Wandun still makes sense with this v2.
>> Heh :-)
> 
> Should
> https://lkml.kernel.org/r/20210717101942.120607-1-chenwandun@huawei.com
> still be applied then?  Did you ack it?
> 

Yes, please keep that patch with
Acked-by: Dmitry Safonov <dima@arista.com>

This one comes on top of it to correct the accounting for MREMAP_DONTUNMAP.

Thanks,
          Dmitry


* Re: [PATCH v2] mm/mremap: Don't account pages in vma_to_resize()
From: Dmitry Safonov @ 2021-07-21 20:18 UTC
  To: Dmitry Safonov, Andrew Morton, Brian Geffon
  Cc: linux-kernel, Alexander Viro, Andy Lutomirski, Catalin Marinas,
	Chen Wandun, Dan Carpenter, Dan Williams, Dave Jiang,
	Hugh Dickins, Ingo Molnar, Jason Gunthorpe, John Hubbard,
	Kefeng Wang, Kirill A. Shutemov, Mike Kravetz, Minchan Kim,
	Ralph Campbell, Russell King, Thomas Bogendoerfer,
	Thomas Gleixner, Vishal Verma, Vlastimil Babka, Wei Yongjun,
	Will Deacon

On 7/21/21 9:08 PM, Dmitry Safonov wrote:
> On 7/21/21 9:05 PM, Andrew Morton wrote:
>> On Wed, 21 Jul 2021 14:21:54 +0100 Dmitry Safonov <0x7f454c46@gmail.com> wrote:
>>
>>>> Let's not do this.
>>>> Account memory in mremap() fast-path for growing VMAs or in move_vma()
>>>> for actually moving things. The same simpler way as it's done by
>>>> vm_stat_account(), but with a difference to call
>>>> security_vm_enough_memory_mm() before copying/adjusting VMA.
>>>>
>>>> Originally noticed by Chen Wandun:
>>>> https://lkml.kernel.org/r/20210717101942.120607-1-chenwandun@huawei.com
>>>
>>> The patch by Chen Wandun still makes sense with this v2.
>>> Heh :-)
>>
>> Should
>> https://lkml.kernel.org/r/20210717101942.120607-1-chenwandun@huawei.com
>> still be applied then?  Did you ack it?
>>
> 
> Yes, please keep that patch with
> Acked-by: Dmitry Safonov <dima@arista.com>
> 
> This one comes on the top to correct accounting for MREMAP_DONTUNMAP.

Please also wait for reviews on this one.
I think it's correct and simplifies the complex code, but now that I
look at the code again, I see that:
:	/*
:	 * MREMAP_DONTUNMAP is always a move and it does not allow resizing
:	 * in the process.
:	 */
:	if (flags & MREMAP_DONTUNMAP &&
:			(!(flags & MREMAP_MAYMOVE) || old_len != new_len))
:		return ret;

So, this part can be dropped:
> Fixes: e346b3813067 ("mm/mremap: add MREMAP_DONTUNMAP to mremap()")

But I think the patch still makes sense.
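
A quick userspace check of that (illustrative only):

#define _GNU_SOURCE
#include <assert.h>
#include <errno.h>
#include <sys/mman.h>

#ifndef MREMAP_DONTUNMAP
#define MREMAP_DONTUNMAP 4
#endif

int main(void)
{
	void *p, *q;

	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	assert(p != MAP_FAILED);

	/* old_len != new_len is rejected up front, long before any
	 * accounting happens. */
	q = mremap(p, 4096, 8192, MREMAP_MAYMOVE | MREMAP_DONTUNMAP);
	assert(q == MAP_FAILED && errno == EINVAL);
	return 0;
}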

Brian, can I have your review on it?

Thanks,
          Dmitry


* Re: [PATCH v2] mm/mremap: Don't account pages in vma_to_resize()
From: Brian Geffon @ 2021-07-22 15:58 UTC
  To: Dmitry Safonov
  Cc: LKML, Dmitry Safonov, Alexander Viro, Andrew Morton,
	Andy Lutomirski, Catalin Marinas, Chen Wandun, Dan Carpenter,
	Dan Williams, Dave Jiang, Hugh Dickins, Ingo Molnar,
	Jason Gunthorpe, John Hubbard, Kefeng Wang, Kirill A. Shutemov,
	Mike Kravetz, Minchan Kim, Ralph Campbell, Russell King,
	Thomas Bogendoerfer, Thomas Gleixner, Vishal Verma,
	Vlastimil Babka, Wei Yongjun, Will Deacon

On Wed, Jul 21, 2021 at 9:13 AM Dmitry Safonov <dima@arista.com> wrote:
>
> [...]
> @@ -992,10 +980,18 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
>         if (old_len == vma->vm_end - addr) {
>                 /* can we just expand the current mapping? */
>                 if (vma_expandable(vma, new_len - old_len)) {
> -                       int pages = (new_len - old_len) >> PAGE_SHIFT;
> +                       long pages = (new_len - old_len) >> PAGE_SHIFT;
> +
> +                       if (vma->vm_flags & VM_ACCOUNT) {
> +                               if (security_vm_enough_memory_mm(mm, pages)) {
> +                                       ret = -ENOMEM;
> +                                       goto out;
> +                               }
> +                       }

Wasn't this whole check already performed in vma_to_resize()?



* Re: [PATCH v2] mm/mremap: Don't account pages in vma_to_resize()
From: Brian Geffon @ 2021-07-22 16:00 UTC
  To: Dmitry Safonov
  Cc: LKML, Dmitry Safonov, Alexander Viro, Andrew Morton,
	Andy Lutomirski, Catalin Marinas, Chen Wandun, Dan Carpenter,
	Dan Williams, Dave Jiang, Hugh Dickins, Ingo Molnar,
	Jason Gunthorpe, John Hubbard, Kefeng Wang, Kirill A. Shutemov,
	Mike Kravetz, Minchan Kim, Ralph Campbell, Russell King,
	Thomas Bogendoerfer, Thomas Gleixner, Vishal Verma,
	Vlastimil Babka, Wei Yongjun, Will Deacon

Whoops, I missed that you did remove it from vma_to_resize(), so it
looks good to me.

Acked-by: Brian Geffon <bgeffon@google.com>


