* [PATCH] mm/mremap: Don't account pages in vma_to_resize()
From: Dmitry Safonov @ 2021-07-21 12:49 UTC
  To: linux-kernel
  Cc: Dmitry Safonov, Dmitry Safonov, Alexander Viro, Andrew Morton,
	Andy Lutomirski, Brian Geffon, Catalin Marinas, Chen Wandun,
	Dan Carpenter, Dan Williams, Dave Jiang, Hugh Dickins,
	Ingo Molnar, Jason Gunthorpe, John Hubbard, Kefeng Wang,
	Kirill A. Shutemov, Mike Kravetz, Minchan Kim, Ralph Campbell,
	Russell King, Thomas Bogendoerfer, Thomas Gleixner, Vishal Verma,
	Vlastimil Babka, Wei Yongjun, Will Deacon

All this vm_unacct_memory(charged) dance seems to complicate the code
without a good reason. Furthermore, it is not always done correctly on
the error paths in mremap_to().
And worse than that: this `charged' difference is sometimes
double-accounted for growing MREMAP_DONTUNMAP mremap()s in move_vma():
: if (security_vm_enough_memory_mm(mm, new_len >> PAGE_SHIFT))

Let's not do this.
Account memory in the mremap() fast-path for growing VMAs, or in
move_vma() when actually moving things.

Originally noticed by Chen Wandun:
https://lkml.kernel.org/r/20210717101942.120607-1-chenwandun@huawei.com
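
For illustration, here is a condensed sketch of how the growth can get
charged twice for a growing MREMAP_DONTUNMAP mremap() of a VM_ACCOUNT vma
(simplified from the hunks below and from move_vma(); not the verbatim
kernel code, and the page counts are made-up example numbers):

	/* vma_to_resize(), removed by this patch: charges only the growth */
	charged = (new_len - old_len) >> PAGE_SHIFT;	/* e.g. 6 - 4 = 2 pages */
	if (security_vm_enough_memory_mm(mm, charged))
		return ERR_PTR(-ENOMEM);

	/* move_vma(), for MREMAP_DONTUNMAP: charges the whole new length */
	if (security_vm_enough_memory_mm(mm, new_len >> PAGE_SHIFT))	/* 6 pages */
		return -ENOMEM;

	/*
	 * 2 + 6 = 8 pages get charged for this call, while only the 6 pages
	 * of the new mapping should be: the 2-page growth is accounted twice.
	 */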

Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Brian Geffon <bgeffon@google.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chen Wandun <chenwandun@huawei.com>
Cc: Dan Carpenter <dan.carpenter@oracle.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vishal Verma <vishal.l.verma@intel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wei Yongjun <weiyongjun1@huawei.com>
Cc: Will Deacon <will@kernel.org>
Fixes: e346b3813067 ("mm/mremap: add MREMAP_DONTUNMAP to mremap()")
Signed-off-by: Dmitry Safonov <dima@arista.com>
---
 mm/mremap.c | 36 +++++++++++++-----------------------
 1 file changed, 13 insertions(+), 23 deletions(-)

diff --git a/mm/mremap.c b/mm/mremap.c
index 5989d3990020..ae48955aee74 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -708,8 +708,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 }
 
 static struct vm_area_struct *vma_to_resize(unsigned long addr,
-	unsigned long old_len, unsigned long new_len, unsigned long flags,
-	unsigned long *p)
+	unsigned long old_len, unsigned long new_len, unsigned long flags)
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
@@ -768,13 +767,6 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
 				(new_len - old_len) >> PAGE_SHIFT))
 		return ERR_PTR(-ENOMEM);
 
-	if (vma->vm_flags & VM_ACCOUNT) {
-		unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
-		if (security_vm_enough_memory_mm(mm, charged))
-			return ERR_PTR(-ENOMEM);
-		*p = charged;
-	}
-
 	return vma;
 }
 
@@ -787,7 +779,6 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 	unsigned long ret = -EINVAL;
-	unsigned long charged = 0;
 	unsigned long map_flags = 0;
 
 	if (offset_in_page(new_addr))
@@ -830,7 +821,7 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
 		old_len = new_len;
 	}
 
-	vma = vma_to_resize(addr, old_len, new_len, flags, &charged);
+	vma = vma_to_resize(addr, old_len, new_len, flags);
 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);
 		goto out;
@@ -853,7 +844,7 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
 				((addr - vma->vm_start) >> PAGE_SHIFT),
 				map_flags);
 	if (IS_ERR_VALUE(ret))
-		goto out1;
+		goto out;
 
 	/* We got a new mapping */
 	if (!(flags & MREMAP_FIXED))
@@ -862,12 +853,6 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
 	ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, flags, uf,
 		       uf_unmap);
 
-	if (!(offset_in_page(ret)))
-		goto out;
-
-out1:
-	vm_unacct_memory(charged);
-
 out:
 	return ret;
 }
@@ -899,7 +884,6 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 	unsigned long ret = -EINVAL;
-	unsigned long charged = 0;
 	bool locked = false;
 	bool downgraded = false;
 	struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
@@ -981,7 +965,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
 	/*
 	 * Ok, we need to grow..
 	 */
-	vma = vma_to_resize(addr, old_len, new_len, flags, &charged);
+	vma = vma_to_resize(addr, old_len, new_len, flags);
 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);
 		goto out;
@@ -994,8 +978,16 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
 		if (vma_expandable(vma, new_len - old_len)) {
 			int pages = (new_len - old_len) >> PAGE_SHIFT;
 
+			if (vma->vm_flags & VM_ACCOUNT) {
+				if (security_vm_enough_memory_mm(mm, pages)) {
+					ret = -ENOMEM;
+					goto out;
+				}
+			}
+
 			if (vma_adjust(vma, vma->vm_start, addr + new_len,
 				       vma->vm_pgoff, NULL)) {
+				vm_unacct_memory(pages);
 				ret = -ENOMEM;
 				goto out;
 			}
@@ -1034,10 +1026,8 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
 			       &locked, flags, &uf, &uf_unmap);
 	}
 out:
-	if (offset_in_page(ret)) {
-		vm_unacct_memory(charged);
+	if (offset_in_page(ret))
 		locked = false;
-	}
 	if (downgraded)
 		mmap_read_unlock(current->mm);
 	else
-- 
2.32.0



* Re: [PATCH] mm/mremap: Don't account pages in vma_to_resize()
From: Dmitry Safonov @ 2021-07-21 13:08 UTC
  To: Dmitry Safonov, linux-kernel
  Cc: Alexander Viro, Andrew Morton, Andy Lutomirski, Brian Geffon,
	Catalin Marinas, Chen Wandun, Dan Carpenter, Dan Williams,
	Dave Jiang, Hugh Dickins, Ingo Molnar, Jason Gunthorpe,
	John Hubbard, Kefeng Wang, Kirill A. Shutemov, Mike Kravetz,
	Minchan Kim, Ralph Campbell, Russell King, Thomas Bogendoerfer,
	Thomas Gleixner, Vishal Verma, Vlastimil Babka, Wei Yongjun,
	Will Deacon

On 7/21/21 1:49 PM, Dmitry Safonov wrote:
> All this vm_unacct_memory(charged) dance seems to complicate the code
> without a good reason. Furthermore, it is not always done correctly on
> the error paths in mremap_to().
> And worse than that: this `charged' difference is sometimes
> double-accounted for growing MREMAP_DONTUNMAP mremap()s in move_vma():
> : if (security_vm_enough_memory_mm(mm, new_len >> PAGE_SHIFT))
> 
> Let's not do this.
> Account memory in the mremap() fast-path for growing VMAs, or in
> move_vma() when actually moving things.

And this one is also wrong: the size difference for a growing vma should
be accounted for !MREMAP_DONTUNMAP too (see the sketch below).
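
A rough sketch of the gap (simplified; assuming, as the quoted
security_vm_enough_memory_mm(mm, new_len >> PAGE_SHIFT) line suggests,
that move_vma() only does its own charge for MREMAP_DONTUNMAP):

	/* mremap_to() after this patch, for a plain growing move */
	vma = vma_to_resize(addr, old_len, new_len, flags);	/* no charge here anymore */
	...
	ret = move_vma(vma, addr, old_len, new_len, new_addr, ...);
	/*
	 * Without MREMAP_DONTUNMAP nothing charges the extra
	 * (new_len - old_len) pages of a VM_ACCOUNT vma.
	 */
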
Sending v2...

Thanks,
          Dmitry

