From: Liam Howlett <liam.howlett@oracle.com>
To: "maple-tree@lists.infradead.org" <maple-tree@lists.infradead.org>,
	"linux-mm@kvack.org" <linux-mm@kvack.org>,
	"linux-kernel@vger.kernel.org" <linux-kernel@vger.kernel.org>,
	Andrew Morton <akpm@linux-foundation.org>
Cc: Liam Howlett <liam.howlett@oracle.com>
Subject: [PATCH 05/43] mm: Expand vma iterator interface.
Date: Tue, 29 Nov 2022 16:44:21 +0000	[thread overview]
Message-ID: <20221129164352.3374638-6-Liam.Howlett@oracle.com> (raw)
In-Reply-To: <20221129164352.3374638-1-Liam.Howlett@oracle.com>

From: "Liam R. Howlett" <Liam.Howlett@Oracle.com>

Add wrappers around the maple tree operations to the vma iterator so
that callers can manipulate the VMA tree through the vma_iterator
instead of the raw maple state.  This will provide type safety at
compile time.
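
As a rough usage sketch only (not part of this patch), a caller walking
the VMAs of an mm goes through the wrapped interface rather than
touching vmi->mas directly.  walk_vmas() below is a hypothetical
function invented for illustration; VMA_ITERATOR(), for_each_vma(),
mmap_read_lock() and pr_info() are existing kernel interfaces:

	#include <linux/mm.h>

	/*
	 * Hypothetical caller: walk every VMA in an mm via the vma
	 * iterator and print each range.
	 */
	static void walk_vmas(struct mm_struct *mm)
	{
		VMA_ITERATOR(vmi, mm, 0);
		struct vm_area_struct *vma;

		mmap_read_lock(mm);
		for_each_vma(vmi, vma)
			pr_info("vma %lx-%lx\n", vma->vm_start, vma->vm_end);
		mmap_read_unlock(mm);
	}

The same pattern applies to the write-side helpers added to mm/mmap.c
below: a caller preallocates with vma_iter_prealloc() and commits with
vma_iter_store() instead of calling mas_preallocate() and
mas_store_prealloc() on the maple state itself.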

Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
---
 include/linux/mm.h       | 46 ++++++++++++++++++++++---
 include/linux/mm_types.h |  4 +--
 mm/mmap.c                | 74 ++++++++++++++++++++++++++++++++++++++++
 3 files changed, 117 insertions(+), 7 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 8bbcccbc5565..2d3a49ba2261 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -664,16 +664,16 @@ static inline bool vma_is_accessible(struct vm_area_struct *vma)
 static inline
 struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
 {
-	return mas_find(&vmi->mas, max);
+	return mas_find(&vmi->mas, max - 1);
 }
 
 static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
 {
 	/*
-	 * Uses vma_find() to get the first VMA when the iterator starts.
+	 * Uses mas_find() to get the first VMA when the iterator starts.
 	 * Calling mas_next() could skip the first entry.
 	 */
-	return vma_find(vmi, ULONG_MAX);
+	return mas_find(&vmi->mas, ULONG_MAX);
 }
 
 static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
@@ -686,12 +686,50 @@ static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
 	return vmi->mas.index;
 }
 
+static inline unsigned long vma_iter_end(struct vma_iterator *vmi)
+{
+	return vmi->mas.last + 1;
+}
+static inline int vma_iter_bulk_alloc(struct vma_iterator *vmi,
+				      unsigned long count)
+{
+	return mas_expected_entries(&vmi->mas, count);
+}
+
+/* Free any unused preallocations */
+static inline void vma_iter_free(struct vma_iterator *vmi)
+{
+	mas_destroy(&vmi->mas);
+}
+
+static inline int vma_iter_bulk_store(struct vma_iterator *vmi,
+				      struct vm_area_struct *vma)
+{
+	vmi->mas.index = vma->vm_start;
+	vmi->mas.last = vma->vm_end - 1;
+	mas_store(&vmi->mas, vma);
+	if (unlikely(mas_is_err(&vmi->mas)))
+		return -ENOMEM;
+
+	return 0;
+}
+
+static inline void vma_iter_invalidate(struct vma_iterator *vmi)
+{
+	mas_pause(&vmi->mas);
+}
+
+static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
+{
+	mas_set(&vmi->mas, addr);
+}
+
 #define for_each_vma(__vmi, __vma)					\
 	while (((__vma) = vma_next(&(__vmi))) != NULL)
 
 /* The MM code likes to work with exclusive end addresses */
 #define for_each_vma_range(__vmi, __vma, __end)				\
-	while (((__vma) = vma_find(&(__vmi), (__end) - 1)) != NULL)
+	while (((__vma) = vma_find(&(__vmi), (__end))) != NULL)
 
 #ifdef CONFIG_SHMEM
 /*
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 500e536796ca..faff2cc005c9 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -819,9 +819,7 @@ struct vma_iterator {
 static inline void vma_iter_init(struct vma_iterator *vmi,
 		struct mm_struct *mm, unsigned long addr)
 {
-	vmi->mas.tree = &mm->mm_mt;
-	vmi->mas.index = addr;
-	vmi->mas.node = MAS_START;
+	mas_init(&vmi->mas, &mm->mm_mt, addr);
 }
 
 struct mmu_gather;
diff --git a/mm/mmap.c b/mm/mmap.c
index c3c5c1d6103d..8f24021ef5b5 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -144,6 +144,80 @@ static void remove_vma(struct vm_area_struct *vma)
 	vm_area_free(vma);
 }
 
+static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
+{
+	return mas_walk(&vmi->mas);
+}
+
+static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi,
+						    unsigned long min)
+{
+	return mas_prev(&vmi->mas, min);
+}
+
+static inline int vma_iter_prealloc(struct vma_iterator *vmi,
+				    struct vm_area_struct *vma)
+{
+	return mas_preallocate(&vmi->mas, vma, GFP_KERNEL);
+}
+
+/* Store a VMA with preallocated memory */
+static inline void vma_iter_store(struct vma_iterator *vmi,
+				  struct vm_area_struct *vma)
+{
+	if (WARN_ON(vmi->mas.node != MAS_START && vmi->mas.index > vma->vm_start)) {
+		printk("%lu > %lu\n", vmi->mas.index, vma->vm_start);
+		printk("store of vma %lu-%lu", vma->vm_start, vma->vm_end);
+		printk("into slot    %lu-%lu", vmi->mas.index, vmi->mas.last);
+		mt_dump(vmi->mas.tree);
+	}
+	if (WARN_ON(vmi->mas.node != MAS_START && vmi->mas.last <  vma->vm_start)) {
+		printk("%lu < %lu\n", vmi->mas.last, vma->vm_start);
+		printk("store of vma %lu-%lu", vma->vm_start, vma->vm_end);
+		printk("into slot    %lu-%lu", vmi->mas.index, vmi->mas.last);
+		mt_dump(vmi->mas.tree);
+	}
+
+	if (vmi->mas.node != MAS_START &&
+	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
+		vma_iter_invalidate(vmi);
+
+	vmi->mas.index = vma->vm_start;
+	vmi->mas.last = vma->vm_end - 1;
+	mas_store_prealloc(&vmi->mas, vma);
+}
+
+static inline void vma_iter_clear(struct vma_iterator *vmi,
+				  unsigned long start, unsigned long end)
+{
+	mas_set_range(&vmi->mas, start, end - 1);
+	mas_store_prealloc(&vmi->mas, NULL);
+}
+
+static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
+			struct vm_area_struct *vma, gfp_t gfp)
+{
+	vmi->mas.index = vma->vm_start;
+	vmi->mas.last = vma->vm_end - 1;
+	mas_store_gfp(&vmi->mas, vma, gfp);
+	if (unlikely(mas_is_err(&vmi->mas)))
+		return -ENOMEM;
+
+	return 0;
+}
+
+static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
+			unsigned long start, unsigned long end, gfp_t gfp)
+{
+	vmi->mas.index = start;
+	vmi->mas.last = end - 1;
+	mas_store_gfp(&vmi->mas, NULL, gfp);
+	if (unlikely(mas_is_err(&vmi->mas)))
+		return -ENOMEM;
+
+	return 0;
+}
+
 /*
  * check_brk_limits() - Use platform specific check of range & verify mlock
  * limits.
-- 
2.35.1
