From: "Liam R. Howlett" <Liam.Howlett@oracle.com>
To: linux-mm@kvack.org, linux-kernel@vger.kernel.org,
	Andrew Morton <akpm@linux-foundation.org>,
	maple-tree@lists.infradead.org
Cc: "Liam R. Howlett" <Liam.Howlett@oracle.com>
Subject: [PATCH v4 27/49] nommu: Convert nommu to using the vma iterator
Date: Fri, 20 Jan 2023 11:26:28 -0500
Message-ID: <20230120162650.984577-28-Liam.Howlett@oracle.com>
In-Reply-To: <20230120162650.984577-1-Liam.Howlett@oracle.com>

Gain type safety in nommu by using the vma_iterator rather than the maple
tree directly.

Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
---
 mm/nommu.c | 79 +++++++++++++++++++++---------------------------------
 1 file changed, 31 insertions(+), 48 deletions(-)
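
The conversion follows the same shape throughout mm/nommu.c.  Below is a
minimal before/after sketch of the store path (illustration only, not part
of the patch; the add_to_tree_old()/add_to_tree_new() names are hypothetical,
the helpers are the ones touched in the hunks that follow):

/* Before: the maple state is driven by hand and the range set explicitly. */
static int add_to_tree_old(struct mm_struct *mm, struct vm_area_struct *vma)
{
	MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_end);

	if (mas_preallocate(&mas, GFP_KERNEL))
		return -ENOMEM;
	mas_set_range(&mas, vma->vm_start, vma->vm_end - 1);
	mas_store_prealloc(&mas, vma);
	return 0;
}

/* After: the vma_iterator wrapper carries the range and is typed against
 * struct vm_area_struct.
 */
static int add_to_tree_new(struct mm_struct *mm, struct vm_area_struct *vma)
{
	VMA_ITERATOR(vmi, mm, vma->vm_start);

	if (vma_iter_prealloc(&vmi))
		return -ENOMEM;
	vma_iter_store(&vmi, vma);
	return 0;
}

Lookups change the same way: mas_walk() and mas_find() become vma_iter_load()
and vma_find(), as in find_vma() and do_munmap() below.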

diff --git a/mm/nommu.c b/mm/nommu.c
index 0481922fe66e..7a52a7c37009 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -544,19 +544,6 @@ static void put_nommu_region(struct vm_region *region)
 	__put_nommu_region(region);
 }
 
-void vma_mas_store(struct vm_area_struct *vma, struct ma_state *mas)
-{
-	mas_set_range(mas, vma->vm_start, vma->vm_end - 1);
-	mas_store_prealloc(mas, vma);
-}
-
-void vma_mas_remove(struct vm_area_struct *vma, struct ma_state *mas)
-{
-	mas->index = vma->vm_start;
-	mas->last = vma->vm_end - 1;
-	mas_store_prealloc(mas, NULL);
-}
-
 static void setup_vma_to_mm(struct vm_area_struct *vma, struct mm_struct *mm)
 {
 	vma->vm_mm = mm;
@@ -574,13 +561,13 @@ static void setup_vma_to_mm(struct vm_area_struct *vma, struct mm_struct *mm)
 }
 
 /*
- * mas_add_vma_to_mm() - Maple state variant of add_mas_to_mm().
- * @mas: The maple state with preallocations.
+ * vmi_add_vma_to_mm() - VMA Iterator variant of add_vma_to_mm().
+ * @vmi: The VMA iterator
  * @mm: The mm_struct
  * @vma: The vma to add
  *
  */
-static void mas_add_vma_to_mm(struct ma_state *mas, struct mm_struct *mm,
+static void vmi_add_vma_to_mm(struct vma_iterator *vmi, struct mm_struct *mm,
 			      struct vm_area_struct *vma)
 {
 	BUG_ON(!vma->vm_region);
@@ -589,7 +576,7 @@ static void mas_add_vma_to_mm(struct ma_state *mas, struct mm_struct *mm,
 	mm->map_count++;
 
 	/* add the VMA to the tree */
-	vma_mas_store(vma, mas);
+	vma_iter_store(vmi, vma);
 }
 
 /*
@@ -600,14 +587,14 @@ static void mas_add_vma_to_mm(struct ma_state *mas, struct mm_struct *mm,
  */
 static int add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 {
-	MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_end);
+	VMA_ITERATOR(vmi, mm, vma->vm_start);
 
-	if (mas_preallocate(&mas, GFP_KERNEL)) {
+	if (vma_iter_prealloc(&vmi)) {
 		pr_warn("Allocation of vma tree for process %d failed\n",
 		       current->pid);
 		return -ENOMEM;
 	}
-	mas_add_vma_to_mm(&mas, mm, vma);
+	vmi_add_vma_to_mm(&vmi, mm, vma);
 	return 0;
 }
 
@@ -626,14 +613,15 @@ static void cleanup_vma_from_mm(struct vm_area_struct *vma)
 		i_mmap_unlock_write(mapping);
 	}
 }
+
 /*
  * delete a VMA from its owning mm_struct and address space
  */
 static int delete_vma_from_mm(struct vm_area_struct *vma)
 {
-	MA_STATE(mas, &vma->vm_mm->mm_mt, 0, 0);
+	VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_start);
 
-	if (mas_preallocate(&mas, GFP_KERNEL)) {
+	if (vma_iter_prealloc(&vmi)) {
 		pr_warn("Allocation of vma tree for process %d failed\n",
 		       current->pid);
 		return -ENOMEM;
@@ -641,10 +629,9 @@ static int delete_vma_from_mm(struct vm_area_struct *vma)
 	cleanup_vma_from_mm(vma);
 
 	/* remove from the MM's tree and list */
-	vma_mas_remove(vma, &mas);
+	vma_iter_clear(&vmi, vma->vm_start, vma->vm_end);
 	return 0;
 }
-
 /*
  * destroy a VMA record
  */
@@ -675,9 +662,9 @@ EXPORT_SYMBOL(find_vma_intersection);
  */
 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 {
-	MA_STATE(mas, &mm->mm_mt, addr, addr);
+	VMA_ITERATOR(vmi, mm, addr);
 
-	return mas_walk(&mas);
+	return vma_iter_load(&vmi);
 }
 EXPORT_SYMBOL(find_vma);
 
@@ -709,9 +696,9 @@ static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
 {
 	struct vm_area_struct *vma;
 	unsigned long end = addr + len;
-	MA_STATE(mas, &mm->mm_mt, addr, addr);
+	VMA_ITERATOR(vmi, mm, addr);
 
-	vma = mas_walk(&mas);
+	vma = vma_iter_load(&vmi);
 	if (!vma)
 		return NULL;
 	if (vma->vm_start != addr)
@@ -1062,7 +1049,7 @@ unsigned long do_mmap(struct file *file,
 	vm_flags_t vm_flags;
 	unsigned long capabilities, result;
 	int ret;
-	MA_STATE(mas, &current->mm->mm_mt, 0, 0);
+	VMA_ITERATOR(vmi, current->mm, 0);
 
 	*populate = 0;
 
@@ -1091,8 +1078,8 @@ unsigned long do_mmap(struct file *file,
 	if (!vma)
 		goto error_getting_vma;
 
-	if (mas_preallocate(&mas, GFP_KERNEL))
-		goto error_maple_preallocate;
+	if (vma_iter_prealloc(&vmi))
+		goto error_vma_iter_prealloc;
 
 	region->vm_usage = 1;
 	region->vm_flags = vm_flags;
@@ -1234,7 +1221,7 @@ unsigned long do_mmap(struct file *file,
 	current->mm->total_vm += len >> PAGE_SHIFT;
 
 share:
-	mas_add_vma_to_mm(&mas, current->mm, vma);
+	vmi_add_vma_to_mm(&vmi, current->mm, vma);
 
 	/* we flush the region from the icache only when the first executable
 	 * mapping of it is made  */
@@ -1250,7 +1237,7 @@ unsigned long do_mmap(struct file *file,
 error_just_free:
 	up_write(&nommu_region_sem);
 error:
-	mas_destroy(&mas);
+	vma_iter_free(&vmi);
 	if (region->vm_file)
 		fput(region->vm_file);
 	kmem_cache_free(vm_region_jar, region);
@@ -1278,7 +1265,7 @@ unsigned long do_mmap(struct file *file,
 	show_free_areas(0, NULL);
 	return -ENOMEM;
 
-error_maple_preallocate:
+error_vma_iter_prealloc:
 	kmem_cache_free(vm_region_jar, region);
 	vm_area_free(vma);
 	pr_warn("Allocation of vma tree for process %d failed\n", current->pid);
@@ -1344,20 +1331,18 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
  * split a vma into two pieces at address 'addr', a new vma is allocated either
  * for the first part or the tail.
  */
-int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
-	      unsigned long addr, int new_below)
+int vmi_split_vma(struct vma_iterator *vmi, struct mm_struct *mm,
+		  struct vm_area_struct *vma, unsigned long addr, int new_below)
 {
 	struct vm_area_struct *new;
 	struct vm_region *region;
 	unsigned long npages;
-	MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_end);
 
 	/* we're only permitted to split anonymous regions (these should have
 	 * only a single usage on the region) */
 	if (vma->vm_file)
 		return -ENOMEM;
 
-	mm = vma->vm_mm;
 	if (mm->map_count >= sysctl_max_map_count)
 		return -ENOMEM;
 
@@ -1369,10 +1354,10 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (!new)
 		goto err_vma_dup;
 
-	if (mas_preallocate(&mas, GFP_KERNEL)) {
+	if (vma_iter_prealloc(vmi)) {
 		pr_warn("Allocation of vma tree for process %d failed\n",
 			current->pid);
-		goto err_mas_preallocate;
+		goto err_vmi_preallocate;
 	}
 
 	/* most fields are the same, copy all, and then fixup */
@@ -1406,13 +1391,11 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	setup_vma_to_mm(vma, mm);
 	setup_vma_to_mm(new, mm);
-	mas_set_range(&mas, vma->vm_start, vma->vm_end - 1);
-	mas_store(&mas, vma);
-	vma_mas_store(new, &mas);
+	vma_iter_store(vmi, new);
 	mm->map_count++;
 	return 0;
 
-err_mas_preallocate:
+err_vmi_preallocate:
 	vm_area_free(new);
 err_vma_dup:
 	kmem_cache_free(vm_region_jar, region);
@@ -1466,7 +1449,7 @@ static int shrink_vma(struct mm_struct *mm,
  */
 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf)
 {
-	MA_STATE(mas, &mm->mm_mt, start, start);
+	VMA_ITERATOR(vmi, mm, start);
 	struct vm_area_struct *vma;
 	unsigned long end;
 	int ret = 0;
@@ -1478,7 +1461,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list
 	end = start + len;
 
 	/* find the first potentially overlapping VMA */
-	vma = mas_find(&mas, end - 1);
+	vma = vma_find(&vmi, end);
 	if (!vma) {
 		static int limit;
 		if (limit < 5) {
@@ -1497,7 +1480,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list
 				return -EINVAL;
 			if (end == vma->vm_end)
 				goto erase_whole_vma;
-			vma = mas_next(&mas, end - 1);
+			vma = vma_find(&vmi, end);
 		} while (vma);
 		return -EINVAL;
 	} else {
@@ -1511,7 +1494,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list
 		if (end != vma->vm_end && offset_in_page(end))
 			return -EINVAL;
 		if (start != vma->vm_start && end != vma->vm_end) {
-			ret = split_vma(mm, vma, start, 1);
+			ret = vmi_split_vma(&vmi, mm, vma, start, 1);
 			if (ret < 0)
 				return ret;
 		}
-- 
2.35.1

