From: "Liam R. Howlett" <Liam.Howlett@oracle.com>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: linux-kernel@vger.kernel.org, linux-mm@kvack.org,
	maple-tree@lists.infradead.org,
	"Liam R. Howlett" <Liam.Howlett@oracle.com>
Subject: [PATCH 17/34] mm: Update validate_mm() to use vma iterator
Date: Tue, 25 Apr 2023 10:09:38 -0400
Message-ID: <20230425140955.3834476-18-Liam.Howlett@oracle.com>
In-Reply-To: <20230425140955.3834476-1-Liam.Howlett@oracle.com>

Use the vma iterator in the validation code and fold the maple tree
checks into the main validate_mm() function.
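
For illustration, the combined check boils down to a single walk with
the vma iterator.  A condensed sketch of the validate_mm() hunk below
(the CONFIG_DEBUG_VM_MAPLE_TREE guard, the error reporting, and the
anon_vma checks are omitted):

    struct vm_area_struct *vma;
    bool warn = false;
    VMA_ITERATOR(vmi, mm, 0);

    mt_validate(&mm->mm_mt);        /* structural check of the tree itself */
    for_each_vma(vmi, vma) {
            /* the tree range for this entry must match the vma exactly */
            if (vma->vm_start != vma_iter_addr(&vmi) ||
                vma->vm_end != vma_iter_end(&vmi))
                    warn = true;    /* mismatch: report and dump, see below */
    }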

Introduce a new function vma_iter_dump_tree() to dump the maple tree in
hex layout, and add a VM_WARN_ON_ONCE_MM() variant that dumps the
mm_struct the first time the condition fails.
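
On a mismatch, the intent is to dump enough state to see how the vma
and the tree disagree.  A condensed sketch of the reporting path,
reusing vmi and vma from the walk above (VM_WARN_ON_ONCE_MM() is the
new once-only variant that also dumps the mm_struct):

    if (VM_WARN_ON_ONCE_MM(vma->vm_start != vma_iter_addr(&vmi), mm)) {
            pr_emerg("issue in %s\n", current->comm);
            dump_stack();
            dump_vma(vma);                  /* dump the vma itself */
            vma_iter_dump_tree(&vmi);       /* iterator state + tree in hex */
    }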

Replace all calls to validate_mm_mt() with validate_mm().

Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
---
 include/linux/mmdebug.h |  14 ++++++
 mm/debug.c              |   9 ++++
 mm/internal.h           |   3 +-
 mm/mmap.c               | 101 ++++++++++++++++------------------------
 4 files changed, 66 insertions(+), 61 deletions(-)

diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index b8728d11c9490..7c3e7b0b0e8fd 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -8,10 +8,12 @@
 struct page;
 struct vm_area_struct;
 struct mm_struct;
+struct vma_iterator;
 
 void dump_page(struct page *page, const char *reason);
 void dump_vma(const struct vm_area_struct *vma);
 void dump_mm(const struct mm_struct *mm);
+void vma_iter_dump_tree(const struct vma_iterator *vmi);
 
 #ifdef CONFIG_DEBUG_VM
 #define VM_BUG_ON(cond) BUG_ON(cond)
@@ -74,6 +76,17 @@ void dump_mm(const struct mm_struct *mm);
 	}								\
 	unlikely(__ret_warn_once);					\
 })
+#define VM_WARN_ON_ONCE_MM(cond, mm)		({			\
+	static bool __section(".data.once") __warned;			\
+	int __ret_warn_once = !!(cond);					\
+									\
+	if (unlikely(__ret_warn_once && !__warned)) {			\
+		dump_mm(mm);						\
+		__warned = true;					\
+		WARN_ON(1);						\
+	}								\
+	unlikely(__ret_warn_once);					\
+})
 
 #define VM_WARN_ON(cond) (void)WARN_ON(cond)
 #define VM_WARN_ON_ONCE(cond) (void)WARN_ON_ONCE(cond)
@@ -90,6 +103,7 @@ void dump_mm(const struct mm_struct *mm);
 #define VM_WARN_ON_ONCE_PAGE(cond, page)  BUILD_BUG_ON_INVALID(cond)
 #define VM_WARN_ON_FOLIO(cond, folio)  BUILD_BUG_ON_INVALID(cond)
 #define VM_WARN_ON_ONCE_FOLIO(cond, folio)  BUILD_BUG_ON_INVALID(cond)
+#define VM_WARN_ON_ONCE_MM(cond, mm)  BUILD_BUG_ON_INVALID(cond)
 #define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond)
 #define VM_WARN(cond, format...) BUILD_BUG_ON_INVALID(cond)
 #endif
diff --git a/mm/debug.c b/mm/debug.c
index c7b228097bd98..ee533a5ceb79d 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -268,4 +268,13 @@ void page_init_poison(struct page *page, size_t size)
 	if (page_init_poisoning)
 		memset(page, PAGE_POISON_PATTERN, size);
 }
+
+void vma_iter_dump_tree(const struct vma_iterator *vmi)
+{
+#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
+	mas_dump(&vmi->mas);
+	mt_dump(vmi->mas.tree, mt_dump_hex);
+#endif	/* CONFIG_DEBUG_VM_MAPLE_TREE */
+}
+
 #endif		/* CONFIG_DEBUG_VM */
diff --git a/mm/internal.h b/mm/internal.h
index 4c195920f5656..8d1a8bd001247 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1051,13 +1051,14 @@ static inline void vma_iter_store(struct vma_iterator *vmi,
 		printk("%lu > %lu\n", vmi->mas.index, vma->vm_start);
 		printk("store of vma %lu-%lu", vma->vm_start, vma->vm_end);
 		printk("into slot    %lu-%lu", vmi->mas.index, vmi->mas.last);
-		mt_dump(vmi->mas.tree, mt_dump_hex);
+		vma_iter_dump_tree(vmi);
 	}
 	if (WARN_ON(vmi->mas.node != MAS_START && vmi->mas.last <  vma->vm_start)) {
 		printk("%lu < %lu\n", vmi->mas.last, vma->vm_start);
 		printk("store of vma %lu-%lu", vma->vm_start, vma->vm_end);
 		printk("into slot    %lu-%lu", vmi->mas.index, vmi->mas.last);
 		mt_dump(vmi->mas.tree, mt_dump_hex);
+		vma_iter_dump_tree(vmi);
 	}
 #endif
 
diff --git a/mm/mmap.c b/mm/mmap.c
index 1554f90d497ef..d34a41791ddb2 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -299,62 +299,44 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 	return origbrk;
 }
 
-#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
-extern void mt_validate(struct maple_tree *mt);
-extern void mt_dump(const struct maple_tree *mt, enum mt_dump_format fmt);
-
-/* Validate the maple tree */
-static void validate_mm_mt(struct mm_struct *mm)
-{
-	struct maple_tree *mt = &mm->mm_mt;
-	struct vm_area_struct *vma_mt;
-
-	MA_STATE(mas, mt, 0, 0);
-
-	mt_validate(&mm->mm_mt);
-	mas_for_each(&mas, vma_mt, ULONG_MAX) {
-		if ((vma_mt->vm_start != mas.index) ||
-		    (vma_mt->vm_end - 1 != mas.last)) {
-			pr_emerg("issue in %s\n", current->comm);
-			dump_stack();
-			dump_vma(vma_mt);
-			pr_emerg("mt piv: %p %lu - %lu\n", vma_mt,
-				 mas.index, mas.last);
-			pr_emerg("mt vma: %p %lu - %lu\n", vma_mt,
-				 vma_mt->vm_start, vma_mt->vm_end);
-
-			mt_dump(mas.tree, mt_dump_hex);
-			if (vma_mt->vm_end != mas.last + 1) {
-				pr_err("vma: %p vma_mt %lu-%lu\tmt %lu-%lu\n",
-						mm, vma_mt->vm_start, vma_mt->vm_end,
-						mas.index, mas.last);
-				mt_dump(mas.tree, mt_dump_hex);
-			}
-			VM_BUG_ON_MM(vma_mt->vm_end != mas.last + 1, mm);
-			if (vma_mt->vm_start != mas.index) {
-				pr_err("vma: %p vma_mt %p %lu - %lu doesn't match\n",
-						mm, vma_mt, vma_mt->vm_start, vma_mt->vm_end);
-				mt_dump(mas.tree, mt_dump_hex);
-			}
-			VM_BUG_ON_MM(vma_mt->vm_start != mas.index, mm);
-		}
-	}
-}
-
+#if defined(CONFIG_DEBUG_VM)
 static void validate_mm(struct mm_struct *mm)
 {
 	int bug = 0;
 	int i = 0;
 	struct vm_area_struct *vma;
-	MA_STATE(mas, &mm->mm_mt, 0, 0);
+	VMA_ITERATOR(vmi, mm, 0);
 
-	validate_mm_mt(mm);
+#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
+	mt_validate(&mm->mm_mt);
+#endif
 
-	mas_for_each(&mas, vma, ULONG_MAX) {
+	for_each_vma(vmi, vma) {
 #ifdef CONFIG_DEBUG_VM_RB
 		struct anon_vma *anon_vma = vma->anon_vma;
 		struct anon_vma_chain *avc;
+#endif
+		unsigned long vmi_start, vmi_end;
+		bool warn = 0;
+
+		vmi_start = vma_iter_addr(&vmi);
+		vmi_end = vma_iter_end(&vmi);
+		if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm))
+			warn = 1;
+
+		if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm))
+			warn = 1;
+
+		if (warn) {
+			pr_emerg("issue in %s\n", current->comm);
+			dump_stack();
+			dump_vma(vma);
+			pr_emerg("tree range: %px start %lx end %lx\n", vma,
+				 vmi_start, vmi_end - 1);
+			vma_iter_dump_tree(&vmi);
+		}
 
+#ifdef CONFIG_DEBUG_VM_RB
 		if (anon_vma) {
 			anon_vma_lock_read(anon_vma);
 			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
@@ -365,16 +347,15 @@ static void validate_mm(struct mm_struct *mm)
 		i++;
 	}
 	if (i != mm->map_count) {
-		pr_emerg("map_count %d mas_for_each %d\n", mm->map_count, i);
+		pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i);
 		bug = 1;
 	}
 	VM_BUG_ON_MM(bug, mm);
 }
 
-#else /* !CONFIG_DEBUG_VM_MAPLE_TREE */
-#define validate_mm_mt(root) do { } while (0)
+#else /* !CONFIG_DEBUG_VM */
 #define validate_mm(mm) do { } while (0)
-#endif /* CONFIG_DEBUG_VM_MAPLE_TREE */
+#endif /* CONFIG_DEBUG_VM */
 
 /*
  * vma has some anon_vma assigned, and is already inserted on that
@@ -2234,7 +2215,7 @@ int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
 	struct vm_area_struct *new;
 	int err;
 
-	validate_mm_mt(vma->vm_mm);
+	validate_mm(vma->vm_mm);
 
 	WARN_ON(vma->vm_start >= addr);
 	WARN_ON(vma->vm_end <= addr);
@@ -2292,7 +2273,7 @@ int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
 	/* Success. */
 	if (new_below)
 		vma_next(vmi);
-	validate_mm_mt(vma->vm_mm);
+	validate_mm(vma->vm_mm);
 	return 0;
 
 out_free_mpol:
@@ -2301,7 +2282,7 @@ int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
 	vma_iter_free(vmi);
 out_free_vma:
 	vm_area_free(new);
-	validate_mm_mt(vma->vm_mm);
+	validate_mm(vma->vm_mm);
 	return err;
 }
 
@@ -2936,7 +2917,7 @@ int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
 
 	arch_unmap(mm, start, end);
 	ret = do_vmi_align_munmap(vmi, vma, mm, start, end, uf, downgrade);
-	validate_mm_mt(mm);
+	validate_mm(mm);
 	return ret;
 }
 
@@ -2958,7 +2939,7 @@ static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
 	struct mm_struct *mm = current->mm;
 	struct vma_prepare vp;
 
-	validate_mm_mt(mm);
+	validate_mm(mm);
 	/*
 	 * Check against address space limits by the changed size
 	 * Note: This happens *after* clearing old mappings in some code paths.
@@ -3199,7 +3180,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 	bool faulted_in_anon_vma = true;
 	VMA_ITERATOR(vmi, mm, addr);
 
-	validate_mm_mt(mm);
+	validate_mm(mm);
 	/*
 	 * If anonymous vma has not yet been faulted, update new pgoff
 	 * to match new location, to increase its chance of merging.
@@ -3258,7 +3239,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 			goto out_vma_link;
 		*need_rmap_locks = false;
 	}
-	validate_mm_mt(mm);
+	validate_mm(mm);
 	return new_vma;
 
 out_vma_link:
@@ -3274,7 +3255,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 out_free_vma:
 	vm_area_free(new_vma);
 out:
-	validate_mm_mt(mm);
+	validate_mm(mm);
 	return NULL;
 }
 
@@ -3411,7 +3392,7 @@ static struct vm_area_struct *__install_special_mapping(
 	int ret;
 	struct vm_area_struct *vma;
 
-	validate_mm_mt(mm);
+	validate_mm(mm);
 	vma = vm_area_alloc(mm);
 	if (unlikely(vma == NULL))
 		return ERR_PTR(-ENOMEM);
@@ -3434,12 +3415,12 @@ static struct vm_area_struct *__install_special_mapping(
 
 	perf_event_mmap(vma);
 
-	validate_mm_mt(mm);
+	validate_mm(mm);
 	return vma;
 
 out:
 	vm_area_free(vma);
-	validate_mm_mt(mm);
+	validate_mm(mm);
 	return ERR_PTR(ret);
 }
 
-- 
2.39.2

