* [PATCH v2 1/4] mm: kmsan: handle alloc failures in kmsan_vmap_pages_range_noflush()
@ 2023-04-13 13:12 Alexander Potapenko
  2023-04-13 13:12 ` [PATCH v2 2/4] mm: kmsan: handle alloc failures in kmsan_ioremap_page_range() Alexander Potapenko
                   ` (3 more replies)
  0 siblings, 4 replies; 8+ messages in thread
From: Alexander Potapenko @ 2023-04-13 13:12 UTC (permalink / raw)
  To: glider
  Cc: urezki, hch, linux-kernel, linux-mm, akpm, elver, dvyukov,
	kasan-dev, Dipanjan Das

As reported by Dipanjan Das, when KMSAN is used together with kernel
fault injection (or, generally, even without the latter), calls to
kcalloc() or __vmap_pages_range_noflush() may fail, leaving the
metadata mappings for the virtual mapping in an inconsistent state.
When these metadata mappings are accessed later, the kernel crashes.

To address the problem, we return a non-zero error code from
kmsan_vmap_pages_range_noflush() in the case of any allocation/mapping
failure inside it, and make vmap_pages_range_noflush() return an error
if KMSAN fails to allocate the metadata.

This patch also removes KMSAN_WARN_ON() from
kmsan_vmap_pages_range_noflush(), as these allocation failures are no
longer fatal.
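
For reference, here is roughly what vmap_pages_range_noflush() looks
like once the mm/vmalloc.c hunk below is applied (assembled from the
diff for readability, with comments added; not an authoritative copy of
the tree):

int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	/* Set up the KMSAN shadow/origin mappings first. */
	int ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages,
						 page_shift);

	/* Bail out before touching the page tables if metadata is missing. */
	if (ret)
		return ret;
	return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
}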

Reported-by: Dipanjan Das <mail.dipanjan.das@gmail.com>
Link: https://lore.kernel.org/linux-mm/CANX2M5ZRrRA64k0hOif02TjmY9kbbO2aCBPyq79es34RXZ=cAw@mail.gmail.com/
Fixes: b073d7f8aee4 ("mm: kmsan: maintain KMSAN metadata for page operations")
Signed-off-by: Alexander Potapenko <glider@google.com>

---
v2:
 -- return 0 from the inline version of kmsan_vmap_pages_range_noflush()
    (spotted by kernel test robot <lkp@intel.com>)
---
 include/linux/kmsan.h | 20 +++++++++++---------
 mm/kmsan/shadow.c     | 27 ++++++++++++++++++---------
 mm/vmalloc.c          |  6 +++++-
 3 files changed, 34 insertions(+), 19 deletions(-)

diff --git a/include/linux/kmsan.h b/include/linux/kmsan.h
index e38ae3c346184..c7ff3aefc5a13 100644
--- a/include/linux/kmsan.h
+++ b/include/linux/kmsan.h
@@ -134,11 +134,12 @@ void kmsan_kfree_large(const void *ptr);
  * @page_shift:	page_shift passed to vmap_range_noflush().
  *
  * KMSAN maps shadow and origin pages of @pages into contiguous ranges in
- * vmalloc metadata address range.
+ * vmalloc metadata address range. Returns 0 on success, callers must check
+ * for non-zero return value.
  */
-void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
-				    pgprot_t prot, struct page **pages,
-				    unsigned int page_shift);
+int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
+				   pgprot_t prot, struct page **pages,
+				   unsigned int page_shift);
 
 /**
  * kmsan_vunmap_kernel_range_noflush() - Notify KMSAN about a vunmap.
@@ -281,12 +282,13 @@ static inline void kmsan_kfree_large(const void *ptr)
 {
 }
 
-static inline void kmsan_vmap_pages_range_noflush(unsigned long start,
-						  unsigned long end,
-						  pgprot_t prot,
-						  struct page **pages,
-						  unsigned int page_shift)
+static inline int kmsan_vmap_pages_range_noflush(unsigned long start,
+						 unsigned long end,
+						 pgprot_t prot,
+						 struct page **pages,
+						 unsigned int page_shift)
 {
+	return 0;
 }
 
 static inline void kmsan_vunmap_range_noflush(unsigned long start,
diff --git a/mm/kmsan/shadow.c b/mm/kmsan/shadow.c
index a787c04e9583c..b8bb95eea5e3d 100644
--- a/mm/kmsan/shadow.c
+++ b/mm/kmsan/shadow.c
@@ -216,27 +216,29 @@ void kmsan_free_page(struct page *page, unsigned int order)
 	kmsan_leave_runtime();
 }
 
-void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
-				    pgprot_t prot, struct page **pages,
-				    unsigned int page_shift)
+int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
+				   pgprot_t prot, struct page **pages,
+				   unsigned int page_shift)
 {
 	unsigned long shadow_start, origin_start, shadow_end, origin_end;
 	struct page **s_pages, **o_pages;
-	int nr, mapped;
+	int nr, mapped, err = 0;
 
 	if (!kmsan_enabled)
-		return;
+		return 0;
 
 	shadow_start = vmalloc_meta((void *)start, KMSAN_META_SHADOW);
 	shadow_end = vmalloc_meta((void *)end, KMSAN_META_SHADOW);
 	if (!shadow_start)
-		return;
+		return 0;
 
 	nr = (end - start) / PAGE_SIZE;
 	s_pages = kcalloc(nr, sizeof(*s_pages), GFP_KERNEL);
 	o_pages = kcalloc(nr, sizeof(*o_pages), GFP_KERNEL);
-	if (!s_pages || !o_pages)
+	if (!s_pages || !o_pages) {
+		err = -ENOMEM;
 		goto ret;
+	}
 	for (int i = 0; i < nr; i++) {
 		s_pages[i] = shadow_page_for(pages[i]);
 		o_pages[i] = origin_page_for(pages[i]);
@@ -249,10 +251,16 @@ void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
 	kmsan_enter_runtime();
 	mapped = __vmap_pages_range_noflush(shadow_start, shadow_end, prot,
 					    s_pages, page_shift);
-	KMSAN_WARN_ON(mapped);
+	if (mapped) {
+		err = mapped;
+		goto ret;
+	}
 	mapped = __vmap_pages_range_noflush(origin_start, origin_end, prot,
 					    o_pages, page_shift);
-	KMSAN_WARN_ON(mapped);
+	if (mapped) {
+		err = mapped;
+		goto ret;
+	}
 	kmsan_leave_runtime();
 	flush_tlb_kernel_range(shadow_start, shadow_end);
 	flush_tlb_kernel_range(origin_start, origin_end);
@@ -262,6 +270,7 @@ void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
 ret:
 	kfree(s_pages);
 	kfree(o_pages);
+	return err;
 }
 
 /* Allocate metadata for pages allocated at boot time. */
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index a50072066221a..1355d95cce1ca 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -605,7 +605,11 @@ int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
 int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
 		pgprot_t prot, struct page **pages, unsigned int page_shift)
 {
-	kmsan_vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
+	int ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages,
+						 page_shift);
+
+	if (ret)
+		return ret;
 	return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
 }
 
-- 
2.40.0.577.gac1e443424-goog


* [PATCH v2 2/4] mm: kmsan: handle alloc failures in kmsan_ioremap_page_range()
  2023-04-13 13:12 [PATCH v2 1/4] mm: kmsan: handle alloc failures in kmsan_vmap_pages_range_noflush() Alexander Potapenko
@ 2023-04-13 13:12 ` Alexander Potapenko
  2023-04-18 10:10   ` Marco Elver
  2023-04-13 13:12 ` [PATCH v2 3/4] mm: kmsan: apply __must_check to non-void functions Alexander Potapenko
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 8+ messages in thread
From: Alexander Potapenko @ 2023-04-13 13:12 UTC (permalink / raw)
  To: glider
  Cc: urezki, hch, linux-kernel, linux-mm, akpm, elver, dvyukov,
	kasan-dev, Dipanjan Das

Similarly to kmsan_vmap_pages_range_noflush(),
kmsan_ioremap_page_range() must also properly handle allocation/mapping
failures. When such a failure occurs, it must clean up the metadata
mappings created so far and return an error code, so that the error can
be propagated to ioremap_page_range(). Without doing so, KMSAN may
silently fail to bring the metadata for the page range into a
consistent state, which will result in user-visible crashes when that
metadata is accessed later.
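
The cleanup follows a common unwind pattern: remember how many loop
iterations completed successfully and, on failure, tear down exactly
that much. A minimal sketch of the idea (illustrative only; map_one()
and unmap_range() are hypothetical helpers, not kernel APIs):

static int map_one(unsigned long addr);			/* hypothetical */
static void unmap_range(unsigned long start, unsigned long end);

static int map_all(unsigned long start, int nr)
{
	int i, err = 0, done = 0;

	for (i = 0; i < nr; i++) {
		err = map_one(start + i * PAGE_SIZE);	/* may fail */
		if (err)
			break;
		done = i + 1;	/* this iteration is fully set up */
	}
	/* Undo only the iterations that fully succeeded. */
	if (err && done > 0)
		unmap_range(start, start + done * PAGE_SIZE);
	return err;
}

In the actual patch the counter is called "clean", and the failing
iteration additionally frees the shadow/origin pages it had already
allocated before unwinding the previously created mappings.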

Reported-by: Dipanjan Das <mail.dipanjan.das@gmail.com>
Link: https://lore.kernel.org/linux-mm/CANX2M5ZRrRA64k0hOif02TjmY9kbbO2aCBPyq79es34RXZ=cAw@mail.gmail.com/
Fixes: b073d7f8aee4 ("mm: kmsan: maintain KMSAN metadata for page operations")
Signed-off-by: Alexander Potapenko <glider@google.com>

---
v2:
 -- updated patch description as requested by Andrew Morton
 -- check the return value of __vmap_pages_range_noflush(), as suggested by Dipanjan Das
 -- return 0 from the inline version of kmsan_ioremap_page_range()
    (spotted by kernel test robot <lkp@intel.com>)
---
 include/linux/kmsan.h | 19 ++++++++-------
 mm/kmsan/hooks.c      | 55 ++++++++++++++++++++++++++++++++++++-------
 mm/vmalloc.c          |  4 ++--
 3 files changed, 59 insertions(+), 19 deletions(-)

diff --git a/include/linux/kmsan.h b/include/linux/kmsan.h
index c7ff3aefc5a13..30b17647ce3c7 100644
--- a/include/linux/kmsan.h
+++ b/include/linux/kmsan.h
@@ -160,11 +160,12 @@ void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end);
  * @page_shift:	page_shift argument passed to vmap_range_noflush().
  *
  * KMSAN creates new metadata pages for the physical pages mapped into the
- * virtual memory.
+ * virtual memory. Returns 0 on success, callers must check for non-zero return
+ * value.
  */
-void kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
-			      phys_addr_t phys_addr, pgprot_t prot,
-			      unsigned int page_shift);
+int kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
+			     phys_addr_t phys_addr, pgprot_t prot,
+			     unsigned int page_shift);
 
 /**
  * kmsan_iounmap_page_range() - Notify KMSAN about a iounmap_page_range() call.
@@ -296,12 +297,12 @@ static inline void kmsan_vunmap_range_noflush(unsigned long start,
 {
 }
 
-static inline void kmsan_ioremap_page_range(unsigned long start,
-					    unsigned long end,
-					    phys_addr_t phys_addr,
-					    pgprot_t prot,
-					    unsigned int page_shift)
+static inline int kmsan_ioremap_page_range(unsigned long start,
+					   unsigned long end,
+					   phys_addr_t phys_addr, pgprot_t prot,
+					   unsigned int page_shift)
 {
+	return 0;
 }
 
 static inline void kmsan_iounmap_page_range(unsigned long start,
diff --git a/mm/kmsan/hooks.c b/mm/kmsan/hooks.c
index 3807502766a3e..ec0da72e65aa0 100644
--- a/mm/kmsan/hooks.c
+++ b/mm/kmsan/hooks.c
@@ -148,35 +148,74 @@ void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end)
  * into the virtual memory. If those physical pages already had shadow/origin,
  * those are ignored.
  */
-void kmsan_ioremap_page_range(unsigned long start, unsigned long end,
-			      phys_addr_t phys_addr, pgprot_t prot,
-			      unsigned int page_shift)
+int kmsan_ioremap_page_range(unsigned long start, unsigned long end,
+			     phys_addr_t phys_addr, pgprot_t prot,
+			     unsigned int page_shift)
 {
 	gfp_t gfp_mask = GFP_KERNEL | __GFP_ZERO;
 	struct page *shadow, *origin;
 	unsigned long off = 0;
-	int nr;
+	int nr, err = 0, clean = 0, mapped;
 
 	if (!kmsan_enabled || kmsan_in_runtime())
-		return;
+		return 0;
 
 	nr = (end - start) / PAGE_SIZE;
 	kmsan_enter_runtime();
-	for (int i = 0; i < nr; i++, off += PAGE_SIZE) {
+	for (int i = 0; i < nr; i++, off += PAGE_SIZE, clean = i) {
 		shadow = alloc_pages(gfp_mask, 1);
 		origin = alloc_pages(gfp_mask, 1);
-		__vmap_pages_range_noflush(
+		if (!shadow || !origin) {
+			err = -ENOMEM;
+			goto ret;
+		}
+		mapped = __vmap_pages_range_noflush(
 			vmalloc_shadow(start + off),
 			vmalloc_shadow(start + off + PAGE_SIZE), prot, &shadow,
 			PAGE_SHIFT);
-		__vmap_pages_range_noflush(
+		if (mapped) {
+			err = mapped;
+			goto ret;
+		}
+		shadow = NULL;
+		mapped = __vmap_pages_range_noflush(
 			vmalloc_origin(start + off),
 			vmalloc_origin(start + off + PAGE_SIZE), prot, &origin,
 			PAGE_SHIFT);
+		if (mapped) {
+			__vunmap_range_noflush(
+				vmalloc_shadow(start + off),
+				vmalloc_shadow(start + off + PAGE_SIZE));
+			err = mapped;
+			goto ret;
+		}
+		origin = NULL;
+	}
+	/* Page mapping loop finished normally, nothing to clean up. */
+	clean = 0;
+
+ret:
+	if (clean > 0) {
+		/*
+		 * Something went wrong. Clean up shadow/origin pages allocated
+		 * on the last loop iteration, then delete mappings created
+		 * during the previous iterations.
+		 */
+		if (shadow)
+			__free_pages(shadow, 1);
+		if (origin)
+			__free_pages(origin, 1);
+		__vunmap_range_noflush(
+			vmalloc_shadow(start),
+			vmalloc_shadow(start + clean * PAGE_SIZE));
+		__vunmap_range_noflush(
+			vmalloc_origin(start),
+			vmalloc_origin(start + clean * PAGE_SIZE));
 	}
 	flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
 	flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
 	kmsan_leave_runtime();
+	return err;
 }
 
 void kmsan_iounmap_page_range(unsigned long start, unsigned long end)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 1355d95cce1ca..31ff782d368b0 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -313,8 +313,8 @@ int ioremap_page_range(unsigned long addr, unsigned long end,
 				 ioremap_max_page_shift);
 	flush_cache_vmap(addr, end);
 	if (!err)
-		kmsan_ioremap_page_range(addr, end, phys_addr, prot,
-					 ioremap_max_page_shift);
+		err = kmsan_ioremap_page_range(addr, end, phys_addr, prot,
+					       ioremap_max_page_shift);
 	return err;
 }
 
-- 
2.40.0.577.gac1e443424-goog


* [PATCH v2 3/4] mm: kmsan: apply __must_check to non-void functions
  2023-04-13 13:12 [PATCH v2 1/4] mm: kmsan: handle alloc failures in kmsan_vmap_pages_range_noflush() Alexander Potapenko
  2023-04-13 13:12 ` [PATCH v2 2/4] mm: kmsan: handle alloc failures in kmsan_ioremap_page_range() Alexander Potapenko
@ 2023-04-13 13:12 ` Alexander Potapenko
  2023-04-18 10:10   ` Marco Elver
  2023-04-13 13:12 ` [PATCH v2 4/4] mm: apply __must_check to vmap_pages_range_noflush() Alexander Potapenko
  2023-04-18 10:10 ` [PATCH v2 1/4] mm: kmsan: handle alloc failures in kmsan_vmap_pages_range_noflush() Marco Elver
  3 siblings, 1 reply; 8+ messages in thread
From: Alexander Potapenko @ 2023-04-13 13:12 UTC (permalink / raw)
  To: glider
  Cc: urezki, hch, linux-kernel, linux-mm, akpm, elver, dvyukov, kasan-dev

Non-void KMSAN hooks may return error codes that indicate that KMSAN
failed to reflect the changed memory state in the metadata (e.g. it
could not create the necessary memory mappings). In such cases the
callers should handle the errors to prevent the tool from using the
inconsistent metadata in the future.

We mark non-void hooks with __must_check so that error handling is not
skipped.
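
In the kernel, __must_check expands to
__attribute__((warn_unused_result)), so an ignored return value becomes
a compile-time diagnostic. A minimal illustration (hypothetical caller,
not code from this series):

int __must_check kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
					  phys_addr_t phys_addr, pgprot_t prot,
					  unsigned int page_shift);

static void bad_caller(unsigned long addr, unsigned long end,
		       phys_addr_t phys_addr, pgprot_t prot)
{
	/*
	 * gcc/clang warn here, roughly:
	 * "ignoring return value of 'kmsan_ioremap_page_range' declared
	 *  with attribute 'warn_unused_result'"
	 */
	kmsan_ioremap_page_range(addr, end, phys_addr, prot, PAGE_SHIFT);
}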

Signed-off-by: Alexander Potapenko <glider@google.com>
---
 include/linux/kmsan.h | 43 ++++++++++++++++++++++---------------------
 1 file changed, 22 insertions(+), 21 deletions(-)

diff --git a/include/linux/kmsan.h b/include/linux/kmsan.h
index 30b17647ce3c7..e0c23a32cdf01 100644
--- a/include/linux/kmsan.h
+++ b/include/linux/kmsan.h
@@ -54,7 +54,8 @@ void __init kmsan_init_runtime(void);
  * Freed pages are either returned to buddy allocator or held back to be used
  * as metadata pages.
  */
-bool __init kmsan_memblock_free_pages(struct page *page, unsigned int order);
+bool __init __must_check kmsan_memblock_free_pages(struct page *page,
+						   unsigned int order);
 
 /**
  * kmsan_alloc_page() - Notify KMSAN about an alloc_pages() call.
@@ -137,9 +138,11 @@ void kmsan_kfree_large(const void *ptr);
  * vmalloc metadata address range. Returns 0 on success, callers must check
  * for non-zero return value.
  */
-int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
-				   pgprot_t prot, struct page **pages,
-				   unsigned int page_shift);
+int __must_check kmsan_vmap_pages_range_noflush(unsigned long start,
+						unsigned long end,
+						pgprot_t prot,
+						struct page **pages,
+						unsigned int page_shift);
 
 /**
  * kmsan_vunmap_kernel_range_noflush() - Notify KMSAN about a vunmap.
@@ -163,9 +166,9 @@ void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end);
  * virtual memory. Returns 0 on success, callers must check for non-zero return
  * value.
  */
-int kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
-			     phys_addr_t phys_addr, pgprot_t prot,
-			     unsigned int page_shift);
+int __must_check kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
+					  phys_addr_t phys_addr, pgprot_t prot,
+					  unsigned int page_shift);
 
 /**
  * kmsan_iounmap_page_range() - Notify KMSAN about a iounmap_page_range() call.
@@ -237,8 +240,8 @@ static inline void kmsan_init_runtime(void)
 {
 }
 
-static inline bool kmsan_memblock_free_pages(struct page *page,
-					     unsigned int order)
+static inline bool __must_check kmsan_memblock_free_pages(struct page *page,
+							  unsigned int order)
 {
 	return true;
 }
@@ -251,10 +254,9 @@ static inline void kmsan_task_exit(struct task_struct *task)
 {
 }
 
-static inline int kmsan_alloc_page(struct page *page, unsigned int order,
-				   gfp_t flags)
+static inline void kmsan_alloc_page(struct page *page, unsigned int order,
+				    gfp_t flags)
 {
-	return 0;
 }
 
 static inline void kmsan_free_page(struct page *page, unsigned int order)
@@ -283,11 +285,9 @@ static inline void kmsan_kfree_large(const void *ptr)
 {
 }
 
-static inline int kmsan_vmap_pages_range_noflush(unsigned long start,
-						 unsigned long end,
-						 pgprot_t prot,
-						 struct page **pages,
-						 unsigned int page_shift)
+static inline int __must_check kmsan_vmap_pages_range_noflush(
+	unsigned long start, unsigned long end, pgprot_t prot,
+	struct page **pages, unsigned int page_shift)
 {
 	return 0;
 }
@@ -297,10 +297,11 @@ static inline void kmsan_vunmap_range_noflush(unsigned long start,
 {
 }
 
-static inline int kmsan_ioremap_page_range(unsigned long start,
-					   unsigned long end,
-					   phys_addr_t phys_addr, pgprot_t prot,
-					   unsigned int page_shift)
+static inline int __must_check kmsan_ioremap_page_range(unsigned long start,
+							unsigned long end,
+							phys_addr_t phys_addr,
+							pgprot_t prot,
+							unsigned int page_shift)
 {
 	return 0;
 }
-- 
2.40.0.577.gac1e443424-goog


* [PATCH v2 4/4] mm: apply __must_check to vmap_pages_range_noflush()
  2023-04-13 13:12 [PATCH v2 1/4] mm: kmsan: handle alloc failures in kmsan_vmap_pages_range_noflush() Alexander Potapenko
  2023-04-13 13:12 ` [PATCH v2 2/4] mm: kmsan: handle alloc failures in kmsan_ioremap_page_range() Alexander Potapenko
  2023-04-13 13:12 ` [PATCH v2 3/4] mm: kmsan: apply __must_check to non-void functions Alexander Potapenko
@ 2023-04-13 13:12 ` Alexander Potapenko
  2023-04-18 10:10   ` Marco Elver
  2023-04-18 10:10 ` [PATCH v2 1/4] mm: kmsan: handle alloc failures in kmsan_vmap_pages_range_noflush() Marco Elver
  3 siblings, 1 reply; 8+ messages in thread
From: Alexander Potapenko @ 2023-04-13 13:12 UTC (permalink / raw)
  To: glider
  Cc: urezki, hch, linux-kernel, linux-mm, akpm, elver, dvyukov,
	kasan-dev, Dipanjan Das

To prevent errors when vmap_pages_range_noflush() or
__vmap_pages_range_noflush() fails silently (see the link below for an
example), annotate both functions with __must_check so that their
callers do not unconditionally assume the mapping succeeded.
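
The intended caller-side pattern, sketched for illustration
(hypothetical caller; the real callers live in mm/vmalloc.c):

static int map_area(unsigned long addr, unsigned long end,
		    struct page **pages)
{
	int err = vmap_pages_range_noflush(addr, end, PAGE_KERNEL, pages,
					   PAGE_SHIFT);

	if (err)
		return err;	/* propagate instead of assuming success */
	flush_cache_vmap(addr, end);
	return 0;
}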

Reported-by: Dipanjan Das <mail.dipanjan.das@gmail.com>
Link: https://lore.kernel.org/linux-mm/CANX2M5ZRrRA64k0hOif02TjmY9kbbO2aCBPyq79es34RXZ=cAw@mail.gmail.com/
Signed-off-by: Alexander Potapenko <glider@google.com>
---
 mm/internal.h | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/mm/internal.h b/mm/internal.h
index 7920a8b7982ec..a646cf7c41e8a 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -833,20 +833,20 @@ size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
  * mm/vmalloc.c
  */
 #ifdef CONFIG_MMU
-int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
-                pgprot_t prot, struct page **pages, unsigned int page_shift);
+int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
+		pgprot_t prot, struct page **pages, unsigned int page_shift);
 #else
 static inline
-int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
-                pgprot_t prot, struct page **pages, unsigned int page_shift)
+int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
+		pgprot_t prot, struct page **pages, unsigned int page_shift)
 {
 	return -EINVAL;
 }
 #endif
 
-int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
-			       pgprot_t prot, struct page **pages,
-			       unsigned int page_shift);
+int __must_check __vmap_pages_range_noflush(
+	unsigned long addr, unsigned long end, pgprot_t prot,
+	struct page **pages, unsigned int page_shift);
 
 void vunmap_range_noflush(unsigned long start, unsigned long end);
 
-- 
2.40.0.577.gac1e443424-goog


* Re: [PATCH v2 1/4] mm: kmsan: handle alloc failures in kmsan_vmap_pages_range_noflush()
  2023-04-13 13:12 [PATCH v2 1/4] mm: kmsan: handle alloc failures in kmsan_vmap_pages_range_noflush() Alexander Potapenko
                   ` (2 preceding siblings ...)
  2023-04-13 13:12 ` [PATCH v2 4/4] mm: apply __must_check to vmap_pages_range_noflush() Alexander Potapenko
@ 2023-04-18 10:10 ` Marco Elver
  3 siblings, 0 replies; 8+ messages in thread
From: Marco Elver @ 2023-04-18 10:10 UTC (permalink / raw)
  To: Alexander Potapenko
  Cc: urezki, hch, linux-kernel, linux-mm, akpm, dvyukov, kasan-dev,
	Dipanjan Das

On Thu, 13 Apr 2023 at 15:12, Alexander Potapenko <glider@google.com> wrote:
>
> As reported by Dipanjan Das, when KMSAN is used together with kernel
> fault injection (or, generally, even without the latter), calls to
> kcalloc() or __vmap_pages_range_noflush() may fail, leaving the
> metadata mappings for the virtual mapping in an inconsistent state.
> When these metadata mappings are accessed later, the kernel crashes.
>
> To address the problem, we return a non-zero error code from
> kmsan_vmap_pages_range_noflush() in the case of any allocation/mapping
> failure inside it, and make vmap_pages_range_noflush() return an error
> if KMSAN fails to allocate the metadata.
>
> This patch also removes KMSAN_WARN_ON() from vmap_pages_range_noflush(),
> as these allocation failures are not fatal anymore.
>
> Reported-by: Dipanjan Das <mail.dipanjan.das@gmail.com>
> Link: https://lore.kernel.org/linux-mm/CANX2M5ZRrRA64k0hOif02TjmY9kbbO2aCBPyq79es34RXZ=cAw@mail.gmail.com/
> Fixes: b073d7f8aee4 ("mm: kmsan: maintain KMSAN metadata for page operations")
> Signed-off-by: Alexander Potapenko <glider@google.com>

Reviewed-by: Marco Elver <elver@google.com>

Looks reasonable, thanks.

> ---
> v2:
>  -- return 0 from the inline version of kmsan_vmap_pages_range_noflush()
>     (spotted by kernel test robot <lkp@intel.com>)
> ---
>  include/linux/kmsan.h | 20 +++++++++++---------
>  mm/kmsan/shadow.c     | 27 ++++++++++++++++++---------
>  mm/vmalloc.c          |  6 +++++-
>  3 files changed, 34 insertions(+), 19 deletions(-)
>
> diff --git a/include/linux/kmsan.h b/include/linux/kmsan.h
> index e38ae3c346184..c7ff3aefc5a13 100644
> --- a/include/linux/kmsan.h
> +++ b/include/linux/kmsan.h
> @@ -134,11 +134,12 @@ void kmsan_kfree_large(const void *ptr);
>   * @page_shift:        page_shift passed to vmap_range_noflush().
>   *
>   * KMSAN maps shadow and origin pages of @pages into contiguous ranges in
> - * vmalloc metadata address range.
> + * vmalloc metadata address range. Returns 0 on success, callers must check
> + * for non-zero return value.
>   */
> -void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
> -                                   pgprot_t prot, struct page **pages,
> -                                   unsigned int page_shift);
> +int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
> +                                  pgprot_t prot, struct page **pages,
> +                                  unsigned int page_shift);
>
>  /**
>   * kmsan_vunmap_kernel_range_noflush() - Notify KMSAN about a vunmap.
> @@ -281,12 +282,13 @@ static inline void kmsan_kfree_large(const void *ptr)
>  {
>  }
>
> -static inline void kmsan_vmap_pages_range_noflush(unsigned long start,
> -                                                 unsigned long end,
> -                                                 pgprot_t prot,
> -                                                 struct page **pages,
> -                                                 unsigned int page_shift)
> +static inline int kmsan_vmap_pages_range_noflush(unsigned long start,
> +                                                unsigned long end,
> +                                                pgprot_t prot,
> +                                                struct page **pages,
> +                                                unsigned int page_shift)
>  {
> +       return 0;
>  }
>
>  static inline void kmsan_vunmap_range_noflush(unsigned long start,
> diff --git a/mm/kmsan/shadow.c b/mm/kmsan/shadow.c
> index a787c04e9583c..b8bb95eea5e3d 100644
> --- a/mm/kmsan/shadow.c
> +++ b/mm/kmsan/shadow.c
> @@ -216,27 +216,29 @@ void kmsan_free_page(struct page *page, unsigned int order)
>         kmsan_leave_runtime();
>  }
>
> -void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
> -                                   pgprot_t prot, struct page **pages,
> -                                   unsigned int page_shift)
> +int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
> +                                  pgprot_t prot, struct page **pages,
> +                                  unsigned int page_shift)
>  {
>         unsigned long shadow_start, origin_start, shadow_end, origin_end;
>         struct page **s_pages, **o_pages;
> -       int nr, mapped;
> +       int nr, mapped, err = 0;
>
>         if (!kmsan_enabled)
> -               return;
> +               return 0;
>
>         shadow_start = vmalloc_meta((void *)start, KMSAN_META_SHADOW);
>         shadow_end = vmalloc_meta((void *)end, KMSAN_META_SHADOW);
>         if (!shadow_start)
> -               return;
> +               return 0;
>
>         nr = (end - start) / PAGE_SIZE;
>         s_pages = kcalloc(nr, sizeof(*s_pages), GFP_KERNEL);
>         o_pages = kcalloc(nr, sizeof(*o_pages), GFP_KERNEL);
> -       if (!s_pages || !o_pages)
> +       if (!s_pages || !o_pages) {
> +               err = -ENOMEM;
>                 goto ret;
> +       }
>         for (int i = 0; i < nr; i++) {
>                 s_pages[i] = shadow_page_for(pages[i]);
>                 o_pages[i] = origin_page_for(pages[i]);
> @@ -249,10 +251,16 @@ void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
>         kmsan_enter_runtime();
>         mapped = __vmap_pages_range_noflush(shadow_start, shadow_end, prot,
>                                             s_pages, page_shift);
> -       KMSAN_WARN_ON(mapped);
> +       if (mapped) {
> +               err = mapped;
> +               goto ret;
> +       }
>         mapped = __vmap_pages_range_noflush(origin_start, origin_end, prot,
>                                             o_pages, page_shift);
> -       KMSAN_WARN_ON(mapped);
> +       if (mapped) {
> +               err = mapped;
> +               goto ret;
> +       }
>         kmsan_leave_runtime();
>         flush_tlb_kernel_range(shadow_start, shadow_end);
>         flush_tlb_kernel_range(origin_start, origin_end);
> @@ -262,6 +270,7 @@ void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
>  ret:
>         kfree(s_pages);
>         kfree(o_pages);
> +       return err;
>  }
>
>  /* Allocate metadata for pages allocated at boot time. */
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index a50072066221a..1355d95cce1ca 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -605,7 +605,11 @@ int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
>  int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
>                 pgprot_t prot, struct page **pages, unsigned int page_shift)
>  {
> -       kmsan_vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
> +       int ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages,
> +                                                page_shift);
> +
> +       if (ret)
> +               return ret;
>         return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
>  }
>
> --
> 2.40.0.577.gac1e443424-goog
>

* Re: [PATCH v2 2/4] mm: kmsan: handle alloc failures in kmsan_ioremap_page_range()
  2023-04-13 13:12 ` [PATCH v2 2/4] mm: kmsan: handle alloc failures in kmsan_ioremap_page_range() Alexander Potapenko
@ 2023-04-18 10:10   ` Marco Elver
  0 siblings, 0 replies; 8+ messages in thread
From: Marco Elver @ 2023-04-18 10:10 UTC (permalink / raw)
  To: Alexander Potapenko
  Cc: urezki, hch, linux-kernel, linux-mm, akpm, dvyukov, kasan-dev,
	Dipanjan Das

On Thu, 13 Apr 2023 at 15:12, Alexander Potapenko <glider@google.com> wrote:
>
> Similarly to kmsan_vmap_pages_range_noflush(),
> kmsan_ioremap_page_range() must also properly handle allocation/mapping
> failures. In the case of such, it must clean up the already created
> metadata mappings and return an error code, so that the error can be
> propagated to ioremap_page_range(). Without doing so, KMSAN may silently
> fail to bring the metadata for the page range into a consistent state,
> which will result in user-visible crashes when trying to access them.
>
> Reported-by: Dipanjan Das <mail.dipanjan.das@gmail.com>
> Link: https://lore.kernel.org/linux-mm/CANX2M5ZRrRA64k0hOif02TjmY9kbbO2aCBPyq79es34RXZ=cAw@mail.gmail.com/
> Fixes: b073d7f8aee4 ("mm: kmsan: maintain KMSAN metadata for page operations")
> Signed-off-by: Alexander Potapenko <glider@google.com>

Reviewed-by: Marco Elver <elver@google.com>

> ---
> v2:
>  -- updated patch description as requested by Andrew Morton
>  -- check the return value of __vmap_pages_range_noflush(), as suggested by Dipanjan Das
>  -- return 0 from the inline version of kmsan_ioremap_page_range()
>     (spotted by kernel test robot <lkp@intel.com>)
> ---
>  include/linux/kmsan.h | 19 ++++++++-------
>  mm/kmsan/hooks.c      | 55 ++++++++++++++++++++++++++++++++++++-------
>  mm/vmalloc.c          |  4 ++--
>  3 files changed, 59 insertions(+), 19 deletions(-)
>
> diff --git a/include/linux/kmsan.h b/include/linux/kmsan.h
> index c7ff3aefc5a13..30b17647ce3c7 100644
> --- a/include/linux/kmsan.h
> +++ b/include/linux/kmsan.h
> @@ -160,11 +160,12 @@ void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end);
>   * @page_shift:        page_shift argument passed to vmap_range_noflush().
>   *
>   * KMSAN creates new metadata pages for the physical pages mapped into the
> - * virtual memory.
> + * virtual memory. Returns 0 on success, callers must check for non-zero return
> + * value.
>   */
> -void kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
> -                             phys_addr_t phys_addr, pgprot_t prot,
> -                             unsigned int page_shift);
> +int kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
> +                            phys_addr_t phys_addr, pgprot_t prot,
> +                            unsigned int page_shift);
>
>  /**
>   * kmsan_iounmap_page_range() - Notify KMSAN about a iounmap_page_range() call.
> @@ -296,12 +297,12 @@ static inline void kmsan_vunmap_range_noflush(unsigned long start,
>  {
>  }
>
> -static inline void kmsan_ioremap_page_range(unsigned long start,
> -                                           unsigned long end,
> -                                           phys_addr_t phys_addr,
> -                                           pgprot_t prot,
> -                                           unsigned int page_shift)
> +static inline int kmsan_ioremap_page_range(unsigned long start,
> +                                          unsigned long end,
> +                                          phys_addr_t phys_addr, pgprot_t prot,
> +                                          unsigned int page_shift)
>  {
> +       return 0;
>  }
>
>  static inline void kmsan_iounmap_page_range(unsigned long start,
> diff --git a/mm/kmsan/hooks.c b/mm/kmsan/hooks.c
> index 3807502766a3e..ec0da72e65aa0 100644
> --- a/mm/kmsan/hooks.c
> +++ b/mm/kmsan/hooks.c
> @@ -148,35 +148,74 @@ void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end)
>   * into the virtual memory. If those physical pages already had shadow/origin,
>   * those are ignored.
>   */
> -void kmsan_ioremap_page_range(unsigned long start, unsigned long end,
> -                             phys_addr_t phys_addr, pgprot_t prot,
> -                             unsigned int page_shift)
> +int kmsan_ioremap_page_range(unsigned long start, unsigned long end,
> +                            phys_addr_t phys_addr, pgprot_t prot,
> +                            unsigned int page_shift)
>  {
>         gfp_t gfp_mask = GFP_KERNEL | __GFP_ZERO;
>         struct page *shadow, *origin;
>         unsigned long off = 0;
> -       int nr;
> +       int nr, err = 0, clean = 0, mapped;
>
>         if (!kmsan_enabled || kmsan_in_runtime())
> -               return;
> +               return 0;
>
>         nr = (end - start) / PAGE_SIZE;
>         kmsan_enter_runtime();
> -       for (int i = 0; i < nr; i++, off += PAGE_SIZE) {
> +       for (int i = 0; i < nr; i++, off += PAGE_SIZE, clean = i) {
>                 shadow = alloc_pages(gfp_mask, 1);
>                 origin = alloc_pages(gfp_mask, 1);
> -               __vmap_pages_range_noflush(
> +               if (!shadow || !origin) {
> +                       err = -ENOMEM;
> +                       goto ret;
> +               }
> +               mapped = __vmap_pages_range_noflush(
>                         vmalloc_shadow(start + off),
>                         vmalloc_shadow(start + off + PAGE_SIZE), prot, &shadow,
>                         PAGE_SHIFT);
> -               __vmap_pages_range_noflush(
> +               if (mapped) {
> +                       err = mapped;
> +                       goto ret;
> +               }
> +               shadow = NULL;
> +               mapped = __vmap_pages_range_noflush(
>                         vmalloc_origin(start + off),
>                         vmalloc_origin(start + off + PAGE_SIZE), prot, &origin,
>                         PAGE_SHIFT);
> +               if (mapped) {
> +                       __vunmap_range_noflush(
> +                               vmalloc_shadow(start + off),
> +                               vmalloc_shadow(start + off + PAGE_SIZE));
> +                       err = mapped;
> +                       goto ret;
> +               }
> +               origin = NULL;
> +       }
> +       /* Page mapping loop finished normally, nothing to clean up. */
> +       clean = 0;
> +
> +ret:
> +       if (clean > 0) {
> +               /*
> +                * Something went wrong. Clean up shadow/origin pages allocated
> +                * on the last loop iteration, then delete mappings created
> +                * during the previous iterations.
> +                */
> +               if (shadow)
> +                       __free_pages(shadow, 1);
> +               if (origin)
> +                       __free_pages(origin, 1);
> +               __vunmap_range_noflush(
> +                       vmalloc_shadow(start),
> +                       vmalloc_shadow(start + clean * PAGE_SIZE));
> +               __vunmap_range_noflush(
> +                       vmalloc_origin(start),
> +                       vmalloc_origin(start + clean * PAGE_SIZE));
>         }
>         flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
>         flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
>         kmsan_leave_runtime();
> +       return err;
>  }
>
>  void kmsan_iounmap_page_range(unsigned long start, unsigned long end)
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index 1355d95cce1ca..31ff782d368b0 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -313,8 +313,8 @@ int ioremap_page_range(unsigned long addr, unsigned long end,
>                                  ioremap_max_page_shift);
>         flush_cache_vmap(addr, end);
>         if (!err)
> -               kmsan_ioremap_page_range(addr, end, phys_addr, prot,
> -                                        ioremap_max_page_shift);
> +               err = kmsan_ioremap_page_range(addr, end, phys_addr, prot,
> +                                              ioremap_max_page_shift);
>         return err;
>  }
>
> --
> 2.40.0.577.gac1e443424-goog
>

* Re: [PATCH v2 3/4] mm: kmsan: apply __must_check to non-void functions
  2023-04-13 13:12 ` [PATCH v2 3/4] mm: kmsan: apply __must_check to non-void functions Alexander Potapenko
@ 2023-04-18 10:10   ` Marco Elver
  0 siblings, 0 replies; 8+ messages in thread
From: Marco Elver @ 2023-04-18 10:10 UTC (permalink / raw)
  To: Alexander Potapenko
  Cc: urezki, hch, linux-kernel, linux-mm, akpm, dvyukov, kasan-dev

On Thu, 13 Apr 2023 at 15:12, 'Alexander Potapenko' via kasan-dev
<kasan-dev@googlegroups.com> wrote:
>
> Non-void KMSAN hooks may return error codes that indicate that KMSAN
> failed to reflect the changed memory state in the metadata (e.g. it
> could not create the necessary memory mappings). In such cases the
> callers should handle the errors to prevent the tool from using the
> inconsistent metadata in the future.
>
> We mark non-void hooks with __must_check so that error handling is not
> skipped.
>
> Signed-off-by: Alexander Potapenko <glider@google.com>

Reviewed-by: Marco Elver <elver@google.com>

> ---
>  include/linux/kmsan.h | 43 ++++++++++++++++++++++---------------------
>  1 file changed, 22 insertions(+), 21 deletions(-)
>
> diff --git a/include/linux/kmsan.h b/include/linux/kmsan.h
> index 30b17647ce3c7..e0c23a32cdf01 100644
> --- a/include/linux/kmsan.h
> +++ b/include/linux/kmsan.h
> @@ -54,7 +54,8 @@ void __init kmsan_init_runtime(void);
>   * Freed pages are either returned to buddy allocator or held back to be used
>   * as metadata pages.
>   */
> -bool __init kmsan_memblock_free_pages(struct page *page, unsigned int order);
> +bool __init __must_check kmsan_memblock_free_pages(struct page *page,
> +                                                  unsigned int order);
>
>  /**
>   * kmsan_alloc_page() - Notify KMSAN about an alloc_pages() call.
> @@ -137,9 +138,11 @@ void kmsan_kfree_large(const void *ptr);
>   * vmalloc metadata address range. Returns 0 on success, callers must check
>   * for non-zero return value.
>   */
> -int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
> -                                  pgprot_t prot, struct page **pages,
> -                                  unsigned int page_shift);
> +int __must_check kmsan_vmap_pages_range_noflush(unsigned long start,
> +                                               unsigned long end,
> +                                               pgprot_t prot,
> +                                               struct page **pages,
> +                                               unsigned int page_shift);
>
>  /**
>   * kmsan_vunmap_kernel_range_noflush() - Notify KMSAN about a vunmap.
> @@ -163,9 +166,9 @@ void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end);
>   * virtual memory. Returns 0 on success, callers must check for non-zero return
>   * value.
>   */
> -int kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
> -                            phys_addr_t phys_addr, pgprot_t prot,
> -                            unsigned int page_shift);
> +int __must_check kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
> +                                         phys_addr_t phys_addr, pgprot_t prot,
> +                                         unsigned int page_shift);
>
>  /**
>   * kmsan_iounmap_page_range() - Notify KMSAN about a iounmap_page_range() call.
> @@ -237,8 +240,8 @@ static inline void kmsan_init_runtime(void)
>  {
>  }
>
> -static inline bool kmsan_memblock_free_pages(struct page *page,
> -                                            unsigned int order)
> +static inline bool __must_check kmsan_memblock_free_pages(struct page *page,
> +                                                         unsigned int order)
>  {
>         return true;
>  }
> @@ -251,10 +254,9 @@ static inline void kmsan_task_exit(struct task_struct *task)
>  {
>  }
>
> -static inline int kmsan_alloc_page(struct page *page, unsigned int order,
> -                                  gfp_t flags)
> +static inline void kmsan_alloc_page(struct page *page, unsigned int order,
> +                                   gfp_t flags)
>  {
> -       return 0;
>  }
>
>  static inline void kmsan_free_page(struct page *page, unsigned int order)
> @@ -283,11 +285,9 @@ static inline void kmsan_kfree_large(const void *ptr)
>  {
>  }
>
> -static inline int kmsan_vmap_pages_range_noflush(unsigned long start,
> -                                                unsigned long end,
> -                                                pgprot_t prot,
> -                                                struct page **pages,
> -                                                unsigned int page_shift)
> +static inline int __must_check kmsan_vmap_pages_range_noflush(
> +       unsigned long start, unsigned long end, pgprot_t prot,
> +       struct page **pages, unsigned int page_shift)
>  {
>         return 0;
>  }
> @@ -297,10 +297,11 @@ static inline void kmsan_vunmap_range_noflush(unsigned long start,
>  {
>  }
>
> -static inline int kmsan_ioremap_page_range(unsigned long start,
> -                                          unsigned long end,
> -                                          phys_addr_t phys_addr, pgprot_t prot,
> -                                          unsigned int page_shift)
> +static inline int __must_check kmsan_ioremap_page_range(unsigned long start,
> +                                                       unsigned long end,
> +                                                       phys_addr_t phys_addr,
> +                                                       pgprot_t prot,
> +                                                       unsigned int page_shift)
>  {
>         return 0;
>  }
> --
> 2.40.0.577.gac1e443424-goog
>

* Re: [PATCH v2 4/4] mm: apply __must_check to vmap_pages_range_noflush()
  2023-04-13 13:12 ` [PATCH v2 4/4] mm: apply __must_check to vmap_pages_range_noflush() Alexander Potapenko
@ 2023-04-18 10:10   ` Marco Elver
  0 siblings, 0 replies; 8+ messages in thread
From: Marco Elver @ 2023-04-18 10:10 UTC (permalink / raw)
  To: Alexander Potapenko
  Cc: urezki, hch, linux-kernel, linux-mm, akpm, dvyukov, kasan-dev,
	Dipanjan Das

On Thu, 13 Apr 2023 at 15:12, Alexander Potapenko <glider@google.com> wrote:
>
> To prevent errors when vmap_pages_range_noflush() or
> __vmap_pages_range_noflush() silently fail (see the link below for an
> example), annotate them with __must_check so that the callers do not
> unconditionally assume the mapping succeeded.
>
> Reported-by: Dipanjan Das <mail.dipanjan.das@gmail.com>
> Link: https://lore.kernel.org/linux-mm/CANX2M5ZRrRA64k0hOif02TjmY9kbbO2aCBPyq79es34RXZ=cAw@mail.gmail.com/
> Signed-off-by: Alexander Potapenko <glider@google.com>

Reviewed-by: Marco Elver <elver@google.com>

> ---
>  mm/internal.h | 14 +++++++-------
>  1 file changed, 7 insertions(+), 7 deletions(-)
>
> diff --git a/mm/internal.h b/mm/internal.h
> index 7920a8b7982ec..a646cf7c41e8a 100644
> --- a/mm/internal.h
> +++ b/mm/internal.h
> @@ -833,20 +833,20 @@ size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
>   * mm/vmalloc.c
>   */
>  #ifdef CONFIG_MMU
> -int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
> -                pgprot_t prot, struct page **pages, unsigned int page_shift);
> +int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
> +               pgprot_t prot, struct page **pages, unsigned int page_shift);
>  #else
>  static inline
> -int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
> -                pgprot_t prot, struct page **pages, unsigned int page_shift)
> +int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
> +               pgprot_t prot, struct page **pages, unsigned int page_shift)
>  {
>         return -EINVAL;
>  }
>  #endif
>
> -int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
> -                              pgprot_t prot, struct page **pages,
> -                              unsigned int page_shift);
> +int __must_check __vmap_pages_range_noflush(
> +       unsigned long addr, unsigned long end, pgprot_t prot,
> +       struct page **pages, unsigned int page_shift);
>
>  void vunmap_range_noflush(unsigned long start, unsigned long end);
>
> --
> 2.40.0.577.gac1e443424-goog
>
