From: Andrew Morton <akpm@linux-foundation.org>
To: a.p.zijlstra@chello.nl, akpm@linux-foundation.org,
	andreyknvl@google.com, aryabinin@virtuozzo.com,
	brendanhiggins@google.com, davidgow@google.com,
	dvyukov@google.com, juri.lelli@redhat.com, linux-mm@kvack.org,
	mingo@redhat.com, mm-commits@vger.kernel.org, shuah@kernel.org,
	torvalds@linux-foundation.org, trishalfonso@google.com,
	vincent.guittot@linaro.org
Subject: [patch 122/181] KASAN: port KASAN Tests to KUnit
Date: Tue, 13 Oct 2020 16:55:06 -0700
Message-ID: <20201013235506.qGSCuNQiT%akpm@linux-foundation.org>
In-Reply-To: <20201013164658.3bfd96cc224d8923e66a9f4e@linux-foundation.org>

From: Patricia Alfonso <trishalfonso@google.com>
Subject: KASAN: port KASAN Tests to KUnit

Transfer all previous KASAN tests to KUnit so they can be run more easily.
Using kunit_tool, developers can run these tests alongside their other KUnit
tests and see "pass" or "fail" together with the expected KASAN report,
instead of having to parse each KASAN report by hand to verify KASAN
functionality.  All KASAN reports are still printed to dmesg.

Stack tests do not work properly when KASAN_STACK is disabled, so those tests
are guarded by an "if IS_ENABLED(CONFIG_KASAN_STACK)" check and only run when
stack instrumentation is enabled.  If KASAN_STACK is not enabled, KUnit
prints a message letting the user know the test was not run because it
requires KASAN_STACK.

copy_user_test and kasan_rcu_uaf cannot be run in KUnit, so those tests are
moved to a separate file, lib/test_kasan_module.c, which can still be built
and run as a module, as before.
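
A minimal sketch of exercising that module (the build and load commands are
assumptions; the module name and log prefix are taken from the patch below):

    # build with CONFIG_KASAN=y and CONFIG_TEST_KASAN_MODULE=m, then:
    $ insmod lib/test_kasan_module.ko
    $ dmesg | grep "kasan test"

As before, the resulting KASAN reports have to be checked in the kernel log
by hand rather than being evaluated automatically by KUnit.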

[trishalfonso@google.com: v14]
  Link: https://lkml.kernel.org/r/20200915035828.570483-4-davidgow@google.com
Link: https://lkml.kernel.org/r/20200910070331.3358048-4-davidgow@google.com
Signed-off-by: Patricia Alfonso <trishalfonso@google.com>
Signed-off-by: David Gow <davidgow@google.com>
Reviewed-by: Brendan Higgins <brendanhiggins@google.com>
Reviewed-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Dmitry Vyukov <dvyukov@google.com>
Tested-by: Andrey Konovalov <andreyknvl@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---

 lib/Kconfig.kasan       |   22 -
 lib/Makefile            |    4 
 lib/test_kasan.c        |  685 ++++++++++++++------------------------
 lib/test_kasan_module.c |  111 ++++++
 4 files changed, 385 insertions(+), 437 deletions(-)

--- a/lib/Kconfig.kasan~kasan-port-kasan-tests-to-kunit
+++ a/lib/Kconfig.kasan
@@ -166,12 +166,24 @@ config KASAN_VMALLOC
 	  for KASAN to detect more sorts of errors (and to support vmapped
 	  stacks), but at the cost of higher memory usage.
 
-config TEST_KASAN
-	tristate "Module for testing KASAN for bug detection"
-	depends on m
+config KASAN_KUNIT_TEST
+	tristate "KUnit-compatible tests of KASAN bug detection capabilities" if !KUNIT_ALL_TESTS
+	depends on KASAN && KUNIT
+	default KUNIT_ALL_TESTS
 	help
-	  This is a test module doing various nasty things like
-	  out of bounds accesses, use after free. It is useful for testing
+	  This is a KUnit test suite doing various nasty things like
+	  out of bounds and use after free accesses. It is useful for testing
 	  kernel debugging features like KASAN.
 
+	  For more information on KUnit and unit tests in general, please refer
+	  to the KUnit documentation in Documentation/dev-tools/kunit
+
+config TEST_KASAN_MODULE
+	tristate "KUnit-incompatible tests of KASAN bug detection capabilities"
+	depends on m && KASAN
+	help
+	  This is a part of the KASAN test suite that is incompatible with
+	  KUnit. Currently includes tests that do bad copy_from/to_user
+	  accesses.
+
 endif # KASAN
--- a/lib/Makefile~kasan-port-kasan-tests-to-kunit
+++ a/lib/Makefile
@@ -65,9 +65,11 @@ CFLAGS_test_bitops.o += -Werror
 obj-$(CONFIG_TEST_SYSCTL) += test_sysctl.o
 obj-$(CONFIG_TEST_HASH) += test_hash.o test_siphash.o
 obj-$(CONFIG_TEST_IDA) += test_ida.o
-obj-$(CONFIG_TEST_KASAN) += test_kasan.o
+obj-$(CONFIG_KASAN_KUNIT_TEST) += test_kasan.o
 CFLAGS_test_kasan.o += -fno-builtin
 CFLAGS_test_kasan.o += $(call cc-disable-warning, vla)
+obj-$(CONFIG_TEST_KASAN_MODULE) += test_kasan_module.o
+CFLAGS_test_kasan_module.o += -fno-builtin
 obj-$(CONFIG_TEST_UBSAN) += test_ubsan.o
 CFLAGS_test_ubsan.o += $(call cc-disable-warning, vla)
 UBSAN_SANITIZE_test_ubsan.o := y
--- a/lib/test_kasan.c~kasan-port-kasan-tests-to-kunit
+++ a/lib/test_kasan.c
@@ -5,8 +5,6 @@
  * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
  */
 
-#define pr_fmt(fmt) "kasan test: %s " fmt, __func__
-
 #include <linux/bitops.h>
 #include <linux/delay.h>
 #include <linux/kasan.h>
@@ -77,416 +75,327 @@ static void kasan_test_exit(struct kunit
 			fail_data.report_found); \
 } while (0)
 
-
-
-/*
- * Note: test functions are marked noinline so that their names appear in
- * reports.
- */
-static noinline void __init kmalloc_oob_right(void)
+static void kmalloc_oob_right(struct kunit *test)
 {
 	char *ptr;
 	size_t size = 123;
 
-	pr_info("out-of-bounds to right\n");
 	ptr = kmalloc(size, GFP_KERNEL);
-	if (!ptr) {
-		pr_err("Allocation failed\n");
-		return;
-	}
-
-	ptr[size + OOB_TAG_OFF] = 'x';
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
+	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 'x');
 	kfree(ptr);
 }
 
-static noinline void __init kmalloc_oob_left(void)
+static void kmalloc_oob_left(struct kunit *test)
 {
 	char *ptr;
 	size_t size = 15;
 
-	pr_info("out-of-bounds to left\n");
 	ptr = kmalloc(size, GFP_KERNEL);
-	if (!ptr) {
-		pr_err("Allocation failed\n");
-		return;
-	}
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
-	*ptr = *(ptr - 1);
+	KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1));
 	kfree(ptr);
 }
 
-static noinline void __init kmalloc_node_oob_right(void)
+static void kmalloc_node_oob_right(struct kunit *test)
 {
 	char *ptr;
 	size_t size = 4096;
 
-	pr_info("kmalloc_node(): out-of-bounds to right\n");
 	ptr = kmalloc_node(size, GFP_KERNEL, 0);
-	if (!ptr) {
-		pr_err("Allocation failed\n");
-		return;
-	}
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
-	ptr[size] = 0;
+	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
 	kfree(ptr);
 }
 
-#ifdef CONFIG_SLUB
-static noinline void __init kmalloc_pagealloc_oob_right(void)
+static void kmalloc_pagealloc_oob_right(struct kunit *test)
 {
 	char *ptr;
 	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
 
+	if (!IS_ENABLED(CONFIG_SLUB)) {
+		kunit_info(test, "CONFIG_SLUB is not enabled.");
+		return;
+	}
+
 	/* Allocate a chunk that does not fit into a SLUB cache to trigger
 	 * the page allocator fallback.
 	 */
-	pr_info("kmalloc pagealloc allocation: out-of-bounds to right\n");
 	ptr = kmalloc(size, GFP_KERNEL);
-	if (!ptr) {
-		pr_err("Allocation failed\n");
-		return;
-	}
-
-	ptr[size + OOB_TAG_OFF] = 0;
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
+	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0);
 	kfree(ptr);
 }
 
-static noinline void __init kmalloc_pagealloc_uaf(void)
+static void kmalloc_pagealloc_uaf(struct kunit *test)
 {
 	char *ptr;
 	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
 
-	pr_info("kmalloc pagealloc allocation: use-after-free\n");
-	ptr = kmalloc(size, GFP_KERNEL);
-	if (!ptr) {
-		pr_err("Allocation failed\n");
+	if (!IS_ENABLED(CONFIG_SLUB)) {
+		kunit_info(test, "CONFIG_SLUB is not enabled.");
 		return;
 	}
 
+	ptr = kmalloc(size, GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+
 	kfree(ptr);
-	ptr[0] = 0;
+	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = 0);
 }
 
-static noinline void __init kmalloc_pagealloc_invalid_free(void)
+static void kmalloc_pagealloc_invalid_free(struct kunit *test)
 {
 	char *ptr;
 	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
 
-	pr_info("kmalloc pagealloc allocation: invalid-free\n");
-	ptr = kmalloc(size, GFP_KERNEL);
-	if (!ptr) {
-		pr_err("Allocation failed\n");
+	if (!IS_ENABLED(CONFIG_SLUB)) {
+		kunit_info(test, "CONFIG_SLUB is not enabled.");
 		return;
 	}
 
-	kfree(ptr + 1);
+	ptr = kmalloc(size, GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+
+	KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));
 }
-#endif
 
-static noinline void __init kmalloc_large_oob_right(void)
+static void kmalloc_large_oob_right(struct kunit *test)
 {
 	char *ptr;
 	size_t size = KMALLOC_MAX_CACHE_SIZE - 256;
 	/* Allocate a chunk that is large enough, but still fits into a slab
 	 * and does not trigger the page allocator fallback in SLUB.
 	 */
-	pr_info("kmalloc large allocation: out-of-bounds to right\n");
 	ptr = kmalloc(size, GFP_KERNEL);
-	if (!ptr) {
-		pr_err("Allocation failed\n");
-		return;
-	}
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
-	ptr[size] = 0;
+	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
 	kfree(ptr);
 }
 
-static noinline void __init kmalloc_oob_krealloc_more(void)
+static void kmalloc_oob_krealloc_more(struct kunit *test)
 {
 	char *ptr1, *ptr2;
 	size_t size1 = 17;
 	size_t size2 = 19;
 
-	pr_info("out-of-bounds after krealloc more\n");
 	ptr1 = kmalloc(size1, GFP_KERNEL);
-	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
-	if (!ptr1 || !ptr2) {
-		pr_err("Allocation failed\n");
-		kfree(ptr1);
-		kfree(ptr2);
-		return;
-	}
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
 
-	ptr2[size2 + OOB_TAG_OFF] = 'x';
+	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
 
+	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2 + OOB_TAG_OFF] = 'x');
 	kfree(ptr2);
 }
 
-static noinline void __init kmalloc_oob_krealloc_less(void)
+static void kmalloc_oob_krealloc_less(struct kunit *test)
 {
 	char *ptr1, *ptr2;
 	size_t size1 = 17;
 	size_t size2 = 15;
 
-	pr_info("out-of-bounds after krealloc less\n");
 	ptr1 = kmalloc(size1, GFP_KERNEL);
-	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
-	if (!ptr1 || !ptr2) {
-		pr_err("Allocation failed\n");
-		kfree(ptr1);
-		return;
-	}
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
 
-	ptr2[size2 + OOB_TAG_OFF] = 'x';
+	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
 
+	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2 + OOB_TAG_OFF] = 'x');
 	kfree(ptr2);
 }
 
-static noinline void __init kmalloc_oob_16(void)
+static void kmalloc_oob_16(struct kunit *test)
 {
 	struct {
 		u64 words[2];
 	} *ptr1, *ptr2;
 
-	pr_info("kmalloc out-of-bounds for 16-bytes access\n");
 	ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
+
 	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
-	if (!ptr1 || !ptr2) {
-		pr_err("Allocation failed\n");
-		kfree(ptr1);
-		kfree(ptr2);
-		return;
-	}
-	*ptr1 = *ptr2;
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
+
+	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
 	kfree(ptr1);
 	kfree(ptr2);
 }
 
-static noinline void __init kmalloc_oob_memset_2(void)
+static void kmalloc_oob_memset_2(struct kunit *test)
 {
 	char *ptr;
 	size_t size = 8;
 
-	pr_info("out-of-bounds in memset2\n");
 	ptr = kmalloc(size, GFP_KERNEL);
-	if (!ptr) {
-		pr_err("Allocation failed\n");
-		return;
-	}
-
-	memset(ptr + 7 + OOB_TAG_OFF, 0, 2);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
+	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 7 + OOB_TAG_OFF, 0, 2));
 	kfree(ptr);
 }
 
-static noinline void __init kmalloc_oob_memset_4(void)
+static void kmalloc_oob_memset_4(struct kunit *test)
 {
 	char *ptr;
 	size_t size = 8;
 
-	pr_info("out-of-bounds in memset4\n");
 	ptr = kmalloc(size, GFP_KERNEL);
-	if (!ptr) {
-		pr_err("Allocation failed\n");
-		return;
-	}
-
-	memset(ptr + 5 + OOB_TAG_OFF, 0, 4);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
+	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 5 + OOB_TAG_OFF, 0, 4));
 	kfree(ptr);
 }
 
 
-static noinline void __init kmalloc_oob_memset_8(void)
+static void kmalloc_oob_memset_8(struct kunit *test)
 {
 	char *ptr;
 	size_t size = 8;
 
-	pr_info("out-of-bounds in memset8\n");
 	ptr = kmalloc(size, GFP_KERNEL);
-	if (!ptr) {
-		pr_err("Allocation failed\n");
-		return;
-	}
-
-	memset(ptr + 1 + OOB_TAG_OFF, 0, 8);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
+	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 1 + OOB_TAG_OFF, 0, 8));
 	kfree(ptr);
 }
 
-static noinline void __init kmalloc_oob_memset_16(void)
+static void kmalloc_oob_memset_16(struct kunit *test)
 {
 	char *ptr;
 	size_t size = 16;
 
-	pr_info("out-of-bounds in memset16\n");
 	ptr = kmalloc(size, GFP_KERNEL);
-	if (!ptr) {
-		pr_err("Allocation failed\n");
-		return;
-	}
-
-	memset(ptr + 1 + OOB_TAG_OFF, 0, 16);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
+	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 1 + OOB_TAG_OFF, 0, 16));
 	kfree(ptr);
 }
 
-static noinline void __init kmalloc_oob_in_memset(void)
+static void kmalloc_oob_in_memset(struct kunit *test)
 {
 	char *ptr;
 	size_t size = 666;
 
-	pr_info("out-of-bounds in memset\n");
 	ptr = kmalloc(size, GFP_KERNEL);
-	if (!ptr) {
-		pr_err("Allocation failed\n");
-		return;
-	}
-
-	memset(ptr, 0, size + 5 + OOB_TAG_OFF);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
+	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size + 5 + OOB_TAG_OFF));
 	kfree(ptr);
 }
 
-static noinline void __init kmalloc_memmove_invalid_size(void)
+static void kmalloc_memmove_invalid_size(struct kunit *test)
 {
 	char *ptr;
 	size_t size = 64;
 	volatile size_t invalid_size = -2;
 
-	pr_info("invalid size in memmove\n");
 	ptr = kmalloc(size, GFP_KERNEL);
-	if (!ptr) {
-		pr_err("Allocation failed\n");
-		return;
-	}
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
 	memset((char *)ptr, 0, 64);
-	memmove((char *)ptr, (char *)ptr + 4, invalid_size);
+
+	KUNIT_EXPECT_KASAN_FAIL(test,
+		memmove((char *)ptr, (char *)ptr + 4, invalid_size));
 	kfree(ptr);
 }
 
-static noinline void __init kmalloc_uaf(void)
+static void kmalloc_uaf(struct kunit *test)
 {
 	char *ptr;
 	size_t size = 10;
 
-	pr_info("use-after-free\n");
 	ptr = kmalloc(size, GFP_KERNEL);
-	if (!ptr) {
-		pr_err("Allocation failed\n");
-		return;
-	}
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
 	kfree(ptr);
-	*(ptr + 8) = 'x';
+	KUNIT_EXPECT_KASAN_FAIL(test, *(ptr + 8) = 'x');
 }
 
-static noinline void __init kmalloc_uaf_memset(void)
+static void kmalloc_uaf_memset(struct kunit *test)
 {
 	char *ptr;
 	size_t size = 33;
 
-	pr_info("use-after-free in memset\n");
 	ptr = kmalloc(size, GFP_KERNEL);
-	if (!ptr) {
-		pr_err("Allocation failed\n");
-		return;
-	}
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
 	kfree(ptr);
-	memset(ptr, 0, size);
+	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size));
 }
 
-static noinline void __init kmalloc_uaf2(void)
+static void kmalloc_uaf2(struct kunit *test)
 {
 	char *ptr1, *ptr2;
 	size_t size = 43;
 
-	pr_info("use-after-free after another kmalloc\n");
 	ptr1 = kmalloc(size, GFP_KERNEL);
-	if (!ptr1) {
-		pr_err("Allocation failed\n");
-		return;
-	}
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
 
 	kfree(ptr1);
+
 	ptr2 = kmalloc(size, GFP_KERNEL);
-	if (!ptr2) {
-		pr_err("Allocation failed\n");
-		return;
-	}
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
+
+	KUNIT_EXPECT_KASAN_FAIL(test, ptr1[40] = 'x');
+	KUNIT_EXPECT_PTR_NE(test, ptr1, ptr2);
 
-	ptr1[40] = 'x';
-	if (ptr1 == ptr2)
-		pr_err("Could not detect use-after-free: ptr1 == ptr2\n");
 	kfree(ptr2);
 }
 
-static noinline void __init kfree_via_page(void)
+static void kfree_via_page(struct kunit *test)
 {
 	char *ptr;
 	size_t size = 8;
 	struct page *page;
 	unsigned long offset;
 
-	pr_info("invalid-free false positive (via page)\n");
 	ptr = kmalloc(size, GFP_KERNEL);
-	if (!ptr) {
-		pr_err("Allocation failed\n");
-		return;
-	}
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
 	page = virt_to_page(ptr);
 	offset = offset_in_page(ptr);
 	kfree(page_address(page) + offset);
 }
 
-static noinline void __init kfree_via_phys(void)
+static void kfree_via_phys(struct kunit *test)
 {
 	char *ptr;
 	size_t size = 8;
 	phys_addr_t phys;
 
-	pr_info("invalid-free false positive (via phys)\n");
 	ptr = kmalloc(size, GFP_KERNEL);
-	if (!ptr) {
-		pr_err("Allocation failed\n");
-		return;
-	}
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
 	phys = virt_to_phys(ptr);
 	kfree(phys_to_virt(phys));
 }
 
-static noinline void __init kmem_cache_oob(void)
+static void kmem_cache_oob(struct kunit *test)
 {
 	char *p;
 	size_t size = 200;
 	struct kmem_cache *cache = kmem_cache_create("test_cache",
 						size, 0,
 						0, NULL);
-	if (!cache) {
-		pr_err("Cache allocation failed\n");
-		return;
-	}
-	pr_info("out-of-bounds in kmem_cache_alloc\n");
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
 	p = kmem_cache_alloc(cache, GFP_KERNEL);
 	if (!p) {
-		pr_err("Allocation failed\n");
+		kunit_err(test, "Allocation failed: %s\n", __func__);
 		kmem_cache_destroy(cache);
 		return;
 	}
 
-	*p = p[size + OOB_TAG_OFF];
-
+	KUNIT_EXPECT_KASAN_FAIL(test, *p = p[size + OOB_TAG_OFF]);
 	kmem_cache_free(cache, p);
 	kmem_cache_destroy(cache);
 }
 
-static noinline void __init memcg_accounted_kmem_cache(void)
+static void memcg_accounted_kmem_cache(struct kunit *test)
 {
 	int i;
 	char *p;
@@ -494,12 +403,8 @@ static noinline void __init memcg_accoun
 	struct kmem_cache *cache;
 
 	cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL);
-	if (!cache) {
-		pr_err("Cache allocation failed\n");
-		return;
-	}
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
 
-	pr_info("allocate memcg accounted object\n");
 	/*
 	 * Several allocations with a delay to allow for lazy per memcg kmem
 	 * cache creation.
@@ -519,134 +424,93 @@ free_cache:
 
 static char global_array[10];
 
-static noinline void __init kasan_global_oob(void)
+static void kasan_global_oob(struct kunit *test)
 {
 	volatile int i = 3;
 	char *p = &global_array[ARRAY_SIZE(global_array) + i];
 
-	pr_info("out-of-bounds global variable\n");
-	*(volatile char *)p;
-}
-
-static noinline void __init kasan_stack_oob(void)
-{
-	char stack_array[10];
-	volatile int i = OOB_TAG_OFF;
-	char *p = &stack_array[ARRAY_SIZE(stack_array) + i];
-
-	pr_info("out-of-bounds on stack\n");
-	*(volatile char *)p;
+	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
 }
 
-static noinline void __init ksize_unpoisons_memory(void)
+static void ksize_unpoisons_memory(struct kunit *test)
 {
 	char *ptr;
 	size_t size = 123, real_size;
 
-	pr_info("ksize() unpoisons the whole allocated chunk\n");
 	ptr = kmalloc(size, GFP_KERNEL);
-	if (!ptr) {
-		pr_err("Allocation failed\n");
-		return;
-	}
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 	real_size = ksize(ptr);
 	/* This access doesn't trigger an error. */
 	ptr[size] = 'x';
 	/* This one does. */
-	ptr[real_size] = 'y';
+	KUNIT_EXPECT_KASAN_FAIL(test, ptr[real_size] = 'y');
 	kfree(ptr);
 }
 
-static noinline void __init copy_user_test(void)
+static void kasan_stack_oob(struct kunit *test)
 {
-	char *kmem;
-	char __user *usermem;
-	size_t size = 10;
-	int unused;
-
-	kmem = kmalloc(size, GFP_KERNEL);
-	if (!kmem)
-		return;
+	char stack_array[10];
+	volatile int i = OOB_TAG_OFF;
+	char *p = &stack_array[ARRAY_SIZE(stack_array) + i];
 
-	usermem = (char __user *)vm_mmap(NULL, 0, PAGE_SIZE,
-			    PROT_READ | PROT_WRITE | PROT_EXEC,
-			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
-	if (IS_ERR(usermem)) {
-		pr_err("Failed to allocate user memory\n");
-		kfree(kmem);
+	if (!IS_ENABLED(CONFIG_KASAN_STACK)) {
+		kunit_info(test, "CONFIG_KASAN_STACK is not enabled");
 		return;
 	}
 
-	pr_info("out-of-bounds in copy_from_user()\n");
-	unused = copy_from_user(kmem, usermem, size + 1 + OOB_TAG_OFF);
-
-	pr_info("out-of-bounds in copy_to_user()\n");
-	unused = copy_to_user(usermem, kmem, size + 1 + OOB_TAG_OFF);
-
-	pr_info("out-of-bounds in __copy_from_user()\n");
-	unused = __copy_from_user(kmem, usermem, size + 1 + OOB_TAG_OFF);
-
-	pr_info("out-of-bounds in __copy_to_user()\n");
-	unused = __copy_to_user(usermem, kmem, size + 1 + OOB_TAG_OFF);
-
-	pr_info("out-of-bounds in __copy_from_user_inatomic()\n");
-	unused = __copy_from_user_inatomic(kmem, usermem, size + 1 + OOB_TAG_OFF);
-
-	pr_info("out-of-bounds in __copy_to_user_inatomic()\n");
-	unused = __copy_to_user_inatomic(usermem, kmem, size + 1 + OOB_TAG_OFF);
-
-	pr_info("out-of-bounds in strncpy_from_user()\n");
-	unused = strncpy_from_user(kmem, usermem, size + 1 + OOB_TAG_OFF);
-
-	vm_munmap((unsigned long)usermem, PAGE_SIZE);
-	kfree(kmem);
+	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
 }
 
-static noinline void __init kasan_alloca_oob_left(void)
+static void kasan_alloca_oob_left(struct kunit *test)
 {
 	volatile int i = 10;
 	char alloca_array[i];
 	char *p = alloca_array - 1;
 
-	pr_info("out-of-bounds to left on alloca\n");
-	*(volatile char *)p;
+	if (!IS_ENABLED(CONFIG_KASAN_STACK)) {
+		kunit_info(test, "CONFIG_KASAN_STACK is not enabled");
+		return;
+	}
+
+	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
 }
 
-static noinline void __init kasan_alloca_oob_right(void)
+static void kasan_alloca_oob_right(struct kunit *test)
 {
 	volatile int i = 10;
 	char alloca_array[i];
 	char *p = alloca_array + i;
 
-	pr_info("out-of-bounds to right on alloca\n");
-	*(volatile char *)p;
+	if (!IS_ENABLED(CONFIG_KASAN_STACK)) {
+		kunit_info(test, "CONFIG_KASAN_STACK is not enabled");
+		return;
+	}
+
+	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
 }
 
-static noinline void __init kmem_cache_double_free(void)
+static void kmem_cache_double_free(struct kunit *test)
 {
 	char *p;
 	size_t size = 200;
 	struct kmem_cache *cache;
 
 	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
-	if (!cache) {
-		pr_err("Cache allocation failed\n");
-		return;
-	}
-	pr_info("double-free on heap object\n");
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
+
 	p = kmem_cache_alloc(cache, GFP_KERNEL);
 	if (!p) {
-		pr_err("Allocation failed\n");
+		kunit_err(test, "Allocation failed: %s\n", __func__);
 		kmem_cache_destroy(cache);
 		return;
 	}
 
 	kmem_cache_free(cache, p);
-	kmem_cache_free(cache, p);
+	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p));
 	kmem_cache_destroy(cache);
 }
 
-static noinline void __init kmem_cache_invalid_free(void)
+static void kmem_cache_invalid_free(struct kunit *test)
 {
 	char *p;
 	size_t size = 200;
@@ -654,20 +518,17 @@ static noinline void __init kmem_cache_i
 
 	cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
 				  NULL);
-	if (!cache) {
-		pr_err("Cache allocation failed\n");
-		return;
-	}
-	pr_info("invalid-free of heap object\n");
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
+
 	p = kmem_cache_alloc(cache, GFP_KERNEL);
 	if (!p) {
-		pr_err("Allocation failed\n");
+		kunit_err(test, "Allocation failed: %s\n", __func__);
 		kmem_cache_destroy(cache);
 		return;
 	}
 
 	/* Trigger invalid free, the object doesn't get freed */
-	kmem_cache_free(cache, p + 1);
+	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1));
 
 	/*
 	 * Properly free the object to prevent the "Objects remaining in
@@ -678,45 +539,63 @@ static noinline void __init kmem_cache_i
 	kmem_cache_destroy(cache);
 }
 
-static noinline void __init kasan_memchr(void)
+static void kasan_memchr(struct kunit *test)
 {
 	char *ptr;
 	size_t size = 24;
 
-	pr_info("out-of-bounds in memchr\n");
-	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
-	if (!ptr)
+	/* See https://bugzilla.kernel.org/show_bug.cgi?id=206337 */
+	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
+		kunit_info(test,
+			"str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT");
 		return;
+	}
+
+	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+
+	KUNIT_EXPECT_KASAN_FAIL(test,
+		kasan_ptr_result = memchr(ptr, '1', size + 1));
 
-	kasan_ptr_result = memchr(ptr, '1', size + 1);
 	kfree(ptr);
 }
 
-static noinline void __init kasan_memcmp(void)
+static void kasan_memcmp(struct kunit *test)
 {
 	char *ptr;
 	size_t size = 24;
 	int arr[9];
 
-	pr_info("out-of-bounds in memcmp\n");
-	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
-	if (!ptr)
+	/* See https://bugzilla.kernel.org/show_bug.cgi?id=206337 */
+	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
+		kunit_info(test,
+			"str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT");
 		return;
+	}
 
+	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 	memset(arr, 0, sizeof(arr));
-	kasan_int_result = memcmp(ptr, arr, size + 1);
+
+	KUNIT_EXPECT_KASAN_FAIL(test,
+		kasan_int_result = memcmp(ptr, arr, size+1));
 	kfree(ptr);
 }
 
-static noinline void __init kasan_strings(void)
+static void kasan_strings(struct kunit *test)
 {
 	char *ptr;
 	size_t size = 24;
 
-	pr_info("use-after-free in strchr\n");
-	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
-	if (!ptr)
+	/* See https://bugzilla.kernel.org/show_bug.cgi?id=206337 */
+	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
+		kunit_info(test,
+			"str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT");
 		return;
+	}
+
+	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
 	kfree(ptr);
 
@@ -727,220 +606,164 @@ static noinline void __init kasan_string
 	 * will likely point to zeroed byte.
 	 */
 	ptr += 16;
-	kasan_ptr_result = strchr(ptr, '1');
+	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strchr(ptr, '1'));
 
-	pr_info("use-after-free in strrchr\n");
-	kasan_ptr_result = strrchr(ptr, '1');
+	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strrchr(ptr, '1'));
 
-	pr_info("use-after-free in strcmp\n");
-	kasan_int_result = strcmp(ptr, "2");
+	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strcmp(ptr, "2"));
 
-	pr_info("use-after-free in strncmp\n");
-	kasan_int_result = strncmp(ptr, "2", 1);
+	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strncmp(ptr, "2", 1));
 
-	pr_info("use-after-free in strlen\n");
-	kasan_int_result = strlen(ptr);
+	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strlen(ptr));
 
-	pr_info("use-after-free in strnlen\n");
-	kasan_int_result = strnlen(ptr, 1);
+	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1));
 }
 
-static noinline void __init kasan_bitops(void)
+static void kasan_bitops(struct kunit *test)
 {
 	/*
 	 * Allocate 1 more byte, which causes kzalloc to round up to 16-bytes;
 	 * this way we do not actually corrupt other memory.
 	 */
 	long *bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
-	if (!bits)
-		return;
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);
 
 	/*
 	 * Below calls try to access bit within allocated memory; however, the
 	 * below accesses are still out-of-bounds, since bitops are defined to
 	 * operate on the whole long the bit is in.
 	 */
-	pr_info("out-of-bounds in set_bit\n");
-	set_bit(BITS_PER_LONG, bits);
+	KUNIT_EXPECT_KASAN_FAIL(test, set_bit(BITS_PER_LONG, bits));
 
-	pr_info("out-of-bounds in __set_bit\n");
-	__set_bit(BITS_PER_LONG, bits);
+	KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(BITS_PER_LONG, bits));
 
-	pr_info("out-of-bounds in clear_bit\n");
-	clear_bit(BITS_PER_LONG, bits);
+	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(BITS_PER_LONG, bits));
 
-	pr_info("out-of-bounds in __clear_bit\n");
-	__clear_bit(BITS_PER_LONG, bits);
+	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(BITS_PER_LONG, bits));
 
-	pr_info("out-of-bounds in clear_bit_unlock\n");
-	clear_bit_unlock(BITS_PER_LONG, bits);
+	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(BITS_PER_LONG, bits));
 
-	pr_info("out-of-bounds in __clear_bit_unlock\n");
-	__clear_bit_unlock(BITS_PER_LONG, bits);
+	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(BITS_PER_LONG, bits));
 
-	pr_info("out-of-bounds in change_bit\n");
-	change_bit(BITS_PER_LONG, bits);
+	KUNIT_EXPECT_KASAN_FAIL(test, change_bit(BITS_PER_LONG, bits));
 
-	pr_info("out-of-bounds in __change_bit\n");
-	__change_bit(BITS_PER_LONG, bits);
+	KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(BITS_PER_LONG, bits));
 
 	/*
 	 * Below calls try to access bit beyond allocated memory.
 	 */
-	pr_info("out-of-bounds in test_and_set_bit\n");
-	test_and_set_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+	KUNIT_EXPECT_KASAN_FAIL(test,
+		test_and_set_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
 
-	pr_info("out-of-bounds in __test_and_set_bit\n");
-	__test_and_set_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+	KUNIT_EXPECT_KASAN_FAIL(test,
+		__test_and_set_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
 
-	pr_info("out-of-bounds in test_and_set_bit_lock\n");
-	test_and_set_bit_lock(BITS_PER_LONG + BITS_PER_BYTE, bits);
+	KUNIT_EXPECT_KASAN_FAIL(test,
+		test_and_set_bit_lock(BITS_PER_LONG + BITS_PER_BYTE, bits));
 
-	pr_info("out-of-bounds in test_and_clear_bit\n");
-	test_and_clear_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+	KUNIT_EXPECT_KASAN_FAIL(test,
+		test_and_clear_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
 
-	pr_info("out-of-bounds in __test_and_clear_bit\n");
-	__test_and_clear_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+	KUNIT_EXPECT_KASAN_FAIL(test,
+		__test_and_clear_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
 
-	pr_info("out-of-bounds in test_and_change_bit\n");
-	test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+	KUNIT_EXPECT_KASAN_FAIL(test,
+		test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
 
-	pr_info("out-of-bounds in __test_and_change_bit\n");
-	__test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+	KUNIT_EXPECT_KASAN_FAIL(test,
+		__test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
 
-	pr_info("out-of-bounds in test_bit\n");
-	kasan_int_result = test_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+	KUNIT_EXPECT_KASAN_FAIL(test,
+		kasan_int_result =
+			test_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
 
 #if defined(clear_bit_unlock_is_negative_byte)
-	pr_info("out-of-bounds in clear_bit_unlock_is_negative_byte\n");
-	kasan_int_result = clear_bit_unlock_is_negative_byte(BITS_PER_LONG +
-		BITS_PER_BYTE, bits);
+	KUNIT_EXPECT_KASAN_FAIL(test,
+		kasan_int_result = clear_bit_unlock_is_negative_byte(
+			BITS_PER_LONG + BITS_PER_BYTE, bits));
 #endif
 	kfree(bits);
 }
 
-static noinline void __init kmalloc_double_kzfree(void)
+static void kmalloc_double_kzfree(struct kunit *test)
 {
 	char *ptr;
 	size_t size = 16;
 
-	pr_info("double-free (kfree_sensitive)\n");
 	ptr = kmalloc(size, GFP_KERNEL);
-	if (!ptr) {
-		pr_err("Allocation failed\n");
-		return;
-	}
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
 	kfree_sensitive(ptr);
-	kfree_sensitive(ptr);
+	KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr));
 }
 
-#ifdef CONFIG_KASAN_VMALLOC
-static noinline void __init vmalloc_oob(void)
+static void vmalloc_oob(struct kunit *test)
 {
 	void *area;
 
-	pr_info("vmalloc out-of-bounds\n");
+	if (!IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
+		kunit_info(test, "CONFIG_KASAN_VMALLOC is not enabled.");
+		return;
+	}
 
 	/*
 	 * We have to be careful not to hit the guard page.
 	 * The MMU will catch that and crash us.
 	 */
 	area = vmalloc(3000);
-	if (!area) {
-		pr_err("Allocation failed\n");
-		return;
-	}
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, area);
 
-	((volatile char *)area)[3100];
+	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)area)[3100]);
 	vfree(area);
 }
-#else
-static void __init vmalloc_oob(void) {}
-#endif
-
-static struct kasan_rcu_info {
-	int i;
-	struct rcu_head rcu;
-} *global_rcu_ptr;
-
-static noinline void __init kasan_rcu_reclaim(struct rcu_head *rp)
-{
-	struct kasan_rcu_info *fp = container_of(rp,
-						struct kasan_rcu_info, rcu);
-
-	kfree(fp);
-	fp->i = 1;
-}
-
-static noinline void __init kasan_rcu_uaf(void)
-{
-	struct kasan_rcu_info *ptr;
 
-	pr_info("use-after-free in kasan_rcu_reclaim\n");
-	ptr = kmalloc(sizeof(struct kasan_rcu_info), GFP_KERNEL);
-	if (!ptr) {
-		pr_err("Allocation failed\n");
-		return;
-	}
-
-	global_rcu_ptr = rcu_dereference_protected(ptr, NULL);
-	call_rcu(&global_rcu_ptr->rcu, kasan_rcu_reclaim);
-}
+static struct kunit_case kasan_kunit_test_cases[] = {
+	KUNIT_CASE(kmalloc_oob_right),
+	KUNIT_CASE(kmalloc_oob_left),
+	KUNIT_CASE(kmalloc_node_oob_right),
+	KUNIT_CASE(kmalloc_pagealloc_oob_right),
+	KUNIT_CASE(kmalloc_pagealloc_uaf),
+	KUNIT_CASE(kmalloc_pagealloc_invalid_free),
+	KUNIT_CASE(kmalloc_large_oob_right),
+	KUNIT_CASE(kmalloc_oob_krealloc_more),
+	KUNIT_CASE(kmalloc_oob_krealloc_less),
+	KUNIT_CASE(kmalloc_oob_16),
+	KUNIT_CASE(kmalloc_oob_in_memset),
+	KUNIT_CASE(kmalloc_oob_memset_2),
+	KUNIT_CASE(kmalloc_oob_memset_4),
+	KUNIT_CASE(kmalloc_oob_memset_8),
+	KUNIT_CASE(kmalloc_oob_memset_16),
+	KUNIT_CASE(kmalloc_memmove_invalid_size),
+	KUNIT_CASE(kmalloc_uaf),
+	KUNIT_CASE(kmalloc_uaf_memset),
+	KUNIT_CASE(kmalloc_uaf2),
+	KUNIT_CASE(kfree_via_page),
+	KUNIT_CASE(kfree_via_phys),
+	KUNIT_CASE(kmem_cache_oob),
+	KUNIT_CASE(memcg_accounted_kmem_cache),
+	KUNIT_CASE(kasan_global_oob),
+	KUNIT_CASE(kasan_stack_oob),
+	KUNIT_CASE(kasan_alloca_oob_left),
+	KUNIT_CASE(kasan_alloca_oob_right),
+	KUNIT_CASE(ksize_unpoisons_memory),
+	KUNIT_CASE(kmem_cache_double_free),
+	KUNIT_CASE(kmem_cache_invalid_free),
+	KUNIT_CASE(kasan_memchr),
+	KUNIT_CASE(kasan_memcmp),
+	KUNIT_CASE(kasan_strings),
+	KUNIT_CASE(kasan_bitops),
+	KUNIT_CASE(kmalloc_double_kzfree),
+	KUNIT_CASE(vmalloc_oob),
+	{}
+};
+
+static struct kunit_suite kasan_kunit_test_suite = {
+	.name = "kasan",
+	.init = kasan_test_init,
+	.test_cases = kasan_kunit_test_cases,
+	.exit = kasan_test_exit,
+};
 
-static int __init kmalloc_tests_init(void)
-{
-	/*
-	 * Temporarily enable multi-shot mode. Otherwise, we'd only get a
-	 * report for the first case.
-	 */
-	bool multishot = kasan_save_enable_multi_shot();
-
-	kmalloc_oob_right();
-	kmalloc_oob_left();
-	kmalloc_node_oob_right();
-#ifdef CONFIG_SLUB
-	kmalloc_pagealloc_oob_right();
-	kmalloc_pagealloc_uaf();
-	kmalloc_pagealloc_invalid_free();
-#endif
-	kmalloc_large_oob_right();
-	kmalloc_oob_krealloc_more();
-	kmalloc_oob_krealloc_less();
-	kmalloc_oob_16();
-	kmalloc_oob_in_memset();
-	kmalloc_oob_memset_2();
-	kmalloc_oob_memset_4();
-	kmalloc_oob_memset_8();
-	kmalloc_oob_memset_16();
-	kmalloc_memmove_invalid_size();
-	kmalloc_uaf();
-	kmalloc_uaf_memset();
-	kmalloc_uaf2();
-	kfree_via_page();
-	kfree_via_phys();
-	kmem_cache_oob();
-	memcg_accounted_kmem_cache();
-	kasan_stack_oob();
-	kasan_global_oob();
-	kasan_alloca_oob_left();
-	kasan_alloca_oob_right();
-	ksize_unpoisons_memory();
-	copy_user_test();
-	kmem_cache_double_free();
-	kmem_cache_invalid_free();
-	kasan_memchr();
-	kasan_memcmp();
-	kasan_strings();
-	kasan_bitops();
-	kmalloc_double_kzfree();
-	vmalloc_oob();
-	kasan_rcu_uaf();
-
-	kasan_restore_multi_shot(multishot);
-
-	return -EAGAIN;
-}
+kunit_test_suite(kasan_kunit_test_suite);
 
-module_init(kmalloc_tests_init);
 MODULE_LICENSE("GPL");
--- /dev/null
+++ a/lib/test_kasan_module.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
+ */
+
+#define pr_fmt(fmt) "kasan test: %s " fmt, __func__
+
+#include <linux/mman.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include "../mm/kasan/kasan.h"
+
+#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_SHADOW_SCALE_SIZE)
+
+static noinline void __init copy_user_test(void)
+{
+	char *kmem;
+	char __user *usermem;
+	size_t size = 10;
+	int unused;
+
+	kmem = kmalloc(size, GFP_KERNEL);
+	if (!kmem)
+		return;
+
+	usermem = (char __user *)vm_mmap(NULL, 0, PAGE_SIZE,
+			    PROT_READ | PROT_WRITE | PROT_EXEC,
+			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
+	if (IS_ERR(usermem)) {
+		pr_err("Failed to allocate user memory\n");
+		kfree(kmem);
+		return;
+	}
+
+	pr_info("out-of-bounds in copy_from_user()\n");
+	unused = copy_from_user(kmem, usermem, size + 1 + OOB_TAG_OFF);
+
+	pr_info("out-of-bounds in copy_to_user()\n");
+	unused = copy_to_user(usermem, kmem, size + 1 + OOB_TAG_OFF);
+
+	pr_info("out-of-bounds in __copy_from_user()\n");
+	unused = __copy_from_user(kmem, usermem, size + 1 + OOB_TAG_OFF);
+
+	pr_info("out-of-bounds in __copy_to_user()\n");
+	unused = __copy_to_user(usermem, kmem, size + 1 + OOB_TAG_OFF);
+
+	pr_info("out-of-bounds in __copy_from_user_inatomic()\n");
+	unused = __copy_from_user_inatomic(kmem, usermem, size + 1 + OOB_TAG_OFF);
+
+	pr_info("out-of-bounds in __copy_to_user_inatomic()\n");
+	unused = __copy_to_user_inatomic(usermem, kmem, size + 1 + OOB_TAG_OFF);
+
+	pr_info("out-of-bounds in strncpy_from_user()\n");
+	unused = strncpy_from_user(kmem, usermem, size + 1 + OOB_TAG_OFF);
+
+	vm_munmap((unsigned long)usermem, PAGE_SIZE);
+	kfree(kmem);
+}
+
+static struct kasan_rcu_info {
+	int i;
+	struct rcu_head rcu;
+} *global_rcu_ptr;
+
+static noinline void __init kasan_rcu_reclaim(struct rcu_head *rp)
+{
+	struct kasan_rcu_info *fp = container_of(rp,
+						struct kasan_rcu_info, rcu);
+
+	kfree(fp);
+	fp->i = 1;
+}
+
+static noinline void __init kasan_rcu_uaf(void)
+{
+	struct kasan_rcu_info *ptr;
+
+	pr_info("use-after-free in kasan_rcu_reclaim\n");
+	ptr = kmalloc(sizeof(struct kasan_rcu_info), GFP_KERNEL);
+	if (!ptr) {
+		pr_err("Allocation failed\n");
+		return;
+	}
+
+	global_rcu_ptr = rcu_dereference_protected(ptr, NULL);
+	call_rcu(&global_rcu_ptr->rcu, kasan_rcu_reclaim);
+}
+
+
+static int __init test_kasan_module_init(void)
+{
+	/*
+	 * Temporarily enable multi-shot mode. Otherwise, we'd only get a
+	 * report for the first case.
+	 */
+	bool multishot = kasan_save_enable_multi_shot();
+
+	copy_user_test();
+	kasan_rcu_uaf();
+
+	kasan_restore_multi_shot(multishot);
+	return -EAGAIN;
+}
+
+module_init(test_kasan_module_init);
+MODULE_LICENSE("GPL");
_

