All of lore.kernel.org
 help / color / mirror / Atom feed
From: Marco Elver <elver@google.com>
To: Alexander Potapenko <glider@google.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>,
	Alexei Starovoitov <ast@kernel.org>,
	Andrew Morton <akpm@linux-foundation.org>,
	Andrey Konovalov <andreyknvl@google.com>,
	Andy Lutomirski <luto@kernel.org>, Arnd Bergmann <arnd@arndb.de>,
	Borislav Petkov <bp@alien8.de>, Christoph Hellwig <hch@lst.de>,
	Christoph Lameter <cl@linux.com>,
	David Rientjes <rientjes@google.com>,
	Dmitry Vyukov <dvyukov@google.com>,
	Eric Dumazet <edumazet@google.com>,
	Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
	Herbert Xu <herbert@gondor.apana.org.au>,
	Ilya Leoshkevich <iii@linux.ibm.com>,
	Ingo Molnar <mingo@redhat.com>, Jens Axboe <axboe@kernel.dk>,
	Joonsoo Kim <iamjoonsoo.kim@lge.com>,
	Kees Cook <keescook@chromium.org>,
	Mark Rutland <mark.rutland@arm.com>,
	Matthew Wilcox <willy@infradead.org>,
	"Michael S. Tsirkin" <mst@redhat.com>,
	Pekka Enberg <penberg@kernel.org>,
	Peter Zijlstra <peterz@infradead.org>,
	Petr Mladek <pmladek@suse.com>,
	Steven Rostedt <rostedt@goodmis.org>,
	Thomas Gleixner <tglx@linutronix.de>,
	Vasily Gorbik <gor@linux.ibm.com>,
	Vegard Nossum <vegard.nossum@oracle.com>,
	Vlastimil Babka <vbabka@suse.cz>,
	kasan-dev@googlegroups.com, linux-mm@kvack.org,
	linux-arch@vger.kernel.org, linux-kernel@vger.kernel.org
Subject: Re: [PATCH v4 25/45] kmsan: add tests for KMSAN
Date: Tue, 12 Jul 2022 16:16:32 +0200	[thread overview]
Message-ID: <CANpmjNPeW=pQ_rU5ACTpBX8W4TH4vdcDn=hqPhHGtYU96iHF0A@mail.gmail.com> (raw)
In-Reply-To: <20220701142310.2188015-26-glider@google.com>

)

On Fri, 1 Jul 2022 at 16:24, 'Alexander Potapenko' via kasan-dev
<kasan-dev@googlegroups.com> wrote:
>
> The testing module triggers KMSAN warnings in different cases and checks
> that the errors are properly reported, using console probes to capture
> the tool's output.
>
> Signed-off-by: Alexander Potapenko <glider@google.com>
> ---
> v2:
>  -- add memcpy tests
>
> v4:
>  -- change sizeof(type) to sizeof(*ptr)
>  -- add test expectations for CONFIG_KMSAN_CHECK_PARAM_RETVAL
>
> Link: https://linux-review.googlesource.com/id/I49c3f59014cc37fd13541c80beb0b75a75244650
> ---
>  lib/Kconfig.kmsan     |  12 +
>  mm/kmsan/Makefile     |   4 +
>  mm/kmsan/kmsan_test.c | 552 ++++++++++++++++++++++++++++++++++++++++++
>  3 files changed, 568 insertions(+)
>  create mode 100644 mm/kmsan/kmsan_test.c
>
> diff --git a/lib/Kconfig.kmsan b/lib/Kconfig.kmsan
> index 8f768d4034e3c..f56ed7f7c7090 100644
> --- a/lib/Kconfig.kmsan
> +++ b/lib/Kconfig.kmsan
> @@ -47,4 +47,16 @@ config KMSAN_CHECK_PARAM_RETVAL
>           may potentially report errors in corner cases when non-instrumented
>           functions call instrumented ones.
>
> +config KMSAN_KUNIT_TEST
> +       tristate "KMSAN integration test suite" if !KUNIT_ALL_TESTS
> +       default KUNIT_ALL_TESTS
> +       depends on TRACEPOINTS && KUNIT
> +       help
> +         Test suite for KMSAN, testing various error detection scenarios,
> +         and checking that reports are correctly output to console.
> +
> +         Say Y here if you want the test to be built into the kernel and run
> +         during boot; say M if you want the test to build as a module; say N
> +         if you are unsure.
> +
>  endif
> diff --git a/mm/kmsan/Makefile b/mm/kmsan/Makefile
> index 401acb1a491ce..98eab2856626f 100644
> --- a/mm/kmsan/Makefile
> +++ b/mm/kmsan/Makefile
> @@ -22,3 +22,7 @@ CFLAGS_init.o := $(CC_FLAGS_KMSAN_RUNTIME)
>  CFLAGS_instrumentation.o := $(CC_FLAGS_KMSAN_RUNTIME)
>  CFLAGS_report.o := $(CC_FLAGS_KMSAN_RUNTIME)
>  CFLAGS_shadow.o := $(CC_FLAGS_KMSAN_RUNTIME)
> +
> +obj-$(CONFIG_KMSAN_KUNIT_TEST) += kmsan_test.o
> +KMSAN_SANITIZE_kmsan_test.o := y
> +CFLAGS_kmsan_test.o += $(call cc-disable-warning, uninitialized)
> diff --git a/mm/kmsan/kmsan_test.c b/mm/kmsan/kmsan_test.c
> new file mode 100644
> index 0000000000000..1b8da71ae0d4f
> --- /dev/null
> +++ b/mm/kmsan/kmsan_test.c
> @@ -0,0 +1,552 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * Test cases for KMSAN.
> + * For each test case checks the presence (or absence) of generated reports.
> + * Relies on 'console' tracepoint to capture reports as they appear in the
> + * kernel log.
> + *
> + * Copyright (C) 2021-2022, Google LLC.
> + * Author: Alexander Potapenko <glider@google.com>
> + *
> + */
> +
> +#include <kunit/test.h>
> +#include "kmsan.h"
> +
> +#include <linux/jiffies.h>
> +#include <linux/kernel.h>
> +#include <linux/kmsan.h>
> +#include <linux/mm.h>
> +#include <linux/random.h>
> +#include <linux/slab.h>
> +#include <linux/spinlock.h>
> +#include <linux/string.h>
> +#include <linux/tracepoint.h>
> +#include <trace/events/printk.h>
> +
> +static DEFINE_PER_CPU(int, per_cpu_var);
> +
> +/* Report as observed from console. */
> +static struct {
> +       spinlock_t lock;
> +       bool available;
> +       bool ignore; /* Stop console output collection. */
> +       char header[256];
> +} observed = {
> +       .lock = __SPIN_LOCK_UNLOCKED(observed.lock),
> +};
> +
> +/* Probe for console output: obtains observed lines of interest. */
> +static void probe_console(void *ignore, const char *buf, size_t len)
> +{
> +       unsigned long flags;
> +
> +       if (observed.ignore)
> +               return;
> +       spin_lock_irqsave(&observed.lock, flags);
> +
> +       if (strnstr(buf, "BUG: KMSAN: ", len)) {
> +               /*
> +                * KMSAN report and related to the test.
> +                *
> +                * The provided @buf is not NUL-terminated; copy no more than
> +                * @len bytes and let strscpy() add the missing NUL-terminator.
> +                */
> +               strscpy(observed.header, buf,
> +                       min(len + 1, sizeof(observed.header)));
> +               WRITE_ONCE(observed.available, true);
> +               observed.ignore = true;
> +       }
> +       spin_unlock_irqrestore(&observed.lock, flags);
> +}
> +
> +/* Check if a report related to the test exists. */
> +static bool report_available(void)
> +{
> +       return READ_ONCE(observed.available);
> +}
> +
> +/* Information we expect in a report. */
> +struct expect_report {
> +       const char *error_type; /* Error type. */
> +       /*
> +        * Kernel symbol from the error header, or NULL if no report is
> +        * expected.
> +        */
> +       const char *symbol;
> +};
> +
> +/* Check observed report matches information in @r. */
> +static bool report_matches(const struct expect_report *r)
> +{
> +       typeof(observed.header) expected_header;
> +       unsigned long flags;
> +       bool ret = false;
> +       const char *end;
> +       char *cur;
> +
> +       /* Doubled-checked locking. */
> +       if (!report_available() || !r->symbol)
> +               return (!report_available() && !r->symbol);
> +
> +       /* Generate expected report contents. */
> +
> +       /* Title */
> +       cur = expected_header;
> +       end = &expected_header[sizeof(expected_header) - 1];
> +
> +       cur += scnprintf(cur, end - cur, "BUG: KMSAN: %s", r->error_type);
> +
> +       scnprintf(cur, end - cur, " in %s", r->symbol);
> +       /* The exact offset won't match, remove it; also strip module name. */
> +       cur = strchr(expected_header, '+');
> +       if (cur)
> +               *cur = '\0';
> +
> +       spin_lock_irqsave(&observed.lock, flags);
> +       if (!report_available())
> +               goto out; /* A new report is being captured. */
> +
> +       /* Finally match expected output to what we actually observed. */
> +       ret = strstr(observed.header, expected_header);
> +out:
> +       spin_unlock_irqrestore(&observed.lock, flags);
> +
> +       return ret;
> +}
> +
> +/* ===== Test cases ===== */
> +
> +/* Prevent replacing branch with select in LLVM. */
> +static noinline void check_true(char *arg)
> +{
> +       pr_info("%s is true\n", arg);
> +}
> +
> +static noinline void check_false(char *arg)
> +{
> +       pr_info("%s is false\n", arg);
> +}
> +
> +#define USE(x)                                                                 \
> +       do {                                                                   \
> +               if (x)                                                         \
> +                       check_true(#x);                                        \
> +               else                                                           \
> +                       check_false(#x);                                       \
> +       } while (0)
> +
> +#define EXPECTATION_ETYPE_FN(e, reason, fn)                                    \
> +       struct expect_report e = {                                             \
> +               .error_type = reason,                                          \
> +               .symbol = fn,                                                  \
> +       }
> +
> +#define EXPECTATION_NO_REPORT(e) EXPECTATION_ETYPE_FN(e, NULL, NULL)
> +#define EXPECTATION_UNINIT_VALUE_FN(e, fn)                                     \
> +       EXPECTATION_ETYPE_FN(e, "uninit-value", fn)
> +#define EXPECTATION_UNINIT_VALUE(e) EXPECTATION_UNINIT_VALUE_FN(e, __func__)
> +#define EXPECTATION_USE_AFTER_FREE(e)                                          \
> +       EXPECTATION_ETYPE_FN(e, "use-after-free", __func__)
> +
> +/* Test case: ensure that kmalloc() returns uninitialized memory. */
> +static void test_uninit_kmalloc(struct kunit *test)
> +{
> +       EXPECTATION_UNINIT_VALUE(expect);
> +       int *ptr;
> +
> +       kunit_info(test, "uninitialized kmalloc test (UMR report)\n");
> +       ptr = kmalloc(sizeof(*ptr), GFP_KERNEL);
> +       USE(*ptr);
> +       KUNIT_EXPECT_TRUE(test, report_matches(&expect));
> +}
> +
> +/*
> + * Test case: ensure that kmalloc'ed memory becomes initialized after memset().
> + */
> +static void test_init_kmalloc(struct kunit *test)
> +{
> +       EXPECTATION_NO_REPORT(expect);
> +       int *ptr;
> +
> +       kunit_info(test, "initialized kmalloc test (no reports)\n");
> +       ptr = kmalloc(sizeof(*ptr), GFP_KERNEL);
> +       memset(ptr, 0, sizeof(*ptr));
> +       USE(*ptr);
> +       KUNIT_EXPECT_TRUE(test, report_matches(&expect));
> +}
> +
> +/* Test case: ensure that kzalloc() returns initialized memory. */
> +static void test_init_kzalloc(struct kunit *test)
> +{
> +       EXPECTATION_NO_REPORT(expect);
> +       int *ptr;
> +
> +       kunit_info(test, "initialized kzalloc test (no reports)\n");
> +       ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
> +       USE(*ptr);
> +       KUNIT_EXPECT_TRUE(test, report_matches(&expect));
> +}
> +
> +/* Test case: ensure that local variables are uninitialized by default. */
> +static void test_uninit_stack_var(struct kunit *test)
> +{
> +       EXPECTATION_UNINIT_VALUE(expect);
> +       volatile int cond;
> +
> +       kunit_info(test, "uninitialized stack variable (UMR report)\n");
> +       USE(cond);
> +       KUNIT_EXPECT_TRUE(test, report_matches(&expect));
> +}
> +
> +/* Test case: ensure that local variables with initializers are initialized. */
> +static void test_init_stack_var(struct kunit *test)
> +{
> +       EXPECTATION_NO_REPORT(expect);
> +       volatile int cond = 1;
> +
> +       kunit_info(test, "initialized stack variable (no reports)\n");
> +       USE(cond);
> +       KUNIT_EXPECT_TRUE(test, report_matches(&expect));
> +}
> +
> +static noinline void two_param_fn_2(int arg1, int arg2)
> +{
> +       USE(arg1);
> +       USE(arg2);
> +}
> +
> +static noinline void one_param_fn(int arg)
> +{
> +       two_param_fn_2(arg, arg);
> +       USE(arg);
> +}
> +
> +static noinline void two_param_fn(int arg1, int arg2)
> +{
> +       int init = 0;
> +
> +       one_param_fn(init);
> +       USE(arg1);
> +       USE(arg2);
> +}
> +
> +static void test_params(struct kunit *test)
> +{
> +#ifdef CONFIG_KMSAN_CHECK_PARAM_RETVAL

Prefer `if (IS_ENABLED(CONFIG_KMSAN_CHECK_PARAM_RETVAL))` over the #ifdef here, so that both branches are always compile-tested regardless of the config.

> +       /*
> +        * With eager param/retval checking enabled, KMSAN will report an error
> +        * before the call to two_param_fn().
> +        */
> +       EXPECTATION_UNINIT_VALUE_FN(expect, "test_params");
> +#else
> +       EXPECTATION_UNINIT_VALUE_FN(expect, "two_param_fn");
> +#endif
> +       volatile int uninit, init = 1;
> +
> +       kunit_info(test,
> +                  "uninit passed through a function parameter (UMR report)\n");
> +       two_param_fn(uninit, init);
> +       KUNIT_EXPECT_TRUE(test, report_matches(&expect));
> +}
> +
> +static int signed_sum3(int a, int b, int c)
> +{
> +       return a + b + c;
> +}
> +
> +/*
> + * Test case: ensure that uninitialized values are tracked through function
> + * arguments.
> + */
> +static void test_uninit_multiple_params(struct kunit *test)
> +{
> +       EXPECTATION_UNINIT_VALUE(expect);
> +       volatile char b = 3, c;
> +       volatile int a;
> +
> +       kunit_info(test, "uninitialized local passed to fn (UMR report)\n");
> +       USE(signed_sum3(a, b, c));
> +       KUNIT_EXPECT_TRUE(test, report_matches(&expect));
> +}
> +
> +/* Helper function to make an array uninitialized. */
> +static noinline void do_uninit_local_array(char *array, int start, int stop)
> +{
> +       volatile char uninit;
> +       int i;
> +
> +       for (i = start; i < stop; i++)
> +               array[i] = uninit;
> +}
> +
> +/*
> + * Test case: ensure kmsan_check_memory() reports an error when checking
> + * uninitialized memory.
> + */
> +static void test_uninit_kmsan_check_memory(struct kunit *test)
> +{
> +       EXPECTATION_UNINIT_VALUE_FN(expect, "test_uninit_kmsan_check_memory");
> +       volatile char local_array[8];
> +
> +       kunit_info(
> +               test,
> +               "kmsan_check_memory() called on uninit local (UMR report)\n");
> +       do_uninit_local_array((char *)local_array, 5, 7);
> +
> +       kmsan_check_memory((char *)local_array, 8);
> +       KUNIT_EXPECT_TRUE(test, report_matches(&expect));
> +}
> +
> +/*
> + * Test case: check that a virtual memory range created with vmap() from
> + * initialized pages is still considered as initialized.
> + */
> +static void test_init_kmsan_vmap_vunmap(struct kunit *test)
> +{
> +       EXPECTATION_NO_REPORT(expect);
> +       const int npages = 2;
> +       struct page **pages;
> +       void *vbuf;
> +       int i;
> +
> +       kunit_info(test, "pages initialized via vmap (no reports)\n");
> +
> +       pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
> +       for (i = 0; i < npages; i++)
> +               pages[i] = alloc_page(GFP_KERNEL);
> +       vbuf = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
> +       memset(vbuf, 0xfe, npages * PAGE_SIZE);
> +       for (i = 0; i < npages; i++)
> +               kmsan_check_memory(page_address(pages[i]), PAGE_SIZE);
> +
> +       if (vbuf)
> +               vunmap(vbuf);
> +       for (i = 0; i < npages; i++)

Add { } around the multi-line loop body, per kernel coding style (checkpatch will also flag this).

> +               if (pages[i])
> +                       __free_page(pages[i]);
> +       kfree(pages);
> +       KUNIT_EXPECT_TRUE(test, report_matches(&expect));
> +}
> +
> +/*
> + * Test case: ensure that memset() can initialize a buffer allocated via
> + * vmalloc().
> + */
> +static void test_init_vmalloc(struct kunit *test)
> +{
> +       EXPECTATION_NO_REPORT(expect);
> +       int npages = 8, i;
> +       char *buf;
> +
> +       kunit_info(test, "vmalloc buffer can be initialized (no reports)\n");
> +       buf = vmalloc(PAGE_SIZE * npages);
> +       buf[0] = 1;
> +       memset(buf, 0xfe, PAGE_SIZE * npages);
> +       USE(buf[0]);
> +       for (i = 0; i < npages; i++)
> +               kmsan_check_memory(&buf[PAGE_SIZE * i], PAGE_SIZE);
> +       vfree(buf);
> +       KUNIT_EXPECT_TRUE(test, report_matches(&expect));
> +}
> +
> +/* Test case: ensure that use-after-free reporting works. */
> +static void test_uaf(struct kunit *test)
> +{
> +       EXPECTATION_USE_AFTER_FREE(expect);
> +       volatile int value;
> +       volatile int *var;
> +
> +       kunit_info(test, "use-after-free in kmalloc-ed buffer (UMR report)\n");
> +       var = kmalloc(80, GFP_KERNEL);
> +       var[3] = 0xfeedface;
> +       kfree((int *)var);
> +       /* Copy the invalid value before checking it. */
> +       value = var[3];
> +       USE(value);
> +       KUNIT_EXPECT_TRUE(test, report_matches(&expect));
> +}
> +
> +/*
> + * Test case: ensure that uninitialized values are propagated through per-CPU
> + * memory.
> + */
> +static void test_percpu_propagate(struct kunit *test)
> +{
> +       EXPECTATION_UNINIT_VALUE(expect);
> +       volatile int uninit, check;
> +
> +       kunit_info(test,
> +                  "uninit local stored to per_cpu memory (UMR report)\n");
> +
> +       this_cpu_write(per_cpu_var, uninit);
> +       check = this_cpu_read(per_cpu_var);
> +       USE(check);
> +       KUNIT_EXPECT_TRUE(test, report_matches(&expect));
> +}
> +
> +/*
> + * Test case: ensure that passing uninitialized values to printk() leads to an
> + * error report.
> + */
> +static void test_printk(struct kunit *test)
> +{
> +#ifdef CONFIG_KMSAN_CHECK_PARAM_RETVAL

if (IS_ENABLED(CONFIG_KMSAN_CHECK_PARAM_RETVAL))

> +       /*
> +        * With eager param/retval checking enabled, KMSAN will report an error
> +        * before the call to pr_info().
> +        */
> +       EXPECTATION_UNINIT_VALUE_FN(expect, "test_printk");
> +#else
> +       EXPECTATION_UNINIT_VALUE_FN(expect, "number");
> +#endif
> +       volatile int uninit;
> +
> +       kunit_info(test, "uninit local passed to pr_info() (UMR report)\n");
> +       pr_info("%px contains %d\n", &uninit, uninit);
> +       KUNIT_EXPECT_TRUE(test, report_matches(&expect));
> +}
> +
> +/*
> + * Test case: ensure that memcpy() correctly copies uninitialized values between
> + * aligned `src` and `dst`.
> + */
> +static void test_memcpy_aligned_to_aligned(struct kunit *test)
> +{
> +       EXPECTATION_UNINIT_VALUE_FN(expect, "test_memcpy_aligned_to_aligned");
> +       volatile int uninit_src;
> +       volatile int dst = 0;
> +
> +       kunit_info(test, "memcpy()ing aligned uninit src to aligned dst (UMR report)\n");
> +       memcpy((void *)&dst, (void *)&uninit_src, sizeof(uninit_src));
> +       kmsan_check_memory((void *)&dst, sizeof(dst));
> +       KUNIT_EXPECT_TRUE(test, report_matches(&expect));
> +}
> +
> +/*
> + * Test case: ensure that memcpy() correctly copies uninitialized values between
> + * aligned `src` and unaligned `dst`.
> + *
> + * Copying aligned 4-byte value to an unaligned one leads to touching two
> + * aligned 4-byte values. This test case checks that KMSAN correctly reports an
> + * error on the first of the two values.
> + */
> +static void test_memcpy_aligned_to_unaligned(struct kunit *test)
> +{
> +       EXPECTATION_UNINIT_VALUE_FN(expect, "test_memcpy_aligned_to_unaligned");
> +       volatile int uninit_src;
> +       volatile char dst[8] = {0};
> +
> +       kunit_info(test, "memcpy()ing aligned uninit src to unaligned dst (UMR report)\n");
> +       memcpy((void *)&dst[1], (void *)&uninit_src, sizeof(uninit_src));
> +       kmsan_check_memory((void *)dst, 4);
> +       KUNIT_EXPECT_TRUE(test, report_matches(&expect));
> +}
> +
> +/*
> + * Test case: ensure that memcpy() correctly copies uninitialized values between
> + * aligned `src` and unaligned `dst`.
> + *
> + * Copying aligned 4-byte value to an unaligned one leads to touching two
> + * aligned 4-byte values. This test case checks that KMSAN correctly reports an
> + * error on the second of the two values.
> + */
> +static void test_memcpy_aligned_to_unaligned2(struct kunit *test)
> +{
> +       EXPECTATION_UNINIT_VALUE_FN(expect, "test_memcpy_aligned_to_unaligned2");
> +       volatile int uninit_src;
> +       volatile char dst[8] = {0};
> +
> +       kunit_info(test, "memcpy()ing aligned uninit src to unaligned dst - part 2 (UMR report)\n");
> +       memcpy((void *)&dst[1], (void *)&uninit_src, sizeof(uninit_src));
> +       kmsan_check_memory((void *)&dst[4], sizeof(uninit_src));
> +       KUNIT_EXPECT_TRUE(test, report_matches(&expect));
> +}
> +
> +static struct kunit_case kmsan_test_cases[] = {
> +       KUNIT_CASE(test_uninit_kmalloc),
> +       KUNIT_CASE(test_init_kmalloc),
> +       KUNIT_CASE(test_init_kzalloc),
> +       KUNIT_CASE(test_uninit_stack_var),
> +       KUNIT_CASE(test_init_stack_var),
> +       KUNIT_CASE(test_params),
> +       KUNIT_CASE(test_uninit_multiple_params),
> +       KUNIT_CASE(test_uninit_kmsan_check_memory),
> +       KUNIT_CASE(test_init_kmsan_vmap_vunmap),
> +       KUNIT_CASE(test_init_vmalloc),
> +       KUNIT_CASE(test_uaf),
> +       KUNIT_CASE(test_percpu_propagate),
> +       KUNIT_CASE(test_printk),
> +       KUNIT_CASE(test_memcpy_aligned_to_aligned),
> +       KUNIT_CASE(test_memcpy_aligned_to_unaligned),
> +       KUNIT_CASE(test_memcpy_aligned_to_unaligned2),
> +       {},
> +};
> +
> +/* ===== End test cases ===== */
> +
> +static int test_init(struct kunit *test)
> +{
> +       unsigned long flags;
> +
> +       spin_lock_irqsave(&observed.lock, flags);
> +       observed.header[0] = '\0';
> +       observed.ignore = false;
> +       observed.available = false;
> +       spin_unlock_irqrestore(&observed.lock, flags);
> +
> +       return 0;
> +}
> +
> +static void test_exit(struct kunit *test)
> +{
> +}
> +
> +static struct kunit_suite kmsan_test_suite = {
> +       .name = "kmsan",
> +       .test_cases = kmsan_test_cases,
> +       .init = test_init,
> +       .exit = test_exit,
> +};
> +static struct kunit_suite *kmsan_test_suites[] = { &kmsan_test_suite, NULL };
> +
> +static void register_tracepoints(struct tracepoint *tp, void *ignore)
> +{
> +       check_trace_callback_type_console(probe_console);
> +       if (!strcmp(tp->name, "console"))
> +               WARN_ON(tracepoint_probe_register(tp, probe_console, NULL));
> +}
> +
> +static void unregister_tracepoints(struct tracepoint *tp, void *ignore)
> +{
> +       if (!strcmp(tp->name, "console"))
> +               tracepoint_probe_unregister(tp, probe_console, NULL);
> +}
> +
> +/*
> + * We only want to do tracepoints setup and teardown once, therefore we have to
> + * customize the init and exit functions and cannot rely on kunit_test_suite().
> + */

This is no longer true. See a recent version of
mm/kfence/kfence_test.c which uses the new suite_init/exit.

> +static int __init kmsan_test_init(void)
> +{
> +       /*
> +        * Because we want to be able to build the test as a module, we need to
> +        * iterate through all known tracepoints, since the static registration
> +        * won't work here.
> +        */
> +       for_each_kernel_tracepoint(register_tracepoints, NULL);
> +       return __kunit_test_suites_init(kmsan_test_suites);
> +}
> +
> +static void kmsan_test_exit(void)
> +{
> +       __kunit_test_suites_exit(kmsan_test_suites);
> +       for_each_kernel_tracepoint(unregister_tracepoints, NULL);
> +       tracepoint_synchronize_unregister();
> +}
> +
> +late_initcall_sync(kmsan_test_init);
> +module_exit(kmsan_test_exit);
> +
> +MODULE_LICENSE("GPL v2");

A recent version of checkpatch should complain about this, wanting
only "GPL" instead of "GPL v2".

> +MODULE_AUTHOR("Alexander Potapenko <glider@google.com>");

  reply	other threads:[~2022-07-12 14:17 UTC|newest]

Thread overview: 147+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-07-01 14:22 [PATCH v4 00/45] Add KernelMemorySanitizer infrastructure Alexander Potapenko
2022-07-01 14:22 ` [PATCH v4 01/45] x86: add missing include to sparsemem.h Alexander Potapenko
2022-07-01 14:22 ` [PATCH v4 02/45] stackdepot: reserve 5 extra bits in depot_stack_handle_t Alexander Potapenko
2022-07-12 14:17   ` Marco Elver
2022-07-01 14:22 ` [PATCH v4 03/45] instrumented.h: allow instrumenting both sides of copy_from_user() Alexander Potapenko
2022-07-12 14:17   ` Marco Elver
2022-07-01 14:22 ` [PATCH v4 04/45] x86: asm: instrument usercopy in get_user() and __put_user_size() Alexander Potapenko
2022-07-02  3:47   ` kernel test robot
2022-07-15 14:03     ` Alexander Potapenko
2022-07-15 14:03       ` Alexander Potapenko
2022-07-02 10:45   ` kernel test robot
2022-07-15 16:44     ` Alexander Potapenko
2022-07-15 16:44       ` Alexander Potapenko
2022-07-02 13:09   ` kernel test robot
2022-07-07 10:13   ` Marco Elver
2022-08-07 17:33     ` Alexander Potapenko
2022-07-01 14:22 ` [PATCH v4 05/45] asm-generic: instrument usercopy in cacheflush.h Alexander Potapenko
2022-07-12 14:17   ` Marco Elver
2022-07-01 14:22 ` [PATCH v4 06/45] kmsan: add ReST documentation Alexander Potapenko
2022-07-07 12:34   ` Marco Elver
2022-07-15  7:42     ` Alexander Potapenko
2022-07-15  8:52       ` Marco Elver
2022-07-01 14:22 ` [PATCH v4 07/45] kmsan: introduce __no_sanitize_memory and __no_kmsan_checks Alexander Potapenko
2022-07-12 14:17   ` Marco Elver
2022-07-01 14:22 ` [PATCH v4 08/45] kmsan: mark noinstr as __no_sanitize_memory Alexander Potapenko
2022-07-12 14:17   ` Marco Elver
2022-07-01 14:22 ` [PATCH v4 09/45] x86: kmsan: pgtable: reduce vmalloc space Alexander Potapenko
2022-07-11 16:12   ` Marco Elver
2022-07-01 14:22 ` [PATCH v4 10/45] libnvdimm/pfn_dev: increase MAX_STRUCT_PAGE_SIZE Alexander Potapenko
2022-07-11 16:26   ` Marco Elver
2022-08-03  9:41     ` Alexander Potapenko
2022-08-03  9:44     ` Alexander Potapenko
2023-01-05 22:08       ` Dan Williams
2023-01-09  9:51         ` Alexander Potapenko
2023-01-09 22:06           ` Dan Williams
2023-01-10  5:56             ` Greg Kroah-Hartman
2023-01-10  6:55               ` Dan Williams
2023-01-10  8:48                 ` Alexander Potapenko
2023-01-10  8:52                   ` Alexander Potapenko
2023-01-10  8:53                   ` Eric Dumazet
2023-01-10  8:55                     ` Christoph Hellwig
2023-01-10 15:35                       ` Steven Rostedt
2023-01-10  9:14                     ` Alexander Potapenko
2023-01-30  8:34         ` Alexander Potapenko
2023-01-30 18:57           ` Dan Williams
2022-07-01 14:22 ` [PATCH v4 11/45] kmsan: add KMSAN runtime core Alexander Potapenko
2022-07-02  0:18   ` Hillf Danton
2022-08-03 17:25     ` Alexander Potapenko
2022-07-11 16:49   ` Marco Elver
2022-08-03 18:14     ` Alexander Potapenko
2022-07-13 10:04   ` Marco Elver
2022-08-03 17:45     ` Alexander Potapenko
2022-07-01 14:22 ` [PATCH v4 12/45] kmsan: disable instrumentation of unsupported common kernel code Alexander Potapenko
2022-07-12 11:54   ` Marco Elver
2022-07-01 14:22 ` [PATCH v4 13/45] MAINTAINERS: add entry for KMSAN Alexander Potapenko
2022-07-12 12:06   ` Marco Elver
2022-08-02 16:39     ` Alexander Potapenko
2022-07-01 14:22 ` [PATCH v4 14/45] mm: kmsan: maintain KMSAN metadata for page operations Alexander Potapenko
2022-07-12 12:20   ` Marco Elver
2022-08-03 10:30     ` Alexander Potapenko
2022-07-01 14:22 ` [PATCH v4 15/45] mm: kmsan: call KMSAN hooks from SLUB code Alexander Potapenko
2022-07-12 13:13   ` Marco Elver
2022-08-02 16:31     ` Alexander Potapenko
2022-07-01 14:22 ` [PATCH v4 16/45] kmsan: handle task creation and exiting Alexander Potapenko
2022-07-12 13:17   ` Marco Elver
2022-08-02 15:47     ` Alexander Potapenko
2022-07-01 14:22 ` [PATCH v4 17/45] init: kmsan: call KMSAN initialization routines Alexander Potapenko
2022-07-12 14:05   ` Marco Elver
2022-08-02 20:07     ` Alexander Potapenko
2022-08-03  9:08       ` Marco Elver
2022-07-01 14:22 ` [PATCH v4 18/45] instrumented.h: add KMSAN support Alexander Potapenko
2022-07-12 13:51   ` Marco Elver
2022-08-03 11:17     ` Alexander Potapenko
2022-07-01 14:22 ` [PATCH v4 19/45] kmsan: unpoison @tlb in arch_tlb_gather_mmu() Alexander Potapenko
2022-07-13  9:28   ` Marco Elver
2022-07-01 14:22 ` [PATCH v4 20/45] kmsan: add iomap support Alexander Potapenko
2022-07-01 14:22 ` [PATCH v4 21/45] Input: libps2: mark data received in __ps2_command() as initialized Alexander Potapenko
2022-07-01 14:22 ` [PATCH v4 22/45] dma: kmsan: unpoison DMA mappings Alexander Potapenko
2022-07-01 14:22 ` [PATCH v4 23/45] virtio: kmsan: check/unpoison scatterlist in vring_map_one_sg() Alexander Potapenko
2022-07-01 14:22 ` [PATCH v4 24/45] kmsan: handle memory sent to/from USB Alexander Potapenko
2022-07-01 14:22 ` [PATCH v4 25/45] kmsan: add tests for KMSAN Alexander Potapenko
2022-07-12 14:16   ` Marco Elver [this message]
2022-08-02 17:29     ` Alexander Potapenko
2022-07-01 14:22 ` [PATCH v4 26/45] kmsan: disable strscpy() optimization under KMSAN Alexander Potapenko
2022-07-01 14:22 ` [PATCH v4 27/45] crypto: kmsan: disable accelerated configs " Alexander Potapenko
2022-07-01 14:22 ` [PATCH v4 28/45] kmsan: disable physical page merging in biovec Alexander Potapenko
2022-07-01 14:22 ` [PATCH v4 29/45] block: kmsan: skip bio block merging logic for KMSAN Alexander Potapenko
2022-07-13 10:22   ` Marco Elver
2022-08-02 17:47     ` Alexander Potapenko
2022-07-01 14:22 ` [PATCH v4 30/45] kcov: kmsan: unpoison area->list in kcov_remote_area_put() Alexander Potapenko
2022-07-01 14:22 ` [PATCH v4 31/45] security: kmsan: fix interoperability with auto-initialization Alexander Potapenko
2022-07-01 14:22 ` [PATCH v4 32/45] objtool: kmsan: list KMSAN API functions as uaccess-safe Alexander Potapenko
2022-07-01 14:22 ` [PATCH v4 33/45] x86: kmsan: disable instrumentation of unsupported code Alexander Potapenko
2022-07-12 13:43   ` Marco Elver
2022-08-03 10:52     ` Alexander Potapenko
2022-07-01 14:22 ` [PATCH v4 34/45] x86: kmsan: skip shadow checks in __switch_to() Alexander Potapenko
2022-07-01 14:23 ` [PATCH v4 35/45] x86: kmsan: handle open-coded assembly in lib/iomem.c Alexander Potapenko
2022-07-01 14:23 ` [PATCH v4 36/45] x86: kmsan: use __msan_ string functions where possible Alexander Potapenko
2022-07-01 14:23 ` [PATCH v4 37/45] x86: kmsan: sync metadata pages on page fault Alexander Potapenko
2022-07-01 14:23 ` [PATCH v4 38/45] x86: kasan: kmsan: support CONFIG_GENERIC_CSUM on x86, enable it for KASAN/KMSAN Alexander Potapenko
2022-07-01 14:23 ` [PATCH v4 39/45] x86: fs: kmsan: disable CONFIG_DCACHE_WORD_ACCESS Alexander Potapenko
2022-07-01 14:23 ` [PATCH v4 40/45] x86: kmsan: don't instrument stack walking functions Alexander Potapenko
2022-07-01 14:23 ` [PATCH v4 41/45] entry: kmsan: introduce kmsan_unpoison_entry_regs() Alexander Potapenko
2022-07-01 14:23 ` [PATCH v4 42/45] bpf: kmsan: initialize BPF registers with zeroes Alexander Potapenko
2022-07-01 14:23 ` [PATCH v4 43/45] namei: initialize parameters passed to step_into() Alexander Potapenko
2022-07-02 17:23   ` Linus Torvalds
2022-07-03  3:59     ` Al Viro
2022-07-04  2:52     ` Al Viro
2022-07-04  8:20       ` Alexander Potapenko
2022-07-04 13:44         ` Al Viro
2022-07-04 13:55           ` Al Viro
2022-07-04 15:49           ` Alexander Potapenko
2022-07-04 16:03             ` Greg Kroah-Hartman
2022-07-04 16:33               ` Alexander Potapenko
2022-07-04 18:23             ` Segher Boessenkool
2022-07-04 16:00           ` Al Viro
2022-07-04 16:47             ` Alexander Potapenko
2022-07-04 17:36       ` Linus Torvalds
2022-07-04 19:02         ` Al Viro
2022-07-04 19:16           ` Linus Torvalds
2022-07-04 19:55             ` Al Viro
2022-07-04 20:24               ` Linus Torvalds
2022-07-04 20:46                 ` Al Viro
2022-07-04 20:51                   ` Linus Torvalds
2022-07-04 21:04                     ` Al Viro
2022-07-04 23:13                       ` [PATCH 1/7] __follow_mount_rcu(): verify that mount_lock remains unchanged Al Viro
2022-07-04 23:14                         ` [PATCH 2/7] follow_dotdot{,_rcu}(): change calling conventions Al Viro
2022-07-04 23:14                         ` [PATCH 3/7] namei: stash the sampled ->d_seq into nameidata Al Viro
2022-07-04 23:15                         ` [PATCH 4/7] step_into(): lose inode argument Al Viro
2022-07-04 23:15                         ` [PATCH 5/7] follow_dotdot{,_rcu}(): don't bother with inode Al Viro
2022-07-04 23:16                         ` [PATCH 6/7] lookup_fast(): " Al Viro
2022-07-04 23:17                         ` [PATCH 7/7] step_into(): move fetching ->d_inode past handle_mounts() Al Viro
2022-07-04 23:19                         ` [PATCH 1/7] __follow_mount_rcu(): verify that mount_lock remains unchanged Al Viro
2022-07-05  0:06                           ` Linus Torvalds
2022-07-05  3:48                             ` Al Viro
2022-07-04 20:47                 ` [PATCH v4 43/45] namei: initialize parameters passed to step_into() Linus Torvalds
2022-08-08 16:37   ` Alexander Potapenko
2022-07-01 14:23 ` [PATCH v4 44/45] mm: fs: initialize fsdata passed to write_begin/write_end interface Alexander Potapenko
2022-07-04 20:07   ` Matthew Wilcox
2022-07-04 20:30     ` Al Viro
2022-08-25 15:39     ` Alexander Potapenko
2022-08-25 16:33       ` Linus Torvalds
2022-08-25 21:57         ` Segher Boessenkool
2022-08-26 19:41           ` Linus Torvalds
2022-08-31 13:32             ` Alexander Potapenko
2022-08-25 22:13         ` Segher Boessenkool
2022-07-01 14:23 ` [PATCH v4 45/45] x86: kmsan: enable KMSAN builds for x86 Alexander Potapenko

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to='CANpmjNPeW=pQ_rU5ACTpBX8W4TH4vdcDn=hqPhHGtYU96iHF0A@mail.gmail.com' \
    --to=elver@google.com \
    --cc=akpm@linux-foundation.org \
    --cc=andreyknvl@google.com \
    --cc=arnd@arndb.de \
    --cc=ast@kernel.org \
    --cc=axboe@kernel.dk \
    --cc=bp@alien8.de \
    --cc=cl@linux.com \
    --cc=dvyukov@google.com \
    --cc=edumazet@google.com \
    --cc=glider@google.com \
    --cc=gor@linux.ibm.com \
    --cc=gregkh@linuxfoundation.org \
    --cc=hch@lst.de \
    --cc=herbert@gondor.apana.org.au \
    --cc=iamjoonsoo.kim@lge.com \
    --cc=iii@linux.ibm.com \
    --cc=kasan-dev@googlegroups.com \
    --cc=keescook@chromium.org \
    --cc=linux-arch@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=luto@kernel.org \
    --cc=mark.rutland@arm.com \
    --cc=mingo@redhat.com \
    --cc=mst@redhat.com \
    --cc=penberg@kernel.org \
    --cc=peterz@infradead.org \
    --cc=pmladek@suse.com \
    --cc=rientjes@google.com \
    --cc=rostedt@goodmis.org \
    --cc=tglx@linutronix.de \
    --cc=vbabka@suse.cz \
    --cc=vegard.nossum@oracle.com \
    --cc=viro@zeniv.linux.org.uk \
    --cc=willy@infradead.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.