* [PATCH kcsan 01/32] kcsan: Prefer __always_inline for fast-path
From: paulmck @ 2020-03-09 19:03 UTC (permalink / raw)
  To: linux-kernel, kasan-dev, kernel-team, mingo
  Cc: elver, andreyknvl, glider, dvyukov, cai, boqun.feng, Paul E . McKenney

From: Marco Elver <elver@google.com>

Prefer __always_inline for fast-path functions that are called outside
of user_access_save, to avoid generating UACCESS warnings when
optimizing for size (CC_OPTIMIZE_FOR_SIZE). This also avoids future
surprises with compiler versions that change the inlining heuristic even
when optimizing for performance.
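
For reference, a minimal sketch of the distinction (not part of the patch;
the helper names are made up):

  /*
   * "inline" is only a hint: under CC_OPTIMIZE_FOR_SIZE the compiler may
   * still emit this helper out of line and call it, and such a call from
   * a region where user accesses are enabled is what objtool reports as
   * a UACCESS warning.
   */
  static inline bool helper_maybe_outlined(const volatile void *ptr)
  {
  	return ptr != NULL;
  }

  /*
   * __always_inline forces inlining regardless of optimization level, so
   * no out-of-line call can appear on the fast path.
   */
  static __always_inline bool helper_always_inlined(const volatile void *ptr)
  {
  	return ptr != NULL;
  }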

Report: http://lkml.kernel.org/r/58708908-84a0-0a81-a836-ad97e33dbb62@infradead.org
Reported-by: Randy Dunlap <rdunlap@infradead.org>
Acked-by: Randy Dunlap <rdunlap@infradead.org> # build-tested
Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
---
 kernel/kcsan/atomic.h   |  2 +-
 kernel/kcsan/core.c     | 18 +++++++++---------
 kernel/kcsan/encoding.h | 14 +++++++-------
 3 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/kernel/kcsan/atomic.h b/kernel/kcsan/atomic.h
index 576e03d..a9c1930 100644
--- a/kernel/kcsan/atomic.h
+++ b/kernel/kcsan/atomic.h
@@ -18,7 +18,7 @@
  * than cast to volatile. Eventually, we hope to be able to remove this
  * function.
  */
-static inline bool kcsan_is_atomic(const volatile void *ptr)
+static __always_inline bool kcsan_is_atomic(const volatile void *ptr)
 {
 	/* only jiffies for now */
 	return ptr == &jiffies;
diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index 3314fc2..4d4ab5c 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -78,10 +78,10 @@ static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1];
  */
 static DEFINE_PER_CPU(long, kcsan_skip);
 
-static inline atomic_long_t *find_watchpoint(unsigned long addr,
-					     size_t size,
-					     bool expect_write,
-					     long *encoded_watchpoint)
+static __always_inline atomic_long_t *find_watchpoint(unsigned long addr,
+						      size_t size,
+						      bool expect_write,
+						      long *encoded_watchpoint)
 {
 	const int slot = watchpoint_slot(addr);
 	const unsigned long addr_masked = addr & WATCHPOINT_ADDR_MASK;
@@ -146,7 +146,7 @@ insert_watchpoint(unsigned long addr, size_t size, bool is_write)
  *	2. the thread that set up the watchpoint already removed it;
  *	3. the watchpoint was removed and then re-used.
  */
-static inline bool
+static __always_inline bool
 try_consume_watchpoint(atomic_long_t *watchpoint, long encoded_watchpoint)
 {
 	return atomic_long_try_cmpxchg_relaxed(watchpoint, &encoded_watchpoint, CONSUMED_WATCHPOINT);
@@ -160,7 +160,7 @@ static inline bool remove_watchpoint(atomic_long_t *watchpoint)
 	return atomic_long_xchg_relaxed(watchpoint, INVALID_WATCHPOINT) != CONSUMED_WATCHPOINT;
 }
 
-static inline struct kcsan_ctx *get_ctx(void)
+static __always_inline struct kcsan_ctx *get_ctx(void)
 {
 	/*
 	 * In interrupts, use raw_cpu_ptr to avoid unnecessary checks, that would
@@ -169,7 +169,7 @@ static inline struct kcsan_ctx *get_ctx(void)
 	return in_task() ? &current->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx);
 }
 
-static inline bool is_atomic(const volatile void *ptr)
+static __always_inline bool is_atomic(const volatile void *ptr)
 {
 	struct kcsan_ctx *ctx = get_ctx();
 
@@ -193,7 +193,7 @@ static inline bool is_atomic(const volatile void *ptr)
 	return kcsan_is_atomic(ptr);
 }
 
-static inline bool should_watch(const volatile void *ptr, int type)
+static __always_inline bool should_watch(const volatile void *ptr, int type)
 {
 	/*
 	 * Never set up watchpoints when memory operations are atomic.
@@ -226,7 +226,7 @@ static inline void reset_kcsan_skip(void)
 	this_cpu_write(kcsan_skip, skip_count);
 }
 
-static inline bool kcsan_is_enabled(void)
+static __always_inline bool kcsan_is_enabled(void)
 {
 	return READ_ONCE(kcsan_enabled) && get_ctx()->disable_count == 0;
 }
diff --git a/kernel/kcsan/encoding.h b/kernel/kcsan/encoding.h
index b63890e8..f03562a 100644
--- a/kernel/kcsan/encoding.h
+++ b/kernel/kcsan/encoding.h
@@ -59,10 +59,10 @@ encode_watchpoint(unsigned long addr, size_t size, bool is_write)
 		      (addr & WATCHPOINT_ADDR_MASK));
 }
 
-static inline bool decode_watchpoint(long watchpoint,
-				     unsigned long *addr_masked,
-				     size_t *size,
-				     bool *is_write)
+static __always_inline bool decode_watchpoint(long watchpoint,
+					      unsigned long *addr_masked,
+					      size_t *size,
+					      bool *is_write)
 {
 	if (watchpoint == INVALID_WATCHPOINT ||
 	    watchpoint == CONSUMED_WATCHPOINT)
@@ -78,13 +78,13 @@ static inline bool decode_watchpoint(long watchpoint,
 /*
  * Return watchpoint slot for an address.
  */
-static inline int watchpoint_slot(unsigned long addr)
+static __always_inline int watchpoint_slot(unsigned long addr)
 {
 	return (addr / PAGE_SIZE) % CONFIG_KCSAN_NUM_WATCHPOINTS;
 }
 
-static inline bool matching_access(unsigned long addr1, size_t size1,
-				   unsigned long addr2, size_t size2)
+static __always_inline bool matching_access(unsigned long addr1, size_t size1,
+					    unsigned long addr2, size_t size2)
 {
 	unsigned long end_range1 = addr1 + size1 - 1;
 	unsigned long end_range2 = addr2 + size2 - 1;
-- 
2.9.5


* [PATCH kcsan 02/32] kcsan: Show full access type in report
From: paulmck @ 2020-03-09 19:03 UTC (permalink / raw)
  To: linux-kernel, kasan-dev, kernel-team, mingo
  Cc: elver, andreyknvl, glider, dvyukov, cai, boqun.feng, Paul E . McKenney

From: Marco Elver <elver@google.com>

This commit adds access-type information to KCSAN's reports as follows:
"read", "read (marked)", "write", and "write (marked)".

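As an illustration only (the address, task and CPU values are made up), the
per-thread header of a report then reads, for example:

  write (marked) to 0xffffffff86a01cd8 of 4 bytes by task 123 on cpu 0:

following the "%s to 0x%px of %zu bytes by %s on cpu %i" format used in
print_report() below.
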
Suggested-by: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
---
 kernel/kcsan/core.c   | 15 ++++++++-------
 kernel/kcsan/kcsan.h  |  2 +-
 kernel/kcsan/report.c | 43 ++++++++++++++++++++++++++++---------------
 3 files changed, 37 insertions(+), 23 deletions(-)

diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index 4d4ab5c..87bf857 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -255,7 +255,7 @@ static inline unsigned int get_delay(void)
 
 static noinline void kcsan_found_watchpoint(const volatile void *ptr,
 					    size_t size,
-					    bool is_write,
+					    int type,
 					    atomic_long_t *watchpoint,
 					    long encoded_watchpoint)
 {
@@ -276,7 +276,7 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr,
 	flags = user_access_save();
 
 	if (consumed) {
-		kcsan_report(ptr, size, is_write, true, raw_smp_processor_id(),
+		kcsan_report(ptr, size, type, true, raw_smp_processor_id(),
 			     KCSAN_REPORT_CONSUMED_WATCHPOINT);
 	} else {
 		/*
@@ -292,8 +292,9 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr,
 }
 
 static noinline void
-kcsan_setup_watchpoint(const volatile void *ptr, size_t size, bool is_write)
+kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
 {
+	const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
 	atomic_long_t *watchpoint;
 	union {
 		u8 _1;
@@ -415,13 +416,13 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, bool is_write)
 		 * No need to increment 'data_races' counter, as the racing
 		 * thread already did.
 		 */
-		kcsan_report(ptr, size, is_write, size > 8 || value_change,
+		kcsan_report(ptr, size, type, size > 8 || value_change,
 			     smp_processor_id(), KCSAN_REPORT_RACE_SIGNAL);
 	} else if (value_change) {
 		/* Inferring a race, since the value should not have changed. */
 		kcsan_counter_inc(KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN);
 		if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN))
-			kcsan_report(ptr, size, is_write, true,
+			kcsan_report(ptr, size, type, true,
 				     smp_processor_id(),
 				     KCSAN_REPORT_RACE_UNKNOWN_ORIGIN);
 	}
@@ -455,10 +456,10 @@ static __always_inline void check_access(const volatile void *ptr, size_t size,
 	 */
 
 	if (unlikely(watchpoint != NULL))
-		kcsan_found_watchpoint(ptr, size, is_write, watchpoint,
+		kcsan_found_watchpoint(ptr, size, type, watchpoint,
 				       encoded_watchpoint);
 	else if (unlikely(should_watch(ptr, type)))
-		kcsan_setup_watchpoint(ptr, size, is_write);
+		kcsan_setup_watchpoint(ptr, size, type);
 }
 
 /* === Public interface ===================================================== */
diff --git a/kernel/kcsan/kcsan.h b/kernel/kcsan/kcsan.h
index d3b9a96..8492da4 100644
--- a/kernel/kcsan/kcsan.h
+++ b/kernel/kcsan/kcsan.h
@@ -103,7 +103,7 @@ enum kcsan_report_type {
 /*
  * Print a race report from thread that encountered the race.
  */
-extern void kcsan_report(const volatile void *ptr, size_t size, bool is_write,
+extern void kcsan_report(const volatile void *ptr, size_t size, int access_type,
 			 bool value_change, int cpu_id, enum kcsan_report_type type);
 
 #endif /* _KERNEL_KCSAN_KCSAN_H */
diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c
index 0eea05a..9f503ca 100644
--- a/kernel/kcsan/report.c
+++ b/kernel/kcsan/report.c
@@ -24,7 +24,7 @@
 static struct {
 	const volatile void	*ptr;
 	size_t			size;
-	bool			is_write;
+	int			access_type;
 	int			task_pid;
 	int			cpu_id;
 	unsigned long		stack_entries[NUM_STACK_ENTRIES];
@@ -41,8 +41,10 @@ static DEFINE_SPINLOCK(report_lock);
  * Special rules to skip reporting.
  */
 static bool
-skip_report(bool is_write, bool value_change, unsigned long top_frame)
+skip_report(int access_type, bool value_change, unsigned long top_frame)
 {
+	const bool is_write = (access_type & KCSAN_ACCESS_WRITE) != 0;
+
 	if (IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY) && is_write &&
 	    !value_change) {
 		/*
@@ -63,9 +65,20 @@ skip_report(bool is_write, bool value_change, unsigned long top_frame)
 	return kcsan_skip_report_debugfs(top_frame);
 }
 
-static inline const char *get_access_type(bool is_write)
+static const char *get_access_type(int type)
 {
-	return is_write ? "write" : "read";
+	switch (type) {
+	case 0:
+		return "read";
+	case KCSAN_ACCESS_ATOMIC:
+		return "read (marked)";
+	case KCSAN_ACCESS_WRITE:
+		return "write";
+	case KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
+		return "write (marked)";
+	default:
+		BUG();
+	}
 }
 
 /* Return thread description: in task or interrupt. */
@@ -112,7 +125,7 @@ static int sym_strcmp(void *addr1, void *addr2)
 /*
  * Returns true if a report was generated, false otherwise.
  */
-static bool print_report(const volatile void *ptr, size_t size, bool is_write,
+static bool print_report(const volatile void *ptr, size_t size, int access_type,
 			 bool value_change, int cpu_id,
 			 enum kcsan_report_type type)
 {
@@ -124,7 +137,7 @@ static bool print_report(const volatile void *ptr, size_t size, bool is_write,
 	/*
 	 * Must check report filter rules before starting to print.
 	 */
-	if (skip_report(is_write, true, stack_entries[skipnr]))
+	if (skip_report(access_type, true, stack_entries[skipnr]))
 		return false;
 
 	if (type == KCSAN_REPORT_RACE_SIGNAL) {
@@ -132,7 +145,7 @@ static bool print_report(const volatile void *ptr, size_t size, bool is_write,
 						other_info.num_stack_entries);
 
 		/* @value_change is only known for the other thread */
-		if (skip_report(other_info.is_write, value_change,
+		if (skip_report(other_info.access_type, value_change,
 				other_info.stack_entries[other_skipnr]))
 			return false;
 	}
@@ -170,7 +183,7 @@ static bool print_report(const volatile void *ptr, size_t size, bool is_write,
 	switch (type) {
 	case KCSAN_REPORT_RACE_SIGNAL:
 		pr_err("%s to 0x%px of %zu bytes by %s on cpu %i:\n",
-		       get_access_type(other_info.is_write), other_info.ptr,
+		       get_access_type(other_info.access_type), other_info.ptr,
 		       other_info.size, get_thread_desc(other_info.task_pid),
 		       other_info.cpu_id);
 
@@ -181,14 +194,14 @@ static bool print_report(const volatile void *ptr, size_t size, bool is_write,
 
 		pr_err("\n");
 		pr_err("%s to 0x%px of %zu bytes by %s on cpu %i:\n",
-		       get_access_type(is_write), ptr, size,
+		       get_access_type(access_type), ptr, size,
 		       get_thread_desc(in_task() ? task_pid_nr(current) : -1),
 		       cpu_id);
 		break;
 
 	case KCSAN_REPORT_RACE_UNKNOWN_ORIGIN:
 		pr_err("race at unknown origin, with %s to 0x%px of %zu bytes by %s on cpu %i:\n",
-		       get_access_type(is_write), ptr, size,
+		       get_access_type(access_type), ptr, size,
 		       get_thread_desc(in_task() ? task_pid_nr(current) : -1),
 		       cpu_id);
 		break;
@@ -223,7 +236,7 @@ static void release_report(unsigned long *flags, enum kcsan_report_type type)
  * required for the report type, simply acquires report_lock and returns true.
  */
 static bool prepare_report(unsigned long *flags, const volatile void *ptr,
-			   size_t size, bool is_write, int cpu_id,
+			   size_t size, int access_type, int cpu_id,
 			   enum kcsan_report_type type)
 {
 	if (type != KCSAN_REPORT_CONSUMED_WATCHPOINT &&
@@ -243,7 +256,7 @@ static bool prepare_report(unsigned long *flags, const volatile void *ptr,
 
 		other_info.ptr			= ptr;
 		other_info.size			= size;
-		other_info.is_write		= is_write;
+		other_info.access_type		= access_type;
 		other_info.task_pid		= in_task() ? task_pid_nr(current) : -1;
 		other_info.cpu_id		= cpu_id;
 		other_info.num_stack_entries	= stack_trace_save(other_info.stack_entries, NUM_STACK_ENTRIES, 1);
@@ -302,14 +315,14 @@ static bool prepare_report(unsigned long *flags, const volatile void *ptr,
 	goto retry;
 }
 
-void kcsan_report(const volatile void *ptr, size_t size, bool is_write,
+void kcsan_report(const volatile void *ptr, size_t size, int access_type,
 		  bool value_change, int cpu_id, enum kcsan_report_type type)
 {
 	unsigned long flags = 0;
 
 	kcsan_disable_current();
-	if (prepare_report(&flags, ptr, size, is_write, cpu_id, type)) {
-		if (print_report(ptr, size, is_write, value_change, cpu_id, type) && panic_on_warn)
+	if (prepare_report(&flags, ptr, size, access_type, cpu_id, type)) {
+		if (print_report(ptr, size, access_type, value_change, cpu_id, type) && panic_on_warn)
 			panic("panic_on_warn set ...\n");
 
 		release_report(&flags, type);
-- 
2.9.5


* [PATCH kcsan 03/32] kcsan: Rate-limit reporting per data races
From: paulmck @ 2020-03-09 19:03 UTC (permalink / raw)
  To: linux-kernel, kasan-dev, kernel-team, mingo
  Cc: elver, andreyknvl, glider, dvyukov, cai, boqun.feng, Paul E . McKenney

From: Marco Elver <elver@google.com>

KCSAN data-race reports can occur quite frequently, so much so as
to render the system useless.  This commit therefore adds support for
time-based rate-limiting of KCSAN reports, with the time interval specified
by a new KCSAN_REPORT_ONCE_IN_MS Kconfig option.  The default is 3000
milliseconds, also known as three seconds.

Because KCSAN must detect data races in allocators and in other contexts
where use of allocation is ill-advised, a fixed-size array is used to
buffer reports during each reporting interval.  To reduce the number of
reports lost due to array overflow, this commit stores only one instance
of duplicate reports, which has the benefit of further reducing KCSAN's
console output rate.

Reported-by: Qian Cai <cai@lca.pw>
Suggested-by: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
---
 kernel/kcsan/report.c | 110 +++++++++++++++++++++++++++++++++++++++++++++-----
 lib/Kconfig.kcsan     |  10 +++++
 2 files changed, 110 insertions(+), 10 deletions(-)

diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c
index 9f503ca..b5b4fee 100644
--- a/kernel/kcsan/report.c
+++ b/kernel/kcsan/report.c
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 
+#include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/preempt.h>
 #include <linux/printk.h>
@@ -32,12 +33,99 @@ static struct {
 } other_info = { .ptr = NULL };
 
 /*
+ * Information about reported data races; used to rate limit reporting.
+ */
+struct report_time {
+	/*
+	 * The last time the data race was reported.
+	 */
+	unsigned long time;
+
+	/*
+	 * The frames of the 2 threads; if only 1 thread is known, one frame
+	 * will be 0.
+	 */
+	unsigned long frame1;
+	unsigned long frame2;
+};
+
+/*
+ * Since we also want to be able to debug allocators with KCSAN, to avoid
+ * deadlock, report_times cannot be dynamically resized with krealloc in
+ * rate_limit_report.
+ *
+ * Therefore, we use a fixed-size array, which at most will occupy a page. This
+ * still adequately rate limits reports, assuming that a) number of unique data
+ * races is not excessive, and b) occurrence of unique data races within the
+ * same time window is limited.
+ */
+#define REPORT_TIMES_MAX (PAGE_SIZE / sizeof(struct report_time))
+#define REPORT_TIMES_SIZE                                                      \
+	(CONFIG_KCSAN_REPORT_ONCE_IN_MS > REPORT_TIMES_MAX ?                   \
+		 REPORT_TIMES_MAX :                                            \
+		 CONFIG_KCSAN_REPORT_ONCE_IN_MS)
+static struct report_time report_times[REPORT_TIMES_SIZE];
+
+/*
  * This spinlock protects reporting and other_info, since other_info is usually
  * required when reporting.
  */
 static DEFINE_SPINLOCK(report_lock);
 
 /*
+ * Checks if the data race identified by thread frames frame1 and frame2 has
+ * been reported since (now - KCSAN_REPORT_ONCE_IN_MS).
+ */
+static bool rate_limit_report(unsigned long frame1, unsigned long frame2)
+{
+	struct report_time *use_entry = &report_times[0];
+	unsigned long invalid_before;
+	int i;
+
+	BUILD_BUG_ON(CONFIG_KCSAN_REPORT_ONCE_IN_MS != 0 && REPORT_TIMES_SIZE == 0);
+
+	if (CONFIG_KCSAN_REPORT_ONCE_IN_MS == 0)
+		return false;
+
+	invalid_before = jiffies - msecs_to_jiffies(CONFIG_KCSAN_REPORT_ONCE_IN_MS);
+
+	/* Check if a matching data race report exists. */
+	for (i = 0; i < REPORT_TIMES_SIZE; ++i) {
+		struct report_time *rt = &report_times[i];
+
+		/*
+		 * Must always select an entry for use to store info as we
+		 * cannot resize report_times; at the end of the scan, use_entry
+		 * will be the oldest entry, which ideally also happened before
+		 * KCSAN_REPORT_ONCE_IN_MS ago.
+		 */
+		if (time_before(rt->time, use_entry->time))
+			use_entry = rt;
+
+		/*
+		 * Initially, no need to check any further as this entry as well
+		 * as following entries have never been used.
+		 */
+		if (rt->time == 0)
+			break;
+
+		/* Check if entry expired. */
+		if (time_before(rt->time, invalid_before))
+			continue; /* before KCSAN_REPORT_ONCE_IN_MS ago */
+
+		/* Reported recently, check if data race matches. */
+		if ((rt->frame1 == frame1 && rt->frame2 == frame2) ||
+		    (rt->frame1 == frame2 && rt->frame2 == frame1))
+			return true;
+	}
+
+	use_entry->time = jiffies;
+	use_entry->frame1 = frame1;
+	use_entry->frame2 = frame2;
+	return false;
+}
+
+/*
  * Special rules to skip reporting.
  */
 static bool
@@ -132,7 +220,9 @@ static bool print_report(const volatile void *ptr, size_t size, int access_type,
 	unsigned long stack_entries[NUM_STACK_ENTRIES] = { 0 };
 	int num_stack_entries = stack_trace_save(stack_entries, NUM_STACK_ENTRIES, 1);
 	int skipnr = get_stack_skipnr(stack_entries, num_stack_entries);
-	int other_skipnr;
+	unsigned long this_frame = stack_entries[skipnr];
+	unsigned long other_frame = 0;
+	int other_skipnr = 0; /* silence uninit warnings */
 
 	/*
 	 * Must check report filter rules before starting to print.
@@ -143,34 +233,34 @@ static bool print_report(const volatile void *ptr, size_t size, int access_type,
 	if (type == KCSAN_REPORT_RACE_SIGNAL) {
 		other_skipnr = get_stack_skipnr(other_info.stack_entries,
 						other_info.num_stack_entries);
+		other_frame = other_info.stack_entries[other_skipnr];
 
 		/* @value_change is only known for the other thread */
-		if (skip_report(other_info.access_type, value_change,
-				other_info.stack_entries[other_skipnr]))
+		if (skip_report(other_info.access_type, value_change, other_frame))
 			return false;
 	}
 
+	if (rate_limit_report(this_frame, other_frame))
+		return false;
+
 	/* Print report header. */
 	pr_err("==================================================================\n");
 	switch (type) {
 	case KCSAN_REPORT_RACE_SIGNAL: {
-		void *this_fn = (void *)stack_entries[skipnr];
-		void *other_fn = (void *)other_info.stack_entries[other_skipnr];
 		int cmp;
 
 		/*
 		 * Order functions lexographically for consistent bug titles.
 		 * Do not print offset of functions to keep title short.
 		 */
-		cmp = sym_strcmp(other_fn, this_fn);
+		cmp = sym_strcmp((void *)other_frame, (void *)this_frame);
 		pr_err("BUG: KCSAN: data-race in %ps / %ps\n",
-		       cmp < 0 ? other_fn : this_fn,
-		       cmp < 0 ? this_fn : other_fn);
+		       (void *)(cmp < 0 ? other_frame : this_frame),
+		       (void *)(cmp < 0 ? this_frame : other_frame));
 	} break;
 
 	case KCSAN_REPORT_RACE_UNKNOWN_ORIGIN:
-		pr_err("BUG: KCSAN: data-race in %pS\n",
-		       (void *)stack_entries[skipnr]);
+		pr_err("BUG: KCSAN: data-race in %pS\n", (void *)this_frame);
 		break;
 
 	default:
diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan
index 3f78b14..3552990 100644
--- a/lib/Kconfig.kcsan
+++ b/lib/Kconfig.kcsan
@@ -81,6 +81,16 @@ config KCSAN_SKIP_WATCH_RANDOMIZE
 	  KCSAN_WATCH_SKIP. If false, the chosen value is always
 	  KCSAN_WATCH_SKIP.
 
+config KCSAN_REPORT_ONCE_IN_MS
+	int "Duration in milliseconds, in which any given data race is only reported once"
+	default 3000
+	help
+	  Any given data race is only reported once in the defined time window.
+	  Different data races may still generate reports within a duration
+	  that is smaller than the duration defined here. This allows rate
+	  limiting reporting to avoid flooding the console with reports.
+	  Setting this to 0 disables rate limiting.
+
 # Note that, while some of the below options could be turned into boot
 # parameters, to optimize for the common use-case, we avoid this because: (a)
 # it would impact performance (and we want to avoid static branch for all
-- 
2.9.5


* [PATCH kcsan 04/32] kcsan: Make KCSAN compatible with lockdep
From: paulmck @ 2020-03-09 19:03 UTC (permalink / raw)
  To: linux-kernel, kasan-dev, kernel-team, mingo
  Cc: elver, andreyknvl, glider, dvyukov, cai, boqun.feng, Paul E . McKenney

From: Marco Elver <elver@google.com>

We must avoid any recursion into lockdep if KCSAN is enabled on utilities
used by lockdep. One manifestation of this is corruption of lockdep's
IRQ trace state (if TRACE_IRQFLAGS), resulting in spurious warnings
(see below).  This commit fixes this by:

1. Using raw_local_irq{save,restore} in kcsan_setup_watchpoint().
2. Disabling lockdep in kcsan_report().
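
Condensed into a sketch (the actual hunks are in the diff below):

  /* In kcsan_setup_watchpoint(), point 1: */
  unsigned long irq_flags;

  raw_local_irq_save(irq_flags);
  /* ... insert and check the watchpoint ... */
  raw_local_irq_restore(irq_flags);

  /* In kcsan_report(), point 2: */
  lockdep_off();
  /* ... prepare, print, and release the report ... */
  lockdep_on();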

Tested with:

  CONFIG_LOCKDEP=y
  CONFIG_DEBUG_LOCKDEP=y
  CONFIG_TRACE_IRQFLAGS=y

This fix eliminates spurious warnings such as the following one:

    WARNING: CPU: 0 PID: 2 at kernel/locking/lockdep.c:4406 check_flags.part.0+0x101/0x220
    Modules linked in:
    CPU: 0 PID: 2 Comm: kthreadd Not tainted 5.5.0-rc1+ #11
    Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.12.0-1 04/01/2014
    RIP: 0010:check_flags.part.0+0x101/0x220
    <snip>
    Call Trace:
     lock_is_held_type+0x69/0x150
     freezer_fork+0x20b/0x370
     cgroup_post_fork+0x2c9/0x5c0
     copy_process+0x2675/0x3b40
     _do_fork+0xbe/0xa30
     ? _raw_spin_unlock_irqrestore+0x40/0x50
     ? match_held_lock+0x56/0x250
     ? kthread_park+0xf0/0xf0
     kernel_thread+0xa6/0xd0
     ? kthread_park+0xf0/0xf0
     kthreadd+0x321/0x3d0
     ? kthread_create_on_cpu+0x130/0x130
     ret_from_fork+0x3a/0x50
    irq event stamp: 64
    hardirqs last  enabled at (63): [<ffffffff9a7995d0>] _raw_spin_unlock_irqrestore+0x40/0x50
    hardirqs last disabled at (64): [<ffffffff992a96d2>] kcsan_setup_watchpoint+0x92/0x460
    softirqs last  enabled at (32): [<ffffffff990489b8>] fpu__copy+0xe8/0x470
    softirqs last disabled at (30): [<ffffffff99048939>] fpu__copy+0x69/0x470

Reported-by: Qian Cai <cai@lca.pw>
Signed-off-by: Marco Elver <elver@google.com>
Acked-by: Alexander Potapenko <glider@google.com>
Tested-by: Qian Cai <cai@lca.pw>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
---
 kernel/kcsan/core.c     |  6 ++++--
 kernel/kcsan/report.c   | 11 +++++++++++
 kernel/locking/Makefile |  3 +++
 3 files changed, 18 insertions(+), 2 deletions(-)

diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index 87bf857..64b30f7 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -336,8 +336,10 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
 	 *      CPU-local data accesses), it makes more sense (from a data race
 	 *      detection point of view) to simply disable preemptions to ensure
 	 *      as many tasks as possible run on other CPUs.
+	 *
+	 * Use raw versions, to avoid lockdep recursion via IRQ flags tracing.
 	 */
-	local_irq_save(irq_flags);
+	raw_local_irq_save(irq_flags);
 
 	watchpoint = insert_watchpoint((unsigned long)ptr, size, is_write);
 	if (watchpoint == NULL) {
@@ -429,7 +431,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
 
 	kcsan_counter_dec(KCSAN_COUNTER_USED_WATCHPOINTS);
 out_unlock:
-	local_irq_restore(irq_flags);
+	raw_local_irq_restore(irq_flags);
 out:
 	user_access_restore(ua_flags);
 }
diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c
index b5b4fee..33bdf8b 100644
--- a/kernel/kcsan/report.c
+++ b/kernel/kcsan/report.c
@@ -2,6 +2,7 @@
 
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
+#include <linux/lockdep.h>
 #include <linux/preempt.h>
 #include <linux/printk.h>
 #include <linux/sched.h>
@@ -410,6 +411,14 @@ void kcsan_report(const volatile void *ptr, size_t size, int access_type,
 {
 	unsigned long flags = 0;
 
+	/*
+	 * With TRACE_IRQFLAGS, lockdep's IRQ trace state becomes corrupted if
+	 * we do not turn off lockdep here; this could happen due to recursion
+	 * into lockdep via KCSAN if we detect a data race in utilities used by
+	 * lockdep.
+	 */
+	lockdep_off();
+
 	kcsan_disable_current();
 	if (prepare_report(&flags, ptr, size, access_type, cpu_id, type)) {
 		if (print_report(ptr, size, access_type, value_change, cpu_id, type) && panic_on_warn)
@@ -418,4 +427,6 @@ void kcsan_report(const volatile void *ptr, size_t size, int access_type,
 		release_report(&flags, type);
 	}
 	kcsan_enable_current();
+
+	lockdep_on();
 }
diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile
index 45452fa..6d11cfb 100644
--- a/kernel/locking/Makefile
+++ b/kernel/locking/Makefile
@@ -5,6 +5,9 @@ KCOV_INSTRUMENT		:= n
 
 obj-y += mutex.o semaphore.o rwsem.o percpu-rwsem.o
 
+# Avoid recursion lockdep -> KCSAN -> ... -> lockdep.
+KCSAN_SANITIZE_lockdep.o := n
+
 ifdef CONFIG_FUNCTION_TRACER
 CFLAGS_REMOVE_lockdep.o = $(CC_FLAGS_FTRACE)
 CFLAGS_REMOVE_lockdep_proc.o = $(CC_FLAGS_FTRACE)
-- 
2.9.5


* [PATCH kcsan 05/32] kcsan: Address missing case with KCSAN_REPORT_VALUE_CHANGE_ONLY
From: paulmck @ 2020-03-09 19:03 UTC (permalink / raw)
  To: linux-kernel, kasan-dev, kernel-team, mingo
  Cc: elver, andreyknvl, glider, dvyukov, cai, boqun.feng, Paul E . McKenney

From: Marco Elver <elver@google.com>

Even with KCSAN_REPORT_VALUE_CHANGE_ONLY, KCSAN still reports data
races between reads and watchpointed writes, even if the writes wrote
values already present.  This commit causes KCSAN to unconditionally
skip reporting in this case.

Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
---
 kernel/kcsan/report.c | 27 ++++++++++++++++++++-------
 1 file changed, 20 insertions(+), 7 deletions(-)

diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c
index 33bdf8b..7cd3428 100644
--- a/kernel/kcsan/report.c
+++ b/kernel/kcsan/report.c
@@ -130,12 +130,25 @@ static bool rate_limit_report(unsigned long frame1, unsigned long frame2)
  * Special rules to skip reporting.
  */
 static bool
-skip_report(int access_type, bool value_change, unsigned long top_frame)
+skip_report(bool value_change, unsigned long top_frame)
 {
-	const bool is_write = (access_type & KCSAN_ACCESS_WRITE) != 0;
-
-	if (IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY) && is_write &&
-	    !value_change) {
+	/*
+	 * The first call to skip_report always has value_change==true, since we
+	 * cannot know the value written of an instrumented access. For the 2nd
+	 * call there are 6 cases with CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY:
+	 *
+	 * 1. read watchpoint, conflicting write (value_change==true): report;
+	 * 2. read watchpoint, conflicting write (value_change==false): skip;
+	 * 3. write watchpoint, conflicting write (value_change==true): report;
+	 * 4. write watchpoint, conflicting write (value_change==false): skip;
+	 * 5. write watchpoint, conflicting read (value_change==false): skip;
+	 * 6. write watchpoint, conflicting read (value_change==true): impossible;
+	 *
+	 * Cases 1-4 are intuitive and expected; case 5 ensures we do not report
+	 * data races where the write may have rewritten the same value; and
+	 * case 6 is simply impossible.
+	 */
+	if (IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY) && !value_change) {
 		/*
 		 * The access is a write, but the data value did not change.
 		 *
@@ -228,7 +241,7 @@ static bool print_report(const volatile void *ptr, size_t size, int access_type,
 	/*
 	 * Must check report filter rules before starting to print.
 	 */
-	if (skip_report(access_type, true, stack_entries[skipnr]))
+	if (skip_report(true, stack_entries[skipnr]))
 		return false;
 
 	if (type == KCSAN_REPORT_RACE_SIGNAL) {
@@ -237,7 +250,7 @@ static bool print_report(const volatile void *ptr, size_t size, int access_type,
 		other_frame = other_info.stack_entries[other_skipnr];
 
 		/* @value_change is only known for the other thread */
-		if (skip_report(other_info.access_type, value_change, other_frame))
+		if (skip_report(value_change, other_frame))
 			return false;
 	}
 
-- 
2.9.5


* [PATCH kcsan 06/32] include/linux: Add instrumented.h infrastructure
From: paulmck @ 2020-03-09 19:03 UTC (permalink / raw)
  To: linux-kernel, kasan-dev, kernel-team, mingo
  Cc: elver, andreyknvl, glider, dvyukov, cai, boqun.feng, Paul E . McKenney

From: Marco Elver <elver@google.com>

This adds instrumented.h, which provides generic wrappers for memory
access instrumentation that the compiler cannot emit for various
sanitizers. Currently this unifies KASAN and KCSAN instrumentation. In
future this will also include KMSAN instrumentation.

Note that copy_{to,from}_user should use special instrumentation, since
we should be able to instrument both source and destination memory
accesses if both are kernel memory.

The current patch only instruments the memory access whose address is
always in kernel space; however, both may in fact be kernel addresses
when a compat syscall passes an argument allocated in the kernel to a
real syscall. In a future change, both KASAN and KCSAN should check both
addresses in such cases, and KMSAN will also make use of both addresses.
[It made more sense to provide the completed function signature now,
rather than updating it and changing all call sites again later.]
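
As a rough usage sketch (not taken from this series; example_copy_to_user()
is a made-up caller, with raw_copy_to_user() standing in for the arch copy
primitive):

  #include <linux/instrumented.h>
  #include <linux/uaccess.h>

  static unsigned long example_copy_to_user(void __user *to,
  					    const void *from, unsigned long n)
  {
  	/* Check only the kernel-side source address, per the note above. */
  	instrument_copy_to_user(to, from, n);
  	return raw_copy_to_user(to, from, n);
  }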

Suggested-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Marco Elver <elver@google.com>
Acked-by: Alexander Potapenko <glider@google.com>
Reviewed-by: Dmitry Vyukov <dvyukov@google.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
---
 include/linux/instrumented.h | 109 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 109 insertions(+)
 create mode 100644 include/linux/instrumented.h

diff --git a/include/linux/instrumented.h b/include/linux/instrumented.h
new file mode 100644
index 0000000..43e6ea5
--- /dev/null
+++ b/include/linux/instrumented.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * This header provides generic wrappers for memory access instrumentation that
+ * the compiler cannot emit for: KASAN, KCSAN.
+ */
+#ifndef _LINUX_INSTRUMENTED_H
+#define _LINUX_INSTRUMENTED_H
+
+#include <linux/compiler.h>
+#include <linux/kasan-checks.h>
+#include <linux/kcsan-checks.h>
+#include <linux/types.h>
+
+/**
+ * instrument_read - instrument regular read access
+ *
+ * Instrument a regular read access. The instrumentation should be inserted
+ * before the actual read happens.
+ *
+ * @ptr address of access
+ * @size size of access
+ */
+static __always_inline void instrument_read(const volatile void *v, size_t size)
+{
+	kasan_check_read(v, size);
+	kcsan_check_read(v, size);
+}
+
+/**
+ * instrument_write - instrument regular write access
+ *
+ * Instrument a regular write access. The instrumentation should be inserted
+ * before the actual write happens.
+ *
+ * @ptr address of access
+ * @size size of access
+ */
+static __always_inline void instrument_write(const volatile void *v, size_t size)
+{
+	kasan_check_write(v, size);
+	kcsan_check_write(v, size);
+}
+
+/**
+ * instrument_atomic_read - instrument atomic read access
+ *
+ * Instrument an atomic read access. The instrumentation should be inserted
+ * before the actual read happens.
+ *
+ * @ptr address of access
+ * @size size of access
+ */
+static __always_inline void instrument_atomic_read(const volatile void *v, size_t size)
+{
+	kasan_check_read(v, size);
+	kcsan_check_atomic_read(v, size);
+}
+
+/**
+ * instrument_atomic_write - instrument atomic write access
+ *
+ * Instrument an atomic write access. The instrumentation should be inserted
+ * before the actual write happens.
+ *
+ * @ptr address of access
+ * @size size of access
+ */
+static __always_inline void instrument_atomic_write(const volatile void *v, size_t size)
+{
+	kasan_check_write(v, size);
+	kcsan_check_atomic_write(v, size);
+}
+
+/**
+ * instrument_copy_to_user - instrument reads of copy_to_user
+ *
+ * Instrument reads from kernel memory, that are due to copy_to_user (and
+ * variants). The instrumentation must be inserted before the accesses.
+ *
+ * @to destination address
+ * @from source address
+ * @n number of bytes to copy
+ */
+static __always_inline void
+instrument_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	kasan_check_read(from, n);
+	kcsan_check_read(from, n);
+}
+
+/**
+ * instrument_copy_from_user - instrument writes of copy_from_user
+ *
+ * Instrument writes to kernel memory, that are due to copy_from_user (and
+ * variants). The instrumentation should be inserted before the accesses.
+ *
+ * @to destination address
+ * @from source address
+ * @n number of bytes to copy
+ */
+static __always_inline void
+instrument_copy_from_user(const void *to, const void __user *from, unsigned long n)
+{
+	kasan_check_write(to, n);
+	kcsan_check_write(to, n);
+}
+
+#endif /* _LINUX_INSTRUMENTED_H */
-- 
2.9.5


* [PATCH kcsan 07/32] asm-generic, atomic-instrumented: Use generic instrumented.h
From: paulmck @ 2020-03-09 19:03 UTC (permalink / raw)
  To: linux-kernel, kasan-dev, kernel-team, mingo
  Cc: elver, andreyknvl, glider, dvyukov, cai, boqun.feng, Paul E . McKenney

From: Marco Elver <elver@google.com>

This switches atomic-instrumented.h to use the generic instrumentation
wrappers provided by instrumented.h.

No functional change intended.

Suggested-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Marco Elver <elver@google.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
---
 include/asm-generic/atomic-instrumented.h | 395 +++++++++++++++---------------
 scripts/atomic/gen-atomic-instrumented.sh |  19 +-
 2 files changed, 194 insertions(+), 220 deletions(-)

diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h
index 63869de..379986e 100644
--- a/include/asm-generic/atomic-instrumented.h
+++ b/include/asm-generic/atomic-instrumented.h
@@ -19,25 +19,12 @@
 
 #include <linux/build_bug.h>
 #include <linux/compiler.h>
-#include <linux/kasan-checks.h>
-#include <linux/kcsan-checks.h>
-
-static __always_inline void __atomic_check_read(const volatile void *v, size_t size)
-{
-	kasan_check_read(v, size);
-	kcsan_check_atomic_read(v, size);
-}
-
-static __always_inline void __atomic_check_write(const volatile void *v, size_t size)
-{
-	kasan_check_write(v, size);
-	kcsan_check_atomic_write(v, size);
-}
+#include <linux/instrumented.h>
 
 static __always_inline int
 atomic_read(const atomic_t *v)
 {
-	__atomic_check_read(v, sizeof(*v));
+	instrument_atomic_read(v, sizeof(*v));
 	return arch_atomic_read(v);
 }
 #define atomic_read atomic_read
@@ -46,7 +33,7 @@ atomic_read(const atomic_t *v)
 static __always_inline int
 atomic_read_acquire(const atomic_t *v)
 {
-	__atomic_check_read(v, sizeof(*v));
+	instrument_atomic_read(v, sizeof(*v));
 	return arch_atomic_read_acquire(v);
 }
 #define atomic_read_acquire atomic_read_acquire
@@ -55,7 +42,7 @@ atomic_read_acquire(const atomic_t *v)
 static __always_inline void
 atomic_set(atomic_t *v, int i)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	arch_atomic_set(v, i);
 }
 #define atomic_set atomic_set
@@ -64,7 +51,7 @@ atomic_set(atomic_t *v, int i)
 static __always_inline void
 atomic_set_release(atomic_t *v, int i)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	arch_atomic_set_release(v, i);
 }
 #define atomic_set_release atomic_set_release
@@ -73,7 +60,7 @@ atomic_set_release(atomic_t *v, int i)
 static __always_inline void
 atomic_add(int i, atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	arch_atomic_add(i, v);
 }
 #define atomic_add atomic_add
@@ -82,7 +69,7 @@ atomic_add(int i, atomic_t *v)
 static __always_inline int
 atomic_add_return(int i, atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_add_return(i, v);
 }
 #define atomic_add_return atomic_add_return
@@ -92,7 +79,7 @@ atomic_add_return(int i, atomic_t *v)
 static __always_inline int
 atomic_add_return_acquire(int i, atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_add_return_acquire(i, v);
 }
 #define atomic_add_return_acquire atomic_add_return_acquire
@@ -102,7 +89,7 @@ atomic_add_return_acquire(int i, atomic_t *v)
 static __always_inline int
 atomic_add_return_release(int i, atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_add_return_release(i, v);
 }
 #define atomic_add_return_release atomic_add_return_release
@@ -112,7 +99,7 @@ atomic_add_return_release(int i, atomic_t *v)
 static __always_inline int
 atomic_add_return_relaxed(int i, atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_add_return_relaxed(i, v);
 }
 #define atomic_add_return_relaxed atomic_add_return_relaxed
@@ -122,7 +109,7 @@ atomic_add_return_relaxed(int i, atomic_t *v)
 static __always_inline int
 atomic_fetch_add(int i, atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_fetch_add(i, v);
 }
 #define atomic_fetch_add atomic_fetch_add
@@ -132,7 +119,7 @@ atomic_fetch_add(int i, atomic_t *v)
 static __always_inline int
 atomic_fetch_add_acquire(int i, atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_fetch_add_acquire(i, v);
 }
 #define atomic_fetch_add_acquire atomic_fetch_add_acquire
@@ -142,7 +129,7 @@ atomic_fetch_add_acquire(int i, atomic_t *v)
 static __always_inline int
 atomic_fetch_add_release(int i, atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_fetch_add_release(i, v);
 }
 #define atomic_fetch_add_release atomic_fetch_add_release
@@ -152,7 +139,7 @@ atomic_fetch_add_release(int i, atomic_t *v)
 static __always_inline int
 atomic_fetch_add_relaxed(int i, atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_fetch_add_relaxed(i, v);
 }
 #define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
@@ -161,7 +148,7 @@ atomic_fetch_add_relaxed(int i, atomic_t *v)
 static __always_inline void
 atomic_sub(int i, atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	arch_atomic_sub(i, v);
 }
 #define atomic_sub atomic_sub
@@ -170,7 +157,7 @@ atomic_sub(int i, atomic_t *v)
 static __always_inline int
 atomic_sub_return(int i, atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_sub_return(i, v);
 }
 #define atomic_sub_return atomic_sub_return
@@ -180,7 +167,7 @@ atomic_sub_return(int i, atomic_t *v)
 static __always_inline int
 atomic_sub_return_acquire(int i, atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_sub_return_acquire(i, v);
 }
 #define atomic_sub_return_acquire atomic_sub_return_acquire
@@ -190,7 +177,7 @@ atomic_sub_return_acquire(int i, atomic_t *v)
 static __always_inline int
 atomic_sub_return_release(int i, atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_sub_return_release(i, v);
 }
 #define atomic_sub_return_release atomic_sub_return_release
@@ -200,7 +187,7 @@ atomic_sub_return_release(int i, atomic_t *v)
 static __always_inline int
 atomic_sub_return_relaxed(int i, atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_sub_return_relaxed(i, v);
 }
 #define atomic_sub_return_relaxed atomic_sub_return_relaxed
@@ -210,7 +197,7 @@ atomic_sub_return_relaxed(int i, atomic_t *v)
 static __always_inline int
 atomic_fetch_sub(int i, atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_fetch_sub(i, v);
 }
 #define atomic_fetch_sub atomic_fetch_sub
@@ -220,7 +207,7 @@ atomic_fetch_sub(int i, atomic_t *v)
 static __always_inline int
 atomic_fetch_sub_acquire(int i, atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_fetch_sub_acquire(i, v);
 }
 #define atomic_fetch_sub_acquire atomic_fetch_sub_acquire
@@ -230,7 +217,7 @@ atomic_fetch_sub_acquire(int i, atomic_t *v)
 static __always_inline int
 atomic_fetch_sub_release(int i, atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_fetch_sub_release(i, v);
 }
 #define atomic_fetch_sub_release atomic_fetch_sub_release
@@ -240,7 +227,7 @@ atomic_fetch_sub_release(int i, atomic_t *v)
 static __always_inline int
 atomic_fetch_sub_relaxed(int i, atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_fetch_sub_relaxed(i, v);
 }
 #define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
@@ -250,7 +237,7 @@ atomic_fetch_sub_relaxed(int i, atomic_t *v)
 static __always_inline void
 atomic_inc(atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	arch_atomic_inc(v);
 }
 #define atomic_inc atomic_inc
@@ -260,7 +247,7 @@ atomic_inc(atomic_t *v)
 static __always_inline int
 atomic_inc_return(atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_inc_return(v);
 }
 #define atomic_inc_return atomic_inc_return
@@ -270,7 +257,7 @@ atomic_inc_return(atomic_t *v)
 static __always_inline int
 atomic_inc_return_acquire(atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_inc_return_acquire(v);
 }
 #define atomic_inc_return_acquire atomic_inc_return_acquire
@@ -280,7 +267,7 @@ atomic_inc_return_acquire(atomic_t *v)
 static __always_inline int
 atomic_inc_return_release(atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_inc_return_release(v);
 }
 #define atomic_inc_return_release atomic_inc_return_release
@@ -290,7 +277,7 @@ atomic_inc_return_release(atomic_t *v)
 static __always_inline int
 atomic_inc_return_relaxed(atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_inc_return_relaxed(v);
 }
 #define atomic_inc_return_relaxed atomic_inc_return_relaxed
@@ -300,7 +287,7 @@ atomic_inc_return_relaxed(atomic_t *v)
 static __always_inline int
 atomic_fetch_inc(atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_fetch_inc(v);
 }
 #define atomic_fetch_inc atomic_fetch_inc
@@ -310,7 +297,7 @@ atomic_fetch_inc(atomic_t *v)
 static __always_inline int
 atomic_fetch_inc_acquire(atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_fetch_inc_acquire(v);
 }
 #define atomic_fetch_inc_acquire atomic_fetch_inc_acquire
@@ -320,7 +307,7 @@ atomic_fetch_inc_acquire(atomic_t *v)
 static __always_inline int
 atomic_fetch_inc_release(atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_fetch_inc_release(v);
 }
 #define atomic_fetch_inc_release atomic_fetch_inc_release
@@ -330,7 +317,7 @@ atomic_fetch_inc_release(atomic_t *v)
 static __always_inline int
 atomic_fetch_inc_relaxed(atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_fetch_inc_relaxed(v);
 }
 #define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed
@@ -340,7 +327,7 @@ atomic_fetch_inc_relaxed(atomic_t *v)
 static __always_inline void
 atomic_dec(atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	arch_atomic_dec(v);
 }
 #define atomic_dec atomic_dec
@@ -350,7 +337,7 @@ atomic_dec(atomic_t *v)
 static __always_inline int
 atomic_dec_return(atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_dec_return(v);
 }
 #define atomic_dec_return atomic_dec_return
@@ -360,7 +347,7 @@ atomic_dec_return(atomic_t *v)
 static __always_inline int
 atomic_dec_return_acquire(atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_dec_return_acquire(v);
 }
 #define atomic_dec_return_acquire atomic_dec_return_acquire
@@ -370,7 +357,7 @@ atomic_dec_return_acquire(atomic_t *v)
 static __always_inline int
 atomic_dec_return_release(atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_dec_return_release(v);
 }
 #define atomic_dec_return_release atomic_dec_return_release
@@ -380,7 +367,7 @@ atomic_dec_return_release(atomic_t *v)
 static __always_inline int
 atomic_dec_return_relaxed(atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_dec_return_relaxed(v);
 }
 #define atomic_dec_return_relaxed atomic_dec_return_relaxed
@@ -390,7 +377,7 @@ atomic_dec_return_relaxed(atomic_t *v)
 static __always_inline int
 atomic_fetch_dec(atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_fetch_dec(v);
 }
 #define atomic_fetch_dec atomic_fetch_dec
@@ -400,7 +387,7 @@ atomic_fetch_dec(atomic_t *v)
 static __always_inline int
 atomic_fetch_dec_acquire(atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_fetch_dec_acquire(v);
 }
 #define atomic_fetch_dec_acquire atomic_fetch_dec_acquire
@@ -410,7 +397,7 @@ atomic_fetch_dec_acquire(atomic_t *v)
 static __always_inline int
 atomic_fetch_dec_release(atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_fetch_dec_release(v);
 }
 #define atomic_fetch_dec_release atomic_fetch_dec_release
@@ -420,7 +407,7 @@ atomic_fetch_dec_release(atomic_t *v)
 static __always_inline int
 atomic_fetch_dec_relaxed(atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_fetch_dec_relaxed(v);
 }
 #define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed
@@ -429,7 +416,7 @@ atomic_fetch_dec_relaxed(atomic_t *v)
 static __always_inline void
 atomic_and(int i, atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	arch_atomic_and(i, v);
 }
 #define atomic_and atomic_and
@@ -438,7 +425,7 @@ atomic_and(int i, atomic_t *v)
 static __always_inline int
 atomic_fetch_and(int i, atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_fetch_and(i, v);
 }
 #define atomic_fetch_and atomic_fetch_and
@@ -448,7 +435,7 @@ atomic_fetch_and(int i, atomic_t *v)
 static __always_inline int
 atomic_fetch_and_acquire(int i, atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_fetch_and_acquire(i, v);
 }
 #define atomic_fetch_and_acquire atomic_fetch_and_acquire
@@ -458,7 +445,7 @@ atomic_fetch_and_acquire(int i, atomic_t *v)
 static __always_inline int
 atomic_fetch_and_release(int i, atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_fetch_and_release(i, v);
 }
 #define atomic_fetch_and_release atomic_fetch_and_release
@@ -468,7 +455,7 @@ atomic_fetch_and_release(int i, atomic_t *v)
 static __always_inline int
 atomic_fetch_and_relaxed(int i, atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_fetch_and_relaxed(i, v);
 }
 #define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
@@ -478,7 +465,7 @@ atomic_fetch_and_relaxed(int i, atomic_t *v)
 static __always_inline void
 atomic_andnot(int i, atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	arch_atomic_andnot(i, v);
 }
 #define atomic_andnot atomic_andnot
@@ -488,7 +475,7 @@ atomic_andnot(int i, atomic_t *v)
 static __always_inline int
 atomic_fetch_andnot(int i, atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_fetch_andnot(i, v);
 }
 #define atomic_fetch_andnot atomic_fetch_andnot
@@ -498,7 +485,7 @@ atomic_fetch_andnot(int i, atomic_t *v)
 static __always_inline int
 atomic_fetch_andnot_acquire(int i, atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_fetch_andnot_acquire(i, v);
 }
 #define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire
@@ -508,7 +495,7 @@ atomic_fetch_andnot_acquire(int i, atomic_t *v)
 static __always_inline int
 atomic_fetch_andnot_release(int i, atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_fetch_andnot_release(i, v);
 }
 #define atomic_fetch_andnot_release atomic_fetch_andnot_release
@@ -518,7 +505,7 @@ atomic_fetch_andnot_release(int i, atomic_t *v)
 static __always_inline int
 atomic_fetch_andnot_relaxed(int i, atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_fetch_andnot_relaxed(i, v);
 }
 #define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
@@ -527,7 +514,7 @@ atomic_fetch_andnot_relaxed(int i, atomic_t *v)
 static __always_inline void
 atomic_or(int i, atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	arch_atomic_or(i, v);
 }
 #define atomic_or atomic_or
@@ -536,7 +523,7 @@ atomic_or(int i, atomic_t *v)
 static __always_inline int
 atomic_fetch_or(int i, atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_fetch_or(i, v);
 }
 #define atomic_fetch_or atomic_fetch_or
@@ -546,7 +533,7 @@ atomic_fetch_or(int i, atomic_t *v)
 static __always_inline int
 atomic_fetch_or_acquire(int i, atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_fetch_or_acquire(i, v);
 }
 #define atomic_fetch_or_acquire atomic_fetch_or_acquire
@@ -556,7 +543,7 @@ atomic_fetch_or_acquire(int i, atomic_t *v)
 static __always_inline int
 atomic_fetch_or_release(int i, atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_fetch_or_release(i, v);
 }
 #define atomic_fetch_or_release atomic_fetch_or_release
@@ -566,7 +553,7 @@ atomic_fetch_or_release(int i, atomic_t *v)
 static __always_inline int
 atomic_fetch_or_relaxed(int i, atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_fetch_or_relaxed(i, v);
 }
 #define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
@@ -575,7 +562,7 @@ atomic_fetch_or_relaxed(int i, atomic_t *v)
 static __always_inline void
 atomic_xor(int i, atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	arch_atomic_xor(i, v);
 }
 #define atomic_xor atomic_xor
@@ -584,7 +571,7 @@ atomic_xor(int i, atomic_t *v)
 static __always_inline int
 atomic_fetch_xor(int i, atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_fetch_xor(i, v);
 }
 #define atomic_fetch_xor atomic_fetch_xor
@@ -594,7 +581,7 @@ atomic_fetch_xor(int i, atomic_t *v)
 static __always_inline int
 atomic_fetch_xor_acquire(int i, atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_fetch_xor_acquire(i, v);
 }
 #define atomic_fetch_xor_acquire atomic_fetch_xor_acquire
@@ -604,7 +591,7 @@ atomic_fetch_xor_acquire(int i, atomic_t *v)
 static __always_inline int
 atomic_fetch_xor_release(int i, atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_fetch_xor_release(i, v);
 }
 #define atomic_fetch_xor_release atomic_fetch_xor_release
@@ -614,7 +601,7 @@ atomic_fetch_xor_release(int i, atomic_t *v)
 static __always_inline int
 atomic_fetch_xor_relaxed(int i, atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_fetch_xor_relaxed(i, v);
 }
 #define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
@@ -624,7 +611,7 @@ atomic_fetch_xor_relaxed(int i, atomic_t *v)
 static __always_inline int
 atomic_xchg(atomic_t *v, int i)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_xchg(v, i);
 }
 #define atomic_xchg atomic_xchg
@@ -634,7 +621,7 @@ atomic_xchg(atomic_t *v, int i)
 static __always_inline int
 atomic_xchg_acquire(atomic_t *v, int i)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_xchg_acquire(v, i);
 }
 #define atomic_xchg_acquire atomic_xchg_acquire
@@ -644,7 +631,7 @@ atomic_xchg_acquire(atomic_t *v, int i)
 static __always_inline int
 atomic_xchg_release(atomic_t *v, int i)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_xchg_release(v, i);
 }
 #define atomic_xchg_release atomic_xchg_release
@@ -654,7 +641,7 @@ atomic_xchg_release(atomic_t *v, int i)
 static __always_inline int
 atomic_xchg_relaxed(atomic_t *v, int i)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_xchg_relaxed(v, i);
 }
 #define atomic_xchg_relaxed atomic_xchg_relaxed
@@ -664,7 +651,7 @@ atomic_xchg_relaxed(atomic_t *v, int i)
 static __always_inline int
 atomic_cmpxchg(atomic_t *v, int old, int new)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_cmpxchg(v, old, new);
 }
 #define atomic_cmpxchg atomic_cmpxchg
@@ -674,7 +661,7 @@ atomic_cmpxchg(atomic_t *v, int old, int new)
 static __always_inline int
 atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_cmpxchg_acquire(v, old, new);
 }
 #define atomic_cmpxchg_acquire atomic_cmpxchg_acquire
@@ -684,7 +671,7 @@ atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
 static __always_inline int
 atomic_cmpxchg_release(atomic_t *v, int old, int new)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_cmpxchg_release(v, old, new);
 }
 #define atomic_cmpxchg_release atomic_cmpxchg_release
@@ -694,7 +681,7 @@ atomic_cmpxchg_release(atomic_t *v, int old, int new)
 static __always_inline int
 atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_cmpxchg_relaxed(v, old, new);
 }
 #define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
@@ -704,8 +691,8 @@ atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
 static __always_inline bool
 atomic_try_cmpxchg(atomic_t *v, int *old, int new)
 {
-	__atomic_check_write(v, sizeof(*v));
-	__atomic_check_write(old, sizeof(*old));
+	instrument_atomic_write(v, sizeof(*v));
+	instrument_atomic_write(old, sizeof(*old));
 	return arch_atomic_try_cmpxchg(v, old, new);
 }
 #define atomic_try_cmpxchg atomic_try_cmpxchg
@@ -715,8 +702,8 @@ atomic_try_cmpxchg(atomic_t *v, int *old, int new)
 static __always_inline bool
 atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
 {
-	__atomic_check_write(v, sizeof(*v));
-	__atomic_check_write(old, sizeof(*old));
+	instrument_atomic_write(v, sizeof(*v));
+	instrument_atomic_write(old, sizeof(*old));
 	return arch_atomic_try_cmpxchg_acquire(v, old, new);
 }
 #define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire
@@ -726,8 +713,8 @@ atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
 static __always_inline bool
 atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
 {
-	__atomic_check_write(v, sizeof(*v));
-	__atomic_check_write(old, sizeof(*old));
+	instrument_atomic_write(v, sizeof(*v));
+	instrument_atomic_write(old, sizeof(*old));
 	return arch_atomic_try_cmpxchg_release(v, old, new);
 }
 #define atomic_try_cmpxchg_release atomic_try_cmpxchg_release
@@ -737,8 +724,8 @@ atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
 static __always_inline bool
 atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
 {
-	__atomic_check_write(v, sizeof(*v));
-	__atomic_check_write(old, sizeof(*old));
+	instrument_atomic_write(v, sizeof(*v));
+	instrument_atomic_write(old, sizeof(*old));
 	return arch_atomic_try_cmpxchg_relaxed(v, old, new);
 }
 #define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed
@@ -748,7 +735,7 @@ atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
 static __always_inline bool
 atomic_sub_and_test(int i, atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_sub_and_test(i, v);
 }
 #define atomic_sub_and_test atomic_sub_and_test
@@ -758,7 +745,7 @@ atomic_sub_and_test(int i, atomic_t *v)
 static __always_inline bool
 atomic_dec_and_test(atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_dec_and_test(v);
 }
 #define atomic_dec_and_test atomic_dec_and_test
@@ -768,7 +755,7 @@ atomic_dec_and_test(atomic_t *v)
 static __always_inline bool
 atomic_inc_and_test(atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_inc_and_test(v);
 }
 #define atomic_inc_and_test atomic_inc_and_test
@@ -778,7 +765,7 @@ atomic_inc_and_test(atomic_t *v)
 static __always_inline bool
 atomic_add_negative(int i, atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_add_negative(i, v);
 }
 #define atomic_add_negative atomic_add_negative
@@ -788,7 +775,7 @@ atomic_add_negative(int i, atomic_t *v)
 static __always_inline int
 atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_fetch_add_unless(v, a, u);
 }
 #define atomic_fetch_add_unless atomic_fetch_add_unless
@@ -798,7 +785,7 @@ atomic_fetch_add_unless(atomic_t *v, int a, int u)
 static __always_inline bool
 atomic_add_unless(atomic_t *v, int a, int u)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_add_unless(v, a, u);
 }
 #define atomic_add_unless atomic_add_unless
@@ -808,7 +795,7 @@ atomic_add_unless(atomic_t *v, int a, int u)
 static __always_inline bool
 atomic_inc_not_zero(atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_inc_not_zero(v);
 }
 #define atomic_inc_not_zero atomic_inc_not_zero
@@ -818,7 +805,7 @@ atomic_inc_not_zero(atomic_t *v)
 static __always_inline bool
 atomic_inc_unless_negative(atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_inc_unless_negative(v);
 }
 #define atomic_inc_unless_negative atomic_inc_unless_negative
@@ -828,7 +815,7 @@ atomic_inc_unless_negative(atomic_t *v)
 static __always_inline bool
 atomic_dec_unless_positive(atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_dec_unless_positive(v);
 }
 #define atomic_dec_unless_positive atomic_dec_unless_positive
@@ -838,7 +825,7 @@ atomic_dec_unless_positive(atomic_t *v)
 static __always_inline int
 atomic_dec_if_positive(atomic_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_dec_if_positive(v);
 }
 #define atomic_dec_if_positive atomic_dec_if_positive
@@ -847,7 +834,7 @@ atomic_dec_if_positive(atomic_t *v)
 static __always_inline s64
 atomic64_read(const atomic64_t *v)
 {
-	__atomic_check_read(v, sizeof(*v));
+	instrument_atomic_read(v, sizeof(*v));
 	return arch_atomic64_read(v);
 }
 #define atomic64_read atomic64_read
@@ -856,7 +843,7 @@ atomic64_read(const atomic64_t *v)
 static __always_inline s64
 atomic64_read_acquire(const atomic64_t *v)
 {
-	__atomic_check_read(v, sizeof(*v));
+	instrument_atomic_read(v, sizeof(*v));
 	return arch_atomic64_read_acquire(v);
 }
 #define atomic64_read_acquire atomic64_read_acquire
@@ -865,7 +852,7 @@ atomic64_read_acquire(const atomic64_t *v)
 static __always_inline void
 atomic64_set(atomic64_t *v, s64 i)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	arch_atomic64_set(v, i);
 }
 #define atomic64_set atomic64_set
@@ -874,7 +861,7 @@ atomic64_set(atomic64_t *v, s64 i)
 static __always_inline void
 atomic64_set_release(atomic64_t *v, s64 i)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	arch_atomic64_set_release(v, i);
 }
 #define atomic64_set_release atomic64_set_release
@@ -883,7 +870,7 @@ atomic64_set_release(atomic64_t *v, s64 i)
 static __always_inline void
 atomic64_add(s64 i, atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	arch_atomic64_add(i, v);
 }
 #define atomic64_add atomic64_add
@@ -892,7 +879,7 @@ atomic64_add(s64 i, atomic64_t *v)
 static __always_inline s64
 atomic64_add_return(s64 i, atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_add_return(i, v);
 }
 #define atomic64_add_return atomic64_add_return
@@ -902,7 +889,7 @@ atomic64_add_return(s64 i, atomic64_t *v)
 static __always_inline s64
 atomic64_add_return_acquire(s64 i, atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_add_return_acquire(i, v);
 }
 #define atomic64_add_return_acquire atomic64_add_return_acquire
@@ -912,7 +899,7 @@ atomic64_add_return_acquire(s64 i, atomic64_t *v)
 static __always_inline s64
 atomic64_add_return_release(s64 i, atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_add_return_release(i, v);
 }
 #define atomic64_add_return_release atomic64_add_return_release
@@ -922,7 +909,7 @@ atomic64_add_return_release(s64 i, atomic64_t *v)
 static __always_inline s64
 atomic64_add_return_relaxed(s64 i, atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_add_return_relaxed(i, v);
 }
 #define atomic64_add_return_relaxed atomic64_add_return_relaxed
@@ -932,7 +919,7 @@ atomic64_add_return_relaxed(s64 i, atomic64_t *v)
 static __always_inline s64
 atomic64_fetch_add(s64 i, atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_fetch_add(i, v);
 }
 #define atomic64_fetch_add atomic64_fetch_add
@@ -942,7 +929,7 @@ atomic64_fetch_add(s64 i, atomic64_t *v)
 static __always_inline s64
 atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_fetch_add_acquire(i, v);
 }
 #define atomic64_fetch_add_acquire atomic64_fetch_add_acquire
@@ -952,7 +939,7 @@ atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
 static __always_inline s64
 atomic64_fetch_add_release(s64 i, atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_fetch_add_release(i, v);
 }
 #define atomic64_fetch_add_release atomic64_fetch_add_release
@@ -962,7 +949,7 @@ atomic64_fetch_add_release(s64 i, atomic64_t *v)
 static __always_inline s64
 atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_fetch_add_relaxed(i, v);
 }
 #define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
@@ -971,7 +958,7 @@ atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
 static __always_inline void
 atomic64_sub(s64 i, atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	arch_atomic64_sub(i, v);
 }
 #define atomic64_sub atomic64_sub
@@ -980,7 +967,7 @@ atomic64_sub(s64 i, atomic64_t *v)
 static __always_inline s64
 atomic64_sub_return(s64 i, atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_sub_return(i, v);
 }
 #define atomic64_sub_return atomic64_sub_return
@@ -990,7 +977,7 @@ atomic64_sub_return(s64 i, atomic64_t *v)
 static __always_inline s64
 atomic64_sub_return_acquire(s64 i, atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_sub_return_acquire(i, v);
 }
 #define atomic64_sub_return_acquire atomic64_sub_return_acquire
@@ -1000,7 +987,7 @@ atomic64_sub_return_acquire(s64 i, atomic64_t *v)
 static __always_inline s64
 atomic64_sub_return_release(s64 i, atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_sub_return_release(i, v);
 }
 #define atomic64_sub_return_release atomic64_sub_return_release
@@ -1010,7 +997,7 @@ atomic64_sub_return_release(s64 i, atomic64_t *v)
 static __always_inline s64
 atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_sub_return_relaxed(i, v);
 }
 #define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
@@ -1020,7 +1007,7 @@ atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
 static __always_inline s64
 atomic64_fetch_sub(s64 i, atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_fetch_sub(i, v);
 }
 #define atomic64_fetch_sub atomic64_fetch_sub
@@ -1030,7 +1017,7 @@ atomic64_fetch_sub(s64 i, atomic64_t *v)
 static __always_inline s64
 atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_fetch_sub_acquire(i, v);
 }
 #define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire
@@ -1040,7 +1027,7 @@ atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
 static __always_inline s64
 atomic64_fetch_sub_release(s64 i, atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_fetch_sub_release(i, v);
 }
 #define atomic64_fetch_sub_release atomic64_fetch_sub_release
@@ -1050,7 +1037,7 @@ atomic64_fetch_sub_release(s64 i, atomic64_t *v)
 static __always_inline s64
 atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_fetch_sub_relaxed(i, v);
 }
 #define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
@@ -1060,7 +1047,7 @@ atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
 static __always_inline void
 atomic64_inc(atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	arch_atomic64_inc(v);
 }
 #define atomic64_inc atomic64_inc
@@ -1070,7 +1057,7 @@ atomic64_inc(atomic64_t *v)
 static __always_inline s64
 atomic64_inc_return(atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_inc_return(v);
 }
 #define atomic64_inc_return atomic64_inc_return
@@ -1080,7 +1067,7 @@ atomic64_inc_return(atomic64_t *v)
 static __always_inline s64
 atomic64_inc_return_acquire(atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_inc_return_acquire(v);
 }
 #define atomic64_inc_return_acquire atomic64_inc_return_acquire
@@ -1090,7 +1077,7 @@ atomic64_inc_return_acquire(atomic64_t *v)
 static __always_inline s64
 atomic64_inc_return_release(atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_inc_return_release(v);
 }
 #define atomic64_inc_return_release atomic64_inc_return_release
@@ -1100,7 +1087,7 @@ atomic64_inc_return_release(atomic64_t *v)
 static __always_inline s64
 atomic64_inc_return_relaxed(atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_inc_return_relaxed(v);
 }
 #define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
@@ -1110,7 +1097,7 @@ atomic64_inc_return_relaxed(atomic64_t *v)
 static __always_inline s64
 atomic64_fetch_inc(atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_fetch_inc(v);
 }
 #define atomic64_fetch_inc atomic64_fetch_inc
@@ -1120,7 +1107,7 @@ atomic64_fetch_inc(atomic64_t *v)
 static __always_inline s64
 atomic64_fetch_inc_acquire(atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_fetch_inc_acquire(v);
 }
 #define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire
@@ -1130,7 +1117,7 @@ atomic64_fetch_inc_acquire(atomic64_t *v)
 static __always_inline s64
 atomic64_fetch_inc_release(atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_fetch_inc_release(v);
 }
 #define atomic64_fetch_inc_release atomic64_fetch_inc_release
@@ -1140,7 +1127,7 @@ atomic64_fetch_inc_release(atomic64_t *v)
 static __always_inline s64
 atomic64_fetch_inc_relaxed(atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_fetch_inc_relaxed(v);
 }
 #define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed
@@ -1150,7 +1137,7 @@ atomic64_fetch_inc_relaxed(atomic64_t *v)
 static __always_inline void
 atomic64_dec(atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	arch_atomic64_dec(v);
 }
 #define atomic64_dec atomic64_dec
@@ -1160,7 +1147,7 @@ atomic64_dec(atomic64_t *v)
 static __always_inline s64
 atomic64_dec_return(atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_dec_return(v);
 }
 #define atomic64_dec_return atomic64_dec_return
@@ -1170,7 +1157,7 @@ atomic64_dec_return(atomic64_t *v)
 static __always_inline s64
 atomic64_dec_return_acquire(atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_dec_return_acquire(v);
 }
 #define atomic64_dec_return_acquire atomic64_dec_return_acquire
@@ -1180,7 +1167,7 @@ atomic64_dec_return_acquire(atomic64_t *v)
 static __always_inline s64
 atomic64_dec_return_release(atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_dec_return_release(v);
 }
 #define atomic64_dec_return_release atomic64_dec_return_release
@@ -1190,7 +1177,7 @@ atomic64_dec_return_release(atomic64_t *v)
 static __always_inline s64
 atomic64_dec_return_relaxed(atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_dec_return_relaxed(v);
 }
 #define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
@@ -1200,7 +1187,7 @@ atomic64_dec_return_relaxed(atomic64_t *v)
 static __always_inline s64
 atomic64_fetch_dec(atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_fetch_dec(v);
 }
 #define atomic64_fetch_dec atomic64_fetch_dec
@@ -1210,7 +1197,7 @@ atomic64_fetch_dec(atomic64_t *v)
 static __always_inline s64
 atomic64_fetch_dec_acquire(atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_fetch_dec_acquire(v);
 }
 #define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire
@@ -1220,7 +1207,7 @@ atomic64_fetch_dec_acquire(atomic64_t *v)
 static __always_inline s64
 atomic64_fetch_dec_release(atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_fetch_dec_release(v);
 }
 #define atomic64_fetch_dec_release atomic64_fetch_dec_release
@@ -1230,7 +1217,7 @@ atomic64_fetch_dec_release(atomic64_t *v)
 static __always_inline s64
 atomic64_fetch_dec_relaxed(atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_fetch_dec_relaxed(v);
 }
 #define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed
@@ -1239,7 +1226,7 @@ atomic64_fetch_dec_relaxed(atomic64_t *v)
 static __always_inline void
 atomic64_and(s64 i, atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	arch_atomic64_and(i, v);
 }
 #define atomic64_and atomic64_and
@@ -1248,7 +1235,7 @@ atomic64_and(s64 i, atomic64_t *v)
 static __always_inline s64
 atomic64_fetch_and(s64 i, atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_fetch_and(i, v);
 }
 #define atomic64_fetch_and atomic64_fetch_and
@@ -1258,7 +1245,7 @@ atomic64_fetch_and(s64 i, atomic64_t *v)
 static __always_inline s64
 atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_fetch_and_acquire(i, v);
 }
 #define atomic64_fetch_and_acquire atomic64_fetch_and_acquire
@@ -1268,7 +1255,7 @@ atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
 static __always_inline s64
 atomic64_fetch_and_release(s64 i, atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_fetch_and_release(i, v);
 }
 #define atomic64_fetch_and_release atomic64_fetch_and_release
@@ -1278,7 +1265,7 @@ atomic64_fetch_and_release(s64 i, atomic64_t *v)
 static __always_inline s64
 atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_fetch_and_relaxed(i, v);
 }
 #define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
@@ -1288,7 +1275,7 @@ atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
 static __always_inline void
 atomic64_andnot(s64 i, atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	arch_atomic64_andnot(i, v);
 }
 #define atomic64_andnot atomic64_andnot
@@ -1298,7 +1285,7 @@ atomic64_andnot(s64 i, atomic64_t *v)
 static __always_inline s64
 atomic64_fetch_andnot(s64 i, atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_fetch_andnot(i, v);
 }
 #define atomic64_fetch_andnot atomic64_fetch_andnot
@@ -1308,7 +1295,7 @@ atomic64_fetch_andnot(s64 i, atomic64_t *v)
 static __always_inline s64
 atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_fetch_andnot_acquire(i, v);
 }
 #define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire
@@ -1318,7 +1305,7 @@ atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
 static __always_inline s64
 atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_fetch_andnot_release(i, v);
 }
 #define atomic64_fetch_andnot_release atomic64_fetch_andnot_release
@@ -1328,7 +1315,7 @@ atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
 static __always_inline s64
 atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_fetch_andnot_relaxed(i, v);
 }
 #define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
@@ -1337,7 +1324,7 @@ atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
 static __always_inline void
 atomic64_or(s64 i, atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	arch_atomic64_or(i, v);
 }
 #define atomic64_or atomic64_or
@@ -1346,7 +1333,7 @@ atomic64_or(s64 i, atomic64_t *v)
 static __always_inline s64
 atomic64_fetch_or(s64 i, atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_fetch_or(i, v);
 }
 #define atomic64_fetch_or atomic64_fetch_or
@@ -1356,7 +1343,7 @@ atomic64_fetch_or(s64 i, atomic64_t *v)
 static __always_inline s64
 atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_fetch_or_acquire(i, v);
 }
 #define atomic64_fetch_or_acquire atomic64_fetch_or_acquire
@@ -1366,7 +1353,7 @@ atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
 static __always_inline s64
 atomic64_fetch_or_release(s64 i, atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_fetch_or_release(i, v);
 }
 #define atomic64_fetch_or_release atomic64_fetch_or_release
@@ -1376,7 +1363,7 @@ atomic64_fetch_or_release(s64 i, atomic64_t *v)
 static __always_inline s64
 atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_fetch_or_relaxed(i, v);
 }
 #define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
@@ -1385,7 +1372,7 @@ atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
 static __always_inline void
 atomic64_xor(s64 i, atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	arch_atomic64_xor(i, v);
 }
 #define atomic64_xor atomic64_xor
@@ -1394,7 +1381,7 @@ atomic64_xor(s64 i, atomic64_t *v)
 static __always_inline s64
 atomic64_fetch_xor(s64 i, atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_fetch_xor(i, v);
 }
 #define atomic64_fetch_xor atomic64_fetch_xor
@@ -1404,7 +1391,7 @@ atomic64_fetch_xor(s64 i, atomic64_t *v)
 static __always_inline s64
 atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_fetch_xor_acquire(i, v);
 }
 #define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire
@@ -1414,7 +1401,7 @@ atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
 static __always_inline s64
 atomic64_fetch_xor_release(s64 i, atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_fetch_xor_release(i, v);
 }
 #define atomic64_fetch_xor_release atomic64_fetch_xor_release
@@ -1424,7 +1411,7 @@ atomic64_fetch_xor_release(s64 i, atomic64_t *v)
 static __always_inline s64
 atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_fetch_xor_relaxed(i, v);
 }
 #define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
@@ -1434,7 +1421,7 @@ atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
 static __always_inline s64
 atomic64_xchg(atomic64_t *v, s64 i)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_xchg(v, i);
 }
 #define atomic64_xchg atomic64_xchg
@@ -1444,7 +1431,7 @@ atomic64_xchg(atomic64_t *v, s64 i)
 static __always_inline s64
 atomic64_xchg_acquire(atomic64_t *v, s64 i)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_xchg_acquire(v, i);
 }
 #define atomic64_xchg_acquire atomic64_xchg_acquire
@@ -1454,7 +1441,7 @@ atomic64_xchg_acquire(atomic64_t *v, s64 i)
 static __always_inline s64
 atomic64_xchg_release(atomic64_t *v, s64 i)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_xchg_release(v, i);
 }
 #define atomic64_xchg_release atomic64_xchg_release
@@ -1464,7 +1451,7 @@ atomic64_xchg_release(atomic64_t *v, s64 i)
 static __always_inline s64
 atomic64_xchg_relaxed(atomic64_t *v, s64 i)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_xchg_relaxed(v, i);
 }
 #define atomic64_xchg_relaxed atomic64_xchg_relaxed
@@ -1474,7 +1461,7 @@ atomic64_xchg_relaxed(atomic64_t *v, s64 i)
 static __always_inline s64
 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_cmpxchg(v, old, new);
 }
 #define atomic64_cmpxchg atomic64_cmpxchg
@@ -1484,7 +1471,7 @@ atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
 static __always_inline s64
 atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_cmpxchg_acquire(v, old, new);
 }
 #define atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire
@@ -1494,7 +1481,7 @@ atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
 static __always_inline s64
 atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_cmpxchg_release(v, old, new);
 }
 #define atomic64_cmpxchg_release atomic64_cmpxchg_release
@@ -1504,7 +1491,7 @@ atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
 static __always_inline s64
 atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_cmpxchg_relaxed(v, old, new);
 }
 #define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed
@@ -1514,8 +1501,8 @@ atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
 static __always_inline bool
 atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
 {
-	__atomic_check_write(v, sizeof(*v));
-	__atomic_check_write(old, sizeof(*old));
+	instrument_atomic_write(v, sizeof(*v));
+	instrument_atomic_write(old, sizeof(*old));
 	return arch_atomic64_try_cmpxchg(v, old, new);
 }
 #define atomic64_try_cmpxchg atomic64_try_cmpxchg
@@ -1525,8 +1512,8 @@ atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
 static __always_inline bool
 atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
 {
-	__atomic_check_write(v, sizeof(*v));
-	__atomic_check_write(old, sizeof(*old));
+	instrument_atomic_write(v, sizeof(*v));
+	instrument_atomic_write(old, sizeof(*old));
 	return arch_atomic64_try_cmpxchg_acquire(v, old, new);
 }
 #define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire
@@ -1536,8 +1523,8 @@ atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
 static __always_inline bool
 atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
 {
-	__atomic_check_write(v, sizeof(*v));
-	__atomic_check_write(old, sizeof(*old));
+	instrument_atomic_write(v, sizeof(*v));
+	instrument_atomic_write(old, sizeof(*old));
 	return arch_atomic64_try_cmpxchg_release(v, old, new);
 }
 #define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release
@@ -1547,8 +1534,8 @@ atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
 static __always_inline bool
 atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
 {
-	__atomic_check_write(v, sizeof(*v));
-	__atomic_check_write(old, sizeof(*old));
+	instrument_atomic_write(v, sizeof(*v));
+	instrument_atomic_write(old, sizeof(*old));
 	return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
 }
 #define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed
@@ -1558,7 +1545,7 @@ atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
 static __always_inline bool
 atomic64_sub_and_test(s64 i, atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_sub_and_test(i, v);
 }
 #define atomic64_sub_and_test atomic64_sub_and_test
@@ -1568,7 +1555,7 @@ atomic64_sub_and_test(s64 i, atomic64_t *v)
 static __always_inline bool
 atomic64_dec_and_test(atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_dec_and_test(v);
 }
 #define atomic64_dec_and_test atomic64_dec_and_test
@@ -1578,7 +1565,7 @@ atomic64_dec_and_test(atomic64_t *v)
 static __always_inline bool
 atomic64_inc_and_test(atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_inc_and_test(v);
 }
 #define atomic64_inc_and_test atomic64_inc_and_test
@@ -1588,7 +1575,7 @@ atomic64_inc_and_test(atomic64_t *v)
 static __always_inline bool
 atomic64_add_negative(s64 i, atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_add_negative(i, v);
 }
 #define atomic64_add_negative atomic64_add_negative
@@ -1598,7 +1585,7 @@ atomic64_add_negative(s64 i, atomic64_t *v)
 static __always_inline s64
 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_fetch_add_unless(v, a, u);
 }
 #define atomic64_fetch_add_unless atomic64_fetch_add_unless
@@ -1608,7 +1595,7 @@ atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 static __always_inline bool
 atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_add_unless(v, a, u);
 }
 #define atomic64_add_unless atomic64_add_unless
@@ -1618,7 +1605,7 @@ atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
 static __always_inline bool
 atomic64_inc_not_zero(atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_inc_not_zero(v);
 }
 #define atomic64_inc_not_zero atomic64_inc_not_zero
@@ -1628,7 +1615,7 @@ atomic64_inc_not_zero(atomic64_t *v)
 static __always_inline bool
 atomic64_inc_unless_negative(atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_inc_unless_negative(v);
 }
 #define atomic64_inc_unless_negative atomic64_inc_unless_negative
@@ -1638,7 +1625,7 @@ atomic64_inc_unless_negative(atomic64_t *v)
 static __always_inline bool
 atomic64_dec_unless_positive(atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_dec_unless_positive(v);
 }
 #define atomic64_dec_unless_positive atomic64_dec_unless_positive
@@ -1648,7 +1635,7 @@ atomic64_dec_unless_positive(atomic64_t *v)
 static __always_inline s64
 atomic64_dec_if_positive(atomic64_t *v)
 {
-	__atomic_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic64_dec_if_positive(v);
 }
 #define atomic64_dec_if_positive atomic64_dec_if_positive
@@ -1658,7 +1645,7 @@ atomic64_dec_if_positive(atomic64_t *v)
 #define xchg(ptr, ...)						\
 ({									\
 	typeof(ptr) __ai_ptr = (ptr);					\
-	__atomic_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
+	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr));		\
 	arch_xchg(__ai_ptr, __VA_ARGS__);				\
 })
 #endif
@@ -1667,7 +1654,7 @@ atomic64_dec_if_positive(atomic64_t *v)
 #define xchg_acquire(ptr, ...)						\
 ({									\
 	typeof(ptr) __ai_ptr = (ptr);					\
-	__atomic_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
+	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr));		\
 	arch_xchg_acquire(__ai_ptr, __VA_ARGS__);				\
 })
 #endif
@@ -1676,7 +1663,7 @@ atomic64_dec_if_positive(atomic64_t *v)
 #define xchg_release(ptr, ...)						\
 ({									\
 	typeof(ptr) __ai_ptr = (ptr);					\
-	__atomic_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
+	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr));		\
 	arch_xchg_release(__ai_ptr, __VA_ARGS__);				\
 })
 #endif
@@ -1685,7 +1672,7 @@ atomic64_dec_if_positive(atomic64_t *v)
 #define xchg_relaxed(ptr, ...)						\
 ({									\
 	typeof(ptr) __ai_ptr = (ptr);					\
-	__atomic_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
+	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr));		\
 	arch_xchg_relaxed(__ai_ptr, __VA_ARGS__);				\
 })
 #endif
@@ -1694,7 +1681,7 @@ atomic64_dec_if_positive(atomic64_t *v)
 #define cmpxchg(ptr, ...)						\
 ({									\
 	typeof(ptr) __ai_ptr = (ptr);					\
-	__atomic_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
+	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr));		\
 	arch_cmpxchg(__ai_ptr, __VA_ARGS__);				\
 })
 #endif
@@ -1703,7 +1690,7 @@ atomic64_dec_if_positive(atomic64_t *v)
 #define cmpxchg_acquire(ptr, ...)						\
 ({									\
 	typeof(ptr) __ai_ptr = (ptr);					\
-	__atomic_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
+	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr));		\
 	arch_cmpxchg_acquire(__ai_ptr, __VA_ARGS__);				\
 })
 #endif
@@ -1712,7 +1699,7 @@ atomic64_dec_if_positive(atomic64_t *v)
 #define cmpxchg_release(ptr, ...)						\
 ({									\
 	typeof(ptr) __ai_ptr = (ptr);					\
-	__atomic_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
+	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr));		\
 	arch_cmpxchg_release(__ai_ptr, __VA_ARGS__);				\
 })
 #endif
@@ -1721,7 +1708,7 @@ atomic64_dec_if_positive(atomic64_t *v)
 #define cmpxchg_relaxed(ptr, ...)						\
 ({									\
 	typeof(ptr) __ai_ptr = (ptr);					\
-	__atomic_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
+	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr));		\
 	arch_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__);				\
 })
 #endif
@@ -1730,7 +1717,7 @@ atomic64_dec_if_positive(atomic64_t *v)
 #define cmpxchg64(ptr, ...)						\
 ({									\
 	typeof(ptr) __ai_ptr = (ptr);					\
-	__atomic_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
+	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr));		\
 	arch_cmpxchg64(__ai_ptr, __VA_ARGS__);				\
 })
 #endif
@@ -1739,7 +1726,7 @@ atomic64_dec_if_positive(atomic64_t *v)
 #define cmpxchg64_acquire(ptr, ...)						\
 ({									\
 	typeof(ptr) __ai_ptr = (ptr);					\
-	__atomic_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
+	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr));		\
 	arch_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__);				\
 })
 #endif
@@ -1748,7 +1735,7 @@ atomic64_dec_if_positive(atomic64_t *v)
 #define cmpxchg64_release(ptr, ...)						\
 ({									\
 	typeof(ptr) __ai_ptr = (ptr);					\
-	__atomic_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
+	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr));		\
 	arch_cmpxchg64_release(__ai_ptr, __VA_ARGS__);				\
 })
 #endif
@@ -1757,7 +1744,7 @@ atomic64_dec_if_positive(atomic64_t *v)
 #define cmpxchg64_relaxed(ptr, ...)						\
 ({									\
 	typeof(ptr) __ai_ptr = (ptr);					\
-	__atomic_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
+	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr));		\
 	arch_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__);				\
 })
 #endif
@@ -1765,28 +1752,28 @@ atomic64_dec_if_positive(atomic64_t *v)
 #define cmpxchg_local(ptr, ...)						\
 ({									\
 	typeof(ptr) __ai_ptr = (ptr);					\
-	__atomic_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
+	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr));		\
 	arch_cmpxchg_local(__ai_ptr, __VA_ARGS__);				\
 })
 
 #define cmpxchg64_local(ptr, ...)						\
 ({									\
 	typeof(ptr) __ai_ptr = (ptr);					\
-	__atomic_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
+	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr));		\
 	arch_cmpxchg64_local(__ai_ptr, __VA_ARGS__);				\
 })
 
 #define sync_cmpxchg(ptr, ...)						\
 ({									\
 	typeof(ptr) __ai_ptr = (ptr);					\
-	__atomic_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
+	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr));		\
 	arch_sync_cmpxchg(__ai_ptr, __VA_ARGS__);				\
 })
 
 #define cmpxchg_double(ptr, ...)						\
 ({									\
 	typeof(ptr) __ai_ptr = (ptr);					\
-	__atomic_check_write(__ai_ptr, 2 * sizeof(*__ai_ptr));		\
+	instrument_atomic_write(__ai_ptr, 2 * sizeof(*__ai_ptr));		\
 	arch_cmpxchg_double(__ai_ptr, __VA_ARGS__);				\
 })
 
@@ -1794,9 +1781,9 @@ atomic64_dec_if_positive(atomic64_t *v)
 #define cmpxchg_double_local(ptr, ...)						\
 ({									\
 	typeof(ptr) __ai_ptr = (ptr);					\
-	__atomic_check_write(__ai_ptr, 2 * sizeof(*__ai_ptr));		\
+	instrument_atomic_write(__ai_ptr, 2 * sizeof(*__ai_ptr));		\
 	arch_cmpxchg_double_local(__ai_ptr, __VA_ARGS__);				\
 })
 
 #endif /* _ASM_GENERIC_ATOMIC_INSTRUMENTED_H */
-// 7b7e2af0e75c8ecb6f02298a7075f503f30d244c
+// 89bf97f3a7509b740845e51ddf31055b48a81f40
diff --git a/scripts/atomic/gen-atomic-instrumented.sh b/scripts/atomic/gen-atomic-instrumented.sh
index fb42225..6afadf7 100755
--- a/scripts/atomic/gen-atomic-instrumented.sh
+++ b/scripts/atomic/gen-atomic-instrumented.sh
@@ -20,7 +20,7 @@ gen_param_check()
 	# We don't write to constant parameters
 	[ ${type#c} != ${type} ] && rw="read"
 
-	printf "\t__atomic_check_${rw}(${name}, sizeof(*${name}));\n"
+	printf "\tinstrument_atomic_${rw}(${name}, sizeof(*${name}));\n"
 }
 
 #gen_param_check(arg...)
@@ -107,7 +107,7 @@ cat <<EOF
 #define ${xchg}(ptr, ...)						\\
 ({									\\
 	typeof(ptr) __ai_ptr = (ptr);					\\
-	__atomic_check_write(__ai_ptr, ${mult}sizeof(*__ai_ptr));		\\
+	instrument_atomic_write(__ai_ptr, ${mult}sizeof(*__ai_ptr));		\\
 	arch_${xchg}(__ai_ptr, __VA_ARGS__);				\\
 })
 EOF
@@ -148,20 +148,7 @@ cat << EOF
 
 #include <linux/build_bug.h>
 #include <linux/compiler.h>
-#include <linux/kasan-checks.h>
-#include <linux/kcsan-checks.h>
-
-static __always_inline void __atomic_check_read(const volatile void *v, size_t size)
-{
-	kasan_check_read(v, size);
-	kcsan_check_atomic_read(v, size);
-}
-
-static __always_inline void __atomic_check_write(const volatile void *v, size_t size)
-{
-	kasan_check_write(v, size);
-	kcsan_check_atomic_write(v, size);
-}
+#include <linux/instrumented.h>
 
 EOF
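
For reference, a minimal sketch of the instrument_atomic_{read,write}()
helpers that <linux/instrumented.h> is expected to supply in place of the
__atomic_check_{read,write}() wrappers removed above (illustrative only,
mirroring the removed code rather than quoting the header):

	static __always_inline void instrument_atomic_read(const volatile void *v, size_t size)
	{
		kasan_check_read(v, size);
		kcsan_check_atomic_read(v, size);
	}

	static __always_inline void instrument_atomic_write(const volatile void *v, size_t size)
	{
		kasan_check_write(v, size);
		kcsan_check_atomic_write(v, size);
	}

The generated atomic wrappers therefore keep the same KASAN checks while
gaining KCSAN's atomic-access checks.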
 
-- 
2.9.5



* [PATCH kcsan 08/32] asm-generic, kcsan: Add KCSAN instrumentation for bitops
  2020-03-09 19:03 [PATCH kcsan 0/32] KCSAN commits for v5.7 Paul E. McKenney
                   ` (6 preceding siblings ...)
  2020-03-09 19:03 ` [PATCH kcsan 07/32] asm-generic, atomic-instrumented: Use generic instrumented.h paulmck
@ 2020-03-09 19:03 ` paulmck
  2020-03-09 19:03 ` [PATCH kcsan 09/32] iov_iter: Use generic instrumented.h paulmck
                   ` (23 subsequent siblings)
  31 siblings, 0 replies; 50+ messages in thread
From: paulmck @ 2020-03-09 19:03 UTC (permalink / raw)
  To: linux-kernel, kasan-dev, kernel-team, mingo
  Cc: elver, andreyknvl, glider, dvyukov, cai, boqun.feng, Paul E . McKenney

From: Marco Elver <elver@google.com>

Add explicit KCSAN checks for bitops.
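
As an illustration of the conversion (taken from the hunks below), the
atomic bitops are instrumented as atomic writes of the word containing
the bit:

	static inline void set_bit(long nr, volatile unsigned long *addr)
	{
		/* Atomic RMW of the containing word: use the atomic instrumentation. */
		instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
		arch_set_bit(nr, addr);
	}

The non-atomic __set_bit()/__clear_bit() family uses plain
instrument_write() instead, and test_bit() is instrumented as an atomic
read.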

Signed-off-by: Marco Elver <elver@google.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
---
 include/asm-generic/bitops/instrumented-atomic.h     | 14 +++++++-------
 include/asm-generic/bitops/instrumented-lock.h       | 10 +++++-----
 include/asm-generic/bitops/instrumented-non-atomic.h | 16 ++++++++--------
 3 files changed, 20 insertions(+), 20 deletions(-)

diff --git a/include/asm-generic/bitops/instrumented-atomic.h b/include/asm-generic/bitops/instrumented-atomic.h
index 18ce3c9..fb2cb33 100644
--- a/include/asm-generic/bitops/instrumented-atomic.h
+++ b/include/asm-generic/bitops/instrumented-atomic.h
@@ -11,7 +11,7 @@
 #ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H
 #define _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H
 
-#include <linux/kasan-checks.h>
+#include <linux/instrumented.h>
 
 /**
  * set_bit - Atomically set a bit in memory
@@ -25,7 +25,7 @@
  */
 static inline void set_bit(long nr, volatile unsigned long *addr)
 {
-	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
 	arch_set_bit(nr, addr);
 }
 
@@ -38,7 +38,7 @@ static inline void set_bit(long nr, volatile unsigned long *addr)
  */
 static inline void clear_bit(long nr, volatile unsigned long *addr)
 {
-	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
 	arch_clear_bit(nr, addr);
 }
 
@@ -54,7 +54,7 @@ static inline void clear_bit(long nr, volatile unsigned long *addr)
  */
 static inline void change_bit(long nr, volatile unsigned long *addr)
 {
-	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
 	arch_change_bit(nr, addr);
 }
 
@@ -67,7 +67,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
  */
 static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
 {
-	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
 	return arch_test_and_set_bit(nr, addr);
 }
 
@@ -80,7 +80,7 @@ static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
  */
 static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
-	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
 	return arch_test_and_clear_bit(nr, addr);
 }
 
@@ -93,7 +93,7 @@ static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
  */
 static inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
 {
-	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
 	return arch_test_and_change_bit(nr, addr);
 }
 
diff --git a/include/asm-generic/bitops/instrumented-lock.h b/include/asm-generic/bitops/instrumented-lock.h
index ec53fde..b9bec46 100644
--- a/include/asm-generic/bitops/instrumented-lock.h
+++ b/include/asm-generic/bitops/instrumented-lock.h
@@ -11,7 +11,7 @@
 #ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H
 #define _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H
 
-#include <linux/kasan-checks.h>
+#include <linux/instrumented.h>
 
 /**
  * clear_bit_unlock - Clear a bit in memory, for unlock
@@ -22,7 +22,7 @@
  */
 static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
 {
-	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
 	arch_clear_bit_unlock(nr, addr);
 }
 
@@ -37,7 +37,7 @@ static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
  */
 static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
 {
-	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	instrument_write(addr + BIT_WORD(nr), sizeof(long));
 	arch___clear_bit_unlock(nr, addr);
 }
 
@@ -52,7 +52,7 @@ static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
  */
 static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr)
 {
-	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
 	return arch_test_and_set_bit_lock(nr, addr);
 }
 
@@ -71,7 +71,7 @@ static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr)
 static inline bool
 clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
 {
-	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
 	return arch_clear_bit_unlock_is_negative_byte(nr, addr);
 }
 /* Let everybody know we have it. */
diff --git a/include/asm-generic/bitops/instrumented-non-atomic.h b/include/asm-generic/bitops/instrumented-non-atomic.h
index 95ff28d..20f788a 100644
--- a/include/asm-generic/bitops/instrumented-non-atomic.h
+++ b/include/asm-generic/bitops/instrumented-non-atomic.h
@@ -11,7 +11,7 @@
 #ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H
 #define _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H
 
-#include <linux/kasan-checks.h>
+#include <linux/instrumented.h>
 
 /**
  * __set_bit - Set a bit in memory
@@ -24,7 +24,7 @@
  */
 static inline void __set_bit(long nr, volatile unsigned long *addr)
 {
-	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	instrument_write(addr + BIT_WORD(nr), sizeof(long));
 	arch___set_bit(nr, addr);
 }
 
@@ -39,7 +39,7 @@ static inline void __set_bit(long nr, volatile unsigned long *addr)
  */
 static inline void __clear_bit(long nr, volatile unsigned long *addr)
 {
-	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	instrument_write(addr + BIT_WORD(nr), sizeof(long));
 	arch___clear_bit(nr, addr);
 }
 
@@ -54,7 +54,7 @@ static inline void __clear_bit(long nr, volatile unsigned long *addr)
  */
 static inline void __change_bit(long nr, volatile unsigned long *addr)
 {
-	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	instrument_write(addr + BIT_WORD(nr), sizeof(long));
 	arch___change_bit(nr, addr);
 }
 
@@ -68,7 +68,7 @@ static inline void __change_bit(long nr, volatile unsigned long *addr)
  */
 static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
 {
-	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	instrument_write(addr + BIT_WORD(nr), sizeof(long));
 	return arch___test_and_set_bit(nr, addr);
 }
 
@@ -82,7 +82,7 @@ static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
  */
 static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
-	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	instrument_write(addr + BIT_WORD(nr), sizeof(long));
 	return arch___test_and_clear_bit(nr, addr);
 }
 
@@ -96,7 +96,7 @@ static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
  */
 static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
 {
-	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	instrument_write(addr + BIT_WORD(nr), sizeof(long));
 	return arch___test_and_change_bit(nr, addr);
 }
 
@@ -107,7 +107,7 @@ static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
  */
 static inline bool test_bit(long nr, const volatile unsigned long *addr)
 {
-	kasan_check_read(addr + BIT_WORD(nr), sizeof(long));
+	instrument_atomic_read(addr + BIT_WORD(nr), sizeof(long));
 	return arch_test_bit(nr, addr);
 }
 
-- 
2.9.5



* [PATCH kcsan 09/32] iov_iter: Use generic instrumented.h
  2020-03-09 19:03 [PATCH kcsan 0/32] KCSAN commits for v5.7 Paul E. McKenney
                   ` (7 preceding siblings ...)
  2020-03-09 19:03 ` [PATCH kcsan 08/32] asm-generic, kcsan: Add KCSAN instrumentation for bitops paulmck
@ 2020-03-09 19:03 ` paulmck
  2020-03-09 19:03 ` [PATCH kcsan 10/32] copy_to_user, copy_from_user: " paulmck
                   ` (22 subsequent siblings)
  31 siblings, 0 replies; 50+ messages in thread
From: paulmck @ 2020-03-09 19:03 UTC (permalink / raw)
  To: linux-kernel, kasan-dev, kernel-team, mingo
  Cc: elver, andreyknvl, glider, dvyukov, cai, boqun.feng, Paul E . McKenney

From: Marco Elver <elver@google.com>

This replaces the KASAN instrumentation with generic instrumentation,
implicitly adding KCSAN instrumentation support.

For KASAN no functional change is intended.
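
The copy hooks take both pointers so each tool can check the side it
cares about. A rough sketch of what instrument_copy_to_user() is expected
to do (the exact definition lives in include/linux/instrumented.h; this
is illustrative only):

	static __always_inline void
	instrument_copy_to_user(void __user *to, const void *from, unsigned long n)
	{
		/* Copying out of the kernel reads 'from'; user pointers are not checked here. */
		kasan_check_read(from, n);
		kcsan_check_read(from, n);
	}

This preserves the kasan_check_read() call replaced in copyout() below
and adds the corresponding KCSAN read check.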

Suggested-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
---
 lib/iov_iter.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index fb29c02..614b699 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -8,6 +8,7 @@
 #include <linux/splice.h>
 #include <net/checksum.h>
 #include <linux/scatterlist.h>
+#include <linux/instrumented.h>
 
 #define PIPE_PARANOIA /* for now */
 
@@ -138,7 +139,7 @@
 static int copyout(void __user *to, const void *from, size_t n)
 {
 	if (access_ok(to, n)) {
-		kasan_check_read(from, n);
+		instrument_copy_to_user(to, from, n);
 		n = raw_copy_to_user(to, from, n);
 	}
 	return n;
@@ -147,7 +148,7 @@ static int copyout(void __user *to, const void *from, size_t n)
 static int copyin(void *to, const void __user *from, size_t n)
 {
 	if (access_ok(from, n)) {
-		kasan_check_write(to, n);
+		instrument_copy_from_user(to, from, n);
 		n = raw_copy_from_user(to, from, n);
 	}
 	return n;
@@ -639,7 +640,7 @@ EXPORT_SYMBOL(_copy_to_iter);
 static int copyout_mcsafe(void __user *to, const void *from, size_t n)
 {
 	if (access_ok(to, n)) {
-		kasan_check_read(from, n);
+		instrument_copy_to_user(to, from, n);
 		n = copy_to_user_mcsafe((__force void *) to, from, n);
 	}
 	return n;
-- 
2.9.5



* [PATCH kcsan 10/32] copy_to_user, copy_from_user: Use generic instrumented.h
  2020-03-09 19:03 [PATCH kcsan 0/32] KCSAN commits for v5.7 Paul E. McKenney
                   ` (8 preceding siblings ...)
  2020-03-09 19:03 ` [PATCH kcsan 09/32] iov_iter: Use generic instrumented.h paulmck
@ 2020-03-09 19:03 ` paulmck
  2020-03-09 19:03 ` [PATCH kcsan 11/32] kcsan: Add docbook header for data_race() paulmck
                   ` (21 subsequent siblings)
  31 siblings, 0 replies; 50+ messages in thread
From: paulmck @ 2020-03-09 19:03 UTC (permalink / raw)
  To: linux-kernel, kasan-dev, kernel-team, mingo
  Cc: elver, andreyknvl, glider, dvyukov, cai, boqun.feng, Paul E . McKenney

From: Marco Elver <elver@google.com>

This replaces the KASAN instrumentation with generic instrumentation,
implicitly adding KCSAN instrumentation support.

For KASAN no functional change is intended.
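
As with the iov_iter conversion, the hook for the opposite direction can
be pictured as follows (an illustrative sketch, not the authoritative
header):

	static __always_inline void
	instrument_copy_from_user(void *to, const void __user *from, unsigned long n)
	{
		/* Copying into the kernel writes 'to'; matches the old kasan_check_write(to, n). */
		kasan_check_write(to, n);
		kcsan_check_write(to, n);
	}

Each call site below thus keeps its KASAN check and gains a KCSAN write
check.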

Suggested-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
---
 include/linux/uaccess.h | 14 +++++++-------
 lib/usercopy.c          |  7 ++++---
 2 files changed, 11 insertions(+), 10 deletions(-)

diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 67f01601..8a215c5 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -2,9 +2,9 @@
 #ifndef __LINUX_UACCESS_H__
 #define __LINUX_UACCESS_H__
 
+#include <linux/instrumented.h>
 #include <linux/sched.h>
 #include <linux/thread_info.h>
-#include <linux/kasan-checks.h>
 
 #define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS)
 
@@ -58,7 +58,7 @@
 static __always_inline __must_check unsigned long
 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 {
-	kasan_check_write(to, n);
+	instrument_copy_from_user(to, from, n);
 	check_object_size(to, n, false);
 	return raw_copy_from_user(to, from, n);
 }
@@ -67,7 +67,7 @@ static __always_inline __must_check unsigned long
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	might_fault();
-	kasan_check_write(to, n);
+	instrument_copy_from_user(to, from, n);
 	check_object_size(to, n, false);
 	return raw_copy_from_user(to, from, n);
 }
@@ -88,7 +88,7 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
 static __always_inline __must_check unsigned long
 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 {
-	kasan_check_read(from, n);
+	instrument_copy_to_user(to, from, n);
 	check_object_size(from, n, true);
 	return raw_copy_to_user(to, from, n);
 }
@@ -97,7 +97,7 @@ static __always_inline __must_check unsigned long
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	might_fault();
-	kasan_check_read(from, n);
+	instrument_copy_to_user(to, from, n);
 	check_object_size(from, n, true);
 	return raw_copy_to_user(to, from, n);
 }
@@ -109,7 +109,7 @@ _copy_from_user(void *to, const void __user *from, unsigned long n)
 	unsigned long res = n;
 	might_fault();
 	if (likely(access_ok(from, n))) {
-		kasan_check_write(to, n);
+		instrument_copy_from_user(to, from, n);
 		res = raw_copy_from_user(to, from, n);
 	}
 	if (unlikely(res))
@@ -127,7 +127,7 @@ _copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	might_fault();
 	if (access_ok(to, n)) {
-		kasan_check_read(from, n);
+		instrument_copy_to_user(to, from, n);
 		n = raw_copy_to_user(to, from, n);
 	}
 	return n;
diff --git a/lib/usercopy.c b/lib/usercopy.c
index cbb4d9e..4bb1c5e 100644
--- a/lib/usercopy.c
+++ b/lib/usercopy.c
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
-#include <linux/uaccess.h>
 #include <linux/bitops.h>
+#include <linux/instrumented.h>
+#include <linux/uaccess.h>
 
 /* out-of-line parts */
 
@@ -10,7 +11,7 @@ unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n
 	unsigned long res = n;
 	might_fault();
 	if (likely(access_ok(from, n))) {
-		kasan_check_write(to, n);
+		instrument_copy_from_user(to, from, n);
 		res = raw_copy_from_user(to, from, n);
 	}
 	if (unlikely(res))
@@ -25,7 +26,7 @@ unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	might_fault();
 	if (likely(access_ok(to, n))) {
-		kasan_check_read(from, n);
+		instrument_copy_to_user(to, from, n);
 		n = raw_copy_to_user(to, from, n);
 	}
 	return n;
-- 
2.9.5


^ permalink raw reply related	[flat|nested] 50+ messages in thread

* [PATCH kcsan 0/32] KCSAN commits for v5.7
@ 2020-03-09 19:03 Paul E. McKenney
  2020-03-09 19:03 ` [PATCH kcsan 01/32] kcsan: Prefer __always_inline for fast-path paulmck
                   ` (31 more replies)
  0 siblings, 32 replies; 50+ messages in thread
From: Paul E. McKenney @ 2020-03-09 19:03 UTC (permalink / raw)
  To: linux-kernel, kasan-dev, kernel-team, mingo
  Cc: elver, andreyknvl, glider, dvyukov, cai, boqun.feng

Hello!

The patches in this series have already been posted, so this posting
is just to give a heads-up as to which of them are likely to be part

of next week's KCSAN pull request.  Unless otherwise noted, these are
courtesy of Marco Elver.

1.	kcsan: Prefer __always_inline for fast-path.
2.	kcsan: Show full access type in report.
3.	kcsan: Rate-limit reporting per data races.
4.	kcsan: Make KCSAN compatible with lockdep.
5.	kcsan: Address missing case with KCSAN_REPORT_VALUE_CHANGE_ONLY.
6.	include/linux: Add instrumented.h infrastructure.
7.	asm-generic, atomic-instrumented: Use generic instrumented.h.
8.	asm-generic, kcsan: Add KCSAN instrumentation for bitops.
9.	iov_iter: Use generic instrumented.h.
10.	copy_to_user, copy_from_user: Use generic instrumented.h.
11.	kcsan: Add docbook header for data_race(), courtesy of yours truly.
12.	kcsan: Add option to assume plain aligned writes up to word size
	are atomic.
13.	kcsan: Clarify Kconfig option.
14.	kcsan: Cleanup of main KCSAN Kconfig option.
15.	kcsan: Fix 0-sized checks.
16.	kcsan: Introduce KCSAN_ACCESS_ASSERT access type.
17.	kcsan: Introduce ASSERT_EXCLUSIVE_* macros.
18.	kcsan: Add test to generate conflicts via debugfs.
19.	kcsan: Expose core configuration parameters as module params.
20.	kcsan: Fix misreporting if concurrent races on same address.
21.	kcsan: Move interfaces that affects checks to kcsan-checks.h.
22.	compiler.h, seqlock.h: Remove unnecessary kcsan.h includes.
23.	kcsan: Introduce kcsan_value_change type.
24.	kcsan: Add kcsan_set_access_mask() support.
25.	kcsan: Introduce ASSERT_EXCLUSIVE_BITS(var, mask).
26.	kcsan, trace: Make KCSAN compatible with tracing.
27.	kcsan: Add option to allow watcher interruptions.
28.	kcsan: Add option for verbose reporting.
29.	kcsan: Add current->state to implicitly atomic.
30.	kcsan: Fix a typo in a comment, courtesy of Qiujun Huang.
31.	kcsan: Update Documentation/dev-tools/kcsan.rst.
32.	kcsan: Update API documentation in kcsan-checks.h.

							Thanx, Paul

------------------------------------------------------------------------

 Documentation/dev-tools/kcsan.rst                    |  227 ++++++----
 arch/x86/lib/Makefile                                |    5 
 include/asm-generic/atomic-instrumented.h            |  395 ++++++++----------
 include/asm-generic/bitops/instrumented-atomic.h     |   14 
 include/asm-generic/bitops/instrumented-lock.h       |   10 
 include/asm-generic/bitops/instrumented-non-atomic.h |   16 
 include/linux/compiler.h                             |   16 
 include/linux/instrumented.h                         |  109 +++++
 include/linux/kcsan-checks.h                         |  284 ++++++++++---
 include/linux/kcsan.h                                |   46 --
 include/linux/seqlock.h                              |    2 
 include/linux/uaccess.h                              |   14 
 init/init_task.c                                     |    1 
 kernel/kcsan/Makefile                                |    2 
 kernel/kcsan/atomic.h                                |   23 -
 kernel/kcsan/core.c                                  |  279 ++++++++----
 kernel/kcsan/debugfs.c                               |   94 +++-
 kernel/kcsan/encoding.h                              |   14 
 kernel/kcsan/kcsan.h                                 |   36 +
 kernel/kcsan/report.c                                |  414 ++++++++++++++++---
 kernel/kcsan/test.c                                  |   10 
 kernel/locking/Makefile                              |    3 
 kernel/trace/Makefile                                |    3 
 lib/Kconfig.kcsan                                    |  114 ++++-
 lib/iov_iter.c                                       |    7 
 lib/usercopy.c                                       |    7 
 scripts/atomic/gen-atomic-instrumented.sh            |   19 
 27 files changed, 1517 insertions(+), 647 deletions(-)

^ permalink raw reply	[flat|nested] 50+ messages in thread

* [PATCH kcsan 11/32] kcsan: Add docbook header for data_race()
  2020-03-09 19:03 [PATCH kcsan 0/32] KCSAN commits for v5.7 Paul E. McKenney
                   ` (9 preceding siblings ...)
  2020-03-09 19:03 ` [PATCH kcsan 10/32] copy_to_user, copy_from_user: " paulmck
@ 2020-03-09 19:03 ` paulmck
  2020-03-09 19:04 ` [PATCH kcsan 12/32] kcsan: Add option to assume plain aligned writes up to word size are atomic paulmck
                   ` (20 subsequent siblings)
  31 siblings, 0 replies; 50+ messages in thread
From: paulmck @ 2020-03-09 19:03 UTC (permalink / raw)
  To: linux-kernel, kasan-dev, kernel-team, mingo
  Cc: elver, andreyknvl, glider, dvyukov, cai, boqun.feng, Paul E. McKenney

From: "Paul E. McKenney" <paulmck@kernel.org>

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Cc: Marco Elver <elver@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
---
 include/linux/compiler.h | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 8c0beb1..c1bdf37 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -315,13 +315,15 @@ unsigned long read_word_at_a_time(const void *addr)
 
 #include <linux/kcsan.h>
 
-/*
- * data_race(): macro to document that accesses in an expression may conflict with
- * other concurrent accesses resulting in data races, but the resulting
- * behaviour is deemed safe regardless.
+/**
+ * data_race - mark an expression as containing intentional data races
+ *
+ * This data_race() macro is useful for situations in which data races
+ * should be forgiven.  One example is diagnostic code that accesses
+ * shared variables but is not a part of the core synchronization design.
  *
- * This macro *does not* affect normal code generation, but is a hint to tooling
- * that data races here should be ignored.
+ * This macro *does not* affect normal code generation, but is a hint
+ * to tooling that data races here are to be ignored.
  */
 #define data_race(expr)                                                        \
 	({                                                                     \
-- 
2.9.5


^ permalink raw reply related	[flat|nested] 50+ messages in thread

* [PATCH kcsan 12/32] kcsan: Add option to assume plain aligned writes up to word size are atomic
  2020-03-09 19:03 [PATCH kcsan 0/32] KCSAN commits for v5.7 Paul E. McKenney
                   ` (10 preceding siblings ...)
  2020-03-09 19:03 ` [PATCH kcsan 11/32] kcsan: Add docbook header for data_race() paulmck
@ 2020-03-09 19:04 ` paulmck
  2020-03-09 19:04 ` [PATCH kcsan 13/32] kcsan: Clarify Kconfig option KCSAN_IGNORE_ATOMICS paulmck
                   ` (19 subsequent siblings)
  31 siblings, 0 replies; 50+ messages in thread
From: paulmck @ 2020-03-09 19:04 UTC (permalink / raw)
  To: linux-kernel, kasan-dev, kernel-team, mingo
  Cc: elver, andreyknvl, glider, dvyukov, cai, boqun.feng, Paul E . McKenney

From: Marco Elver <elver@google.com>

This adds option KCSAN_ASSUME_PLAIN_WRITES_ATOMIC. If enabled, plain
aligned writes up to word size are assumed to be atomic, and also not
subject to other unsafe compiler optimizations resulting in data races.

This option has been enabled by default to reflect current kernel-wide
preferences.
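
As an illustration (hypothetical variables, not taken from this patch), the
kind of conflict that is no longer reported with this option enabled is a
plain, naturally aligned, word-sized write racing with a marked read:

	int x;			/* naturally aligned, sizeof(int) <= sizeof(long) */

	/* CPU 0: plain aligned write; assumed atomic with this option set. */
	x = 1;

	/* CPU 1: marked read; the conflict with the write above is not
	 * reported, because the only plain access involved is an aligned
	 * write of at most word size. */
	int r = READ_ONCE(x);

Note that a conflict between two such plain aligned writes is also not
reported with this option enabled.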

Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
---
 kernel/kcsan/core.c | 22 +++++++++++++++++-----
 lib/Kconfig.kcsan   | 27 ++++++++++++++++++++-------
 2 files changed, 37 insertions(+), 12 deletions(-)

diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index 64b30f7..e3c7d8f 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -5,6 +5,7 @@
 #include <linux/delay.h>
 #include <linux/export.h>
 #include <linux/init.h>
+#include <linux/kernel.h>
 #include <linux/percpu.h>
 #include <linux/preempt.h>
 #include <linux/random.h>
@@ -169,10 +170,20 @@ static __always_inline struct kcsan_ctx *get_ctx(void)
 	return in_task() ? &current->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx);
 }
 
-static __always_inline bool is_atomic(const volatile void *ptr)
+static __always_inline bool
+is_atomic(const volatile void *ptr, size_t size, int type)
 {
-	struct kcsan_ctx *ctx = get_ctx();
+	struct kcsan_ctx *ctx;
+
+	if ((type & KCSAN_ACCESS_ATOMIC) != 0)
+		return true;
 
+	if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC) &&
+	    (type & KCSAN_ACCESS_WRITE) != 0 && size <= sizeof(long) &&
+	    IS_ALIGNED((unsigned long)ptr, size))
+		return true; /* Assume aligned writes up to word size are atomic. */
+
+	ctx = get_ctx();
 	if (unlikely(ctx->atomic_next > 0)) {
 		/*
 		 * Because we do not have separate contexts for nested
@@ -193,7 +204,8 @@ static __always_inline bool is_atomic(const volatile void *ptr)
 	return kcsan_is_atomic(ptr);
 }
 
-static __always_inline bool should_watch(const volatile void *ptr, int type)
+static __always_inline bool
+should_watch(const volatile void *ptr, size_t size, int type)
 {
 	/*
 	 * Never set up watchpoints when memory operations are atomic.
@@ -202,7 +214,7 @@ static __always_inline bool should_watch(const volatile void *ptr, int type)
 	 * should not count towards skipped instructions, and (2) to actually
 	 * decrement kcsan_atomic_next for consecutive instruction stream.
 	 */
-	if ((type & KCSAN_ACCESS_ATOMIC) != 0 || is_atomic(ptr))
+	if (is_atomic(ptr, size, type))
 		return false;
 
 	if (this_cpu_dec_return(kcsan_skip) >= 0)
@@ -460,7 +472,7 @@ static __always_inline void check_access(const volatile void *ptr, size_t size,
 	if (unlikely(watchpoint != NULL))
 		kcsan_found_watchpoint(ptr, size, type, watchpoint,
 				       encoded_watchpoint);
-	else if (unlikely(should_watch(ptr, type)))
+	else if (unlikely(should_watch(ptr, size, type)))
 		kcsan_setup_watchpoint(ptr, size, type);
 }
 
diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan
index 3552990..6612685 100644
--- a/lib/Kconfig.kcsan
+++ b/lib/Kconfig.kcsan
@@ -91,13 +91,13 @@ config KCSAN_REPORT_ONCE_IN_MS
 	  limiting reporting to avoid flooding the console with reports.
 	  Setting this to 0 disables rate limiting.
 
-# Note that, while some of the below options could be turned into boot
-# parameters, to optimize for the common use-case, we avoid this because: (a)
-# it would impact performance (and we want to avoid static branch for all
-# {READ,WRITE}_ONCE, atomic_*, bitops, etc.), and (b) complicate the design
-# without real benefit. The main purpose of the below options is for use in
-# fuzzer configs to control reported data races, and they are not expected
-# to be switched frequently by a user.
+# The main purpose of the below options is to control reported data races (e.g.
+# in fuzzer configs), and are not expected to be switched frequently by other
+# users. We could turn some of them into boot parameters, but given they should
+# not be switched normally, let's keep them here to simplify configuration.
+#
+# The defaults below are chosen to be very conservative, and may miss certain
+# bugs.
 
 config KCSAN_REPORT_RACE_UNKNOWN_ORIGIN
 	bool "Report races of unknown origin"
@@ -116,6 +116,19 @@ config KCSAN_REPORT_VALUE_CHANGE_ONLY
 	  the data value of the memory location was observed to remain
 	  unchanged, do not report the data race.
 
+config KCSAN_ASSUME_PLAIN_WRITES_ATOMIC
+	bool "Assume that plain aligned writes up to word size are atomic"
+	default y
+	help
+	  Assume that plain aligned writes up to word size are atomic by
+	  default, and also not subject to other unsafe compiler optimizations
+	  resulting in data races. This will cause KCSAN to not report data
+	  races due to conflicts where the only plain accesses are aligned
+	  writes up to word size: conflicts between marked reads and plain
+	  aligned writes up to word size will not be reported as data races;
+	  notice that data races between two conflicting plain aligned writes
+	  will also not be reported.
+
 config KCSAN_IGNORE_ATOMICS
 	bool "Do not instrument marked atomic accesses"
 	help
-- 
2.9.5


^ permalink raw reply related	[flat|nested] 50+ messages in thread

* [PATCH kcsan 13/32] kcsan: Clarify Kconfig option KCSAN_IGNORE_ATOMICS
  2020-03-09 19:03 [PATCH kcsan 0/32] KCSAN commits for v5.7 Paul E. McKenney
                   ` (11 preceding siblings ...)
  2020-03-09 19:04 ` [PATCH kcsan 12/32] kcsan: Add option to assume plain aligned writes up to word size are atomic paulmck
@ 2020-03-09 19:04 ` paulmck
  2020-03-09 19:04 ` [PATCH kcsan 14/32] kcsan: Cleanup of main KCSAN Kconfig option paulmck
                   ` (18 subsequent siblings)
  31 siblings, 0 replies; 50+ messages in thread
From: paulmck @ 2020-03-09 19:04 UTC (permalink / raw)
  To: linux-kernel, kasan-dev, kernel-team, mingo
  Cc: elver, andreyknvl, glider, dvyukov, cai, boqun.feng, Paul E . McKenney

From: Marco Elver <elver@google.com>

Clarify difference between options KCSAN_IGNORE_ATOMICS and
KCSAN_ASSUME_PLAIN_WRITES_ATOMIC in help text.

Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
---
 lib/Kconfig.kcsan | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)

diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan
index 6612685..020ac63 100644
--- a/lib/Kconfig.kcsan
+++ b/lib/Kconfig.kcsan
@@ -132,8 +132,18 @@ config KCSAN_ASSUME_PLAIN_WRITES_ATOMIC
 config KCSAN_IGNORE_ATOMICS
 	bool "Do not instrument marked atomic accesses"
 	help
-	  If enabled, never instruments marked atomic accesses. This results in
-	  not reporting data races where one access is atomic and the other is
-	  a plain access.
+	  Never instrument marked atomic accesses. This option can be used for
+	  additional filtering. Conflicting marked atomic reads and plain
+	  writes will never be reported as a data race, however, will cause
+	  plain reads and marked writes to result in "unknown origin" reports.
+	  If combined with CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN=n, data
+	  races where at least one access is marked atomic will never be
+	  reported.
+
+	  Similar to KCSAN_ASSUME_PLAIN_WRITES_ATOMIC, but including unaligned
+	  accesses, conflicting marked atomic reads and plain writes will not
+	  be reported as data races; however, unlike that option, data races
+	  due to two conflicting plain writes will be reported (aligned and
+	  unaligned, if CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=n).
 
 endif # KCSAN
-- 
2.9.5


^ permalink raw reply related	[flat|nested] 50+ messages in thread

* [PATCH kcsan 14/32] kcsan: Cleanup of main KCSAN Kconfig option
  2020-03-09 19:03 [PATCH kcsan 0/32] KCSAN commits for v5.7 Paul E. McKenney
                   ` (12 preceding siblings ...)
  2020-03-09 19:04 ` [PATCH kcsan 13/32] kcsan: Clarify Kconfig option KCSAN_IGNORE_ATOMICS paulmck
@ 2020-03-09 19:04 ` paulmck
  2020-03-09 19:04 ` [PATCH kcsan 15/32] kcsan: Fix 0-sized checks paulmck
                   ` (17 subsequent siblings)
  31 siblings, 0 replies; 50+ messages in thread
From: paulmck @ 2020-03-09 19:04 UTC (permalink / raw)
  To: linux-kernel, kasan-dev, kernel-team, mingo
  Cc: elver, andreyknvl, glider, dvyukov, cai, boqun.feng, Paul E . McKenney

From: Marco Elver <elver@google.com>

This patch cleans up the rules of the 'KCSAN' Kconfig option by:
  1. implicitly selecting 'STACKTRACE' instead of depending on it;
  2. depending on DEBUG_KERNEL, to avoid accidentally turning KCSAN on if
     the kernel is not meant to be a debug kernel;
  3. updating the short and long summaries.

Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
---
 lib/Kconfig.kcsan | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan
index 020ac63..9785bbf 100644
--- a/lib/Kconfig.kcsan
+++ b/lib/Kconfig.kcsan
@@ -4,12 +4,15 @@ config HAVE_ARCH_KCSAN
 	bool
 
 menuconfig KCSAN
-	bool "KCSAN: watchpoint-based dynamic data race detector"
-	depends on HAVE_ARCH_KCSAN && !KASAN && STACKTRACE
+	bool "KCSAN: dynamic data race detector"
+	depends on HAVE_ARCH_KCSAN && DEBUG_KERNEL && !KASAN
+	select STACKTRACE
 	help
-	  Kernel Concurrency Sanitizer is a dynamic data race detector, which
-	  uses a watchpoint-based sampling approach to detect races. See
-	  <file:Documentation/dev-tools/kcsan.rst> for more details.
+	  The Kernel Concurrency Sanitizer (KCSAN) is a dynamic data race
+	  detector, which relies on compile-time instrumentation, and uses a
+	  watchpoint-based sampling approach to detect data races.
+
+	  See <file:Documentation/dev-tools/kcsan.rst> for more details.
 
 if KCSAN
 
-- 
2.9.5


^ permalink raw reply related	[flat|nested] 50+ messages in thread

* [PATCH kcsan 15/32] kcsan: Fix 0-sized checks
  2020-03-09 19:03 [PATCH kcsan 0/32] KCSAN commits for v5.7 Paul E. McKenney
                   ` (13 preceding siblings ...)
  2020-03-09 19:04 ` [PATCH kcsan 14/32] kcsan: Cleanup of main KCSAN Kconfig option paulmck
@ 2020-03-09 19:04 ` paulmck
  2020-03-09 19:04 ` [PATCH kcsan 16/32] kcsan: Introduce KCSAN_ACCESS_ASSERT access type paulmck
                   ` (16 subsequent siblings)
  31 siblings, 0 replies; 50+ messages in thread
From: paulmck @ 2020-03-09 19:04 UTC (permalink / raw)
  To: linux-kernel, kasan-dev, kernel-team, mingo
  Cc: elver, andreyknvl, glider, dvyukov, cai, boqun.feng, Paul E . McKenney

From: Marco Elver <elver@google.com>

The instrumentation of arbitrary memory-copy functions, such as the
user-copy routines, may be called with a size of 0, which could lead to
false positives.

To avoid this, add a comparison in check_access() for size==0, which
will be optimized out for constant sized instrumentation
(__tsan_{read,write}N), and therefore not affect the common-case
fast-path.
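
A hypothetical example of how a 0-sized check can arise (sketch only, not
taken from this patch):

	/* A zero-length user copy is legal; its instrumentation still calls
	 * into check_access() with size == 0, which now simply returns. */
	static int demo_read(const char __user *ubuf)
	{
		char kbuf[16];

		return copy_from_user(kbuf, ubuf, 0) ? -EFAULT : 0;
	}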

Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
---
 kernel/kcsan/core.c |  7 +++++++
 kernel/kcsan/test.c | 10 ++++++++++
 2 files changed, 17 insertions(+)

diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index e3c7d8f..82c2bef 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -456,6 +456,13 @@ static __always_inline void check_access(const volatile void *ptr, size_t size,
 	long encoded_watchpoint;
 
 	/*
+	 * Do nothing for 0 sized check; this comparison will be optimized out
+	 * for constant sized instrumentation (__tsan_{read,write}N).
+	 */
+	if (unlikely(size == 0))
+		return;
+
+	/*
 	 * Avoid user_access_save in fast-path: find_watchpoint is safe without
 	 * user_access_save, as the address that ptr points to is only used to
 	 * check if a watchpoint exists; ptr is never dereferenced.
diff --git a/kernel/kcsan/test.c b/kernel/kcsan/test.c
index cc60002..d26a052 100644
--- a/kernel/kcsan/test.c
+++ b/kernel/kcsan/test.c
@@ -92,6 +92,16 @@ static bool test_matching_access(void)
 		return false;
 	if (WARN_ON(matching_access(9, 1, 10, 1)))
 		return false;
+
+	/*
+	 * An access of size 0 could match another access, as demonstrated here.
+	 * Rather than add more comparisons to 'matching_access()', which would
+	 * end up in the fast-path for *all* checks, check_access() simply
+	 * returns for all accesses of size 0.
+	 */
+	if (WARN_ON(!matching_access(8, 8, 12, 0)))
+		return false;
+
 	return true;
 }
 
-- 
2.9.5


^ permalink raw reply related	[flat|nested] 50+ messages in thread

* [PATCH kcsan 16/32] kcsan: Introduce KCSAN_ACCESS_ASSERT access type
  2020-03-09 19:03 [PATCH kcsan 0/32] KCSAN commits for v5.7 Paul E. McKenney
                   ` (14 preceding siblings ...)
  2020-03-09 19:04 ` [PATCH kcsan 15/32] kcsan: Fix 0-sized checks paulmck
@ 2020-03-09 19:04 ` paulmck
  2020-03-09 19:04 ` [PATCH kcsan 17/32] kcsan: Introduce ASSERT_EXCLUSIVE_* macros paulmck
                   ` (15 subsequent siblings)
  31 siblings, 0 replies; 50+ messages in thread
From: paulmck @ 2020-03-09 19:04 UTC (permalink / raw)
  To: linux-kernel, kasan-dev, kernel-team, mingo
  Cc: elver, andreyknvl, glider, dvyukov, cai, boqun.feng, Paul E . McKenney

From: Marco Elver <elver@google.com>

The KCSAN_ACCESS_ASSERT access type may be used to introduce dummy reads
and writes to assert certain properties of concurrent code, where bugs
could not be detected as normal data races.

For example, a variable that is only meant to be written by a single
CPU, but may be read (without locking) by other CPUs must still be
marked properly to avoid data races. However, any concurrent write,
whether marked with WRITE_ONCE() or not, would be a bug. Using
kcsan_check_access(&x, sizeof(x), KCSAN_ACCESS_ASSERT) would allow
catching such bugs.
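
A hypothetical sketch of such an assertion (variable and function names are
made up for illustration):

	/* 'stat_count' is only ever written by its owner CPU, but may be
	 * read locklessly from other CPUs. */
	static unsigned long stat_count;

	void stat_inc(void)	/* called on the owner CPU only */
	{
		/* Any concurrent writer, marked or not, is reported. */
		kcsan_check_access(&stat_count, sizeof(stat_count),
				   KCSAN_ACCESS_ASSERT);
		WRITE_ONCE(stat_count, stat_count + 1);
	}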

To support KCSAN_ACCESS_ASSERT the following notable changes were made:
  * If an access is of type KCSAN_ACCESS_ASSERT, disable various filters
    that only apply to data races, so that all races that KCSAN observes are
    reported.
  * Bug reports that involve an ASSERT access type will be reported as
    "KCSAN: assert: race in ..." instead of "data-race"; this makes them
    easier to distinguish.
  * Update a few comments to just mention 'races' where we do not always
    mean pure data races.

Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
---
 include/linux/kcsan-checks.h | 18 ++++++++++++------
 kernel/kcsan/core.c          | 44 ++++++++++++++++++++++++++++++++++++++------
 kernel/kcsan/debugfs.c       |  1 +
 kernel/kcsan/kcsan.h         |  7 +++++++
 kernel/kcsan/report.c        | 43 +++++++++++++++++++++++++++++++------------
 lib/Kconfig.kcsan            | 24 ++++++++++++++----------
 6 files changed, 103 insertions(+), 34 deletions(-)

diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h
index ef3ee23..5dcadc2 100644
--- a/include/linux/kcsan-checks.h
+++ b/include/linux/kcsan-checks.h
@@ -6,10 +6,16 @@
 #include <linux/types.h>
 
 /*
- * Access type modifiers.
+ * ACCESS TYPE MODIFIERS
+ *
+ *   <none>: normal read access;
+ *   WRITE : write access;
+ *   ATOMIC: access is atomic;
+ *   ASSERT: access is not a regular access, but an assertion;
  */
 #define KCSAN_ACCESS_WRITE  0x1
 #define KCSAN_ACCESS_ATOMIC 0x2
+#define KCSAN_ACCESS_ASSERT 0x4
 
 /*
  * __kcsan_*: Always calls into the runtime when KCSAN is enabled. This may be used
@@ -18,7 +24,7 @@
  */
 #ifdef CONFIG_KCSAN
 /**
- * __kcsan_check_access - check generic access for data races
+ * __kcsan_check_access - check generic access for races
  *
  * @ptr address of access
  * @size size of access
@@ -43,7 +49,7 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size,
 #endif
 
 /**
- * __kcsan_check_read - check regular read access for data races
+ * __kcsan_check_read - check regular read access for races
  *
  * @ptr address of access
  * @size size of access
@@ -51,7 +57,7 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size,
 #define __kcsan_check_read(ptr, size) __kcsan_check_access(ptr, size, 0)
 
 /**
- * __kcsan_check_write - check regular write access for data races
+ * __kcsan_check_write - check regular write access for races
  *
  * @ptr address of access
  * @size size of access
@@ -60,7 +66,7 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size,
 	__kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)
 
 /**
- * kcsan_check_read - check regular read access for data races
+ * kcsan_check_read - check regular read access for races
  *
  * @ptr address of access
  * @size size of access
@@ -68,7 +74,7 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size,
 #define kcsan_check_read(ptr, size) kcsan_check_access(ptr, size, 0)
 
 /**
- * kcsan_check_write - check regular write access for data races
+ * kcsan_check_write - check regular write access for races
  *
  * @ptr address of access
  * @size size of access
diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index 82c2bef..87ef01e 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -56,7 +56,7 @@ static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = {
 
 /*
  * SLOT_IDX_FAST is used in the fast-path. Not first checking the address's primary
- * slot (middle) is fine if we assume that data races occur rarely. The set of
+ * slot (middle) is fine if we assume that races occur rarely. The set of
  * indices {SLOT_IDX(slot, i) | i in [0, NUM_SLOTS)} is equivalent to
  * {SLOT_IDX_FAST(slot, i) | i in [0, NUM_SLOTS)}.
  */
@@ -178,6 +178,14 @@ is_atomic(const volatile void *ptr, size_t size, int type)
 	if ((type & KCSAN_ACCESS_ATOMIC) != 0)
 		return true;
 
+	/*
+	 * Unless explicitly declared atomic, never consider an assertion access
+	 * as atomic. This allows using them also in atomic regions, such as
+	 * seqlocks, without implicitly changing their semantics.
+	 */
+	if ((type & KCSAN_ACCESS_ASSERT) != 0)
+		return false;
+
 	if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC) &&
 	    (type & KCSAN_ACCESS_WRITE) != 0 && size <= sizeof(long) &&
 	    IS_ALIGNED((unsigned long)ptr, size))
@@ -298,7 +306,11 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr,
 		 */
 		kcsan_counter_inc(KCSAN_COUNTER_REPORT_RACES);
 	}
-	kcsan_counter_inc(KCSAN_COUNTER_DATA_RACES);
+
+	if ((type & KCSAN_ACCESS_ASSERT) != 0)
+		kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES);
+	else
+		kcsan_counter_inc(KCSAN_COUNTER_DATA_RACES);
 
 	user_access_restore(flags);
 }
@@ -307,6 +319,7 @@ static noinline void
 kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
 {
 	const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
+	const bool is_assert = (type & KCSAN_ACCESS_ASSERT) != 0;
 	atomic_long_t *watchpoint;
 	union {
 		u8 _1;
@@ -429,13 +442,32 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
 		/*
 		 * No need to increment 'data_races' counter, as the racing
 		 * thread already did.
+		 *
+		 * Count 'assert_failures' for each failed ASSERT access,
+		 * therefore both this thread and the racing thread may
+		 * increment this counter.
 		 */
-		kcsan_report(ptr, size, type, size > 8 || value_change,
-			     smp_processor_id(), KCSAN_REPORT_RACE_SIGNAL);
+		if (is_assert)
+			kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES);
+
+		/*
+		 * - If we were not able to observe a value change due to size
+		 *   constraints, always assume a value change.
+		 * - If the access type is an assertion, we also always assume a
+		 *   value change to always report the race.
+		 */
+		value_change = value_change || size > 8 || is_assert;
+
+		kcsan_report(ptr, size, type, value_change, smp_processor_id(),
+			     KCSAN_REPORT_RACE_SIGNAL);
 	} else if (value_change) {
 		/* Inferring a race, since the value should not have changed. */
+
 		kcsan_counter_inc(KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN);
-		if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN))
+		if (is_assert)
+			kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES);
+
+		if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert)
 			kcsan_report(ptr, size, type, true,
 				     smp_processor_id(),
 				     KCSAN_REPORT_RACE_UNKNOWN_ORIGIN);
@@ -471,7 +503,7 @@ static __always_inline void check_access(const volatile void *ptr, size_t size,
 				     &encoded_watchpoint);
 	/*
 	 * It is safe to check kcsan_is_enabled() after find_watchpoint in the
-	 * slow-path, as long as no state changes that cause a data race to be
+	 * slow-path, as long as no state changes that cause a race to be
 	 * detected and reported have occurred until kcsan_is_enabled() is
 	 * checked.
 	 */
diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c
index bec42da..a9dad44 100644
--- a/kernel/kcsan/debugfs.c
+++ b/kernel/kcsan/debugfs.c
@@ -44,6 +44,7 @@ static const char *counter_to_name(enum kcsan_counter_id id)
 	case KCSAN_COUNTER_USED_WATCHPOINTS:		return "used_watchpoints";
 	case KCSAN_COUNTER_SETUP_WATCHPOINTS:		return "setup_watchpoints";
 	case KCSAN_COUNTER_DATA_RACES:			return "data_races";
+	case KCSAN_COUNTER_ASSERT_FAILURES:		return "assert_failures";
 	case KCSAN_COUNTER_NO_CAPACITY:			return "no_capacity";
 	case KCSAN_COUNTER_REPORT_RACES:		return "report_races";
 	case KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN:	return "races_unknown_origin";
diff --git a/kernel/kcsan/kcsan.h b/kernel/kcsan/kcsan.h
index 8492da4..50078e7 100644
--- a/kernel/kcsan/kcsan.h
+++ b/kernel/kcsan/kcsan.h
@@ -40,6 +40,13 @@ enum kcsan_counter_id {
 	KCSAN_COUNTER_DATA_RACES,
 
 	/*
+	 * Total number of ASSERT failures due to races. If the observed race is
+	 * due to two conflicting ASSERT type accesses, then both will be
+	 * counted.
+	 */
+	KCSAN_COUNTER_ASSERT_FAILURES,
+
+	/*
 	 * Number of times no watchpoints were available.
 	 */
 	KCSAN_COUNTER_NO_CAPACITY,
diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c
index 7cd3428..3bc590e 100644
--- a/kernel/kcsan/report.c
+++ b/kernel/kcsan/report.c
@@ -34,11 +34,11 @@ static struct {
 } other_info = { .ptr = NULL };
 
 /*
- * Information about reported data races; used to rate limit reporting.
+ * Information about reported races; used to rate limit reporting.
  */
 struct report_time {
 	/*
-	 * The last time the data race was reported.
+	 * The last time the race was reported.
 	 */
 	unsigned long time;
 
@@ -57,7 +57,7 @@ struct report_time {
  *
  * Therefore, we use a fixed-size array, which at most will occupy a page. This
  * still adequately rate limits reports, assuming that a) number of unique data
- * races is not excessive, and b) occurrence of unique data races within the
+ * races is not excessive, and b) occurrence of unique races within the
  * same time window is limited.
  */
 #define REPORT_TIMES_MAX (PAGE_SIZE / sizeof(struct report_time))
@@ -74,7 +74,7 @@ static struct report_time report_times[REPORT_TIMES_SIZE];
 static DEFINE_SPINLOCK(report_lock);
 
 /*
- * Checks if the data race identified by thread frames frame1 and frame2 has
+ * Checks if the race identified by thread frames frame1 and frame2 has
  * been reported since (now - KCSAN_REPORT_ONCE_IN_MS).
  */
 static bool rate_limit_report(unsigned long frame1, unsigned long frame2)
@@ -90,7 +90,7 @@ static bool rate_limit_report(unsigned long frame1, unsigned long frame2)
 
 	invalid_before = jiffies - msecs_to_jiffies(CONFIG_KCSAN_REPORT_ONCE_IN_MS);
 
-	/* Check if a matching data race report exists. */
+	/* Check if a matching race report exists. */
 	for (i = 0; i < REPORT_TIMES_SIZE; ++i) {
 		struct report_time *rt = &report_times[i];
 
@@ -114,7 +114,7 @@ static bool rate_limit_report(unsigned long frame1, unsigned long frame2)
 		if (time_before(rt->time, invalid_before))
 			continue; /* before KCSAN_REPORT_ONCE_IN_MS ago */
 
-		/* Reported recently, check if data race matches. */
+		/* Reported recently, check if race matches. */
 		if ((rt->frame1 == frame1 && rt->frame2 == frame2) ||
 		    (rt->frame1 == frame2 && rt->frame2 == frame1))
 			return true;
@@ -142,11 +142,12 @@ skip_report(bool value_change, unsigned long top_frame)
 	 * 3. write watchpoint, conflicting write (value_change==true): report;
 	 * 4. write watchpoint, conflicting write (value_change==false): skip;
 	 * 5. write watchpoint, conflicting read (value_change==false): skip;
-	 * 6. write watchpoint, conflicting read (value_change==true): impossible;
+	 * 6. write watchpoint, conflicting read (value_change==true): report;
 	 *
 	 * Cases 1-4 are intuitive and expected; case 5 ensures we do not report
-	 * data races where the write may have rewritten the same value; and
-	 * case 6 is simply impossible.
+	 * data races where the write may have rewritten the same value; case 6
+	 * is possible either if the size is larger than what we check value
+	 * changes for or the access type is KCSAN_ACCESS_ASSERT.
 	 */
 	if (IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY) && !value_change) {
 		/*
@@ -178,11 +179,27 @@ static const char *get_access_type(int type)
 		return "write";
 	case KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
 		return "write (marked)";
+
+	/*
+	 * ASSERT variants:
+	 */
+	case KCSAN_ACCESS_ASSERT:
+	case KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_ATOMIC:
+		return "assert no writes";
+	case KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE:
+	case KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
+		return "assert no accesses";
+
 	default:
 		BUG();
 	}
 }
 
+static const char *get_bug_type(int type)
+{
+	return (type & KCSAN_ACCESS_ASSERT) != 0 ? "assert: race" : "data-race";
+}
+
 /* Return thread description: in task or interrupt. */
 static const char *get_thread_desc(int task_id)
 {
@@ -268,13 +285,15 @@ static bool print_report(const volatile void *ptr, size_t size, int access_type,
 		 * Do not print offset of functions to keep title short.
 		 */
 		cmp = sym_strcmp((void *)other_frame, (void *)this_frame);
-		pr_err("BUG: KCSAN: data-race in %ps / %ps\n",
+		pr_err("BUG: KCSAN: %s in %ps / %ps\n",
+		       get_bug_type(access_type | other_info.access_type),
 		       (void *)(cmp < 0 ? other_frame : this_frame),
 		       (void *)(cmp < 0 ? this_frame : other_frame));
 	} break;
 
 	case KCSAN_REPORT_RACE_UNKNOWN_ORIGIN:
-		pr_err("BUG: KCSAN: data-race in %pS\n", (void *)this_frame);
+		pr_err("BUG: KCSAN: %s in %pS\n", get_bug_type(access_type),
+		       (void *)this_frame);
 		break;
 
 	default:
@@ -427,7 +446,7 @@ void kcsan_report(const volatile void *ptr, size_t size, int access_type,
 	/*
 	 * With TRACE_IRQFLAGS, lockdep's IRQ trace state becomes corrupted if
 	 * we do not turn off lockdep here; this could happen due to recursion
-	 * into lockdep via KCSAN if we detect a data race in utilities used by
+	 * into lockdep via KCSAN if we detect a race in utilities used by
 	 * lockdep.
 	 */
 	lockdep_off();
diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan
index 9785bbf..f0b7911 100644
--- a/lib/Kconfig.kcsan
+++ b/lib/Kconfig.kcsan
@@ -4,13 +4,17 @@ config HAVE_ARCH_KCSAN
 	bool
 
 menuconfig KCSAN
-	bool "KCSAN: dynamic data race detector"
+	bool "KCSAN: dynamic race detector"
 	depends on HAVE_ARCH_KCSAN && DEBUG_KERNEL && !KASAN
 	select STACKTRACE
 	help
-	  The Kernel Concurrency Sanitizer (KCSAN) is a dynamic data race
-	  detector, which relies on compile-time instrumentation, and uses a
-	  watchpoint-based sampling approach to detect data races.
+	  The Kernel Concurrency Sanitizer (KCSAN) is a dynamic race detector,
+	  which relies on compile-time instrumentation, and uses a
+	  watchpoint-based sampling approach to detect races.
+
+	  KCSAN's primary purpose is to detect data races. KCSAN can also be
+	  used to check properties, with the help of provided assertions, of
+	  concurrent code where bugs do not manifest as data races.
 
 	  See <file:Documentation/dev-tools/kcsan.rst> for more details.
 
@@ -85,14 +89,14 @@ config KCSAN_SKIP_WATCH_RANDOMIZE
 	  KCSAN_WATCH_SKIP.
 
 config KCSAN_REPORT_ONCE_IN_MS
-	int "Duration in milliseconds, in which any given data race is only reported once"
+	int "Duration in milliseconds, in which any given race is only reported once"
 	default 3000
 	help
-	  Any given data race is only reported once in the defined time window.
-	  Different data races may still generate reports within a duration
-	  that is smaller than the duration defined here. This allows rate
-	  limiting reporting to avoid flooding the console with reports.
-	  Setting this to 0 disables rate limiting.
+	  Any given race is only reported once in the defined time window.
+	  Different races may still generate reports within a duration that is
+	  smaller than the duration defined here. This allows rate limiting
+	  reporting to avoid flooding the console with reports.  Setting this
+	  to 0 disables rate limiting.
 
 # The main purpose of the below options is to control reported data races (e.g.
 # in fuzzer configs), and are not expected to be switched frequently by other
-- 
2.9.5


^ permalink raw reply related	[flat|nested] 50+ messages in thread

* [PATCH kcsan 17/32] kcsan: Introduce ASSERT_EXCLUSIVE_* macros
  2020-03-09 19:03 [PATCH kcsan 0/32] KCSAN commits for v5.7 Paul E. McKenney
                   ` (15 preceding siblings ...)
  2020-03-09 19:04 ` [PATCH kcsan 16/32] kcsan: Introduce KCSAN_ACCESS_ASSERT access type paulmck
@ 2020-03-09 19:04 ` paulmck
  2020-03-13  8:52   ` Boqun Feng
  2020-03-09 19:04 ` [PATCH kcsan 18/32] kcsan: Add test to generate conflicts via debugfs paulmck
                   ` (14 subsequent siblings)
  31 siblings, 1 reply; 50+ messages in thread
From: paulmck @ 2020-03-09 19:04 UTC (permalink / raw)
  To: linux-kernel, kasan-dev, kernel-team, mingo
  Cc: elver, andreyknvl, glider, dvyukov, cai, boqun.feng, Paul E . McKenney

From: Marco Elver <elver@google.com>

Introduces ASSERT_EXCLUSIVE_WRITER and ASSERT_EXCLUSIVE_ACCESS, which
may be used to assert properties of synchronization logic, where
violation cannot be detected as a normal data race.
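
A minimal usage sketch (hypothetical 'struct obj', mirroring the kerneldoc
examples added below; assumes the usual kernel headers):

	struct obj {
		spinlock_t lock;
		refcount_t refcnt;
		long state;		/* written only under obj->lock */
	};

	void obj_update(struct obj *obj)
	{
		lockdep_assert_held(&obj->lock);
		ASSERT_EXCLUSIVE_WRITER(obj->state);	/* readers are fine */
		obj->state++;
	}

	void obj_put(struct obj *obj)
	{
		if (refcount_dec_and_test(&obj->refcnt)) {
			ASSERT_EXCLUSIVE_ACCESS(*obj);	/* no readers or writers */
			kfree(obj);
		}
	}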

Examples of the reports that may be generated:

    ==================================================================
    BUG: KCSAN: assert: race in test_thread / test_thread

    write to 0xffffffffab3d1540 of 8 bytes by task 466 on cpu 2:
     test_thread+0x8d/0x111
     debugfs_write.cold+0x32/0x44
     ...

    assert no writes to 0xffffffffab3d1540 of 8 bytes by task 464 on cpu 0:
     test_thread+0xa3/0x111
     debugfs_write.cold+0x32/0x44
     ...
    ==================================================================

    ==================================================================
    BUG: KCSAN: assert: race in test_thread / test_thread

    assert no accesses to 0xffffffffab3d1540 of 8 bytes by task 465 on cpu 1:
     test_thread+0xb9/0x111
     debugfs_write.cold+0x32/0x44
     ...

    read to 0xffffffffab3d1540 of 8 bytes by task 464 on cpu 0:
     test_thread+0x77/0x111
     debugfs_write.cold+0x32/0x44
     ...
    ==================================================================

Signed-off-by: Marco Elver <elver@google.com>
Suggested-by: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
---
 include/linux/kcsan-checks.h | 40 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 40 insertions(+)

diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h
index 5dcadc2..cf69617 100644
--- a/include/linux/kcsan-checks.h
+++ b/include/linux/kcsan-checks.h
@@ -96,4 +96,44 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size,
 	kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC | KCSAN_ACCESS_WRITE)
 #endif
 
+/**
+ * ASSERT_EXCLUSIVE_WRITER - assert no other threads are writing @var
+ *
+ * Assert that there are no other threads writing @var; other readers are
+ * allowed. This assertion can be used to specify properties of concurrent code,
+ * where violation cannot be detected as a normal data race.
+ *
+ * For example, if a per-CPU variable is only meant to be written by a single
+ * CPU, but may be read from other CPUs; in this case, reads and writes must be
+ * marked properly, however, if an off-CPU WRITE_ONCE() races with the owning
+ * CPU's WRITE_ONCE(), would not constitute a data race but could be a harmful
+ * race condition. Using this macro allows specifying this property in the code
+ * and catch such bugs.
+ *
+ * @var variable to assert on
+ */
+#define ASSERT_EXCLUSIVE_WRITER(var)                                           \
+	__kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT)
+
+/**
+ * ASSERT_EXCLUSIVE_ACCESS - assert no other threads are accessing @var
+ *
+ * Assert that no other thread is accessing @var (no readers nor writers). This
+ * assertion can be used to specify properties of concurrent code, where
+ * violation cannot be detected as a normal data race.
+ *
+ * For example, in a reference-counting algorithm where exclusive access is
+ * expected after the refcount reaches 0. We can check that this property
+ * actually holds as follows:
+ *
+ *	if (refcount_dec_and_test(&obj->refcnt)) {
+ *		ASSERT_EXCLUSIVE_ACCESS(*obj);
+ *		safely_dispose_of(obj);
+ *	}
+ *
+ * @var variable to assert on
+ */
+#define ASSERT_EXCLUSIVE_ACCESS(var)                                           \
+	__kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT)
+
 #endif /* _LINUX_KCSAN_CHECKS_H */
-- 
2.9.5


^ permalink raw reply related	[flat|nested] 50+ messages in thread

* [PATCH kcsan 18/32] kcsan: Add test to generate conflicts via debugfs
  2020-03-09 19:03 [PATCH kcsan 0/32] KCSAN commits for v5.7 Paul E. McKenney
                   ` (16 preceding siblings ...)
  2020-03-09 19:04 ` [PATCH kcsan 17/32] kcsan: Introduce ASSERT_EXCLUSIVE_* macros paulmck
@ 2020-03-09 19:04 ` paulmck
  2020-03-09 19:04 ` [PATCH kcsan 19/32] kcsan: Expose core configuration parameters as module params paulmck
                   ` (13 subsequent siblings)
  31 siblings, 0 replies; 50+ messages in thread
From: paulmck @ 2020-03-09 19:04 UTC (permalink / raw)
  To: linux-kernel, kasan-dev, kernel-team, mingo
  Cc: elver, andreyknvl, glider, dvyukov, cai, boqun.feng, Paul E . McKenney

From: Marco Elver <elver@google.com>

Add 'test=<iters>' option to KCSAN's debugfs interface to invoke KCSAN
checks on a dummy variable. By writing 'test=<iters>' to the debugfs
file from multiple tasks, we can generate real conflicts, and trigger
data race reports.
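
For example, running something like "echo test=100000 > /sys/kernel/debug/kcsan"
concurrently from two or more shells should produce reports on the dummy
variable (path assumes the usual debugfs mount point).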

Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
---
 kernel/kcsan/debugfs.c | 51 +++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 46 insertions(+), 5 deletions(-)

diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c
index a9dad44..9bbba0e 100644
--- a/kernel/kcsan/debugfs.c
+++ b/kernel/kcsan/debugfs.c
@@ -6,6 +6,7 @@
 #include <linux/debugfs.h>
 #include <linux/init.h>
 #include <linux/kallsyms.h>
+#include <linux/sched.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/sort.h>
@@ -69,9 +70,9 @@ void kcsan_counter_dec(enum kcsan_counter_id id)
 /*
  * The microbenchmark allows benchmarking KCSAN core runtime only. To run
  * multiple threads, pipe 'microbench=<iters>' from multiple tasks into the
- * debugfs file.
+ * debugfs file. This will not generate any conflicts, and tests fast-path only.
  */
-static void microbenchmark(unsigned long iters)
+static noinline void microbenchmark(unsigned long iters)
 {
 	cycles_t cycles;
 
@@ -81,18 +82,52 @@ static void microbenchmark(unsigned long iters)
 	while (iters--) {
 		/*
 		 * We can run this benchmark from multiple tasks; this address
-		 * calculation increases likelyhood of some accesses overlapping
-		 * (they still won't conflict because all are reads).
+		 * calculation increases likelyhood of some accesses
+		 * overlapping. Make the access type an atomic read, to never
+		 * set up watchpoints and test the fast-path only.
 		 */
 		unsigned long addr =
 			iters % (CONFIG_KCSAN_NUM_WATCHPOINTS * PAGE_SIZE);
-		__kcsan_check_read((void *)addr, sizeof(long));
+		__kcsan_check_access((void *)addr, sizeof(long), KCSAN_ACCESS_ATOMIC);
 	}
 	cycles = get_cycles() - cycles;
 
 	pr_info("KCSAN: %s end   | cycles: %llu\n", __func__, cycles);
 }
 
+/*
+ * Simple test to create conflicting accesses. Write 'test=<iters>' to KCSAN's
+ * debugfs file from multiple tasks to generate real conflicts and show reports.
+ */
+static long test_dummy;
+static noinline void test_thread(unsigned long iters)
+{
+	const struct kcsan_ctx ctx_save = current->kcsan_ctx;
+	cycles_t cycles;
+
+	/* We may have been called from an atomic region; reset context. */
+	memset(&current->kcsan_ctx, 0, sizeof(current->kcsan_ctx));
+
+	pr_info("KCSAN: %s begin | iters: %lu\n", __func__, iters);
+
+	cycles = get_cycles();
+	while (iters--) {
+		__kcsan_check_read(&test_dummy, sizeof(test_dummy));
+		__kcsan_check_write(&test_dummy, sizeof(test_dummy));
+		ASSERT_EXCLUSIVE_WRITER(test_dummy);
+		ASSERT_EXCLUSIVE_ACCESS(test_dummy);
+
+		/* not actually instrumented */
+		WRITE_ONCE(test_dummy, iters);  /* to observe value-change */
+	}
+	cycles = get_cycles() - cycles;
+
+	pr_info("KCSAN: %s end   | cycles: %llu\n", __func__, cycles);
+
+	/* restore context */
+	current->kcsan_ctx = ctx_save;
+}
+
 static int cmp_filterlist_addrs(const void *rhs, const void *lhs)
 {
 	const unsigned long a = *(const unsigned long *)rhs;
@@ -242,6 +277,12 @@ debugfs_write(struct file *file, const char __user *buf, size_t count, loff_t *o
 		if (kstrtoul(&arg[sizeof("microbench=") - 1], 0, &iters))
 			return -EINVAL;
 		microbenchmark(iters);
+	} else if (!strncmp(arg, "test=", sizeof("test=") - 1)) {
+		unsigned long iters;
+
+		if (kstrtoul(&arg[sizeof("test=") - 1], 0, &iters))
+			return -EINVAL;
+		test_thread(iters);
 	} else if (!strcmp(arg, "whitelist")) {
 		set_report_filterlist_whitelist(true);
 	} else if (!strcmp(arg, "blacklist")) {
-- 
2.9.5


^ permalink raw reply related	[flat|nested] 50+ messages in thread

* [PATCH kcsan 19/32] kcsan: Expose core configuration parameters as module params
  2020-03-09 19:03 [PATCH kcsan 0/32] KCSAN commits for v5.7 Paul E. McKenney
                   ` (17 preceding siblings ...)
  2020-03-09 19:04 ` [PATCH kcsan 18/32] kcsan: Add test to generate conflicts via debugfs paulmck
@ 2020-03-09 19:04 ` paulmck
  2020-03-09 19:04 ` [PATCH kcsan 20/32] kcsan: Fix misreporting if concurrent races on same address paulmck
                   ` (12 subsequent siblings)
  31 siblings, 0 replies; 50+ messages in thread
From: paulmck @ 2020-03-09 19:04 UTC (permalink / raw)
  To: linux-kernel, kasan-dev, kernel-team, mingo
  Cc: elver, andreyknvl, glider, dvyukov, cai, boqun.feng, Paul E . McKenney

From: Marco Elver <elver@google.com>

This adds early_enable, udelay_{task,interrupt}, and skip_watch as module
params. The latter parameters are useful to modify at runtime to tune
KCSAN's performance on new systems. This will also permit auto-tuning
these parameters to maximize overall system performance and KCSAN's race
detection ability.

None of the parameters are used in the fast-path and referring to them
via static variables instead of CONFIG constants will not affect
performance.
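
With the "kcsan." MODULE_PARAM_PREFIX set below, these can be given on the
kernel command line (for example, kcsan.udelay_task=100; illustrative value),
and the parameters registered with mode 0644 can additionally be changed at
runtime via /sys/module/kcsan/parameters/. Note that early_enable is
registered with permissions 0, so it is boot-time only.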

Signed-off-by: Marco Elver <elver@google.com>
Cc: Qian Cai <cai@lca.pw>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
---
 kernel/kcsan/core.c | 24 +++++++++++++++++++-----
 1 file changed, 19 insertions(+), 5 deletions(-)

diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index 87ef01e..498b1eb 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -6,6 +6,7 @@
 #include <linux/export.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/moduleparam.h>
 #include <linux/percpu.h>
 #include <linux/preempt.h>
 #include <linux/random.h>
@@ -16,6 +17,20 @@
 #include "encoding.h"
 #include "kcsan.h"
 
+static bool kcsan_early_enable = IS_ENABLED(CONFIG_KCSAN_EARLY_ENABLE);
+static unsigned int kcsan_udelay_task = CONFIG_KCSAN_UDELAY_TASK;
+static unsigned int kcsan_udelay_interrupt = CONFIG_KCSAN_UDELAY_INTERRUPT;
+static long kcsan_skip_watch = CONFIG_KCSAN_SKIP_WATCH;
+
+#ifdef MODULE_PARAM_PREFIX
+#undef MODULE_PARAM_PREFIX
+#endif
+#define MODULE_PARAM_PREFIX "kcsan."
+module_param_named(early_enable, kcsan_early_enable, bool, 0);
+module_param_named(udelay_task, kcsan_udelay_task, uint, 0644);
+module_param_named(udelay_interrupt, kcsan_udelay_interrupt, uint, 0644);
+module_param_named(skip_watch, kcsan_skip_watch, long, 0644);
+
 bool kcsan_enabled;
 
 /* Per-CPU kcsan_ctx for interrupts */
@@ -239,9 +254,9 @@ should_watch(const volatile void *ptr, size_t size, int type)
 
 static inline void reset_kcsan_skip(void)
 {
-	long skip_count = CONFIG_KCSAN_SKIP_WATCH -
+	long skip_count = kcsan_skip_watch -
 			  (IS_ENABLED(CONFIG_KCSAN_SKIP_WATCH_RANDOMIZE) ?
-				   prandom_u32_max(CONFIG_KCSAN_SKIP_WATCH) :
+				   prandom_u32_max(kcsan_skip_watch) :
 				   0);
 	this_cpu_write(kcsan_skip, skip_count);
 }
@@ -253,8 +268,7 @@ static __always_inline bool kcsan_is_enabled(void)
 
 static inline unsigned int get_delay(void)
 {
-	unsigned int delay = in_task() ? CONFIG_KCSAN_UDELAY_TASK :
-					 CONFIG_KCSAN_UDELAY_INTERRUPT;
+	unsigned int delay = in_task() ? kcsan_udelay_task : kcsan_udelay_interrupt;
 	return delay - (IS_ENABLED(CONFIG_KCSAN_DELAY_RANDOMIZE) ?
 				prandom_u32_max(delay) :
 				0);
@@ -527,7 +541,7 @@ void __init kcsan_init(void)
 	 * We are in the init task, and no other tasks should be running;
 	 * WRITE_ONCE without memory barrier is sufficient.
 	 */
-	if (IS_ENABLED(CONFIG_KCSAN_EARLY_ENABLE))
+	if (kcsan_early_enable)
 		WRITE_ONCE(kcsan_enabled, true);
 }
 
-- 
2.9.5


^ permalink raw reply related	[flat|nested] 50+ messages in thread

* [PATCH kcsan 20/32] kcsan: Fix misreporting if concurrent races on same address
  2020-03-09 19:03 [PATCH kcsan 0/32] KCSAN commits for v5.7 Paul E. McKenney
                   ` (18 preceding siblings ...)
  2020-03-09 19:04 ` [PATCH kcsan 19/32] kcsan: Expose core configuration parameters as module params paulmck
@ 2020-03-09 19:04 ` paulmck
  2020-03-09 19:04 ` [PATCH kcsan 21/32] kcsan: Move interfaces that affects checks to kcsan-checks.h paulmck
                   ` (11 subsequent siblings)
  31 siblings, 0 replies; 50+ messages in thread
From: paulmck @ 2020-03-09 19:04 UTC (permalink / raw)
  To: linux-kernel, kasan-dev, kernel-team, mingo
  Cc: elver, andreyknvl, glider, dvyukov, cai, boqun.feng, Paul E . McKenney

From: Marco Elver <elver@google.com>

If there are at least 4 threads racing on the same address, it can
happen that one of the readers may observe another matching reader in
other_info. To avoid locking up, we have to consume 'other_info'
regardless, but skip the report. See the added comment for more details.

Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
---
 kernel/kcsan/report.c | 38 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 38 insertions(+)

diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c
index 3bc590e..abf6852 100644
--- a/kernel/kcsan/report.c
+++ b/kernel/kcsan/report.c
@@ -422,6 +422,44 @@ static bool prepare_report(unsigned long *flags, const volatile void *ptr,
 			return false;
 		}
 
+		access_type |= other_info.access_type;
+		if ((access_type & KCSAN_ACCESS_WRITE) == 0) {
+			/*
+			 * While the address matches, this is not the other_info
+			 * from the thread that consumed our watchpoint, since
+			 * neither this nor the access in other_info is a write.
+			 * It is invalid to continue with the report, since we
+			 * only have information about reads.
+			 *
+			 * This can happen due to concurrent races on the same
+			 * address, with at least 4 threads. To avoid locking up
+			 * other_info and all other threads, we have to consume
+			 * it regardless.
+			 *
+			 * A concrete case to illustrate why we might lock up if
+			 * we do not consume other_info:
+			 *
+			 *   We have 4 threads, all accessing the same address
+			 *   (or matching address ranges). Assume the following
+			 *   watcher and watchpoint consumer pairs:
+			 *   write1-read1, read2-write2. The first to populate
+			 *   other_info is write2, however, write1 consumes it,
+			 *   resulting in a report of write1-write2. This report
+			 *   is valid, however, now read1 populates other_info;
+			 *   read2-read1 is an invalid conflict, yet, no other
+			 *   conflicting access is left. Therefore, we must
+			 *   consume read1's other_info.
+			 *
+			 * Since this case is assumed to be rare, it is
+			 * reasonable to omit this report: one of the other
+			 * reports includes information about the same shared
+			 * data, and at this point the likelihood that we
+			 * re-report the same race again is high.
+			 */
+			release_report(flags, KCSAN_REPORT_RACE_SIGNAL);
+			return false;
+		}
+
 		/*
 		 * Matching & usable access in other_info: keep other_info_lock
 		 * locked, as this thread consumes it to print the full report;
-- 
2.9.5


^ permalink raw reply related	[flat|nested] 50+ messages in thread

* [PATCH kcsan 21/32] kcsan: Move interfaces that affects checks to kcsan-checks.h
  2020-03-09 19:03 [PATCH kcsan 0/32] KCSAN commits for v5.7 Paul E. McKenney
                   ` (19 preceding siblings ...)
  2020-03-09 19:04 ` [PATCH kcsan 20/32] kcsan: Fix misreporting if concurrent races on same address paulmck
@ 2020-03-09 19:04 ` paulmck
  2020-03-09 19:04 ` [PATCH kcsan 22/32] compiler.h, seqlock.h: Remove unnecessary kcsan.h includes paulmck
                   ` (10 subsequent siblings)
  31 siblings, 0 replies; 50+ messages in thread
From: paulmck @ 2020-03-09 19:04 UTC (permalink / raw)
  To: linux-kernel, kasan-dev, kernel-team, mingo
  Cc: elver, andreyknvl, glider, dvyukov, cai, boqun.feng, Paul E . McKenney

From: Marco Elver <elver@google.com>

This moves the functions that change state affecting the behaviour of
kcsan_check_access() into kcsan-checks.h. Since they are likely to be used
together with kcsan_check_access(), it makes more sense to have them in
kcsan-checks.h, which also avoids having to include all of
'include/linux/kcsan.h'.

No functional change intended.

Signed-off-by: Marco Elver <elver@google.com>
Acked-by: John Hubbard <jhubbard@nvidia.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
---
 include/linux/kcsan-checks.h | 48 ++++++++++++++++++++++++++++++++++++++++++--
 include/linux/kcsan.h        | 41 -------------------------------------
 2 files changed, 46 insertions(+), 43 deletions(-)

diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h
index cf69617..8675411 100644
--- a/include/linux/kcsan-checks.h
+++ b/include/linux/kcsan-checks.h
@@ -32,10 +32,54 @@
  */
 void __kcsan_check_access(const volatile void *ptr, size_t size, int type);
 
-#else
+/**
+ * kcsan_nestable_atomic_begin - begin nestable atomic region
+ *
+ * Accesses within the atomic region may appear to race with other accesses but
+ * should be considered atomic.
+ */
+void kcsan_nestable_atomic_begin(void);
+
+/**
+ * kcsan_nestable_atomic_end - end nestable atomic region
+ */
+void kcsan_nestable_atomic_end(void);
+
+/**
+ * kcsan_flat_atomic_begin - begin flat atomic region
+ *
+ * Accesses within the atomic region may appear to race with other accesses but
+ * should be considered atomic.
+ */
+void kcsan_flat_atomic_begin(void);
+
+/**
+ * kcsan_flat_atomic_end - end flat atomic region
+ */
+void kcsan_flat_atomic_end(void);
+
+/**
+ * kcsan_atomic_next - consider following accesses as atomic
+ *
+ * Force treating the next n memory accesses for the current context as atomic
+ * operations.
+ *
+ * @n number of following memory accesses to treat as atomic.
+ */
+void kcsan_atomic_next(int n);
+
+#else /* CONFIG_KCSAN */
+
 static inline void __kcsan_check_access(const volatile void *ptr, size_t size,
 					int type) { }
-#endif
+
+static inline void kcsan_nestable_atomic_begin(void)	{ }
+static inline void kcsan_nestable_atomic_end(void)	{ }
+static inline void kcsan_flat_atomic_begin(void)	{ }
+static inline void kcsan_flat_atomic_end(void)		{ }
+static inline void kcsan_atomic_next(int n)		{ }
+
+#endif /* CONFIG_KCSAN */
 
 /*
  * kcsan_*: Only calls into the runtime when the particular compilation unit has
diff --git a/include/linux/kcsan.h b/include/linux/kcsan.h
index 1019e3a..7a614ca 100644
--- a/include/linux/kcsan.h
+++ b/include/linux/kcsan.h
@@ -56,52 +56,11 @@ void kcsan_disable_current(void);
  */
 void kcsan_enable_current(void);
 
-/**
- * kcsan_nestable_atomic_begin - begin nestable atomic region
- *
- * Accesses within the atomic region may appear to race with other accesses but
- * should be considered atomic.
- */
-void kcsan_nestable_atomic_begin(void);
-
-/**
- * kcsan_nestable_atomic_end - end nestable atomic region
- */
-void kcsan_nestable_atomic_end(void);
-
-/**
- * kcsan_flat_atomic_begin - begin flat atomic region
- *
- * Accesses within the atomic region may appear to race with other accesses but
- * should be considered atomic.
- */
-void kcsan_flat_atomic_begin(void);
-
-/**
- * kcsan_flat_atomic_end - end flat atomic region
- */
-void kcsan_flat_atomic_end(void);
-
-/**
- * kcsan_atomic_next - consider following accesses as atomic
- *
- * Force treating the next n memory accesses for the current context as atomic
- * operations.
- *
- * @n number of following memory accesses to treat as atomic.
- */
-void kcsan_atomic_next(int n);
-
 #else /* CONFIG_KCSAN */
 
 static inline void kcsan_init(void)			{ }
 static inline void kcsan_disable_current(void)		{ }
 static inline void kcsan_enable_current(void)		{ }
-static inline void kcsan_nestable_atomic_begin(void)	{ }
-static inline void kcsan_nestable_atomic_end(void)	{ }
-static inline void kcsan_flat_atomic_begin(void)	{ }
-static inline void kcsan_flat_atomic_end(void)		{ }
-static inline void kcsan_atomic_next(int n)		{ }
 
 #endif /* CONFIG_KCSAN */
 
-- 
2.9.5


^ permalink raw reply related	[flat|nested] 50+ messages in thread

* [PATCH kcsan 22/32] compiler.h, seqlock.h: Remove unnecessary kcsan.h includes
  2020-03-09 19:03 [PATCH kcsan 0/32] KCSAN commits for v5.7 Paul E. McKenney
                   ` (20 preceding siblings ...)
  2020-03-09 19:04 ` [PATCH kcsan 21/32] kcsan: Move interfaces that affect checks to kcsan-checks.h paulmck
@ 2020-03-09 19:04 ` paulmck
  2020-03-09 19:04 ` [PATCH kcsan 23/32] kcsan: Introduce kcsan_value_change type paulmck
                   ` (9 subsequent siblings)
  31 siblings, 0 replies; 50+ messages in thread
From: paulmck @ 2020-03-09 19:04 UTC (permalink / raw)
  To: linux-kernel, kasan-dev, kernel-team, mingo
  Cc: elver, andreyknvl, glider, dvyukov, cai, boqun.feng, Paul E . McKenney

From: Marco Elver <elver@google.com>

We no longer have to include kcsan.h, since the KCSAN interfaces
required by both compiler.h and seqlock.h are now provided by
kcsan-checks.h.

Signed-off-by: Marco Elver <elver@google.com>
Acked-by: John Hubbard <jhubbard@nvidia.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
---
 include/linux/compiler.h | 2 --
 include/linux/seqlock.h  | 2 +-
 2 files changed, 1 insertion(+), 3 deletions(-)

diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index c1bdf37..f504ede 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -313,8 +313,6 @@ unsigned long read_word_at_a_time(const void *addr)
 	__u.__val;					\
 })
 
-#include <linux/kcsan.h>
-
 /**
  * data_race - mark an expression as containing intentional data races
  *
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 239701c..8b97204 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -37,7 +37,7 @@
 #include <linux/preempt.h>
 #include <linux/lockdep.h>
 #include <linux/compiler.h>
-#include <linux/kcsan.h>
+#include <linux/kcsan-checks.h>
 #include <asm/processor.h>
 
 /*
-- 
2.9.5


^ permalink raw reply related	[flat|nested] 50+ messages in thread

* [PATCH kcsan 23/32] kcsan: Introduce kcsan_value_change type
  2020-03-09 19:03 [PATCH kcsan 0/32] KCSAN commits for v5.7 Paul E. McKenney
                   ` (21 preceding siblings ...)
  2020-03-09 19:04 ` [PATCH kcsan 22/32] compiler.h, seqlock.h: Remove unnecessary kcsan.h includes paulmck
@ 2020-03-09 19:04 ` paulmck
  2020-03-09 19:04 ` [PATCH kcsan 24/32] kcsan: Add kcsan_set_access_mask() support paulmck
                   ` (8 subsequent siblings)
  31 siblings, 0 replies; 50+ messages in thread
From: paulmck @ 2020-03-09 19:04 UTC (permalink / raw)
  To: linux-kernel, kasan-dev, kernel-team, mingo
  Cc: elver, andreyknvl, glider, dvyukov, cai, boqun.feng, Paul E . McKenney

From: Marco Elver <elver@google.com>

Introduces the kcsan_value_change type, which makes explicit whether we
either observed a value-change (TRUE), or could not observe one but
cannot rule out that a value-change happened (MAYBE). The MAYBE state
can either be reported or not, depending on configuration preferences.

A follow-up patch introduces the FALSE state, which should never be
reported.

No functional change intended.

Signed-off-by: Marco Elver <elver@google.com>
Acked-by: John Hubbard <jhubbard@nvidia.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
---
 kernel/kcsan/core.c   | 38 ++++++++++++++++++++++----------------
 kernel/kcsan/kcsan.h  | 19 ++++++++++++++++++-
 kernel/kcsan/report.c | 26 ++++++++++++++------------
 3 files changed, 54 insertions(+), 29 deletions(-)

diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index 498b1eb..3f89801 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -341,7 +341,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
 		u32 _4;
 		u64 _8;
 	} expect_value;
-	bool value_change = false;
+	enum kcsan_value_change value_change = KCSAN_VALUE_CHANGE_MAYBE;
 	unsigned long ua_flags = user_access_save();
 	unsigned long irq_flags;
 
@@ -398,6 +398,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
 	 * Read the current value, to later check and infer a race if the data
 	 * was modified via a non-instrumented access, e.g. from a device.
 	 */
+	expect_value._8 = 0;
 	switch (size) {
 	case 1:
 		expect_value._1 = READ_ONCE(*(const u8 *)ptr);
@@ -436,24 +437,37 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
 	 */
 	switch (size) {
 	case 1:
-		value_change = expect_value._1 != READ_ONCE(*(const u8 *)ptr);
+		expect_value._1 ^= READ_ONCE(*(const u8 *)ptr);
 		break;
 	case 2:
-		value_change = expect_value._2 != READ_ONCE(*(const u16 *)ptr);
+		expect_value._2 ^= READ_ONCE(*(const u16 *)ptr);
 		break;
 	case 4:
-		value_change = expect_value._4 != READ_ONCE(*(const u32 *)ptr);
+		expect_value._4 ^= READ_ONCE(*(const u32 *)ptr);
 		break;
 	case 8:
-		value_change = expect_value._8 != READ_ONCE(*(const u64 *)ptr);
+		expect_value._8 ^= READ_ONCE(*(const u64 *)ptr);
 		break;
 	default:
 		break; /* ignore; we do not diff the values */
 	}
 
+	/* Were we able to observe a value-change? */
+	if (expect_value._8 != 0)
+		value_change = KCSAN_VALUE_CHANGE_TRUE;
+
 	/* Check if this access raced with another. */
 	if (!remove_watchpoint(watchpoint)) {
 		/*
+		 * Depending on the access type, map a value_change of MAYBE to
+		 * TRUE (require reporting).
+		 */
+		if (value_change == KCSAN_VALUE_CHANGE_MAYBE && (size > 8 || is_assert)) {
+			/* Always assume a value-change. */
+			value_change = KCSAN_VALUE_CHANGE_TRUE;
+		}
+
+		/*
 		 * No need to increment 'data_races' counter, as the racing
 		 * thread already did.
 		 *
@@ -461,20 +475,12 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
 		 * therefore both this thread and the racing thread may
 		 * increment this counter.
 		 */
-		if (is_assert)
+		if (is_assert && value_change == KCSAN_VALUE_CHANGE_TRUE)
 			kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES);
 
-		/*
-		 * - If we were not able to observe a value change due to size
-		 *   constraints, always assume a value change.
-		 * - If the access type is an assertion, we also always assume a
-		 *   value change to always report the race.
-		 */
-		value_change = value_change || size > 8 || is_assert;
-
 		kcsan_report(ptr, size, type, value_change, smp_processor_id(),
 			     KCSAN_REPORT_RACE_SIGNAL);
-	} else if (value_change) {
+	} else if (value_change == KCSAN_VALUE_CHANGE_TRUE) {
 		/* Inferring a race, since the value should not have changed. */
 
 		kcsan_counter_inc(KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN);
@@ -482,7 +488,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
 			kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES);
 
 		if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert)
-			kcsan_report(ptr, size, type, true,
+			kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_TRUE,
 				     smp_processor_id(),
 				     KCSAN_REPORT_RACE_UNKNOWN_ORIGIN);
 	}
diff --git a/kernel/kcsan/kcsan.h b/kernel/kcsan/kcsan.h
index 50078e7..83a79b0 100644
--- a/kernel/kcsan/kcsan.h
+++ b/kernel/kcsan/kcsan.h
@@ -88,6 +88,22 @@ extern void kcsan_counter_dec(enum kcsan_counter_id id);
  */
 extern bool kcsan_skip_report_debugfs(unsigned long func_addr);
 
+/*
+ * Value-change states.
+ */
+enum kcsan_value_change {
+	/*
+	 * Did not observe a value-change, however, it is valid to report the
+	 * race, depending on preferences.
+	 */
+	KCSAN_VALUE_CHANGE_MAYBE,
+
+	/*
+	 * The value was observed to change, and the race should be reported.
+	 */
+	KCSAN_VALUE_CHANGE_TRUE,
+};
+
 enum kcsan_report_type {
 	/*
 	 * The thread that set up the watchpoint and briefly stalled was
@@ -111,6 +127,7 @@ enum kcsan_report_type {
  * Print a race report from thread that encountered the race.
  */
 extern void kcsan_report(const volatile void *ptr, size_t size, int access_type,
-			 bool value_change, int cpu_id, enum kcsan_report_type type);
+			 enum kcsan_value_change value_change, int cpu_id,
+			 enum kcsan_report_type type);
 
 #endif /* _KERNEL_KCSAN_KCSAN_H */
diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c
index abf6852..d871476 100644
--- a/kernel/kcsan/report.c
+++ b/kernel/kcsan/report.c
@@ -130,26 +130,27 @@ static bool rate_limit_report(unsigned long frame1, unsigned long frame2)
  * Special rules to skip reporting.
  */
 static bool
-skip_report(bool value_change, unsigned long top_frame)
+skip_report(enum kcsan_value_change value_change, unsigned long top_frame)
 {
 	/*
-	 * The first call to skip_report always has value_change==true, since we
+	 * The first call to skip_report always has value_change==TRUE, since we
 	 * cannot know the value written of an instrumented access. For the 2nd
 	 * call there are 6 cases with CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY:
 	 *
-	 * 1. read watchpoint, conflicting write (value_change==true): report;
-	 * 2. read watchpoint, conflicting write (value_change==false): skip;
-	 * 3. write watchpoint, conflicting write (value_change==true): report;
-	 * 4. write watchpoint, conflicting write (value_change==false): skip;
-	 * 5. write watchpoint, conflicting read (value_change==false): skip;
-	 * 6. write watchpoint, conflicting read (value_change==true): report;
+	 * 1. read watchpoint, conflicting write (value_change==TRUE): report;
+	 * 2. read watchpoint, conflicting write (value_change==MAYBE): skip;
+	 * 3. write watchpoint, conflicting write (value_change==TRUE): report;
+	 * 4. write watchpoint, conflicting write (value_change==MAYBE): skip;
+	 * 5. write watchpoint, conflicting read (value_change==MAYBE): skip;
+	 * 6. write watchpoint, conflicting read (value_change==TRUE): report;
 	 *
 	 * Cases 1-4 are intuitive and expected; case 5 ensures we do not report
 	 * data races where the write may have rewritten the same value; case 6
 	 * is possible either if the size is larger than what we check value
 	 * changes for or the access type is KCSAN_ACCESS_ASSERT.
 	 */
-	if (IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY) && !value_change) {
+	if (IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY) &&
+	    value_change == KCSAN_VALUE_CHANGE_MAYBE) {
 		/*
 		 * The access is a write, but the data value did not change.
 		 *
@@ -245,7 +246,7 @@ static int sym_strcmp(void *addr1, void *addr2)
  * Returns true if a report was generated, false otherwise.
  */
 static bool print_report(const volatile void *ptr, size_t size, int access_type,
-			 bool value_change, int cpu_id,
+			 enum kcsan_value_change value_change, int cpu_id,
 			 enum kcsan_report_type type)
 {
 	unsigned long stack_entries[NUM_STACK_ENTRIES] = { 0 };
@@ -258,7 +259,7 @@ static bool print_report(const volatile void *ptr, size_t size, int access_type,
 	/*
 	 * Must check report filter rules before starting to print.
 	 */
-	if (skip_report(true, stack_entries[skipnr]))
+	if (skip_report(KCSAN_VALUE_CHANGE_TRUE, stack_entries[skipnr]))
 		return false;
 
 	if (type == KCSAN_REPORT_RACE_SIGNAL) {
@@ -477,7 +478,8 @@ static bool prepare_report(unsigned long *flags, const volatile void *ptr,
 }
 
 void kcsan_report(const volatile void *ptr, size_t size, int access_type,
-		  bool value_change, int cpu_id, enum kcsan_report_type type)
+		  enum kcsan_value_change value_change, int cpu_id,
+		  enum kcsan_report_type type)
 {
 	unsigned long flags = 0;
 
-- 
2.9.5


^ permalink raw reply related	[flat|nested] 50+ messages in thread

* [PATCH kcsan 24/32] kcsan: Add kcsan_set_access_mask() support
  2020-03-09 19:03 [PATCH kcsan 0/32] KCSAN commits for v5.7 Paul E. McKenney
                   ` (22 preceding siblings ...)
  2020-03-09 19:04 ` [PATCH kcsan 23/32] kcsan: Introduce kcsan_value_change type paulmck
@ 2020-03-09 19:04 ` paulmck
  2020-03-09 19:04 ` [PATCH kcsan 25/32] kcsan: Introduce ASSERT_EXCLUSIVE_BITS(var, mask) paulmck
                   ` (7 subsequent siblings)
  31 siblings, 0 replies; 50+ messages in thread
From: paulmck @ 2020-03-09 19:04 UTC (permalink / raw)
  To: linux-kernel, kasan-dev, kernel-team, mingo
  Cc: elver, andreyknvl, glider, dvyukov, cai, boqun.feng, Paul E . McKenney

From: Marco Elver <elver@google.com>

When setting up an access mask with kcsan_set_access_mask(), KCSAN will
only report races if concurrent changes to bits set in access_mask are
observed. Conveying access_mask via a separate call avoids introducing
overhead in the common-case fast-path.
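As a hedged usage sketch (hypothetical flags variable, mask and shift;
a convenience macro wrapping this exact sequence is introduced later in
this series):

	kcsan_set_access_mask(FOO_MASK);
	__kcsan_check_access(&flags, sizeof(flags), KCSAN_ACCESS_ASSERT);
	kcsan_set_access_mask(0);
	kcsan_atomic_next(1);	/* the immediately following access only uses masked bits */

	foo = (READ_ONCE(flags) & FOO_MASK) >> FOO_SHIFT;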

Signed-off-by: Marco Elver <elver@google.com>
Acked-by: John Hubbard <jhubbard@nvidia.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
---
 include/linux/kcsan-checks.h | 11 +++++++++++
 include/linux/kcsan.h        |  5 +++++
 init/init_task.c             |  1 +
 kernel/kcsan/core.c          | 43 +++++++++++++++++++++++++++++++++++++++----
 kernel/kcsan/kcsan.h         |  5 +++++
 kernel/kcsan/report.c        | 13 ++++++++++++-
 6 files changed, 73 insertions(+), 5 deletions(-)

diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h
index 8675411..4ef5233 100644
--- a/include/linux/kcsan-checks.h
+++ b/include/linux/kcsan-checks.h
@@ -68,6 +68,16 @@ void kcsan_flat_atomic_end(void);
  */
 void kcsan_atomic_next(int n);
 
+/**
+ * kcsan_set_access_mask - set access mask
+ *
+ * Set the access mask for all accesses for the current context if non-zero.
+ * Only value changes to bits set in the mask will be reported.
+ *
+ * @mask bitmask
+ */
+void kcsan_set_access_mask(unsigned long mask);
+
 #else /* CONFIG_KCSAN */
 
 static inline void __kcsan_check_access(const volatile void *ptr, size_t size,
@@ -78,6 +88,7 @@ static inline void kcsan_nestable_atomic_end(void)	{ }
 static inline void kcsan_flat_atomic_begin(void)	{ }
 static inline void kcsan_flat_atomic_end(void)		{ }
 static inline void kcsan_atomic_next(int n)		{ }
+static inline void kcsan_set_access_mask(unsigned long mask) { }
 
 #endif /* CONFIG_KCSAN */
 
diff --git a/include/linux/kcsan.h b/include/linux/kcsan.h
index 7a614ca..3b84606 100644
--- a/include/linux/kcsan.h
+++ b/include/linux/kcsan.h
@@ -35,6 +35,11 @@ struct kcsan_ctx {
 	 */
 	int atomic_nest_count;
 	bool in_flat_atomic;
+
+	/*
+	 * Access mask for all accesses if non-zero.
+	 */
+	unsigned long access_mask;
 };
 
 /**
diff --git a/init/init_task.c b/init/init_task.c
index 2b4fe98..096191d 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -167,6 +167,7 @@ struct task_struct init_task
 		.atomic_next		= 0,
 		.atomic_nest_count	= 0,
 		.in_flat_atomic		= false,
+		.access_mask		= 0,
 	},
 #endif
 #ifdef CONFIG_TRACE_IRQFLAGS
diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index 3f89801..589b1e7 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -39,6 +39,7 @@ static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = {
 	.atomic_next		= 0,
 	.atomic_nest_count	= 0,
 	.in_flat_atomic		= false,
+	.access_mask		= 0,
 };
 
 /*
@@ -298,6 +299,15 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr,
 
 	if (!kcsan_is_enabled())
 		return;
+
+	/*
+	 * The access_mask check relies on value-change comparison. To avoid
+	 * reporting a race where e.g. the writer set up the watchpoint, but the
+	 * reader has access_mask!=0, we have to ignore the found watchpoint.
+	 */
+	if (get_ctx()->access_mask != 0)
+		return;
+
 	/*
 	 * Consume the watchpoint as soon as possible, to minimize the chances
 	 * of !consumed. Consuming the watchpoint must always be guarded by
@@ -341,6 +351,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
 		u32 _4;
 		u64 _8;
 	} expect_value;
+	unsigned long access_mask;
 	enum kcsan_value_change value_change = KCSAN_VALUE_CHANGE_MAYBE;
 	unsigned long ua_flags = user_access_save();
 	unsigned long irq_flags;
@@ -435,18 +446,27 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
 	 * Re-read value, and check if it is as expected; if not, we infer a
 	 * racy access.
 	 */
+	access_mask = get_ctx()->access_mask;
 	switch (size) {
 	case 1:
 		expect_value._1 ^= READ_ONCE(*(const u8 *)ptr);
+		if (access_mask)
+			expect_value._1 &= (u8)access_mask;
 		break;
 	case 2:
 		expect_value._2 ^= READ_ONCE(*(const u16 *)ptr);
+		if (access_mask)
+			expect_value._2 &= (u16)access_mask;
 		break;
 	case 4:
 		expect_value._4 ^= READ_ONCE(*(const u32 *)ptr);
+		if (access_mask)
+			expect_value._4 &= (u32)access_mask;
 		break;
 	case 8:
 		expect_value._8 ^= READ_ONCE(*(const u64 *)ptr);
+		if (access_mask)
+			expect_value._8 &= (u64)access_mask;
 		break;
 	default:
 		break; /* ignore; we do not diff the values */
@@ -460,11 +480,20 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
 	if (!remove_watchpoint(watchpoint)) {
 		/*
 		 * Depending on the access type, map a value_change of MAYBE to
-		 * TRUE (require reporting).
+		 * TRUE (always report) or FALSE (never report).
 		 */
-		if (value_change == KCSAN_VALUE_CHANGE_MAYBE && (size > 8 || is_assert)) {
-			/* Always assume a value-change. */
-			value_change = KCSAN_VALUE_CHANGE_TRUE;
+		if (value_change == KCSAN_VALUE_CHANGE_MAYBE) {
+			if (access_mask != 0) {
+				/*
+				 * For access with access_mask, we require a
+				 * value-change, as it is likely that races on
+				 * ~access_mask bits are expected.
+				 */
+				value_change = KCSAN_VALUE_CHANGE_FALSE;
+			} else if (size > 8 || is_assert) {
+				/* Always assume a value-change. */
+				value_change = KCSAN_VALUE_CHANGE_TRUE;
+			}
 		}
 
 		/*
@@ -622,6 +651,12 @@ void kcsan_atomic_next(int n)
 }
 EXPORT_SYMBOL(kcsan_atomic_next);
 
+void kcsan_set_access_mask(unsigned long mask)
+{
+	get_ctx()->access_mask = mask;
+}
+EXPORT_SYMBOL(kcsan_set_access_mask);
+
 void __kcsan_check_access(const volatile void *ptr, size_t size, int type)
 {
 	check_access(ptr, size, type);
diff --git a/kernel/kcsan/kcsan.h b/kernel/kcsan/kcsan.h
index 83a79b0..892de51 100644
--- a/kernel/kcsan/kcsan.h
+++ b/kernel/kcsan/kcsan.h
@@ -99,6 +99,11 @@ enum kcsan_value_change {
 	KCSAN_VALUE_CHANGE_MAYBE,
 
 	/*
+	 * Did not observe a value-change, and it is invalid to report the race.
+	 */
+	KCSAN_VALUE_CHANGE_FALSE,
+
+	/*
 	 * The value was observed to change, and the race should be reported.
 	 */
 	KCSAN_VALUE_CHANGE_TRUE,
diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c
index d871476..11c791b 100644
--- a/kernel/kcsan/report.c
+++ b/kernel/kcsan/report.c
@@ -132,6 +132,9 @@ static bool rate_limit_report(unsigned long frame1, unsigned long frame2)
 static bool
 skip_report(enum kcsan_value_change value_change, unsigned long top_frame)
 {
+	/* Should never get here if value_change==FALSE. */
+	WARN_ON_ONCE(value_change == KCSAN_VALUE_CHANGE_FALSE);
+
 	/*
 	 * The first call to skip_report always has value_change==TRUE, since we
 	 * cannot know the value written of an instrumented access. For the 2nd
@@ -493,7 +496,15 @@ void kcsan_report(const volatile void *ptr, size_t size, int access_type,
 
 	kcsan_disable_current();
 	if (prepare_report(&flags, ptr, size, access_type, cpu_id, type)) {
-		if (print_report(ptr, size, access_type, value_change, cpu_id, type) && panic_on_warn)
+		/*
+		 * Never report if value_change is FALSE, only if it is
+		 * either TRUE or MAYBE. In case of MAYBE, further filtering may
+		 * be done once we know the full stack trace in print_report().
+		 */
+		bool reported = value_change != KCSAN_VALUE_CHANGE_FALSE &&
+				print_report(ptr, size, access_type, value_change, cpu_id, type);
+
+		if (reported && panic_on_warn)
 			panic("panic_on_warn set ...\n");
 
 		release_report(&flags, type);
-- 
2.9.5


^ permalink raw reply related	[flat|nested] 50+ messages in thread

* [PATCH kcsan 25/32] kcsan: Introduce ASSERT_EXCLUSIVE_BITS(var, mask)
  2020-03-09 19:03 [PATCH kcsan 0/32] KCSAN commits for v5.7 Paul E. McKenney
                   ` (23 preceding siblings ...)
  2020-03-09 19:04 ` [PATCH kcsan 24/32] kcsan: Add kcsan_set_access_mask() support paulmck
@ 2020-03-09 19:04 ` paulmck
  2020-03-09 19:04 ` [PATCH kcsan 26/32] kcsan, trace: Make KCSAN compatible with tracing paulmck
                   ` (6 subsequent siblings)
  31 siblings, 0 replies; 50+ messages in thread
From: paulmck @ 2020-03-09 19:04 UTC (permalink / raw)
  To: linux-kernel, kasan-dev, kernel-team, mingo
  Cc: elver, andreyknvl, glider, dvyukov, cai, boqun.feng,
	Andrew Morton, David Hildenbrand, Jan Kara, Paul E . McKenney

From: Marco Elver <elver@google.com>

This introduces ASSERT_EXCLUSIVE_BITS(var, mask), which will cause
KCSAN to assume that the immediately following access is safe w.r.t.
data races (however, please see the docbook comment for the disclaimer).

For more context on why this was considered necessary, please see:
  http://lkml.kernel.org/r/1580995070-25139-1-git-send-email-cai@lca.pw

In particular, before this patch, data races between reads (which use
the @mask bits of an access that should not be modified concurrently)
and writes (which change ~@mask bits not used by the readers) would have
been annotated with "data_race()" (or "READ_ONCE()"). However, doing so
would then hide real problems: we would no longer be able to detect
harmful races between reads of @mask bits and writes to @mask bits.
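As a hedged before/after sketch (hypothetical flags variable and mask,
mirroring the example in the comment added below):

	/* Before: data_race() would also hide harmful races on the @mask bits. */
	foo = (data_race(flags) & READ_ONLY_MASK) >> READ_ONLY_SHIFT;

	/* After: concurrent writes to READ_ONLY_MASK bits are still detected. */
	ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK);
	foo = (flags & READ_ONLY_MASK) >> READ_ONLY_SHIFT;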

Therefore, by using ASSERT_EXCLUSIVE_BITS(var, mask), we accomplish:

  1. Avoid proliferation of specific macros at the call sites: by
     including a single mask in the argument list, we can use the same
     macro in a wide variety of call sites, regardless of how and which
     bits in a field each call site actually accesses.

  2. The existing code does not need to be modified (although READ_ONCE()
     may still be advisable if we cannot prove that the data race is
     always safe).

  3. We catch bugs where the exclusive bits are modified concurrently.

  4. We document properties of the current code.

Signed-off-by: Marco Elver <elver@google.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Paul E. McKenney <paulmck@kernel.org>
Cc: Qian Cai <cai@lca.pw>
Acked-by: John Hubbard <jhubbard@nvidia.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
---
 include/linux/kcsan-checks.h | 69 ++++++++++++++++++++++++++++++++++++++++----
 kernel/kcsan/debugfs.c       | 15 +++++++++-
 2 files changed, 77 insertions(+), 7 deletions(-)

diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h
index 4ef5233..1b8aac5 100644
--- a/include/linux/kcsan-checks.h
+++ b/include/linux/kcsan-checks.h
@@ -152,9 +152,9 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size,
 #endif
 
 /**
- * ASSERT_EXCLUSIVE_WRITER - assert no other threads are writing @var
+ * ASSERT_EXCLUSIVE_WRITER - assert no concurrent writes to @var
  *
- * Assert that there are no other threads writing @var; other readers are
+ * Assert that there are no concurrent writes to @var; other readers are
  * allowed. This assertion can be used to specify properties of concurrent code,
  * where violation cannot be detected as a normal data race.
  *
@@ -171,11 +171,11 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size,
 	__kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT)
 
 /**
- * ASSERT_EXCLUSIVE_ACCESS - assert no other threads are accessing @var
+ * ASSERT_EXCLUSIVE_ACCESS - assert no concurrent accesses to @var
  *
- * Assert that no other thread is accessing @var (no readers nor writers). This
- * assertion can be used to specify properties of concurrent code, where
- * violation cannot be detected as a normal data race.
+ * Assert that there are no concurrent accesses to @var (no readers nor
+ * writers). This assertion can be used to specify properties of concurrent
+ * code, where violation cannot be detected as a normal data race.
  *
  * For example, in a reference-counting algorithm where exclusive access is
  * expected after the refcount reaches 0. We can check that this property
@@ -191,4 +191,61 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size,
 #define ASSERT_EXCLUSIVE_ACCESS(var)                                           \
 	__kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT)
 
+/**
+ * ASSERT_EXCLUSIVE_BITS - assert no concurrent writes to subset of bits in @var
+ *
+ * Bit-granular variant of ASSERT_EXCLUSIVE_WRITER(var).
+ *
+ * Assert that there are no concurrent writes to a subset of bits in @var;
+ * concurrent readers are permitted. This assertion captures more detailed
+ * bit-level properties, compared to the other (word granularity) assertions.
+ * Only the bits set in @mask are checked for concurrent modifications, while
+ * ignoring the remaining bits, i.e. concurrent writes (or reads) to ~@mask bits
+ * are ignored.
+ *
+ * Use this for variables, where some bits must not be modified concurrently,
+ * yet other bits are expected to be modified concurrently.
+ *
+ * For example, variables where, after initialization, some bits are read-only,
+ * but other bits may still be modified concurrently. A reader may wish to
+ * assert that this is true as follows:
+ *
+ *	ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK);
+ *	foo = (READ_ONCE(flags) & READ_ONLY_MASK) >> READ_ONLY_SHIFT;
+ *
+ *   Note: The access that immediately follows ASSERT_EXCLUSIVE_BITS() is
+ *   assumed to access the masked bits only, and KCSAN optimistically assumes it
+ *   is therefore safe, even in the presence of data races, and marking it with
+ *   READ_ONCE() is optional from KCSAN's point-of-view. We caution, however,
+ *   that it may still be advisable to do so, since we cannot reason about all
+ *   compiler optimizations when it comes to bit manipulations (on the reader
+ *   and writer side). If you are sure nothing can go wrong, we can write the
+ *   above simply as:
+ *
+ * 	ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK);
+ *	foo = (flags & READ_ONLY_MASK) >> READ_ONLY_SHIFT;
+ *
+ * Another example, where this may be used, is when certain bits of @var may
+ * only be modified when holding the appropriate lock, but other bits may still
+ * be modified concurrently. Writers, where other bits may change concurrently,
+ * could use the assertion as follows:
+ *
+ *	spin_lock(&foo_lock);
+ *	ASSERT_EXCLUSIVE_BITS(flags, FOO_MASK);
+ *	old_flags = READ_ONCE(flags);
+ *	new_flags = (old_flags & ~FOO_MASK) | (new_foo << FOO_SHIFT);
+ *	if (cmpxchg(&flags, old_flags, new_flags) != old_flags) { ... }
+ *	spin_unlock(&foo_lock);
+ *
+ * @var variable to assert on
+ * @mask only check for modifications to bits set in @mask
+ */
+#define ASSERT_EXCLUSIVE_BITS(var, mask)                                       \
+	do {                                                                   \
+		kcsan_set_access_mask(mask);                                   \
+		__kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT);\
+		kcsan_set_access_mask(0);                                      \
+		kcsan_atomic_next(1);                                          \
+	} while (0)
+
 #endif /* _LINUX_KCSAN_CHECKS_H */
diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c
index 9bbba0e..2ff1961 100644
--- a/kernel/kcsan/debugfs.c
+++ b/kernel/kcsan/debugfs.c
@@ -100,8 +100,10 @@ static noinline void microbenchmark(unsigned long iters)
  * debugfs file from multiple tasks to generate real conflicts and show reports.
  */
 static long test_dummy;
+static long test_flags;
 static noinline void test_thread(unsigned long iters)
 {
+	const long CHANGE_BITS = 0xff00ff00ff00ff00L;
 	const struct kcsan_ctx ctx_save = current->kcsan_ctx;
 	cycles_t cycles;
 
@@ -109,16 +111,27 @@ static noinline void test_thread(unsigned long iters)
 	memset(&current->kcsan_ctx, 0, sizeof(current->kcsan_ctx));
 
 	pr_info("KCSAN: %s begin | iters: %lu\n", __func__, iters);
+	pr_info("test_dummy@%px, test_flags@%px\n", &test_dummy, &test_flags);
 
 	cycles = get_cycles();
 	while (iters--) {
+		/* These all should generate reports. */
 		__kcsan_check_read(&test_dummy, sizeof(test_dummy));
-		__kcsan_check_write(&test_dummy, sizeof(test_dummy));
 		ASSERT_EXCLUSIVE_WRITER(test_dummy);
 		ASSERT_EXCLUSIVE_ACCESS(test_dummy);
 
+		ASSERT_EXCLUSIVE_BITS(test_flags, ~CHANGE_BITS); /* no report */
+		__kcsan_check_read(&test_flags, sizeof(test_flags)); /* no report */
+
+		ASSERT_EXCLUSIVE_BITS(test_flags, CHANGE_BITS); /* report */
+		__kcsan_check_read(&test_flags, sizeof(test_flags)); /* no report */
+
 		/* not actually instrumented */
 		WRITE_ONCE(test_dummy, iters);  /* to observe value-change */
+		__kcsan_check_write(&test_dummy, sizeof(test_dummy));
+
+		test_flags ^= CHANGE_BITS; /* generate value-change */
+		__kcsan_check_write(&test_flags, sizeof(test_flags));
 	}
 	cycles = get_cycles() - cycles;
 
-- 
2.9.5


^ permalink raw reply related	[flat|nested] 50+ messages in thread

* [PATCH kcsan 26/32] kcsan, trace: Make KCSAN compatible with tracing
  2020-03-09 19:03 [PATCH kcsan 0/32] KCSAN commits for v5.7 Paul E. McKenney
                   ` (24 preceding siblings ...)
  2020-03-09 19:04 ` [PATCH kcsan 25/32] kcsan: Introduce ASSERT_EXCLUSIVE_BITS(var, mask) paulmck
@ 2020-03-09 19:04 ` paulmck
  2020-03-09 19:57   ` Steven Rostedt
  2020-03-09 19:04 ` [PATCH kcsan 27/32] kcsan: Add option to allow watcher interruptions paulmck
                   ` (5 subsequent siblings)
  31 siblings, 1 reply; 50+ messages in thread
From: paulmck @ 2020-03-09 19:04 UTC (permalink / raw)
  To: linux-kernel, kasan-dev, kernel-team, mingo
  Cc: elver, andreyknvl, glider, dvyukov, cai, boqun.feng,
	Paul E . McKenney, Steven Rostedt

From: Marco Elver <elver@google.com>

Previously the system would lock up if ftrace was enabled together with
KCSAN. This is due to recursion during reporting when the tracer code is
itself instrumented with KCSAN.

To avoid this for all types of tracing, disable KCSAN instrumentation
for all of kernel/trace.

Furthermore, since KCSAN relies on udelay() to introduce delay, we have
to disable ftrace for udelay() (currently done for x86) in case KCSAN is
used together with lockdep and ftrace. The reason is that it may corrupt
lockdep IRQ flags tracing state due to a peculiar case of recursion
(details in Makefile comment).

Signed-off-by: Marco Elver <elver@google.com>
Reported-by: Qian Cai <cai@lca.pw>
Cc: Paul E. McKenney <paulmck@kernel.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Tested-by: Qian Cai <cai@lca.pw>
---
 arch/x86/lib/Makefile | 5 +++++
 kernel/kcsan/Makefile | 2 ++
 kernel/trace/Makefile | 3 +++
 3 files changed, 10 insertions(+)

diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 432a077..6110bce7 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -8,6 +8,11 @@ KCOV_INSTRUMENT_delay.o	:= n
 
 # KCSAN uses udelay for introducing watchpoint delay; avoid recursion.
 KCSAN_SANITIZE_delay.o := n
+ifdef CONFIG_KCSAN
+# In case KCSAN+lockdep+ftrace are enabled, disable ftrace for delay.o to avoid
+# lockdep -> [other libs] -> KCSAN -> udelay -> ftrace -> lockdep recursion.
+CFLAGS_REMOVE_delay.o = $(CC_FLAGS_FTRACE)
+endif
 
 # Early boot use of cmdline; don't instrument it
 ifdef CONFIG_AMD_MEM_ENCRYPT
diff --git a/kernel/kcsan/Makefile b/kernel/kcsan/Makefile
index df6b779..d4999b3 100644
--- a/kernel/kcsan/Makefile
+++ b/kernel/kcsan/Makefile
@@ -4,6 +4,8 @@ KCOV_INSTRUMENT := n
 UBSAN_SANITIZE := n
 
 CFLAGS_REMOVE_core.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_debugfs.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_report.o = $(CC_FLAGS_FTRACE)
 
 CFLAGS_core.o := $(call cc-option,-fno-conserve-stack,) \
 	$(call cc-option,-fno-stack-protector,)
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 0e63db6..9072486 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -6,6 +6,9 @@ ifdef CONFIG_FUNCTION_TRACER
 ORIG_CFLAGS := $(KBUILD_CFLAGS)
 KBUILD_CFLAGS = $(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS))
 
+# Avoid recursion due to instrumentation.
+KCSAN_SANITIZE := n
+
 ifdef CONFIG_FTRACE_SELFTEST
 # selftest needs instrumentation
 CFLAGS_trace_selftest_dynamic.o = $(CC_FLAGS_FTRACE)
-- 
2.9.5


^ permalink raw reply related	[flat|nested] 50+ messages in thread

* [PATCH kcsan 27/32] kcsan: Add option to allow watcher interruptions
  2020-03-09 19:03 [PATCH kcsan 0/32] KCSAN commits for v5.7 Paul E. McKenney
                   ` (25 preceding siblings ...)
  2020-03-09 19:04 ` [PATCH kcsan 26/32] kcsan, trace: Make KCSAN compatible with tracing paulmck
@ 2020-03-09 19:04 ` paulmck
  2020-03-12 18:03   ` Paul E. McKenney
  2020-03-09 19:04 ` [PATCH kcsan 28/32] kcsan: Add option for verbose reporting paulmck
                   ` (4 subsequent siblings)
  31 siblings, 1 reply; 50+ messages in thread
From: paulmck @ 2020-03-09 19:04 UTC (permalink / raw)
  To: linux-kernel, kasan-dev, kernel-team, mingo
  Cc: elver, andreyknvl, glider, dvyukov, cai, boqun.feng, Paul E . McKenney

From: Marco Elver <elver@google.com>

Add option to allow interrupts while a watchpoint is set up. This can be
enabled either via CONFIG_KCSAN_INTERRUPT_WATCHER or via the boot
parameter 'kcsan.interrupt_watcher=1'.

Note that currently not all safe per-CPU access primitives and patterns
are accounted for, which could result in false positives. For example,
asm-generic/percpu.h uses plain operations, which by default are
instrumented. If an interrupt then accesses the same per-CPU variable as
the interrupted task, KCSAN would currently report a data race with this
option.
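A hedged sketch of such a false positive (hypothetical per-CPU counter;
the generic per-CPU accessors compile down to plain, instrumented
accesses here):

	/* Hypothetical per-CPU counter, updated with plain (instrumented) ops. */
	DEFINE_PER_CPU(int, hypothetical_count);

	/* Task context: KCSAN may arm a watchpoint on this access ... */
	__this_cpu_add(hypothetical_count, 1);

	/*
	 * ... and an interrupt arriving during the watchpoint delay that also
	 * does __this_cpu_add(hypothetical_count, 1) would currently be
	 * reported as a data race with this option enabled.
	 */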

Therefore, this option should currently remain disabled by default, but
may be enabled for specific test scenarios.

To avoid new warnings, change all uses of smp_processor_id() to the raw
version (as already done in kcsan_found_watchpoint()). The exact SMP
processor id in the report is for informational purposes only, and
correctness is not affected.

Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
---
 kernel/kcsan/core.c | 34 ++++++++++------------------------
 lib/Kconfig.kcsan   | 11 +++++++++++
 2 files changed, 21 insertions(+), 24 deletions(-)

diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index 589b1e7..e7387fe 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -21,6 +21,7 @@ static bool kcsan_early_enable = IS_ENABLED(CONFIG_KCSAN_EARLY_ENABLE);
 static unsigned int kcsan_udelay_task = CONFIG_KCSAN_UDELAY_TASK;
 static unsigned int kcsan_udelay_interrupt = CONFIG_KCSAN_UDELAY_INTERRUPT;
 static long kcsan_skip_watch = CONFIG_KCSAN_SKIP_WATCH;
+static bool kcsan_interrupt_watcher = IS_ENABLED(CONFIG_KCSAN_INTERRUPT_WATCHER);
 
 #ifdef MODULE_PARAM_PREFIX
 #undef MODULE_PARAM_PREFIX
@@ -30,6 +31,7 @@ module_param_named(early_enable, kcsan_early_enable, bool, 0);
 module_param_named(udelay_task, kcsan_udelay_task, uint, 0644);
 module_param_named(udelay_interrupt, kcsan_udelay_interrupt, uint, 0644);
 module_param_named(skip_watch, kcsan_skip_watch, long, 0644);
+module_param_named(interrupt_watcher, kcsan_interrupt_watcher, bool, 0444);
 
 bool kcsan_enabled;
 
@@ -354,7 +356,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
 	unsigned long access_mask;
 	enum kcsan_value_change value_change = KCSAN_VALUE_CHANGE_MAYBE;
 	unsigned long ua_flags = user_access_save();
-	unsigned long irq_flags;
+	unsigned long irq_flags = 0;
 
 	/*
 	 * Always reset kcsan_skip counter in slow-path to avoid underflow; see
@@ -370,26 +372,9 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
 		goto out;
 	}
 
-	/*
-	 * Disable interrupts & preemptions to avoid another thread on the same
-	 * CPU accessing memory locations for the set up watchpoint; this is to
-	 * avoid reporting races to e.g. CPU-local data.
-	 *
-	 * An alternative would be adding the source CPU to the watchpoint
-	 * encoding, and checking that watchpoint-CPU != this-CPU. There are
-	 * several problems with this:
-	 *   1. we should avoid stealing more bits from the watchpoint encoding
-	 *      as it would affect accuracy, as well as increase performance
-	 *      overhead in the fast-path;
-	 *   2. if we are preempted, but there *is* a genuine data race, we
-	 *      would *not* report it -- since this is the common case (vs.
-	 *      CPU-local data accesses), it makes more sense (from a data race
-	 *      detection point of view) to simply disable preemptions to ensure
-	 *      as many tasks as possible run on other CPUs.
-	 *
-	 * Use raw versions, to avoid lockdep recursion via IRQ flags tracing.
-	 */
-	raw_local_irq_save(irq_flags);
+	if (!kcsan_interrupt_watcher)
+		/* Use raw to avoid lockdep recursion via IRQ flags tracing. */
+		raw_local_irq_save(irq_flags);
 
 	watchpoint = insert_watchpoint((unsigned long)ptr, size, is_write);
 	if (watchpoint == NULL) {
@@ -507,7 +492,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
 		if (is_assert && value_change == KCSAN_VALUE_CHANGE_TRUE)
 			kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES);
 
-		kcsan_report(ptr, size, type, value_change, smp_processor_id(),
+		kcsan_report(ptr, size, type, value_change, raw_smp_processor_id(),
 			     KCSAN_REPORT_RACE_SIGNAL);
 	} else if (value_change == KCSAN_VALUE_CHANGE_TRUE) {
 		/* Inferring a race, since the value should not have changed. */
@@ -518,13 +503,14 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
 
 		if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert)
 			kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_TRUE,
-				     smp_processor_id(),
+				     raw_smp_processor_id(),
 				     KCSAN_REPORT_RACE_UNKNOWN_ORIGIN);
 	}
 
 	kcsan_counter_dec(KCSAN_COUNTER_USED_WATCHPOINTS);
 out_unlock:
-	raw_local_irq_restore(irq_flags);
+	if (!kcsan_interrupt_watcher)
+		raw_local_irq_restore(irq_flags);
 out:
 	user_access_restore(ua_flags);
 }
diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan
index f0b7911..081ed2e 100644
--- a/lib/Kconfig.kcsan
+++ b/lib/Kconfig.kcsan
@@ -88,6 +88,17 @@ config KCSAN_SKIP_WATCH_RANDOMIZE
 	  KCSAN_WATCH_SKIP. If false, the chosen value is always
 	  KCSAN_WATCH_SKIP.
 
+config KCSAN_INTERRUPT_WATCHER
+	bool "Interruptible watchers"
+	help
+	  If enabled, a task that set up a watchpoint may be interrupted while
+	  delayed. This option will allow KCSAN to detect races between
+	  interrupted tasks and other threads of execution on the same CPU.
+
+	  Currently disabled by default, because not all safe per-CPU access
+	  primitives and patterns may be accounted for, and therefore could
+	  result in false positives.
+
 config KCSAN_REPORT_ONCE_IN_MS
 	int "Duration in milliseconds, in which any given race is only reported once"
 	default 3000
-- 
2.9.5


^ permalink raw reply related	[flat|nested] 50+ messages in thread

* [PATCH kcsan 28/32] kcsan: Add option for verbose reporting
  2020-03-09 19:03 [PATCH kcsan 0/32] KCSAN commits for v5.7 Paul E. McKenney
                   ` (26 preceding siblings ...)
  2020-03-09 19:04 ` [PATCH kcsan 27/32] kcsan: Add option to allow watcher interruptions paulmck
@ 2020-03-09 19:04 ` paulmck
  2020-03-09 19:04 ` [PATCH kcsan 29/32] kcsan: Add current->state to implicitly atomic accesses paulmck
                   ` (3 subsequent siblings)
  31 siblings, 0 replies; 50+ messages in thread
From: paulmck @ 2020-03-09 19:04 UTC (permalink / raw)
  To: linux-kernel, kasan-dev, kernel-team, mingo
  Cc: elver, andreyknvl, glider, dvyukov, cai, boqun.feng, Paul E . McKenney

From: Marco Elver <elver@google.com>

Adds CONFIG_KCSAN_VERBOSE to optionally enable more verbose reports.
Currently, information about the reporting task's held locks and IRQ
trace events is shown, if they are enabled.

Signed-off-by: Marco Elver <elver@google.com>
Suggested-by: Qian Cai <cai@lca.pw>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
---
 kernel/kcsan/core.c   |   4 +-
 kernel/kcsan/kcsan.h  |   3 ++
 kernel/kcsan/report.c | 103 +++++++++++++++++++++++++++++++++++++++++++++++++-
 lib/Kconfig.kcsan     |  13 +++++++
 4 files changed, 120 insertions(+), 3 deletions(-)

diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index e7387fe..065615d 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -18,8 +18,8 @@
 #include "kcsan.h"
 
 static bool kcsan_early_enable = IS_ENABLED(CONFIG_KCSAN_EARLY_ENABLE);
-static unsigned int kcsan_udelay_task = CONFIG_KCSAN_UDELAY_TASK;
-static unsigned int kcsan_udelay_interrupt = CONFIG_KCSAN_UDELAY_INTERRUPT;
+unsigned int kcsan_udelay_task = CONFIG_KCSAN_UDELAY_TASK;
+unsigned int kcsan_udelay_interrupt = CONFIG_KCSAN_UDELAY_INTERRUPT;
 static long kcsan_skip_watch = CONFIG_KCSAN_SKIP_WATCH;
 static bool kcsan_interrupt_watcher = IS_ENABLED(CONFIG_KCSAN_INTERRUPT_WATCHER);
 
diff --git a/kernel/kcsan/kcsan.h b/kernel/kcsan/kcsan.h
index 892de51..e282f8b 100644
--- a/kernel/kcsan/kcsan.h
+++ b/kernel/kcsan/kcsan.h
@@ -13,6 +13,9 @@
 /* The number of adjacent watchpoints to check. */
 #define KCSAN_CHECK_ADJACENT 1
 
+extern unsigned int kcsan_udelay_task;
+extern unsigned int kcsan_udelay_interrupt;
+
 /*
  * Globally enable and disable KCSAN.
  */
diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c
index 11c791b..7bdb515 100644
--- a/kernel/kcsan/report.c
+++ b/kernel/kcsan/report.c
@@ -1,5 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 
+#include <linux/debug_locks.h>
+#include <linux/delay.h>
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/lockdep.h>
@@ -31,7 +33,26 @@ static struct {
 	int			cpu_id;
 	unsigned long		stack_entries[NUM_STACK_ENTRIES];
 	int			num_stack_entries;
-} other_info = { .ptr = NULL };
+
+	/*
+	 * Optionally pass @current. Typically we do not need to pass @current
+	 * via @other_info since just @task_pid is sufficient. Passing @current
+	 * has additional overhead.
+	 *
+	 * To safely pass @current, we must either use get_task_struct/
+	 * put_task_struct, or stall the thread that populated @other_info.
+	 *
+	 * We cannot rely on get_task_struct/put_task_struct in case
+	 * release_report() races with a task being released, and would have to
+	 * free it in release_report(). This may result in deadlock if we want
+	 * to use KCSAN on the allocators.
+	 *
+	 * Since we also want to reliably print held locks for
+	 * CONFIG_KCSAN_VERBOSE, the current implementation stalls the thread
+	 * that populated @other_info until it has been consumed.
+	 */
+	struct task_struct	*task;
+} other_info;
 
 /*
  * Information about reported races; used to rate limit reporting.
@@ -245,6 +266,16 @@ static int sym_strcmp(void *addr1, void *addr2)
 	return strncmp(buf1, buf2, sizeof(buf1));
 }
 
+static void print_verbose_info(struct task_struct *task)
+{
+	if (!task)
+		return;
+
+	pr_err("\n");
+	debug_show_held_locks(task);
+	print_irqtrace_events(task);
+}
+
 /*
  * Returns true if a report was generated, false otherwise.
  */
@@ -319,6 +350,9 @@ static bool print_report(const volatile void *ptr, size_t size, int access_type,
 				  other_info.num_stack_entries - other_skipnr,
 				  0);
 
+		if (IS_ENABLED(CONFIG_KCSAN_VERBOSE))
+			print_verbose_info(other_info.task);
+
 		pr_err("\n");
 		pr_err("%s to 0x%px of %zu bytes by %s on cpu %i:\n",
 		       get_access_type(access_type), ptr, size,
@@ -340,6 +374,9 @@ static bool print_report(const volatile void *ptr, size_t size, int access_type,
 	stack_trace_print(stack_entries + skipnr, num_stack_entries - skipnr,
 			  0);
 
+	if (IS_ENABLED(CONFIG_KCSAN_VERBOSE))
+		print_verbose_info(current);
+
 	/* Print report footer. */
 	pr_err("\n");
 	pr_err("Reported by Kernel Concurrency Sanitizer on:\n");
@@ -358,6 +395,67 @@ static void release_report(unsigned long *flags, enum kcsan_report_type type)
 }
 
 /*
+ * Sets @other_info.task and awaits consumption of @other_info.
+ *
+ * Precondition: report_lock is held.
+ * Postcondition: report_lock is held.
+ */
+static void
+set_other_info_task_blocking(unsigned long *flags, const volatile void *ptr)
+{
+	/*
+	 * We may be instrumenting a code-path where current->state is already
+	 * something other than TASK_RUNNING.
+	 */
+	const bool is_running = current->state == TASK_RUNNING;
+	/*
+	 * To avoid deadlock in case we are in an interrupt here and this is a
+	 * race with a task on the same CPU (KCSAN_INTERRUPT_WATCHER), provide a
+	 * timeout to ensure this works in all contexts.
+	 *
+	 * Await approximately the worst case delay of the reporting thread (if
+	 * we are not interrupted).
+	 */
+	int timeout = max(kcsan_udelay_task, kcsan_udelay_interrupt);
+
+	other_info.task = current;
+	do {
+		if (is_running) {
+			/*
+			 * Let lockdep know the real task is sleeping, to print
+			 * the held locks (recall we turned lockdep off, so
+			 * locking/unlocking @report_lock won't be recorded).
+			 */
+			set_current_state(TASK_UNINTERRUPTIBLE);
+		}
+		spin_unlock_irqrestore(&report_lock, *flags);
+		/*
+		 * We cannot call schedule() since we also cannot reliably
+		 * determine if sleeping here is permitted -- see in_atomic().
+		 */
+
+		udelay(1);
+		spin_lock_irqsave(&report_lock, *flags);
+		if (timeout-- < 0) {
+			/*
+			 * Abort. Reset other_info.task to NULL, since it
+			 * appears the other thread is still going to consume
+			 * it. It will result in no verbose info printed for
+			 * this task.
+			 */
+			other_info.task = NULL;
+			break;
+		}
+		/*
+		 * If @ptr or @current no longer matches, then our information
+		 * has been consumed and we may continue. If not, retry.
+		 */
+	} while (other_info.ptr == ptr && other_info.task == current);
+	if (is_running)
+		set_current_state(TASK_RUNNING);
+}
+
+/*
  * Depending on the report type either sets other_info and returns false, or
  * acquires the matching other_info and returns true. If other_info is not
  * required for the report type, simply acquires report_lock and returns true.
@@ -388,6 +486,9 @@ static bool prepare_report(unsigned long *flags, const volatile void *ptr,
 		other_info.cpu_id		= cpu_id;
 		other_info.num_stack_entries	= stack_trace_save(other_info.stack_entries, NUM_STACK_ENTRIES, 1);
 
+		if (IS_ENABLED(CONFIG_KCSAN_VERBOSE))
+			set_other_info_task_blocking(flags, ptr);
+
 		spin_unlock_irqrestore(&report_lock, *flags);
 
 		/*
diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan
index 081ed2e..0f1447f 100644
--- a/lib/Kconfig.kcsan
+++ b/lib/Kconfig.kcsan
@@ -20,6 +20,19 @@ menuconfig KCSAN
 
 if KCSAN
 
+config KCSAN_VERBOSE
+	bool "Show verbose reports with more information about system state"
+	depends on PROVE_LOCKING
+	help
+	  If enabled, reports show more information about the system state that
+	  may help better analyze and debug races. This includes held locks and
+	  IRQ trace events.
+
+	  While this option should generally be benign, we call into more
+	  external functions on report generation; if a race report is
+	  generated from any one of them, system stability may suffer due to
+	  deadlocks or recursion.  If in doubt, say N.
+
 config KCSAN_DEBUG
 	bool "Debugging of KCSAN internals"
 
-- 
2.9.5


^ permalink raw reply related	[flat|nested] 50+ messages in thread

* [PATCH kcsan 29/32] kcsan: Add current->state to implicitly atomic accesses
  2020-03-09 19:03 [PATCH kcsan 0/32] KCSAN commits for v5.7 Paul E. McKenney
                   ` (27 preceding siblings ...)
  2020-03-09 19:04 ` [PATCH kcsan 28/32] kcsan: Add option for verbose reporting paulmck
@ 2020-03-09 19:04 ` paulmck
  2020-03-09 19:04 ` [PATCH kcsan 30/32] kcsan: Fix a typo in a comment paulmck
                   ` (2 subsequent siblings)
  31 siblings, 0 replies; 50+ messages in thread
From: paulmck @ 2020-03-09 19:04 UTC (permalink / raw)
  To: linux-kernel, kasan-dev, kernel-team, mingo
  Cc: elver, andreyknvl, glider, dvyukov, cai, boqun.feng, Paul E . McKenney

From: Marco Elver <elver@google.com>

Add the volatile current->state to the list of implicitly atomic
accesses. This is in preparation for eventually enabling KCSAN on
kernel/sched (which currently still has KCSAN_SANITIZE := n).

Since accesses that match the special check in atomic.h are rare, it
makes more sense to move this check to the slow-path, avoiding the
additional compare in the fast-path. With the microbenchmark, a speedup
of ~6% is measured.

Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
---
 kernel/kcsan/atomic.h  | 21 +++++++--------------
 kernel/kcsan/core.c    | 22 +++++++++++++++-------
 kernel/kcsan/debugfs.c | 27 ++++++++++++++++++---------
 3 files changed, 40 insertions(+), 30 deletions(-)

diff --git a/kernel/kcsan/atomic.h b/kernel/kcsan/atomic.h
index a9c1930..be9e625 100644
--- a/kernel/kcsan/atomic.h
+++ b/kernel/kcsan/atomic.h
@@ -4,24 +4,17 @@
 #define _KERNEL_KCSAN_ATOMIC_H
 
 #include <linux/jiffies.h>
+#include <linux/sched.h>
 
 /*
- * Helper that returns true if access to @ptr should be considered an atomic
- * access, even though it is not explicitly atomic.
- *
- * List all volatile globals that have been observed in races, to suppress
- * data race reports between accesses to these variables.
- *
- * For now, we assume that volatile accesses of globals are as strong as atomic
- * accesses (READ_ONCE, WRITE_ONCE cast to volatile). The situation is still not
- * entirely clear, as on some architectures (Alpha) READ_ONCE/WRITE_ONCE do more
- * than cast to volatile. Eventually, we hope to be able to remove this
- * function.
+ * Special rules for certain memory where concurrent conflicting accesses are
+ * common, however, the current convention is to not mark them; returns true if
+ * access to @ptr should be considered atomic. Called from slow-path.
  */
-static __always_inline bool kcsan_is_atomic(const volatile void *ptr)
+static bool kcsan_is_atomic_special(const volatile void *ptr)
 {
-	/* only jiffies for now */
-	return ptr == &jiffies;
+	/* volatile globals that have been observed in data races. */
+	return ptr == &jiffies || ptr == &current->state;
 }
 
 #endif /* _KERNEL_KCSAN_ATOMIC_H */
diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index 065615d..eb30ecd 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -188,12 +188,13 @@ static __always_inline struct kcsan_ctx *get_ctx(void)
 	return in_task() ? &current->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx);
 }
 
+/* Rules for generic atomic accesses. Called from fast-path. */
 static __always_inline bool
 is_atomic(const volatile void *ptr, size_t size, int type)
 {
 	struct kcsan_ctx *ctx;
 
-	if ((type & KCSAN_ACCESS_ATOMIC) != 0)
+	if (type & KCSAN_ACCESS_ATOMIC)
 		return true;
 
 	/*
@@ -201,16 +202,16 @@ is_atomic(const volatile void *ptr, size_t size, int type)
 	 * as atomic. This allows using them also in atomic regions, such as
 	 * seqlocks, without implicitly changing their semantics.
 	 */
-	if ((type & KCSAN_ACCESS_ASSERT) != 0)
+	if (type & KCSAN_ACCESS_ASSERT)
 		return false;
 
 	if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC) &&
-	    (type & KCSAN_ACCESS_WRITE) != 0 && size <= sizeof(long) &&
+	    (type & KCSAN_ACCESS_WRITE) && size <= sizeof(long) &&
 	    IS_ALIGNED((unsigned long)ptr, size))
 		return true; /* Assume aligned writes up to word size are atomic. */
 
 	ctx = get_ctx();
-	if (unlikely(ctx->atomic_next > 0)) {
+	if (ctx->atomic_next > 0) {
 		/*
 		 * Because we do not have separate contexts for nested
 		 * interrupts, in case atomic_next is set, we simply assume that
@@ -224,10 +225,8 @@ is_atomic(const volatile void *ptr, size_t size, int type)
 			--ctx->atomic_next; /* in task, or outer interrupt */
 		return true;
 	}
-	if (unlikely(ctx->atomic_nest_count > 0 || ctx->in_flat_atomic))
-		return true;
 
-	return kcsan_is_atomic(ptr);
+	return ctx->atomic_nest_count > 0 || ctx->in_flat_atomic;
 }
 
 static __always_inline bool
@@ -367,6 +366,15 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
 	if (!kcsan_is_enabled())
 		goto out;
 
+	/*
+	 * Special atomic rules: unlikely to be true, so we check them here in
+	 * the slow-path, and not in the fast-path in is_atomic(). Call after
+	 * kcsan_is_enabled(), as we may access memory that is not yet
+	 * initialized during early boot.
+	 */
+	if (!is_assert && kcsan_is_atomic_special(ptr))
+		goto out;
+
 	if (!check_encodable((unsigned long)ptr, size)) {
 		kcsan_counter_inc(KCSAN_COUNTER_UNENCODABLE_ACCESSES);
 		goto out;
diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c
index 2ff1961..72ee188 100644
--- a/kernel/kcsan/debugfs.c
+++ b/kernel/kcsan/debugfs.c
@@ -74,25 +74,34 @@ void kcsan_counter_dec(enum kcsan_counter_id id)
  */
 static noinline void microbenchmark(unsigned long iters)
 {
+	const struct kcsan_ctx ctx_save = current->kcsan_ctx;
+	const bool was_enabled = READ_ONCE(kcsan_enabled);
 	cycles_t cycles;
 
+	/* We may have been called from an atomic region; reset context. */
+	memset(&current->kcsan_ctx, 0, sizeof(current->kcsan_ctx));
+	/*
+	 * Disable to benchmark fast-path for all accesses, and (expected
+	 * negligible) call into slow-path, but never set up watchpoints.
+	 */
+	WRITE_ONCE(kcsan_enabled, false);
+
 	pr_info("KCSAN: %s begin | iters: %lu\n", __func__, iters);
 
 	cycles = get_cycles();
 	while (iters--) {
-		/*
-		 * We can run this benchmark from multiple tasks; this address
-		 * calculation increases likelyhood of some accesses
-		 * overlapping. Make the access type an atomic read, to never
-		 * set up watchpoints and test the fast-path only.
-		 */
-		unsigned long addr =
-			iters % (CONFIG_KCSAN_NUM_WATCHPOINTS * PAGE_SIZE);
-		__kcsan_check_access((void *)addr, sizeof(long), KCSAN_ACCESS_ATOMIC);
+		unsigned long addr = iters & ((PAGE_SIZE << 8) - 1);
+		int type = !(iters & 0x7f) ? KCSAN_ACCESS_ATOMIC :
+				(!(iters & 0xf) ? KCSAN_ACCESS_WRITE : 0);
+		__kcsan_check_access((void *)addr, sizeof(long), type);
 	}
 	cycles = get_cycles() - cycles;
 
 	pr_info("KCSAN: %s end   | cycles: %llu\n", __func__, cycles);
+
+	WRITE_ONCE(kcsan_enabled, was_enabled);
+	/* restore context */
+	current->kcsan_ctx = ctx_save;
 }
 
 /*
-- 
2.9.5


^ permalink raw reply related	[flat|nested] 50+ messages in thread

* [PATCH kcsan 30/32] kcsan: Fix a typo in a comment
  2020-03-09 19:03 [PATCH kcsan 0/32] KCSAN commits for v5.7 Paul E. McKenney
                   ` (28 preceding siblings ...)
  2020-03-09 19:04 ` [PATCH kcsan 29/32] kcsan: Add current->state to implicitly atomic accesses paulmck
@ 2020-03-09 19:04 ` paulmck
  2020-03-09 19:04 ` [PATCH kcsan 31/32] kcsan: Update Documentation/dev-tools/kcsan.rst paulmck
  2020-03-09 19:04 ` [PATCH kcsan 32/32] kcsan: Update API documentation in kcsan-checks.h paulmck
  31 siblings, 0 replies; 50+ messages in thread
From: paulmck @ 2020-03-09 19:04 UTC (permalink / raw)
  To: linux-kernel, kasan-dev, kernel-team, mingo
  Cc: elver, andreyknvl, glider, dvyukov, cai, boqun.feng,
	Qiujun Huang, Paul E . McKenney

From: Qiujun Huang <hqjagain@gmail.com>

s/slots slots/slots/

Signed-off-by: Qiujun Huang <hqjagain@gmail.com>
Reviewed-by: Nick Desaulniers <ndesaulniers@google.com>
[elver: commit message]
Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
---
 kernel/kcsan/core.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index eb30ecd..ee82008 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -45,7 +45,7 @@ static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = {
 };
 
 /*
- * Helper macros to index into adjacent slots slots, starting from address slot
+ * Helper macros to index into adjacent slots, starting from address slot
  * itself, followed by the right and left slots.
  *
  * The purpose is 2-fold:
-- 
2.9.5


^ permalink raw reply related	[flat|nested] 50+ messages in thread

* [PATCH kcsan 31/32] kcsan: Update Documentation/dev-tools/kcsan.rst
  2020-03-09 19:03 [PATCH kcsan 0/32] KCSAN commits for v5.7 Paul E. McKenney
                   ` (29 preceding siblings ...)
  2020-03-09 19:04 ` [PATCH kcsan 30/32] kcsan: Fix a typo in a comment paulmck
@ 2020-03-09 19:04 ` paulmck
  2020-03-09 19:04 ` [PATCH kcsan 32/32] kcsan: Update API documentation in kcsan-checks.h paulmck
  31 siblings, 0 replies; 50+ messages in thread
From: paulmck @ 2020-03-09 19:04 UTC (permalink / raw)
  To: linux-kernel, kasan-dev, kernel-team, mingo
  Cc: elver, andreyknvl, glider, dvyukov, cai, boqun.feng, Paul E . McKenney

From: Marco Elver <elver@google.com>

Extend and improve based on recent changes, and summarize important
bits that have been missing. Tested with "make htmldocs".

Signed-off-by: Marco Elver <elver@google.com>
Cc: Qian Cai <cai@lca.pw>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
---
 Documentation/dev-tools/kcsan.rst | 227 ++++++++++++++++++++++++--------------
 1 file changed, 144 insertions(+), 83 deletions(-)

diff --git a/Documentation/dev-tools/kcsan.rst b/Documentation/dev-tools/kcsan.rst
index 65a0be5..52a5d6f 100644
--- a/Documentation/dev-tools/kcsan.rst
+++ b/Documentation/dev-tools/kcsan.rst
@@ -1,27 +1,22 @@
 The Kernel Concurrency Sanitizer (KCSAN)
 ========================================
 
-Overview
---------
-
-*Kernel Concurrency Sanitizer (KCSAN)* is a dynamic data race detector for
-kernel space. KCSAN is a sampling watchpoint-based data race detector. Key
-priorities in KCSAN's design are lack of false positives, scalability, and
-simplicity. More details can be found in `Implementation Details`_.
-
-KCSAN uses compile-time instrumentation to instrument memory accesses. KCSAN is
-supported in both GCC and Clang. With GCC it requires version 7.3.0 or later.
-With Clang it requires version 7.0.0 or later.
+The Kernel Concurrency Sanitizer (KCSAN) is a dynamic race detector, which
+relies on compile-time instrumentation, and uses a watchpoint-based sampling
+approach to detect races. KCSAN's primary purpose is to detect `data races`_.
 
 Usage
 -----
 
-To enable KCSAN configure kernel with::
+KCSAN is supported in both GCC and Clang. With GCC it requires version 7.3.0 or
+later. With Clang it requires version 7.0.0 or later.
+
+To enable KCSAN configure the kernel with::
 
     CONFIG_KCSAN = y
 
 KCSAN provides several other configuration options to customize behaviour (see
-their respective help text for more info).
+the respective help text in ``lib/Kconfig.kcsan`` for more info).
 
 Error reports
 ~~~~~~~~~~~~~
@@ -96,7 +91,8 @@ The other less common type of data race report looks like this::
 This report is generated where it was not possible to determine the other
 racing thread, but a race was inferred due to the data value of the watched
 memory location having changed. These can occur either due to missing
-instrumentation or e.g. DMA accesses.
+instrumentation or e.g. DMA accesses. These reports will only be generated if
+``CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN=y`` (selected by default).
 
 Selective analysis
 ~~~~~~~~~~~~~~~~~~
@@ -110,9 +106,26 @@ the below options are available:
   behaviour when encountering a data race is deemed safe.
 
 * Disabling data race detection for entire functions can be accomplished by
-  using the function attribute ``__no_kcsan`` (or ``__no_kcsan_or_inline`` for
-  ``__always_inline`` functions). To dynamically control for which functions
-  data races are reported, see the `debugfs`_ blacklist/whitelist feature.
+  using the function attribute ``__no_kcsan``::
+
+    __no_kcsan
+    void foo(void) {
+        ...
+
+  To dynamically limit which functions generate reports, see the
+  `DebugFS interface`_ blacklist/whitelist feature.
+
+  For ``__always_inline`` functions, replace ``__always_inline`` with
+  ``__no_kcsan_or_inline`` (which implies ``__always_inline``)::
+
+    static __no_kcsan_or_inline void foo(void) {
+        ...
+
+  Note: Older compiler versions (GCC < 9) also do not always honor the
+  ``__no_kcsan`` attribute on regular ``inline`` functions. If false positives
+  with these compilers cannot be tolerated, for small functions where
+  ``__always_inline`` would be appropriate, ``__no_kcsan_or_inline`` should be
+  preferred instead.
 
 * To disable data race detection for a particular compilation unit, add to the
   ``Makefile``::
@@ -124,13 +137,29 @@ the below options are available:
 
     KCSAN_SANITIZE := n
 
-debugfs
-~~~~~~~
+Furthermore, it is possible to tell KCSAN to show or hide entire classes of
+data races, depending on preferences. These can be changed via the following
+Kconfig options:
+
+* ``CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY``: If enabled and a conflicting write
+  is observed via a watchpoint, but the data value of the memory location was
+  observed to remain unchanged, do not report the data race.
+
+* ``CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC``: Assume that plain aligned writes
+  up to word size are atomic by default. Assumes that such writes are not
+  subject to unsafe compiler optimizations resulting in data races. The option
+  causes KCSAN to not report data races due to conflicts where the only plain
+  accesses are aligned writes up to word size.
+
+DebugFS interface
+~~~~~~~~~~~~~~~~~
+
+The file ``/sys/kernel/debug/kcsan`` provides the following interface:
 
-* The file ``/sys/kernel/debug/kcsan`` can be read to get stats.
+* Reading ``/sys/kernel/debug/kcsan`` returns various runtime statistics.
 
-* KCSAN can be turned on or off by writing ``on`` or ``off`` to
-  ``/sys/kernel/debug/kcsan``.
+* Writing ``on`` or ``off`` to ``/sys/kernel/debug/kcsan`` allows turning KCSAN
+  on or off, respectively.
 
 * Writing ``!some_func_name`` to ``/sys/kernel/debug/kcsan`` adds
   ``some_func_name`` to the report filter list, which (by default) blacklists
@@ -142,91 +171,120 @@ debugfs
   can be used to silence frequently occurring data races; the whitelist feature
   can help with reproduction and testing of fixes.
 
+Tuning performance
+~~~~~~~~~~~~~~~~~~
+
+Core parameters that affect KCSAN's overall performance and bug detection
+ability are exposed as kernel command-line arguments whose defaults can also be
+changed via the corresponding Kconfig options.
+
+* ``kcsan.skip_watch`` (``CONFIG_KCSAN_SKIP_WATCH``): Number of per-CPU memory
+  operations to skip before another watchpoint is set up. Setting up
+  watchpoints more frequently increases the likelihood that races will be
+  observed. This parameter has the most significant impact on overall system
+  performance and race detection ability.
+
+* ``kcsan.udelay_task`` (``CONFIG_KCSAN_UDELAY_TASK``): For tasks, the
+  microsecond delay to stall execution after a watchpoint has been set up.
+  Larger values increase the window in which a race may be observed.
+
+* ``kcsan.udelay_interrupt`` (``CONFIG_KCSAN_UDELAY_INTERRUPT``): For
+  interrupts, the microsecond delay to stall execution after a watchpoint has
+  been set up. Interrupts have tighter latency requirements, and their delay
+  should generally be smaller than the one chosen for tasks.
+
+They may be tweaked at runtime via ``/sys/module/kcsan/parameters/``.
+
 Data Races
 ----------
 
-Informally, two operations *conflict* if they access the same memory location,
-and at least one of them is a write operation. In an execution, two memory
-operations from different threads form a **data race** if they *conflict*, at
-least one of them is a *plain access* (non-atomic), and they are *unordered* in
-the "happens-before" order according to the `LKMM
-<../../tools/memory-model/Documentation/explanation.txt>`_.
+In an execution, two memory accesses form a *data race* if they *conflict*,
+they happen concurrently in different threads, and at least one of them is a
+*plain access*; they *conflict* if both access the same memory location, and at
+least one is a write. For a more thorough discussion and definition, see `"Plain
+Accesses and Data Races" in the LKMM`_.
+
+.. _"Plain Accesses and Data Races" in the LKMM: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/memory-model/Documentation/explanation.txt#n1922
 
-Relationship with the Linux Kernel Memory Model (LKMM)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Relationship with the Linux-Kernel Memory Consistency Model (LKMM)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 The LKMM defines the propagation and ordering rules of various memory
 operations, which gives developers the ability to reason about concurrent code.
 Ultimately this allows to determine the possible executions of concurrent code,
 and if that code is free from data races.
 
-KCSAN is aware of *atomic* accesses (``READ_ONCE``, ``WRITE_ONCE``,
-``atomic_*``, etc.), but is oblivious of any ordering guarantees. In other
-words, KCSAN assumes that as long as a plain access is not observed to race
-with another conflicting access, memory operations are correctly ordered.
+KCSAN is aware of *marked atomic operations* (``READ_ONCE``, ``WRITE_ONCE``,
+``atomic_*``, etc.), but is oblivious of any ordering guarantees and simply
+assumes that memory barriers are placed correctly. In other words, KCSAN
+assumes that as long as a plain access is not observed to race with another
+conflicting access, memory operations are correctly ordered.
 
 This means that KCSAN will not report *potential* data races due to missing
-memory ordering. If, however, missing memory ordering (that is observable with
-a particular compiler and architecture) leads to an observable data race (e.g.
-entering a critical section erroneously), KCSAN would report the resulting
-data race.
-
-Race conditions vs. data races
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Race conditions are logic bugs, where unexpected interleaving of racing
-concurrent operations result in an erroneous state.
-
-Data races on the other hand are defined at the *memory model/language level*.
-Many data races are also harmful race conditions, which a tool like KCSAN
-reports!  However, not all data races are race conditions and vice-versa.
-KCSAN's intent is to report data races according to the LKMM. A data race
-detector can only work at the memory model/language level.
-
-Deeper analysis, to find high-level race conditions only, requires conveying
-the intended kernel logic to a tool. This requires (1) the developer writing a
-specification or model of their code, and then (2) the tool verifying that the
-implementation matches. This has been done for small bits of code using model
-checkers and other formal methods, but does not scale to the level of what can
-be covered with a dynamic analysis based data race detector such as KCSAN.
-
-For reasons outlined in this `article <https://lwn.net/Articles/793253/>`_,
-data races can be much more subtle, but can cause no less harm than high-level
-race conditions.
+memory ordering. Developers should therefore carefully consider the memory
+ordering requirements that remain unchecked. If, however, missing
+memory ordering (that is observable with a particular compiler and
+architecture) leads to an observable data race (e.g. entering a critical
+section erroneously), KCSAN would report the resulting data race.
+
+Race Detection Beyond Data Races
+--------------------------------
+
+For code with complex concurrency design, race-condition bugs may not always
+manifest as data races. Race conditions occur if concurrently executing
+operations result in unexpected system behaviour. On the other hand, data races
+are defined at the C-language level. The following macros can be used to check
+properties of concurrent code where bugs would not manifest as data races.
+
+.. kernel-doc:: include/linux/kcsan-checks.h
+    :functions: ASSERT_EXCLUSIVE_WRITER ASSERT_EXCLUSIVE_ACCESS
+                ASSERT_EXCLUSIVE_BITS
 
 Implementation Details
 ----------------------
 
-The general approach is inspired by `DataCollider
+KCSAN relies on observing that two accesses happen concurrently. Crucially, we
+want to (a) increase the chances of observing races (especially for races that
+manifest rarely), and (b) be able to actually observe them. We can accomplish
+(a) by injecting various delays, and (b) by using address watchpoints (or
+breakpoints).
+
+If we deliberately stall a memory access, while we have a watchpoint for its
+address set up, and then observe the watchpoint to fire, two accesses to the
+same address just raced. Using hardware watchpoints, this is the approach taken
+in `DataCollider
 <http://usenix.org/legacy/events/osdi10/tech/full_papers/Erickson.pdf>`_.
 Unlike DataCollider, KCSAN does not use hardware watchpoints, but instead
-relies on compiler instrumentation. Watchpoints are implemented using an
-efficient encoding that stores access type, size, and address in a long; the
-benefits of using "soft watchpoints" are portability and greater flexibility in
-limiting which accesses trigger a watchpoint.
+relies on compiler instrumentation and "soft watchpoints".
 
-More specifically, KCSAN requires instrumenting plain (unmarked, non-atomic)
-memory operations; for each instrumented plain access:
+In KCSAN, watchpoints are implemented using an efficient encoding that stores
+access type, size, and address in a long; the benefits of using "soft
+watchpoints" are portability and greater flexibility. KCSAN then relies on the
+compiler instrumenting plain accesses. For each instrumented plain access:
 
 1. Check if a matching watchpoint exists; if yes, and at least one access is a
    write, then we encountered a racing access.
 
 2. Periodically, if no matching watchpoint exists, set up a watchpoint and
-   stall for a small delay.
+   stall for a small randomized delay.
 
 3. Also check the data value before the delay, and re-check the data value
    after delay; if the values mismatch, we infer a race of unknown origin.
 
-To detect data races between plain and atomic memory operations, KCSAN also
-annotates atomic accesses, but only to check if a watchpoint exists
-(``kcsan_check_atomic_*``); i.e.  KCSAN never sets up a watchpoint on atomic
-accesses.
+To detect data races between plain and marked accesses, KCSAN also annotates
+marked accesses, but only to check if a watchpoint exists; i.e. KCSAN never
+sets up a watchpoint on marked accesses. By never setting up watchpoints for
+marked operations, if all accesses to a variable that is accessed concurrently
+are properly marked, KCSAN will never trigger a watchpoint and therefore never
+report the accesses.
 
 Key Properties
 ~~~~~~~~~~~~~~
 
-1. **Memory Overhead:**  The current implementation uses a small array of longs
-   to encode watchpoint information, which is negligible.
+1. **Memory Overhead:**  The overall memory overhead is only a few MiB
+   depending on configuration. The current implementation uses a small array of
+   longs to encode watchpoint information, which is negligible.
 
 2. **Performance Overhead:** KCSAN's runtime aims to be minimal, using an
    efficient watchpoint encoding that does not require acquiring any shared
@@ -253,14 +311,17 @@ Key Properties
 Alternatives Considered
 -----------------------
 
-An alternative data race detection approach for the kernel can be found in
+An alternative data race detection approach for the kernel can be found in the
 `Kernel Thread Sanitizer (KTSAN) <https://github.com/google/ktsan/wiki>`_.
 KTSAN is a happens-before data race detector, which explicitly establishes the
 happens-before order between memory operations, which can then be used to
-determine data races as defined in `Data Races`_. To build a correct
-happens-before relation, KTSAN must be aware of all ordering rules of the LKMM
-and synchronization primitives. Unfortunately, any omission leads to false
-positives, which is especially important in the context of the kernel which
-includes numerous custom synchronization mechanisms. Furthermore, KTSAN's
-implementation requires metadata for each memory location (shadow memory);
-currently, for each page, KTSAN requires 4 pages of shadow memory.
+determine data races as defined in `Data Races`_.
+
+To build a correct happens-before relation, KTSAN must be aware of all ordering
+rules of the LKMM and synchronization primitives. Unfortunately, any omission
+leads to large numbers of false positives, which is especially detrimental in
+the context of the kernel which includes numerous custom synchronization
+mechanisms. To track the happens-before relation, KTSAN's implementation
+requires metadata for each memory location (shadow memory), which for each page
+corresponds to 4 pages of shadow memory, and can translate into overhead of
+tens of GiB on a large system.
-- 
2.9.5
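
To illustrate the "Data Races" definition and the marked-access behaviour
described in the kcsan.rst update above, a minimal sketch (the names are
hypothetical and this is not part of the patch):

/*
 * Minimal sketch: two threads accessing the same location. The plain
 * accesses conflict and form a data race; the marked variants do not,
 * and KCSAN never sets up watchpoints for marked accesses.
 */
#include <linux/compiler.h>	/* READ_ONCE(), WRITE_ONCE() */

static int shared_flag;

static void set_flag_plain(void)	/* thread A: plain write */
{
	shared_flag = 1;
}

static int get_flag_plain(void)		/* thread B: plain read; together with
					 * set_flag_plain(), a data race */
{
	return shared_flag;
}

static void set_flag_marked(void)	/* marked write */
{
	WRITE_ONCE(shared_flag, 1);
}

static int get_flag_marked(void)	/* marked read: no data race */
{
	return READ_ONCE(shared_flag);
}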


^ permalink raw reply related	[flat|nested] 50+ messages in thread

* [PATCH kcsan 32/32] kcsan: Update API documentation in kcsan-checks.h
  2020-03-09 19:03 [PATCH kcsan 0/32] KCSAN commits for v5.7 Paul E. McKenney
                   ` (30 preceding siblings ...)
  2020-03-09 19:04 ` [PATCH kcsan 31/32] kcsan: Update Documentation/dev-tools/kcsan.rst paulmck
@ 2020-03-09 19:04 ` paulmck
  31 siblings, 0 replies; 50+ messages in thread
From: paulmck @ 2020-03-09 19:04 UTC (permalink / raw)
  To: linux-kernel, kasan-dev, kernel-team, mingo
  Cc: elver, andreyknvl, glider, dvyukov, cai, boqun.feng, Paul E . McKenney

From: Marco Elver <elver@google.com>

Update the API documentation for ASSERT_EXCLUSIVE_* macros and make them
generate readable documentation for the code examples.

All @variable short summaries were missing ':', which was updated for
the whole file.

Tested with "make htmldocs".

Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
---
 include/linux/kcsan-checks.h | 98 +++++++++++++++++++++++++++-----------------
 1 file changed, 61 insertions(+), 37 deletions(-)

diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h
index 1b8aac5..14fd10e 100644
--- a/include/linux/kcsan-checks.h
+++ b/include/linux/kcsan-checks.h
@@ -26,9 +26,9 @@
 /**
  * __kcsan_check_access - check generic access for races
  *
- * @ptr address of access
- * @size size of access
- * @type access type modifier
+ * @ptr: address of access
+ * @size: size of access
+ * @type: access type modifier
  */
 void __kcsan_check_access(const volatile void *ptr, size_t size, int type);
 
@@ -64,7 +64,7 @@ void kcsan_flat_atomic_end(void);
  * Force treating the next n memory accesses for the current context as atomic
  * operations.
  *
- * @n number of following memory accesses to treat as atomic.
+ * @n: number of following memory accesses to treat as atomic.
  */
 void kcsan_atomic_next(int n);
 
@@ -74,7 +74,7 @@ void kcsan_atomic_next(int n);
  * Set the access mask for all accesses for the current context if non-zero.
  * Only value changes to bits set in the mask will be reported.
  *
- * @mask bitmask
+ * @mask: bitmask
  */
 void kcsan_set_access_mask(unsigned long mask);
 
@@ -106,16 +106,16 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size,
 /**
  * __kcsan_check_read - check regular read access for races
  *
- * @ptr address of access
- * @size size of access
+ * @ptr: address of access
+ * @size: size of access
  */
 #define __kcsan_check_read(ptr, size) __kcsan_check_access(ptr, size, 0)
 
 /**
  * __kcsan_check_write - check regular write access for races
  *
- * @ptr address of access
- * @size size of access
+ * @ptr: address of access
+ * @size: size of access
  */
 #define __kcsan_check_write(ptr, size)                                         \
 	__kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)
@@ -123,16 +123,16 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size,
 /**
  * kcsan_check_read - check regular read access for races
  *
- * @ptr address of access
- * @size size of access
+ * @ptr: address of access
+ * @size: size of access
  */
 #define kcsan_check_read(ptr, size) kcsan_check_access(ptr, size, 0)
 
 /**
  * kcsan_check_write - check regular write access for races
  *
- * @ptr address of access
- * @size size of access
+ * @ptr: address of access
+ * @size: size of access
  */
 #define kcsan_check_write(ptr, size)                                           \
 	kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)
@@ -158,14 +158,26 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size,
  * allowed. This assertion can be used to specify properties of concurrent code,
  * where violation cannot be detected as a normal data race.
  *
- * For example, if a per-CPU variable is only meant to be written by a single
- * CPU, but may be read from other CPUs; in this case, reads and writes must be
- * marked properly, however, if an off-CPU WRITE_ONCE() races with the owning
- * CPU's WRITE_ONCE(), would not constitute a data race but could be a harmful
- * race condition. Using this macro allows specifying this property in the code
- * and catch such bugs.
+ * For example, if we only have a single writer, but multiple concurrent
+ * readers, to avoid data races, all these accesses must be marked; even
+ * concurrent marked writes racing with the single writer are bugs.
+ * Unfortunately, due to being marked, they are no longer data races. For cases
+ * like these, we can use the macro as follows:
  *
- * @var variable to assert on
+ * .. code-block:: c
+ *
+ *	void writer(void) {
+ *		spin_lock(&update_foo_lock);
+ *		ASSERT_EXCLUSIVE_WRITER(shared_foo);
+ *		WRITE_ONCE(shared_foo, ...);
+ *		spin_unlock(&update_foo_lock);
+ *	}
+ *	void reader(void) {
+ *		// update_foo_lock does not need to be held!
+ *		... = READ_ONCE(shared_foo);
+ *	}
+ *
+ * @var: variable to assert on
  */
 #define ASSERT_EXCLUSIVE_WRITER(var)                                           \
 	__kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT)
@@ -177,16 +189,22 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size,
  * writers). This assertion can be used to specify properties of concurrent
  * code, where violation cannot be detected as a normal data race.
  *
- * For example, in a reference-counting algorithm where exclusive access is
- * expected after the refcount reaches 0. We can check that this property
- * actually holds as follows:
+ * For example, where exclusive access is expected after determining no other
+ * users of an object are left, but the object is not actually freed. We can
+ * check that this property actually holds as follows:
+ *
+ * .. code-block:: c
  *
  *	if (refcount_dec_and_test(&obj->refcnt)) {
  *		ASSERT_EXCLUSIVE_ACCESS(*obj);
- *		safely_dispose_of(obj);
+ *		do_some_cleanup(obj);
+ *		release_for_reuse(obj);
  *	}
  *
- * @var variable to assert on
+ * Note: For cases where the object is freed, `KASAN <kasan.html>`_ is a better
+ * fit to detect use-after-free bugs.
+ *
+ * @var: variable to assert on
  */
 #define ASSERT_EXCLUSIVE_ACCESS(var)                                           \
 	__kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT)
@@ -200,7 +218,7 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size,
  * concurrent readers are permitted. This assertion captures more detailed
  * bit-level properties, compared to the other (word granularity) assertions.
  * Only the bits set in @mask are checked for concurrent modifications, while
- * ignoring the remaining bits, i.e. concurrent writes (or reads) to ~@mask bits
+ * ignoring the remaining bits, i.e. concurrent writes (or reads) to ~mask bits
  * are ignored.
  *
  * Use this for variables, where some bits must not be modified concurrently,
@@ -210,17 +228,21 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size,
  * but other bits may still be modified concurrently. A reader may wish to
  * assert that this is true as follows:
  *
+ * .. code-block:: c
+ *
  *	ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK);
  *	foo = (READ_ONCE(flags) & READ_ONLY_MASK) >> READ_ONLY_SHIFT;
  *
- *   Note: The access that immediately follows ASSERT_EXCLUSIVE_BITS() is
- *   assumed to access the masked bits only, and KCSAN optimistically assumes it
- *   is therefore safe, even in the presence of data races, and marking it with
- *   READ_ONCE() is optional from KCSAN's point-of-view. We caution, however,
- *   that it may still be advisable to do so, since we cannot reason about all
- *   compiler optimizations when it comes to bit manipulations (on the reader
- *   and writer side). If you are sure nothing can go wrong, we can write the
- *   above simply as:
+ * Note: The access that immediately follows ASSERT_EXCLUSIVE_BITS() is assumed
+ * to access the masked bits only, and KCSAN optimistically assumes it is
+ * therefore safe, even in the presence of data races, and marking it with
+ * READ_ONCE() is optional from KCSAN's point-of-view. We caution, however, that
+ * it may still be advisable to do so, since we cannot reason about all compiler
+ * optimizations when it comes to bit manipulations (on the reader and writer
+ * side). If you are sure nothing can go wrong, we can write the above simply
+ * as:
+ *
+ * .. code-block:: c
  *
  * 	ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK);
  *	foo = (flags & READ_ONLY_MASK) >> READ_ONLY_SHIFT;
@@ -230,15 +252,17 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size,
  * be modified concurrently. Writers, where other bits may change concurrently,
  * could use the assertion as follows:
  *
+ * .. code-block:: c
+ *
  *	spin_lock(&foo_lock);
  *	ASSERT_EXCLUSIVE_BITS(flags, FOO_MASK);
- *	old_flags = READ_ONCE(flags);
+ *	old_flags = flags;
  *	new_flags = (old_flags & ~FOO_MASK) | (new_foo << FOO_SHIFT);
  *	if (cmpxchg(&flags, old_flags, new_flags) != old_flags) { ... }
  *	spin_unlock(&foo_lock);
  *
- * @var variable to assert on
- * @mask only check for modifications to bits set in @mask
+ * @var: variable to assert on
+ * @mask: only check for modifications to bits set in @mask
  */
 #define ASSERT_EXCLUSIVE_BITS(var, mask)                                       \
 	do {                                                                   \
-- 
2.9.5
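
To tie the ASSERT_EXCLUSIVE_BITS() snippets above together, here is one
possible flags layout they could apply to; a hedged sketch reusing the
hypothetical READ_ONLY_*/FOO_* names from the comments, not part of the patch:

#include <linux/atomic.h>	/* cmpxchg() */
#include <linux/compiler.h>
#include <linux/kcsan-checks.h>
#include <linux/spinlock.h>

/* Hypothetical layout: low bits stable after init, FOO bits lock-protected. */
#define READ_ONLY_SHIFT	0
#define READ_ONLY_MASK	(0xffUL << READ_ONLY_SHIFT)
#define FOO_SHIFT	8
#define FOO_MASK	(0xfUL << FOO_SHIFT)

static unsigned long flags;
static DEFINE_SPINLOCK(foo_lock);

static unsigned long read_only_bits(void)
{
	/* Assert that the stable bits are not being modified concurrently. */
	ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK);
	return (READ_ONCE(flags) & READ_ONLY_MASK) >> READ_ONLY_SHIFT;
}

static void write_foo(unsigned long new_foo)	/* new_foo must fit FOO_MASK */
{
	unsigned long old_flags, new_flags;

	spin_lock(&foo_lock);
	ASSERT_EXCLUSIVE_BITS(flags, FOO_MASK);
	old_flags = flags;
	new_flags = (old_flags & ~FOO_MASK) | (new_foo << FOO_SHIFT);
	if (cmpxchg(&flags, old_flags, new_flags) != old_flags) {
		/* flags changed concurrently outside FOO_MASK; caller retries */
	}
	spin_unlock(&foo_lock);
}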


^ permalink raw reply related	[flat|nested] 50+ messages in thread

* Re: [PATCH kcsan 26/32] kcsan, trace: Make KCSAN compatible with tracing
  2020-03-09 19:04 ` [PATCH kcsan 26/32] kcsan, trace: Make KCSAN compatible with tracing paulmck
@ 2020-03-09 19:57   ` Steven Rostedt
  2020-03-09 20:27     ` Paul E. McKenney
  0 siblings, 1 reply; 50+ messages in thread
From: Steven Rostedt @ 2020-03-09 19:57 UTC (permalink / raw)
  To: paulmck
  Cc: linux-kernel, kasan-dev, kernel-team, mingo, elver, andreyknvl,
	glider, dvyukov, cai, boqun.feng

On Mon,  9 Mar 2020 12:04:14 -0700
paulmck@kernel.org wrote:

> From: Marco Elver <elver@google.com>
> 
> Previously the system would lock up if ftrace was enabled together with
> KCSAN. This is due to recursion on reporting if the tracer code is
> instrumented with KCSAN.
> 
> To avoid this for all types of tracing, disable KCSAN instrumentation
> for all of kernel/trace.
> 
> Furthermore, since KCSAN relies on udelay() to introduce delay, we have
> to disable ftrace for udelay() (currently done for x86) in case KCSAN is
> used together with lockdep and ftrace. The reason is that it may corrupt
> lockdep IRQ flags tracing state due to a peculiar case of recursion
> (details in Makefile comment).
> 
> Signed-off-by: Marco Elver <elver@google.com>
> Reported-by: Qian Cai <cai@lca.pw>
> Cc: Paul E. McKenney <paulmck@kernel.org>
> Cc: Steven Rostedt <rostedt@goodmis.org>

Acked-by: Steven Rostedt (VMware) <rostedt@goodmis.org>

-- Steve

> Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
> Tested-by: Qian Cai <cai@lca.pw>
> ---
>  arch/x86/lib/Makefile | 5 +++++
>  kernel/kcsan/Makefile | 2 ++
>  kernel/trace/Makefile | 3 +++
>  3 files changed, 10 insertions(+)
> 
> diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
> index 432a077..6110bce7 100644
> --- a/arch/x86/lib/Makefile
> +++ b/arch/x86/lib/Makefile
> @@ -8,6 +8,11 @@ KCOV_INSTRUMENT_delay.o	:= n
>  
>  # KCSAN uses udelay for introducing watchpoint delay; avoid recursion.
>  KCSAN_SANITIZE_delay.o := n
> +ifdef CONFIG_KCSAN
> +# In case KCSAN+lockdep+ftrace are enabled, disable ftrace for delay.o to avoid
> +# lockdep -> [other libs] -> KCSAN -> udelay -> ftrace -> lockdep recursion.
> +CFLAGS_REMOVE_delay.o = $(CC_FLAGS_FTRACE)
> +endif
>  
>  # Early boot use of cmdline; don't instrument it
>  ifdef CONFIG_AMD_MEM_ENCRYPT
> diff --git a/kernel/kcsan/Makefile b/kernel/kcsan/Makefile
> index df6b779..d4999b3 100644
> --- a/kernel/kcsan/Makefile
> +++ b/kernel/kcsan/Makefile
> @@ -4,6 +4,8 @@ KCOV_INSTRUMENT := n
>  UBSAN_SANITIZE := n
>  
>  CFLAGS_REMOVE_core.o = $(CC_FLAGS_FTRACE)
> +CFLAGS_REMOVE_debugfs.o = $(CC_FLAGS_FTRACE)
> +CFLAGS_REMOVE_report.o = $(CC_FLAGS_FTRACE)
>  
>  CFLAGS_core.o := $(call cc-option,-fno-conserve-stack,) \
>  	$(call cc-option,-fno-stack-protector,)
> diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
> index 0e63db6..9072486 100644
> --- a/kernel/trace/Makefile
> +++ b/kernel/trace/Makefile
> @@ -6,6 +6,9 @@ ifdef CONFIG_FUNCTION_TRACER
>  ORIG_CFLAGS := $(KBUILD_CFLAGS)
>  KBUILD_CFLAGS = $(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS))
>  
> +# Avoid recursion due to instrumentation.
> +KCSAN_SANITIZE := n
> +
>  ifdef CONFIG_FTRACE_SELFTEST
>  # selftest needs instrumentation
>  CFLAGS_trace_selftest_dynamic.o = $(CC_FLAGS_FTRACE)


^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [PATCH kcsan 26/32] kcsan, trace: Make KCSAN compatible with tracing
  2020-03-09 19:57   ` Steven Rostedt
@ 2020-03-09 20:27     ` Paul E. McKenney
  0 siblings, 0 replies; 50+ messages in thread
From: Paul E. McKenney @ 2020-03-09 20:27 UTC (permalink / raw)
  To: Steven Rostedt
  Cc: linux-kernel, kasan-dev, kernel-team, mingo, elver, andreyknvl,
	glider, dvyukov, cai, boqun.feng

On Mon, Mar 09, 2020 at 03:57:22PM -0400, Steven Rostedt wrote:
> On Mon,  9 Mar 2020 12:04:14 -0700
> paulmck@kernel.org wrote:
> 
> > From: Marco Elver <elver@google.com>
> > 
> > Previously the system would lock up if ftrace was enabled together with
> > KCSAN. This is due to recursion on reporting if the tracer code is
> > instrumented with KCSAN.
> > 
> > To avoid this for all types of tracing, disable KCSAN instrumentation
> > for all of kernel/trace.
> > 
> > Furthermore, since KCSAN relies on udelay() to introduce delay, we have
> > to disable ftrace for udelay() (currently done for x86) in case KCSAN is
> > used together with lockdep and ftrace. The reason is that it may corrupt
> > lockdep IRQ flags tracing state due to a peculiar case of recursion
> > (details in Makefile comment).
> > 
> > Signed-off-by: Marco Elver <elver@google.com>
> > Reported-by: Qian Cai <cai@lca.pw>
> > Cc: Paul E. McKenney <paulmck@kernel.org>
> > Cc: Steven Rostedt <rostedt@goodmis.org>
> 
> Acked-by: Steven Rostedt (VMware) <rostedt@goodmis.org>

Applied, thank you!

							Thanx, Paul

> -- Steve
> 
> > Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
> > Tested-by: Qian Cai <cai@lca.pw>
> > ---
> >  arch/x86/lib/Makefile | 5 +++++
> >  kernel/kcsan/Makefile | 2 ++
> >  kernel/trace/Makefile | 3 +++
> >  3 files changed, 10 insertions(+)
> > 
> > diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
> > index 432a077..6110bce7 100644
> > --- a/arch/x86/lib/Makefile
> > +++ b/arch/x86/lib/Makefile
> > @@ -8,6 +8,11 @@ KCOV_INSTRUMENT_delay.o	:= n
> >  
> >  # KCSAN uses udelay for introducing watchpoint delay; avoid recursion.
> >  KCSAN_SANITIZE_delay.o := n
> > +ifdef CONFIG_KCSAN
> > +# In case KCSAN+lockdep+ftrace are enabled, disable ftrace for delay.o to avoid
> > +# lockdep -> [other libs] -> KCSAN -> udelay -> ftrace -> lockdep recursion.
> > +CFLAGS_REMOVE_delay.o = $(CC_FLAGS_FTRACE)
> > +endif
> >  
> >  # Early boot use of cmdline; don't instrument it
> >  ifdef CONFIG_AMD_MEM_ENCRYPT
> > diff --git a/kernel/kcsan/Makefile b/kernel/kcsan/Makefile
> > index df6b779..d4999b3 100644
> > --- a/kernel/kcsan/Makefile
> > +++ b/kernel/kcsan/Makefile
> > @@ -4,6 +4,8 @@ KCOV_INSTRUMENT := n
> >  UBSAN_SANITIZE := n
> >  
> >  CFLAGS_REMOVE_core.o = $(CC_FLAGS_FTRACE)
> > +CFLAGS_REMOVE_debugfs.o = $(CC_FLAGS_FTRACE)
> > +CFLAGS_REMOVE_report.o = $(CC_FLAGS_FTRACE)
> >  
> >  CFLAGS_core.o := $(call cc-option,-fno-conserve-stack,) \
> >  	$(call cc-option,-fno-stack-protector,)
> > diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
> > index 0e63db6..9072486 100644
> > --- a/kernel/trace/Makefile
> > +++ b/kernel/trace/Makefile
> > @@ -6,6 +6,9 @@ ifdef CONFIG_FUNCTION_TRACER
> >  ORIG_CFLAGS := $(KBUILD_CFLAGS)
> >  KBUILD_CFLAGS = $(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS))
> >  
> > +# Avoid recursion due to instrumentation.
> > +KCSAN_SANITIZE := n
> > +
> >  ifdef CONFIG_FTRACE_SELFTEST
> >  # selftest needs instrumentation
> >  CFLAGS_trace_selftest_dynamic.o = $(CC_FLAGS_FTRACE)
> 

^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [PATCH kcsan 27/32] kcsan: Add option to allow watcher interruptions
  2020-03-09 19:04 ` [PATCH kcsan 27/32] kcsan: Add option to allow watcher interruptions paulmck
@ 2020-03-12 18:03   ` Paul E. McKenney
  2020-03-12 18:04     ` Paul E. McKenney
  0 siblings, 1 reply; 50+ messages in thread
From: Paul E. McKenney @ 2020-03-12 18:03 UTC (permalink / raw)
  To: linux-kernel, kasan-dev, kernel-team, mingo
  Cc: elver, andreyknvl, glider, dvyukov, cai, boqun.feng

On Mon, Mar 09, 2020 at 12:04:15PM -0700, paulmck@kernel.org wrote:
> From: Marco Elver <elver@google.com>
> 
> Add option to allow interrupts while a watchpoint is set up. This can be
> enabled either via CONFIG_KCSAN_INTERRUPT_WATCHER or via the boot
> parameter 'kcsan.interrupt_watcher=1'.
> 
> Note that, currently not all safe per-CPU access primitives and patterns
> are accounted for, which could result in false positives. For example,
> asm-generic/percpu.h uses plain operations, which by default are
> instrumented. On interrupts and subsequent accesses to the same
> variable, KCSAN would currently report a data race with this option.
> 
> Therefore, this option should currently remain disabled by default, but
> may be enabled for specific test scenarios.
> 
> To avoid new warnings, changes all uses of smp_processor_id() to use the
> raw version (as already done in kcsan_found_watchpoint()). The exact SMP
> processor id is for informational purposes in the report, and
> correctness is not affected.
> 
> Signed-off-by: Marco Elver <elver@google.com>
> Signed-off-by: Paul E. McKenney <paulmck@kernel.org>

And I get silent hangs that bisect to this patch when running the
following rcutorture command, run in the kernel source tree on a
12-hardware-thread laptop:

bash tools/testing/selftests/rcutorture/bin/kvm.sh --cpus 12 --duration 10 --kconfig "CONFIG_DEBUG_INFO=y CONFIG_KCSAN=y CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=n CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY=n CONFIG_KCSAN_REPORT_ONCE_IN_MS=100000 CONFIG_KCSAN_VERBOSE=y CONFIG_KCSAN_INTERRUPT_WATCHER=y" --configs TREE03

It works fine on some (but not all) of the other rcutorture test
scenarios.  It fails on TREE01, TREE02, TREE03, TREE09.  The common thread
is that these TREE scenarios are all PREEMPT=y.  So are RUDE01,
SRCU-P, TASKS01, and TASKS03, but these scenarios are not hammering
on Tree RCU, and thus have far less interrupt activity and the like.
Given that it is an interrupt-related feature being added by this commit,
this seems like expected (mis)behavior.

Can you reproduce this?  If not, are there any diagnostics I can add to
my testing?  Or a diagnostic patch I could apply?

							Thanx, Paul

^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [PATCH kcsan 27/32] kcsan: Add option to allow watcher interruptions
  2020-03-12 18:03   ` Paul E. McKenney
@ 2020-03-12 18:04     ` Paul E. McKenney
  2020-03-13 15:28       ` Marco Elver
  0 siblings, 1 reply; 50+ messages in thread
From: Paul E. McKenney @ 2020-03-12 18:04 UTC (permalink / raw)
  To: linux-kernel, kasan-dev, kernel-team, mingo
  Cc: elver, andreyknvl, glider, dvyukov, cai, boqun.feng

On Thu, Mar 12, 2020 at 11:03:28AM -0700, Paul E. McKenney wrote:
> On Mon, Mar 09, 2020 at 12:04:15PM -0700, paulmck@kernel.org wrote:
> > From: Marco Elver <elver@google.com>
> > 
> > Add option to allow interrupts while a watchpoint is set up. This can be
> > enabled either via CONFIG_KCSAN_INTERRUPT_WATCHER or via the boot
> > parameter 'kcsan.interrupt_watcher=1'.
> > 
> > Note that, currently not all safe per-CPU access primitives and patterns
> > are accounted for, which could result in false positives. For example,
> > asm-generic/percpu.h uses plain operations, which by default are
> > instrumented. On interrupts and subsequent accesses to the same
> > variable, KCSAN would currently report a data race with this option.
> > 
> > Therefore, this option should currently remain disabled by default, but
> > may be enabled for specific test scenarios.
> > 
> > To avoid new warnings, changes all uses of smp_processor_id() to use the
> > raw version (as already done in kcsan_found_watchpoint()). The exact SMP
> > processor id is for informational purposes in the report, and
> > correctness is not affected.
> > 
> > Signed-off-by: Marco Elver <elver@google.com>
> > Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
> 
> And I get silent hangs that bisect to this patch when running the
> following rcutorture command, run in the kernel source tree on a
> 12-hardware-thread laptop:
> 
> bash tools/testing/selftests/rcutorture/bin/kvm.sh --cpus 12 --duration 10 --kconfig "CONFIG_DEBUG_INFO=y CONFIG_KCSAN=y CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=n CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY=n CONFIG_KCSAN_REPORT_ONCE_IN_MS=100000 CONFIG_KCSAN_VERBOSE=y CONFIG_KCSAN_INTERRUPT_WATCHER=y" --configs TREE03
> 
> It works fine on some (but not all) of the other rcutorture test
> scenarios.  It fails on TREE01, TREE02, TREE03, TREE09.  The common thread
> is that these TREE scenarios are all PREEMPT=y.  So are RUDE01,
> SRCU-P, TASKS01, and TASKS03, but these scenarios are not hammering
> on Tree RCU, and thus have far less interrupt activity and the like.
> Given that it is an interrupt-related feature being added by this commit,
> this seems like expected (mis)behavior.
> 
> Can you reproduce this?  If not, are there any diagnostics I can add to
> my testing?  Or a diagnostic patch I could apply?

I should hasten to add that this feature was quite helpful in recent work!

							Thanx, Paul

^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [PATCH kcsan 17/32] kcsan: Introduce ASSERT_EXCLUSIVE_* macros
  2020-03-09 19:04 ` [PATCH kcsan 17/32] kcsan: Introduce ASSERT_EXCLUSIVE_* macros paulmck
@ 2020-03-13  8:52   ` Boqun Feng
  2020-03-13 16:15     ` Marco Elver
  0 siblings, 1 reply; 50+ messages in thread
From: Boqun Feng @ 2020-03-13  8:52 UTC (permalink / raw)
  To: paulmck
  Cc: linux-kernel, kasan-dev, kernel-team, mingo, elver, andreyknvl,
	glider, dvyukov, cai

Hi Marco,

On Mon, Mar 09, 2020 at 12:04:05PM -0700, paulmck@kernel.org wrote:
> From: Marco Elver <elver@google.com>
> 
> Introduces ASSERT_EXCLUSIVE_WRITER and ASSERT_EXCLUSIVE_ACCESS, which
> may be used to assert properties of synchronization logic, where
> violation cannot be detected as a normal data race.
> 
> Examples of the reports that may be generated:
> 
>     ==================================================================
>     BUG: KCSAN: assert: race in test_thread / test_thread
> 
>     write to 0xffffffffab3d1540 of 8 bytes by task 466 on cpu 2:
>      test_thread+0x8d/0x111
>      debugfs_write.cold+0x32/0x44
>      ...
> 
>     assert no writes to 0xffffffffab3d1540 of 8 bytes by task 464 on cpu 0:
>      test_thread+0xa3/0x111
>      debugfs_write.cold+0x32/0x44
>      ...
>     ==================================================================
> 
>     ==================================================================
>     BUG: KCSAN: assert: race in test_thread / test_thread
> 
>     assert no accesses to 0xffffffffab3d1540 of 8 bytes by task 465 on cpu 1:
>      test_thread+0xb9/0x111
>      debugfs_write.cold+0x32/0x44
>      ...
> 
>     read to 0xffffffffab3d1540 of 8 bytes by task 464 on cpu 0:
>      test_thread+0x77/0x111
>      debugfs_write.cold+0x32/0x44
>      ...
>     ==================================================================
> 
> Signed-off-by: Marco Elver <elver@google.com>
> Suggested-by: Paul E. McKenney <paulmck@kernel.org>
> Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
> ---
>  include/linux/kcsan-checks.h | 40 ++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 40 insertions(+)
> 
> diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h
> index 5dcadc2..cf69617 100644
> --- a/include/linux/kcsan-checks.h
> +++ b/include/linux/kcsan-checks.h
> @@ -96,4 +96,44 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size,
>  	kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC | KCSAN_ACCESS_WRITE)
>  #endif
>  
> +/**
> + * ASSERT_EXCLUSIVE_WRITER - assert no other threads are writing @var
> + *
> + * Assert that there are no other threads writing @var; other readers are
> + * allowed. This assertion can be used to specify properties of concurrent code,
> + * where violation cannot be detected as a normal data race.
> + *

I like the idea that we can assert no other writers, however I think
assertions like ASSERT_EXCLUSIVE_WRITER() are a little limited. For
example, if we have the following code:

	preempt_disable();
	do_sth();
	raw_cpu_write(var, 1);
	do_sth_else();
	preempt_enable();

we can add the assert to detect another potential writer like:

	preempt_disable();
	do_sth();
	ASSERT_EXCLUSIVE_WRITER(var);
	raw_cpu_write(var, 1);
	do_sth_else();
	preempt_enable();

, but, if I understand how KCSAN works correctly, it only works if
another writer happens when the ASSERT_EXCLUSIVE_WRITER(var) is called,
IOW, it can only detect another writer between do_sth() and
raw_cpu_write(). But our intent is to prevent other writers for the
whole preemption-off section. With this assertion introduced, people may
end up with code like:

	preempt_disable();
	ASSERT_EXCLUSIVE_WRITER(var);
	do_sth();
	ASSERT_EXCLUSIVE_WRITER(var);
	raw_cpu_write(var, 1);
	ASSERT_EXCLUSIVE_WRITER(var);
	do_sth_else();
	ASSERT_EXCLUSIVE_WRITER(var);
	preempt_enable();

and that is horrible...

So how about making a pair of annotations
ASSERT_EXCLUSIVE_WRITER_BEGIN() and ASSERT_EXCLUSIVE_WRITER_END(), so
that we can write code like:

	preempt_disable();
	ASSERT_EXCLUSIVE_WRITER_BEGIN(var);
	do_sth();
	raw_cpu_write(var, 1);
	do_sth_else();
	ASSERT_EXCLUSIVE_WRITER_END(var);
	preempt_enable();

ASSERT_EXCLUSIVE_WRITER_BEGIN() could be a rough version of watchpoint
setting up and ASSERT_EXCLUSIVE_WRITER_END() could be watchpoint
removing. So I think it's feasible.

Thoughts?

Regards,
Boqun

> + * For example, if a per-CPU variable is only meant to be written by a single
> + * CPU, but may be read from other CPUs; in this case, reads and writes must be
> + * marked properly, however, if an off-CPU WRITE_ONCE() races with the owning
> + * CPU's WRITE_ONCE(), would not constitute a data race but could be a harmful
> + * race condition. Using this macro allows specifying this property in the code
> + * and catch such bugs.
> + *
> + * @var variable to assert on
> + */
> +#define ASSERT_EXCLUSIVE_WRITER(var)                                           \
> +	__kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT)
> +
> +/**
> + * ASSERT_EXCLUSIVE_ACCESS - assert no other threads are accessing @var
> + *
> + * Assert that no other thread is accessing @var (no readers nor writers). This
> + * assertion can be used to specify properties of concurrent code, where
> + * violation cannot be detected as a normal data race.
> + *
> + * For example, in a reference-counting algorithm where exclusive access is
> + * expected after the refcount reaches 0. We can check that this property
> + * actually holds as follows:
> + *
> + *	if (refcount_dec_and_test(&obj->refcnt)) {
> + *		ASSERT_EXCLUSIVE_ACCESS(*obj);
> + *		safely_dispose_of(obj);
> + *	}
> + *
> + * @var variable to assert on
> + */
> +#define ASSERT_EXCLUSIVE_ACCESS(var)                                           \
> +	__kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT)
> +
>  #endif /* _LINUX_KCSAN_CHECKS_H */
> -- 
> 2.9.5
> 

^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [PATCH kcsan 27/32] kcsan: Add option to allow watcher interruptions
  2020-03-12 18:04     ` Paul E. McKenney
@ 2020-03-13 15:28       ` Marco Elver
  2020-03-16 13:56         ` Marco Elver
  0 siblings, 1 reply; 50+ messages in thread
From: Marco Elver @ 2020-03-13 15:28 UTC (permalink / raw)
  To: Paul E. McKenney
  Cc: LKML, kasan-dev, kernel-team, Ingo Molnar, Andrey Konovalov,
	Alexander Potapenko, Dmitry Vyukov, Qian Cai, Boqun Feng

On Thu, 12 Mar 2020 at 19:04, Paul E. McKenney <paulmck@kernel.org> wrote:
>
> On Thu, Mar 12, 2020 at 11:03:28AM -0700, Paul E. McKenney wrote:
> > On Mon, Mar 09, 2020 at 12:04:15PM -0700, paulmck@kernel.org wrote:
> > > From: Marco Elver <elver@google.com>
> > >
> > > Add option to allow interrupts while a watchpoint is set up. This can be
> > > enabled either via CONFIG_KCSAN_INTERRUPT_WATCHER or via the boot
> > > parameter 'kcsan.interrupt_watcher=1'.
> > >
> > > Note that, currently not all safe per-CPU access primitives and patterns
> > > are accounted for, which could result in false positives. For example,
> > > asm-generic/percpu.h uses plain operations, which by default are
> > > instrumented. On interrupts and subsequent accesses to the same
> > > variable, KCSAN would currently report a data race with this option.
> > >
> > > Therefore, this option should currently remain disabled by default, but
> > > may be enabled for specific test scenarios.
> > >
> > > To avoid new warnings, changes all uses of smp_processor_id() to use the
> > > raw version (as already done in kcsan_found_watchpoint()). The exact SMP
> > > processor id is for informational purposes in the report, and
> > > correctness is not affected.
> > >
> > > Signed-off-by: Marco Elver <elver@google.com>
> > > Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
> >
> > And I get silent hangs that bisect to this patch when running the
> > following rcutorture command, run in the kernel source tree on a
> > 12-hardware-thread laptop:
> >
> > bash tools/testing/selftests/rcutorture/bin/kvm.sh --cpus 12 --duration 10 --kconfig "CONFIG_DEBUG_INFO=y CONFIG_KCSAN=y CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=n CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY=n CONFIG_KCSAN_REPORT_ONCE_IN_MS=100000 CONFIG_KCSAN_VERBOSE=y CONFIG_KCSAN_INTERRUPT_WATCHER=y" --configs TREE03
> >
> > It works fine on some (but not all) of the other rcutorture test
> > scenarios.  It fails on TREE01, TREE02, TREE03, TREE09.  The common thread
> > is that these TREE scenarios are all PREEMPT=y.  So are RUDE01,
> > SRCU-P, TASKS01, and TASKS03, but these scenarios are not hammering
> > on Tree RCU, and thus have far less interrupt activity and the like.
> > Given that it is an interrupt-related feature being added by this commit,
> > this seems like expected (mis)behavior.
> >
> > Can you reproduce this?  If not, are there any diagnostics I can add to
> > my testing?  Or a diagnostic patch I could apply?

I think I can reproduce it.  Let me debug some more; so far I haven't
found anything yet.

What I do know is that it's related to reporting. Turning kcsan_report
into a noop makes the test run to completion.

> I should hasten to add that this feature was quite helpful in recent work!

Good to know. :-)  We can probably keep this patch, since the default
config doesn't turn this on. But I will try to see what's up with the
hangs, and hopefully find a fix.

Thanks,
-- Marco

^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [PATCH kcsan 17/32] kcsan: Introduce ASSERT_EXCLUSIVE_* macros
  2020-03-13  8:52   ` Boqun Feng
@ 2020-03-13 16:15     ` Marco Elver
  2020-03-14  2:22       ` Boqun Feng
  0 siblings, 1 reply; 50+ messages in thread
From: Marco Elver @ 2020-03-13 16:15 UTC (permalink / raw)
  To: Boqun Feng
  Cc: Paul E. McKenney, LKML, kasan-dev, kernel-team, Ingo Molnar,
	Andrey Konovalov, Alexander Potapenko, Dmitry Vyukov, Qian Cai

On Fri, 13 Mar 2020 at 09:52, Boqun Feng <boqun.feng@gmail.com> wrote:
>
> Hi Marco,
>
> On Mon, Mar 09, 2020 at 12:04:05PM -0700, paulmck@kernel.org wrote:
> > From: Marco Elver <elver@google.com>
> >
> > Introduces ASSERT_EXCLUSIVE_WRITER and ASSERT_EXCLUSIVE_ACCESS, which
> > may be used to assert properties of synchronization logic, where
> > violation cannot be detected as a normal data race.
> >
> > Examples of the reports that may be generated:
> >
> >     ==================================================================
> >     BUG: KCSAN: assert: race in test_thread / test_thread
> >
> >     write to 0xffffffffab3d1540 of 8 bytes by task 466 on cpu 2:
> >      test_thread+0x8d/0x111
> >      debugfs_write.cold+0x32/0x44
> >      ...
> >
> >     assert no writes to 0xffffffffab3d1540 of 8 bytes by task 464 on cpu 0:
> >      test_thread+0xa3/0x111
> >      debugfs_write.cold+0x32/0x44
> >      ...
> >     ==================================================================
> >
> >     ==================================================================
> >     BUG: KCSAN: assert: race in test_thread / test_thread
> >
> >     assert no accesses to 0xffffffffab3d1540 of 8 bytes by task 465 on cpu 1:
> >      test_thread+0xb9/0x111
> >      debugfs_write.cold+0x32/0x44
> >      ...
> >
> >     read to 0xffffffffab3d1540 of 8 bytes by task 464 on cpu 0:
> >      test_thread+0x77/0x111
> >      debugfs_write.cold+0x32/0x44
> >      ...
> >     ==================================================================
> >
> > Signed-off-by: Marco Elver <elver@google.com>
> > Suggested-by: Paul E. McKenney <paulmck@kernel.org>
> > Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
> > ---
> >  include/linux/kcsan-checks.h | 40 ++++++++++++++++++++++++++++++++++++++++
> >  1 file changed, 40 insertions(+)
> >
> > diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h
> > index 5dcadc2..cf69617 100644
> > --- a/include/linux/kcsan-checks.h
> > +++ b/include/linux/kcsan-checks.h
> > @@ -96,4 +96,44 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size,
> >       kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC | KCSAN_ACCESS_WRITE)
> >  #endif
> >
> > +/**
> > + * ASSERT_EXCLUSIVE_WRITER - assert no other threads are writing @var
> > + *
> > + * Assert that there are no other threads writing @var; other readers are
> > + * allowed. This assertion can be used to specify properties of concurrent code,
> > + * where violation cannot be detected as a normal data race.
> > + *
>
> I like the idea that we can assert no other writers, however I think
> assertions like ASSERT_EXCLUSIVE_WRITER() are a little limited. For
> example, if we have the following code:
>
>         preempt_disable();
>         do_sth();
>         raw_cpu_write(var, 1);
>         do_sth_else();
>         preempt_enable();
>
> we can add the assert to detect another potential writer like:
>
>         preempt_disable();
>         do_sth();
>         ASSERT_EXCLUSIVE_WRITER(var);
>         raw_cpu_write(var, 1);
>         do_sth_else();
>         preempt_enable();
>
> , but, if I understand how KCSAN works correctly, it only works if
> another writer happens when the ASSERT_EXCLUSIVE_WRITER(var) is called,
> IOW, it can only detect another writer between do_sth() and
> raw_cpu_write(). But our intent is to prevent other writers for the
> whole preemption-off section. With this assertion introduced, people may
> end up with code like:

To confirm: KCSAN will detect a race if it sets up a watchpoint on
ASSERT_EXCLUSIVE_WRITER(var), and a concurrent write happens. Note
that the watchpoints aren't always set up, but only periodically
(discussed more below). For every watchpoint, we also inject an
artificial delay. Pseudo-code:

if watchpoint for access already set up {
  consume watchpoint;
} else if should set up watchpoint {
  setup watchpoint;
  udelay(...);
  check watchpoint consumed;
  release watchpoint;
}

>         preempt_disable();
>         ASSERT_EXCLUSIVE_WRITER(var);
>         do_sth();
>         ASSERT_EXCLUSIVE_WRITER(var);
>         raw_cpu_write(var, 1);
>         ASSERT_EXCLUSIVE_WRITER(var);
>         do_sth_else();
>         ASSERT_EXCLUSIVE_WRITER(var);
>         preempt_enable();
>
> and that is horrible...

It is, and I would strongly discourage any such use, because it's not
necessary. See below.

> So how about making a pair of annotations
> ASSERT_EXCLUSIVE_WRITER_BEGIN() and ASSERT_EXCLUSIVE_WRITER_END(), so
> that we can write code like:
>
>         preempt_disable();
>         ASSERT_EXCLUSIVE_WRITER_BEGIN(var);
>         do_sth();
>         raw_cpu_write(var, 1);
>         do_sth_else();
>         ASSERT_EXCLUSIVE_WRITER_END(var);
>         preempt_enable();
>
> ASSERT_EXCLUSIVE_WRITER_BEGIN() could be a rough version of watchpoint
> setting up and ASSERT_EXCLUSIVE_WRITER_END() could be watchpoint
> removing. So I think it's feasible.

Keep in mind that the time from ASSERT_EXCLUSIVE_WRITER_BEGIN to END
might be on the order of a few nanosec, whereas KCSAN's default
watchpoint delay is 10s of microsec (default ~80 for tasks). That
means we would still have to set up a delay somewhere, and the few
nanosec between BEGIN and END are insignificant and don't buy us
anything.

Re feasibility: Right now setting up and removing watchpoints is not
exposed, and doing something like this would be an extremely intrusive
change. Because of that, without being able to quantify the actual
usefulness of this, and having evaluated better options (see below),
I'd recommend not pursuing this.

> Thoughts?

Firstly, what is your objective? From what I gather you want to
increase the probability of detecting a race with 'var'.

I agree, and have been thinking about it, but there are other options
that haven't been exhausted, before we go and make the interface more
complicated.

== Interface design ==
The interface as it is right now is intuitive, and using it is hard to
get wrong. Demanding begin/end markers introduces complexity that will
undoubtedly result in incorrect usage, because as soon as you somehow
forget to end the region, you'll get tons of false positives. This may
be due to control-flow that was missed etc. We had a similar problem
with seqlocks, and getting them to work correctly with KCSAN was
extremely difficult, because clear begin and end markers weren't
always given. I imagine introducing an interface like this will
ultimately result in similar problems, as much as we'd like to believe
this won't ever happen.

== Improving race detection for KCSAN_ACCESS_ASSERT access types ==
There are several options:

1. Always set up a watchpoint for assert-type accesses, and ignore
KCSAN_SKIP_WATCH/kcsan_skip counter (see 'should_watch()'). One
problem with this is that it would seriously impact overall
performance as soon as we get a few ASSERT_EXCLUSIVE_*() in a hot path
somewhere. A compromise might be simply being more aggressive with
setting up watchpoints on assert-type accesses.

2. Let's say in the above example (without BEGIN/END) the total
duration (via udelay) of watchpoints for 'var' being set up is 4*D.
Why not just increase the watchpoint delay for assert-type accesses to
4*D? Then, just having one ASSERT_EXCLUSIVE_WRITER(var) somewhere in
the region would have the same probability of catching a race.
(Assuming that the region's remaining execution time is on the order
of nanosecs.)

I have some limited evidence that (1) is going to help, but not (2).
This is based on experiments trying to reproduce racy use-after-free
bugs that KASAN found, but with KCSAN. The problem is that it does
slow down overall system performance if used in a hot path like an
allocator, which led me to a 3rd option.

3. Do option (1) but do the opposite of (2), i.e. always set up a
watchpoint on assert-type accesses, but *reduce* the watchpoint delay.

I haven't yet sent a patch for any one of 1-3 because I'm hesitant
until we can actually show one of them would always be useful and
improve things. For now, the best thing is to dynamically adjust
udelay_{task,interrupt} and skip_watch either via Kconfig options or
/sys/module/kcsan/parameters/, and not add more complexity without
good justification. A good stress test will also go a long way.

There are some more (probably bad) ideas I have, but the above are the
best options for now.

So, anything that somehow increases the total time that a watchpoint
is set up will increase the probability of detecting a race. However,
we're also trying to balance overall system performance, as poor
performance could equally affect race detection negatively (fewer
instructions executed, etc.). Right now any one of 1-3 might sound
like a decent idea, but I don't know what it will look like once we
have dozens of ASSERT_EXCLUSIVE_*() in places, especially if a few of
them are in hot paths.

Thanks,
-- Marco






> Regards,
> Boqun
>
> > + * For example, if a per-CPU variable is only meant to be written by a single
> > + * CPU, but may be read from other CPUs; in this case, reads and writes must be
> > + * marked properly, however, if an off-CPU WRITE_ONCE() races with the owning
> > + * CPU's WRITE_ONCE(), would not constitute a data race but could be a harmful
> > + * race condition. Using this macro allows specifying this property in the code
> > + * and catch such bugs.
> > + *
> > + * @var variable to assert on
> > + */
> > +#define ASSERT_EXCLUSIVE_WRITER(var)                                           \
> > +     __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT)
> > +
> > +/**
> > + * ASSERT_EXCLUSIVE_ACCESS - assert no other threads are accessing @var
> > + *
> > + * Assert that no other thread is accessing @var (no readers nor writers). This
> > + * assertion can be used to specify properties of concurrent code, where
> > + * violation cannot be detected as a normal data race.
> > + *
> > + * For example, in a reference-counting algorithm where exclusive access is
> > + * expected after the refcount reaches 0. We can check that this property
> > + * actually holds as follows:
> > + *
> > + *   if (refcount_dec_and_test(&obj->refcnt)) {
> > + *           ASSERT_EXCLUSIVE_ACCESS(*obj);
> > + *           safely_dispose_of(obj);
> > + *   }
> > + *
> > + * @var variable to assert on
> > + */
> > +#define ASSERT_EXCLUSIVE_ACCESS(var)                                           \
> > +     __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT)
> > +
> >  #endif /* _LINUX_KCSAN_CHECKS_H */
> > --
> > 2.9.5
> >

^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [PATCH kcsan 17/32] kcsan: Introduce ASSERT_EXCLUSIVE_* macros
  2020-03-13 16:15     ` Marco Elver
@ 2020-03-14  2:22       ` Boqun Feng
  2020-03-17 11:12         ` Marco Elver
  0 siblings, 1 reply; 50+ messages in thread
From: Boqun Feng @ 2020-03-14  2:22 UTC (permalink / raw)
  To: Marco Elver
  Cc: Paul E. McKenney, LKML, kasan-dev, kernel-team, Ingo Molnar,
	Andrey Konovalov, Alexander Potapenko, Dmitry Vyukov, Qian Cai

On Fri, Mar 13, 2020 at 05:15:32PM +0100, Marco Elver wrote:
> On Fri, 13 Mar 2020 at 09:52, Boqun Feng <boqun.feng@gmail.com> wrote:
> >
> > Hi Marco,
> >
> > On Mon, Mar 09, 2020 at 12:04:05PM -0700, paulmck@kernel.org wrote:
> > > From: Marco Elver <elver@google.com>
> > >
> > > Introduces ASSERT_EXCLUSIVE_WRITER and ASSERT_EXCLUSIVE_ACCESS, which
> > > may be used to assert properties of synchronization logic, where
> > > violation cannot be detected as a normal data race.
> > >
> > > Examples of the reports that may be generated:
> > >
> > >     ==================================================================
> > >     BUG: KCSAN: assert: race in test_thread / test_thread
> > >
> > >     write to 0xffffffffab3d1540 of 8 bytes by task 466 on cpu 2:
> > >      test_thread+0x8d/0x111
> > >      debugfs_write.cold+0x32/0x44
> > >      ...
> > >
> > >     assert no writes to 0xffffffffab3d1540 of 8 bytes by task 464 on cpu 0:
> > >      test_thread+0xa3/0x111
> > >      debugfs_write.cold+0x32/0x44
> > >      ...
> > >     ==================================================================
> > >
> > >     ==================================================================
> > >     BUG: KCSAN: assert: race in test_thread / test_thread
> > >
> > >     assert no accesses to 0xffffffffab3d1540 of 8 bytes by task 465 on cpu 1:
> > >      test_thread+0xb9/0x111
> > >      debugfs_write.cold+0x32/0x44
> > >      ...
> > >
> > >     read to 0xffffffffab3d1540 of 8 bytes by task 464 on cpu 0:
> > >      test_thread+0x77/0x111
> > >      debugfs_write.cold+0x32/0x44
> > >      ...
> > >     ==================================================================
> > >
> > > Signed-off-by: Marco Elver <elver@google.com>
> > > Suggested-by: Paul E. McKenney <paulmck@kernel.org>
> > > Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
> > > ---
> > >  include/linux/kcsan-checks.h | 40 ++++++++++++++++++++++++++++++++++++++++
> > >  1 file changed, 40 insertions(+)
> > >
> > > diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h
> > > index 5dcadc2..cf69617 100644
> > > --- a/include/linux/kcsan-checks.h
> > > +++ b/include/linux/kcsan-checks.h
> > > @@ -96,4 +96,44 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size,
> > >       kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC | KCSAN_ACCESS_WRITE)
> > >  #endif
> > >
> > > +/**
> > > + * ASSERT_EXCLUSIVE_WRITER - assert no other threads are writing @var
> > > + *
> > > + * Assert that there are no other threads writing @var; other readers are
> > > + * allowed. This assertion can be used to specify properties of concurrent code,
> > > + * where violation cannot be detected as a normal data race.
> > > + *
> >
> > I like the idea that we can assert no other writers, however I think
> > assertions like ASSERT_EXCLUSIVE_WRITER() are a little limited. For
> > example, if we have the following code:
> >
> >         preempt_disable();
> >         do_sth();
> >         raw_cpu_write(var, 1);
> >         do_sth_else();
> >         preempt_enable();
> >
> > we can add the assert to detect another potential writer like:
> >
> >         preempt_disable();
> >         do_sth();
> >         ASSERT_EXCLUSIVE_WRITER(var);
> >         raw_cpu_write(var, 1);
> >         do_sth_else();
> >         preempt_enable();
> >
> > , but, if I understand how KCSAN works correctly, it only works if the
> > another writer happens when the ASSERT_EXCLUSIVE_WRITER(var) is called,
> > IOW, it can only detect another writer between do_sth() and
> > raw_cpu_write(). But our intent is to prevent other writers for the
> > whole preemption-off section. With this assertion introduced, people may
> > end up with code like:
> 
> To confirm: KCSAN will detect a race if it sets up a watchpoint on
> ASSERT_EXCLUSIVE_WRITER(var), and a concurrent write happens. Note
> that the watchpoints aren't always set up, but only periodically
> (discussed more below). For every watchpoint, we also inject an
> artificial delay. Pseudo-code:
> 
> if watchpoint for access already set up {
>   consume watchpoint;
> } else if should set up watchpoint {
>   setup watchpoint;
>   udelay(...);
>   check watchpoint consumed;
>   release watchpoint;
> }
> 

Yes, I get this part.

> >         preempt_disable();
> >         ASSERT_EXCLUSIVE_WRITER(var);
> >         do_sth();
> >         ASSERT_EXCLUSIVE_WRITER(var);
> >         raw_cpu_write(var, 1);
> >         ASSERT_EXCLUSIVE_WRITER(var);
> >         do_sth_else();
> >         ASSERT_EXCLUSIVE_WRITER(var);
> >         preempt_enable();
> >
> > and that is horrible...
> 
> It is, and I would strongly discourage any such use, because it's not
> necessary. See below.
> 
> > So how about making a pair of annotations
> > ASSERT_EXCLUSIVE_WRITER_BEGIN() and ASSERT_EXCLUSIVE_WRITER_END(), so
> > that we can write code like:
> >
> >         preempt_disable();
> >         ASSERT_EXCLUSIVE_WRITER_BEGIN(var);
> >         do_sth();
> >         raw_cpu_write(var, 1);
> >         do_sth_else();
> >         ASSERT_EXCLUSIVE_WRITER_END(var);
> >         preempt_enable();
> >
> > ASSERT_EXCLUSIVE_WRITER_BEGIN() could be a rough version of watchpoint
> > setting up and ASSERT_EXCLUSIVE_WRITER_END() could be watchpoint
> > removing. So I think it's feasible.
> 
> Keep in mind that the time from ASSERT_EXCLUSIVE_WRITER_BEGIN to END
> might be on the order of a few nanosec, whereas KCSAN's default
> watchpoint delay is 10s of microsec (default ~80 for tasks). That
> means we would still have to set up a delay somewhere, and the few
> nanosec between BEGIN and END are insignificant and don't buy us
> anything.
> 

Yeah, the delay doesn't buy us anything given the default watchpoint
delay, and I agree that even with *_{BEGIN/END}, we still need to set up a
delay somewhere. Adding a delay makes the watchpoint live longer so that
a problem is more likely to happen, but sometimes the delay won't be
enough, considering another writer like:

	if (per_cpu(var, cpu) == 1)
		per_cpu(var, cpu) = 0;

in this use case, percpu variable "var" is used for maintaining some
state machine, and a CPU sets a state with its own variable so that other
CPUs can consume it. And this other writer cannot be caught by:

	preempt_disable();
	do_sth();
	ASSERT_EXCLUSIVE_WRITER(var);
	raw_cpu_write(var, 1);
	do_sth_else();
	preempt_enable();

, no matter how long the delay is set. Another example: let's say the
do_sth_else() above is actually an operation that queues a callback
which writes to "var". In one version, do_sth_else() uses call_rcu(),
which works, because a preemption-off section is treated as an RCU read-side
critical section, so we are fine. But if someone else changes it to queue_work()
for some reason, the code is just broken, and KCSAN cannot detect it, no
matter how long the delay is.

To summarize, a delay is helpful for triggering a problem because it allows
_other_ CPUs/threads to run more code and do more memory accesses;
however, it's not helpful if a particular problem happens due to some
memory effects of the current/watched CPU/thread, while *_{BEGIN/END}
can be helpful in that case.

> Re feasibility: Right now setting up and removing watchpoints is not
> exposed, and doing something like this would be an extremely intrusive
> change. Because of that, without being able to quantify the actual
> usefulness of this, and having evaluated better options (see below),
> I'd recommend not pursuing this.
> 
> > Thoughts?
> 
> Firstly, what is your objective? From what I gather you want to
> increase the probability of detecting a race with 'var'.
> 

Right, I want to increase the probability.

> I agree, and have been thinking about it, but there are other options
> that haven't been exhausted, before we go and make the interface more
> complicated.
> 
> == Interface design ==
> The interface as it is right now, is intuitive and using it is hard to
> get wrong. Demanding begin/end markers introduces complexity that will

Yeah, the interface is intuitive; however, it's still an extra effort to
put those assertions in, right? Which means it doesn't come for free,
unlike the other detection KCSAN can do, where developers don't need to
add extra lines of code. Given the extra effort required of developers
to use this detection, I think we should discuss the design thoroughly.

Besides, the semantics of an assertion is usually "do some checking right
now to see if things go wrong", and I don't think that quite matches the
semantics of an exclusive writer: "in this piece of code, I'm the only
one who can do the write".

> undoubtedly result in incorrect usage, because as soon as you somehow
> forget to end the region, you'll get tons of false positives. This may
> be due to control-flow that was missed etc. We had a similar problem
> with seqlocks, and getting them to work correctly with KCSAN was
> extremely difficult, because clear begin and end markers weren't
> always given. I imagine introducing an interface like this will
> ultimately result in similar problems, as much as we'd like to believe
> this won't ever happen.
> 

Well, if we use the *_{BEGIN,END} approach, one solution is combining them
with the primitives that introduce such sections (such as preempt_disable()
and preempt_enable()); for example, we can add

	#define preempt_disable_for(var)				\
	do {								\
		preempt_disable();					\
		ASSERT_EXCLUSIVE_WRITER_BEGIN(var);			\
	} while (0)

	#define preempt_enable_for(var)					\
	do {								\
		ASSERT_EXCLUSIVE_WRITER_END(var);			\
		preempt_enable();					\
	} while (0)

	(similar for spin lock)

	#define spin_lock_for(lock, var)				\
	do {								\
		spin_lock(lock);					\
		ASSERT_EXCLUSIVE_WRITER_BEGIN(var);			\
	} while (0)

	#define spin_unlock_for(lock, var)				\
	do {								\
		ASSERT_EXCLUSIVE_WRITER_END(var);			\
		spin_unlock(lock);					\
	} while (0)

I admit that I haven't thought this through thoroughly, but I think this
works, and besides, primitives like the above can help the reader answer
questions like: what are these lock/preemption-off critical sections
protecting?
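
With these, the earlier per-CPU example would then simply read (using
the hypothetical macros above):

	preempt_disable_for(var);
	do_sth();
	raw_cpu_write(var, 1);
	do_sth_else();
	preempt_enable_for(var);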

Thoughts?

Regards,
Boqun

> == Improving race detection for KCSAN_ACCESS_ASSERT access types ==
> There are several options:
> 
> 1. Always set up a watchpoint for assert-type accesses, and ignore
> KCSAN_SKIP_WATCH/kcsan_skip counter (see 'should_watch()'). One
> problem with this is that it would seriously impact overall
> performance as soon as we get a few ASSERT_EXCLUSIVE_*() in a hot path
> somewhere. A compromise might be simply being more aggressive with
> setting up watchpoints on assert-type accesses.
> 
> 2. Let's say in the above example (without BEGIN/END) the total
> duration (via udelay) of watchpoints for 'var' being set up is 4*D.
> Why not just increase the watchpoint delay for assert-type accesses to
> 4*D? Then, just having one ASSERT_EXCLUSIVE_WRITER(var) somewhere in
> the region would have the same probability of catching a race.
> (Assuming that the region's remaining execution time is on the order
> of nanosecs.)
> 
> I have some limited evidence that (1) is going to help, but not (2).
> This is based on experiments trying to reproduce racy use-after-free
> bugs that KASAN found, but with KCSAN. The problem is that it does
> slow-down overall system performance if in a hot path like an
> allocator. Which led me to a 3rd option.
> 
> 3. Do option (1) but do the opposite of (2), i.e. always set up a
> watchpoint on assert-type accesses, but *reduce* the watchpoint delay.
> 
> I haven't yet sent a patch for any one of 1-3 because I'm hesitant
> until we can actually show one of them would always be useful and
> improve things. For now, the best thing is to dynamically adjust
> udelay_{task,interrupt} and skip_watch either via Kconfig options or
> /sys/modules/kcsan/parameters/ and not add more complexity without
> good justification. A good stress test will also go a long way.
> 
> There are some more (probably bad) ideas I have, but the above are the
> best options for now.
> 
> So, anything that somehow increases the total time that a watchpoint
> is set up will increase the probability of detecting a race. However,
> we're also trying to balance overall system performance, as poor
> performance could equally affect race detection negatively (fewer
> instructions executed, etc.). Right now any one of 1-3 might sound
> like a decent idea, but I don't know what it will look like once we
> have dozens of ASSERT_EXCLUSIVE_*() in places, especially if a few of
> them are in hot paths.
> 
> Thanks,
> -- Marco
> 
> 
> 
> 
> 
> 
> > Regards,
> > Boqun
> >
> > > + * For example, if a per-CPU variable is only meant to be written by a single
> > > + * CPU, but may be read from other CPUs; in this case, reads and writes must be
> > > + * marked properly, however, if an off-CPU WRITE_ONCE() races with the owning
> > > + * CPU's WRITE_ONCE(), would not constitute a data race but could be a harmful
> > > + * race condition. Using this macro allows specifying this property in the code
> > > + * and catch such bugs.
> > > + *
> > > + * @var variable to assert on
> > > + */
> > > +#define ASSERT_EXCLUSIVE_WRITER(var)                                           \
> > > +     __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT)
> > > +
> > > +/**
> > > + * ASSERT_EXCLUSIVE_ACCESS - assert no other threads are accessing @var
> > > + *
> > > + * Assert that no other thread is accessing @var (no readers nor writers). This
> > > + * assertion can be used to specify properties of concurrent code, where
> > > + * violation cannot be detected as a normal data race.
> > > + *
> > > + * For example, in a reference-counting algorithm where exclusive access is
> > > + * expected after the refcount reaches 0. We can check that this property
> > > + * actually holds as follows:
> > > + *
> > > + *   if (refcount_dec_and_test(&obj->refcnt)) {
> > > + *           ASSERT_EXCLUSIVE_ACCESS(*obj);
> > > + *           safely_dispose_of(obj);
> > > + *   }
> > > + *
> > > + * @var variable to assert on
> > > + */
> > > +#define ASSERT_EXCLUSIVE_ACCESS(var)                                           \
> > > +     __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT)
> > > +
> > >  #endif /* _LINUX_KCSAN_CHECKS_H */
> > > --
> > > 2.9.5
> > >

^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [PATCH kcsan 27/32] kcsan: Add option to allow watcher interruptions
  2020-03-13 15:28       ` Marco Elver
@ 2020-03-16 13:56         ` Marco Elver
  2020-03-16 15:45           ` Paul E. McKenney
  2020-03-18 17:42           ` Marco Elver
  0 siblings, 2 replies; 50+ messages in thread
From: Marco Elver @ 2020-03-16 13:56 UTC (permalink / raw)
  To: Paul E. McKenney
  Cc: LKML, kasan-dev, kernel-team, Ingo Molnar, Andrey Konovalov,
	Alexander Potapenko, Dmitry Vyukov, Qian Cai, Boqun Feng

On Fri, 13 Mar 2020 at 16:28, Marco Elver <elver@google.com> wrote:
>
> On Thu, 12 Mar 2020 at 19:04, Paul E. McKenney <paulmck@kernel.org> wrote:
> >
> > On Thu, Mar 12, 2020 at 11:03:28AM -0700, Paul E. McKenney wrote:
> > > On Mon, Mar 09, 2020 at 12:04:15PM -0700, paulmck@kernel.org wrote:
> > > > From: Marco Elver <elver@google.com>
> > > >
> > > > Add option to allow interrupts while a watchpoint is set up. This can be
> > > > enabled either via CONFIG_KCSAN_INTERRUPT_WATCHER or via the boot
> > > > parameter 'kcsan.interrupt_watcher=1'.
> > > >
> > > > Note that, currently not all safe per-CPU access primitives and patterns
> > > > are accounted for, which could result in false positives. For example,
> > > > asm-generic/percpu.h uses plain operations, which by default are
> > > > instrumented. On interrupts and subsequent accesses to the same
> > > > variable, KCSAN would currently report a data race with this option.
> > > >
> > > > Therefore, this option should currently remain disabled by default, but
> > > > may be enabled for specific test scenarios.
> > > >
> > > > To avoid new warnings, changes all uses of smp_processor_id() to use the
> > > > raw version (as already done in kcsan_found_watchpoint()). The exact SMP
> > > > processor id is for informational purposes in the report, and
> > > > correctness is not affected.
> > > >
> > > > Signed-off-by: Marco Elver <elver@google.com>
> > > > Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
> > >
> > > And I get silent hangs that bisect to this patch when running the
> > > following rcutorture command, run in the kernel source tree on a
> > > 12-hardware-thread laptop:
> > >
> > > bash tools/testing/selftests/rcutorture/bin/kvm.sh --cpus 12 --duration 10 --kconfig "CONFIG_DEBUG_INFO=y CONFIG_KCSAN=y CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=n CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY=n CONFIG_KCSAN_REPORT_ONCE_IN_MS=100000 CONFIG_KCSAN_VERBOSE=y CONFIG_KCSAN_INTERRUPT_WATCHER=y" --configs TREE03
> > >
> > > It works fine on some (but not all) of the other rcutorture test
> > > scenarios.  It fails on TREE01, TREE02, TREE03, TREE09.  The common thread
> > > is that these are the TREE scenarios are all PREEMPT=y.  So are RUDE01,
> > > SRCU-P, TASKS01, and TASKS03, but these scenarios are not hammering
> > > on Tree RCU, and thus have far less interrupt activity and the like.
> > > Given that it is an interrupt-related feature being added by this commit,
> > > this seems like expected (mis)behavior.
> > >
> > > Can you reproduce this?  If not, are there any diagnostics I can add to
> > > my testing?  Or a diagnostic patch I could apply?
>
> I think I can reproduce it.  Let me debug some more, so far I haven't
> found anything yet.
>
> What I do know is that it's related to reporting. Turning kcsan_report
> into a noop makes the test run to completion.
>
> > I should hasten to add that this feature was quite helpful in recent work!
>
> Good to know. :-)  We can probably keep this patch, since the default
> config doesn't turn this on. But I will try to see what's up with the
> hangs, and hopefully find a fix.

So this one turned out to be quite interesting. We can get deadlocks
if multiple watchpoints can be set up per task, i.e. when a task is
interrupted and the interrupt sets up another watchpoint, and there
are many concurrent races happening: the other_info struct in
report.c may never be released, because an interrupt can block the
consumer that is waiting for other_info to be released.
Give me another day or 2 to come up with a decent fix.

Thanks,
-- Marco

^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [PATCH kcsan 27/32] kcsan: Add option to allow watcher interruptions
  2020-03-16 13:56         ` Marco Elver
@ 2020-03-16 15:45           ` Paul E. McKenney
  2020-03-16 16:22             ` Marco Elver
  2020-03-18 17:42           ` Marco Elver
  1 sibling, 1 reply; 50+ messages in thread
From: Paul E. McKenney @ 2020-03-16 15:45 UTC (permalink / raw)
  To: Marco Elver
  Cc: LKML, kasan-dev, kernel-team, Ingo Molnar, Andrey Konovalov,
	Alexander Potapenko, Dmitry Vyukov, Qian Cai, Boqun Feng

On Mon, Mar 16, 2020 at 02:56:38PM +0100, Marco Elver wrote:
> On Fri, 13 Mar 2020 at 16:28, Marco Elver <elver@google.com> wrote:
> >
> > On Thu, 12 Mar 2020 at 19:04, Paul E. McKenney <paulmck@kernel.org> wrote:
> > >
> > > On Thu, Mar 12, 2020 at 11:03:28AM -0700, Paul E. McKenney wrote:
> > > > On Mon, Mar 09, 2020 at 12:04:15PM -0700, paulmck@kernel.org wrote:
> > > > > From: Marco Elver <elver@google.com>
> > > > >
> > > > > Add option to allow interrupts while a watchpoint is set up. This can be
> > > > > enabled either via CONFIG_KCSAN_INTERRUPT_WATCHER or via the boot
> > > > > parameter 'kcsan.interrupt_watcher=1'.
> > > > >
> > > > > Note that, currently not all safe per-CPU access primitives and patterns
> > > > > are accounted for, which could result in false positives. For example,
> > > > > asm-generic/percpu.h uses plain operations, which by default are
> > > > > instrumented. On interrupts and subsequent accesses to the same
> > > > > variable, KCSAN would currently report a data race with this option.
> > > > >
> > > > > Therefore, this option should currently remain disabled by default, but
> > > > > may be enabled for specific test scenarios.
> > > > >
> > > > > To avoid new warnings, changes all uses of smp_processor_id() to use the
> > > > > raw version (as already done in kcsan_found_watchpoint()). The exact SMP
> > > > > processor id is for informational purposes in the report, and
> > > > > correctness is not affected.
> > > > >
> > > > > Signed-off-by: Marco Elver <elver@google.com>
> > > > > Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
> > > >
> > > > And I get silent hangs that bisect to this patch when running the
> > > > following rcutorture command, run in the kernel source tree on a
> > > > 12-hardware-thread laptop:
> > > >
> > > > bash tools/testing/selftests/rcutorture/bin/kvm.sh --cpus 12 --duration 10 --kconfig "CONFIG_DEBUG_INFO=y CONFIG_KCSAN=y CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=n CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY=n CONFIG_KCSAN_REPORT_ONCE_IN_MS=100000 CONFIG_KCSAN_VERBOSE=y CONFIG_KCSAN_INTERRUPT_WATCHER=y" --configs TREE03
> > > >
> > > > It works fine on some (but not all) of the other rcutorture test
> > > > scenarios.  It fails on TREE01, TREE02, TREE03, TREE09.  The common thread
> > > > is that these are the TREE scenarios are all PREEMPT=y.  So are RUDE01,
> > > > SRCU-P, TASKS01, and TASKS03, but these scenarios are not hammering
> > > > on Tree RCU, and thus have far less interrupt activity and the like.
> > > > Given that it is an interrupt-related feature being added by this commit,
> > > > this seems like expected (mis)behavior.
> > > >
> > > > Can you reproduce this?  If not, are there any diagnostics I can add to
> > > > my testing?  Or a diagnostic patch I could apply?
> >
> > I think I can reproduce it.  Let me debug some more, so far I haven't
> > found anything yet.
> >
> > What I do know is that it's related to reporting. Turning kcsan_report
> > into a noop makes the test run to completion.
> >
> > > I should hasten to add that this feature was quite helpful in recent work!
> >
> > Good to know. :-)  We can probably keep this patch, since the default
> > config doesn't turn this on. But I will try to see what's up with the
> > hangs, and hopefully find a fix.
> 
> So this one turned out to be quite interesting. We can get deadlocks
> if we can set up multiple watchpoints per task in case it's
> interrupted and the interrupt sets up another watchpoint, and there
> are many concurrent races happening; because the other_info struct in
> report.c may never be released if an interrupt blocks the consumer due
> to waiting for other_info to become released.

Been there, done that!  ;-)

> Give me another day or 2 to come up with a decent fix.

My thought is to send a pull request for the commits up to but not
including this patch, allowing ample development and testing time for
the fix.  My concern with sending this, even with a fix, is that any
further bugs might cast a shadow on the whole series, further slowing
acceptance into mainline.

Fair enough?

							Thanx, Paul

^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [PATCH kcsan 27/32] kcsan: Add option to allow watcher interruptions
  2020-03-16 15:45           ` Paul E. McKenney
@ 2020-03-16 16:22             ` Marco Elver
  2020-03-17 17:13               ` Paul E. McKenney
  0 siblings, 1 reply; 50+ messages in thread
From: Marco Elver @ 2020-03-16 16:22 UTC (permalink / raw)
  To: Paul E. McKenney
  Cc: LKML, kasan-dev, kernel-team, Ingo Molnar, Andrey Konovalov,
	Alexander Potapenko, Dmitry Vyukov, Qian Cai, Boqun Feng

On Mon, 16 Mar 2020 at 16:45, Paul E. McKenney <paulmck@kernel.org> wrote:
>
> On Mon, Mar 16, 2020 at 02:56:38PM +0100, Marco Elver wrote:
> > On Fri, 13 Mar 2020 at 16:28, Marco Elver <elver@google.com> wrote:
> > >
> > > On Thu, 12 Mar 2020 at 19:04, Paul E. McKenney <paulmck@kernel.org> wrote:
> > > >
> > > > On Thu, Mar 12, 2020 at 11:03:28AM -0700, Paul E. McKenney wrote:
> > > > > On Mon, Mar 09, 2020 at 12:04:15PM -0700, paulmck@kernel.org wrote:
> > > > > > From: Marco Elver <elver@google.com>
> > > > > >
> > > > > > Add option to allow interrupts while a watchpoint is set up. This can be
> > > > > > enabled either via CONFIG_KCSAN_INTERRUPT_WATCHER or via the boot
> > > > > > parameter 'kcsan.interrupt_watcher=1'.
> > > > > >
> > > > > > Note that, currently not all safe per-CPU access primitives and patterns
> > > > > > are accounted for, which could result in false positives. For example,
> > > > > > asm-generic/percpu.h uses plain operations, which by default are
> > > > > > instrumented. On interrupts and subsequent accesses to the same
> > > > > > variable, KCSAN would currently report a data race with this option.
> > > > > >
> > > > > > Therefore, this option should currently remain disabled by default, but
> > > > > > may be enabled for specific test scenarios.
> > > > > >
> > > > > > To avoid new warnings, changes all uses of smp_processor_id() to use the
> > > > > > raw version (as already done in kcsan_found_watchpoint()). The exact SMP
> > > > > > processor id is for informational purposes in the report, and
> > > > > > correctness is not affected.
> > > > > >
> > > > > > Signed-off-by: Marco Elver <elver@google.com>
> > > > > > Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
> > > > >
> > > > > And I get silent hangs that bisect to this patch when running the
> > > > > following rcutorture command, run in the kernel source tree on a
> > > > > 12-hardware-thread laptop:
> > > > >
> > > > > bash tools/testing/selftests/rcutorture/bin/kvm.sh --cpus 12 --duration 10 --kconfig "CONFIG_DEBUG_INFO=y CONFIG_KCSAN=y CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=n CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY=n CONFIG_KCSAN_REPORT_ONCE_IN_MS=100000 CONFIG_KCSAN_VERBOSE=y CONFIG_KCSAN_INTERRUPT_WATCHER=y" --configs TREE03
> > > > >
> > > > > It works fine on some (but not all) of the other rcutorture test
> > > > > scenarios.  It fails on TREE01, TREE02, TREE03, TREE09.  The common thread
> > > > > is that these are the TREE scenarios are all PREEMPT=y.  So are RUDE01,
> > > > > SRCU-P, TASKS01, and TASKS03, but these scenarios are not hammering
> > > > > on Tree RCU, and thus have far less interrupt activity and the like.
> > > > > Given that it is an interrupt-related feature being added by this commit,
> > > > > this seems like expected (mis)behavior.
> > > > >
> > > > > Can you reproduce this?  If not, are there any diagnostics I can add to
> > > > > my testing?  Or a diagnostic patch I could apply?
> > >
> > > I think I can reproduce it.  Let me debug some more, so far I haven't
> > > found anything yet.
> > >
> > > What I do know is that it's related to reporting. Turning kcsan_report
> > > into a noop makes the test run to completion.
> > >
> > > > I should hasten to add that this feature was quite helpful in recent work!
> > >
> > > Good to know. :-)  We can probably keep this patch, since the default
> > > config doesn't turn this on. But I will try to see what's up with the
> > > hangs, and hopefully find a fix.
> >
> > So this one turned out to be quite interesting. We can get deadlocks
> > if we can set up multiple watchpoints per task in case it's
> > interrupted and the interrupt sets up another watchpoint, and there
> > are many concurrent races happening; because the other_info struct in
> > report.c may never be released if an interrupt blocks the consumer due
> > to waiting for other_info to become released.
>
> Been there, done that!  ;-)
>
> > Give me another day or 2 to come up with a decent fix.
>
> My thought is to send a pull request for the commits up to but not
> including this patch, allowing ample development and testing time for
> the fix.  My concern with sending this, even with a fix, is that any
> further bugs might cast a shadow on the whole series, further slowing
> acceptance into mainline.
>
> Fair enough?

That's fine. I think the feature changes can stay on -rcu/kcsan-dev
for now, but the documentation updates don't depend on them.
If it'd be useful, the updated documentation could be moved before
this patch to -rcu/kcsan, so we'd have

 kcsan: Add current->state to implicitly atomic accesses
 kcsan: Add option for verbose reporting
 kcsan: Add option to allow watcher interruptions
-- cut --
 kcsan: Update API documentation in kcsan-checks.h
 kcsan: Update Documentation/dev-tools/kcsan.rst
 kcsan: Fix a typo in a comment
.. rest of series ..

Although I'm fine with either.

Thanks,
-- Marco

^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [PATCH kcsan 17/32] kcsan: Introduce ASSERT_EXCLUSIVE_* macros
  2020-03-14  2:22       ` Boqun Feng
@ 2020-03-17 11:12         ` Marco Elver
  2020-03-19  3:23           ` Boqun Feng
  0 siblings, 1 reply; 50+ messages in thread
From: Marco Elver @ 2020-03-17 11:12 UTC (permalink / raw)
  To: Boqun Feng
  Cc: Paul E. McKenney, LKML, kasan-dev, kernel-team, Ingo Molnar,
	Andrey Konovalov, Alexander Potapenko, Dmitry Vyukov, Qian Cai

On Sat, 14 Mar 2020 at 03:22, Boqun Feng <boqun.feng@gmail.com> wrote:
>
> On Fri, Mar 13, 2020 at 05:15:32PM +0100, Marco Elver wrote:
> > On Fri, 13 Mar 2020 at 09:52, Boqun Feng <boqun.feng@gmail.com> wrote:
> > >
> > > Hi Marco,
> > >
> > > On Mon, Mar 09, 2020 at 12:04:05PM -0700, paulmck@kernel.org wrote:
> > > > From: Marco Elver <elver@google.com>
> > > >
> > > > Introduces ASSERT_EXCLUSIVE_WRITER and ASSERT_EXCLUSIVE_ACCESS, which
> > > > may be used to assert properties of synchronization logic, where
> > > > violation cannot be detected as a normal data race.
> > > >
> > > > Examples of the reports that may be generated:
> > > >
> > > >     ==================================================================
> > > >     BUG: KCSAN: assert: race in test_thread / test_thread
> > > >
> > > >     write to 0xffffffffab3d1540 of 8 bytes by task 466 on cpu 2:
> > > >      test_thread+0x8d/0x111
> > > >      debugfs_write.cold+0x32/0x44
> > > >      ...
> > > >
> > > >     assert no writes to 0xffffffffab3d1540 of 8 bytes by task 464 on cpu 0:
> > > >      test_thread+0xa3/0x111
> > > >      debugfs_write.cold+0x32/0x44
> > > >      ...
> > > >     ==================================================================
> > > >
> > > >     ==================================================================
> > > >     BUG: KCSAN: assert: race in test_thread / test_thread
> > > >
> > > >     assert no accesses to 0xffffffffab3d1540 of 8 bytes by task 465 on cpu 1:
> > > >      test_thread+0xb9/0x111
> > > >      debugfs_write.cold+0x32/0x44
> > > >      ...
> > > >
> > > >     read to 0xffffffffab3d1540 of 8 bytes by task 464 on cpu 0:
> > > >      test_thread+0x77/0x111
> > > >      debugfs_write.cold+0x32/0x44
> > > >      ...
> > > >     ==================================================================
> > > >
> > > > Signed-off-by: Marco Elver <elver@google.com>
> > > > Suggested-by: Paul E. McKenney <paulmck@kernel.org>
> > > > Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
> > > > ---
> > > >  include/linux/kcsan-checks.h | 40 ++++++++++++++++++++++++++++++++++++++++
> > > >  1 file changed, 40 insertions(+)
> > > >
> > > > diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h
> > > > index 5dcadc2..cf69617 100644
> > > > --- a/include/linux/kcsan-checks.h
> > > > +++ b/include/linux/kcsan-checks.h
> > > > @@ -96,4 +96,44 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size,
> > > >       kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC | KCSAN_ACCESS_WRITE)
> > > >  #endif
> > > >
> > > > +/**
> > > > + * ASSERT_EXCLUSIVE_WRITER - assert no other threads are writing @var
> > > > + *
> > > > + * Assert that there are no other threads writing @var; other readers are
> > > > + * allowed. This assertion can be used to specify properties of concurrent code,
> > > > + * where violation cannot be detected as a normal data race.
> > > > + *
> > >
> > > I like the idea that we can assert no other writers, however I think
> > > assertions like ASSERT_EXCLUSIVE_WRITER() are a little limited. For
> > > example, if we have the following code:
> > >
> > >         preempt_disable();
> > >         do_sth();
> > >         raw_cpu_write(var, 1);
> > >         do_sth_else();
> > >         preempt_enable();
> > >
> > > we can add the assert to detect another potential writer like:
> > >
> > >         preempt_disable();
> > >         do_sth();
> > >         ASSERT_EXCLUSIVE_WRITER(var);
> > >         raw_cpu_write(var, 1);
> > >         do_sth_else();
> > >         preempt_enable();
> > >
> > > , but, if I understand how KCSAN works correctly, it only works if the
> > > another writer happens when the ASSERT_EXCLUSIVE_WRITER(var) is called,
> > > IOW, it can only detect another writer between do_sth() and
> > > raw_cpu_write(). But our intent is to prevent other writers for the
> > > whole preemption-off section. With this assertion introduced, people may
> > > end up with code like:
> >
> > To confirm: KCSAN will detect a race if it sets up a watchpoint on
> > ASSERT_EXCLUSIVE_WRITER(var), and a concurrent write happens. Note
> > that the watchpoints aren't always set up, but only periodically
> > (discussed more below). For every watchpoint, we also inject an
> > artificial delay. Pseudo-code:
> >
> > if watchpoint for access already set up {
> >   consume watchpoint;
> > } else if should set up watchpoint {
> >   setup watchpoint;
> >   udelay(...);
> >   check watchpoint consumed;
> >   release watchpoint;
> > }
> >
>
> Yes, I get this part.
>
> > >         preempt_disable();
> > >         ASSERT_EXCLUSIVE_WRITER(var);
> > >         do_sth();
> > >         ASSERT_EXCLUSIVE_WRITER(var);
> > >         raw_cpu_write(var, 1);
> > >         ASSERT_EXCLUSIVE_WRITER(var);
> > >         do_sth_else();
> > >         ASSERT_EXCLUSIVE_WRITER(var);
> > >         preempt_enable();
> > >
> > > and that is horrible...
> >
> > It is, and I would strongly discourage any such use, because it's not
> > necessary. See below.
> >
> > > So how about making a pair of annotations
> > > ASSERT_EXCLUSIVE_WRITER_BEGIN() and ASSERT_EXCLUSIVE_WRITER_END(), so
> > > that we can write code like:
> > >
> > >         preempt_disable();
> > >         ASSERT_EXCLUSIVE_WRITER_BEGIN(var);
> > >         do_sth();
> > >         raw_cpu_write(var, 1);
> > >         do_sth_else();
> > >         ASSERT_EXCLUSIVE_WRITER_END(var);
> > >         preempt_enable();
> > >
> > > ASSERT_EXCLUSIVE_WRITER_BEGIN() could be a rough version of watchpoint
> > > setting up and ASSERT_EXCLUSIVE_WRITER_END() could be watchpoint
> > > removing. So I think it's feasible.
> >
> > Keep in mind that the time from ASSERT_EXCLUSIVE_WRITER_BEGIN to END
> > might be on the order of a few nanosec, whereas KCSAN's default
> > watchpoint delay is 10s of microsec (default ~80 for tasks). That
> > means we would still have to set up a delay somewhere, and the few
> > nanosec between BEGIN and END are insignificant and don't buy us
> > anything.
> >
>
> Yeah, the delay doesn't buy us anything given the default watchpoint
> delay, and I agree even with *_{BEGIN/END}, we still need to set up a
> delay somewhere. Adding a delay makes the watchpoint live longer so that
> a problem will more likely happen, but sometimes the delay won't be
> enough, considering another writer like:
>
>         if (per_cpu(var, cpu) == 1)
>                 per_cpu(var, cpu) = 0;
>
> in this user case, percpu variable "var" is used for maintaining some
> state machine, and a CPU set a state with its own variable so that other
> CPUs can consume it. And this another writer cannot be catched by:
>
>         preempt_disable();
>         do_sth();
>         ASSERT_EXCLUSIVE_WRITER(var);
>         raw_cpu_write(var, 1);
>         do_sth_else();
>         preempt_enable();
>

Right, the example makes sense.

That is assuming there are various other expected racy reads that are
fine. If that's not true, ASSERT_EXCLUSIVE_ACCESS should be
considered.
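
E.g., just adapting your example (sketch only):

	preempt_disable();
	do_sth();
	ASSERT_EXCLUSIVE_ACCESS(var);	/* no concurrent readers nor writers */
	raw_cpu_write(var, 1);
	do_sth_else();
	preempt_enable();

which would also catch the read in the other writer's
'if (per_cpu(var, cpu) == 1)' check, even if its conditional write never
happens during the watchpoint's delay window.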

> , no matter how long the delay is set. Another example: let's say the
> do_sth_else() above is actually an operation that queues a callback
> which writes to "var". In one version, do_sth_else() uses call_rcu(),
> which works, because preemption-off is treated as RCU read-side critical
> section, so we are fine. But if someone else changes it to queue_work()
> for some reason, the code is just broken, and KCSAN cannot detect it, no
> matter how long the delay is.
>
> To summarize, a delay is helpful to trigger a problem because it allows
> _other_ CPU/threads to run more code and do more memory accesses,
> however it's not helpful if a particular problem happens due to some
> memory effects of the current/watched CPU/thread. While *_{BEGIN/END}
> can be helpful in this case.

Makes sense.

> > Re feasibility: Right now setting up and removing watchpoints is not
> > exposed, and doing something like this would be an extremely intrusive
> > change. Because of that, without being able to quantify the actual
> > usefulness of this, and having evaluated better options (see below),
> > I'd recommend not pursuing this.
> >
> > > Thoughts?
> >
> > Firstly, what is your objective? From what I gather you want to
> > increase the probability of detecting a race with 'var'.
> >
>
> Right, I want to increase the probablity.
>
> > I agree, and have been thinking about it, but there are other options
> > that haven't been exhausted, before we go and make the interface more
> > complicated.
> >
> > == Interface design ==
> > The interface as it is right now, is intuitive and using it is hard to
> > get wrong. Demanding begin/end markers introduces complexity that will
>
> Yeah, the interface is intuitive, however it's still an extra effort to
> put those assertions, right? Which means it doesn't come for free,
> compared to other detection KCSAN can do, the developers don't need to
> put extra lines of code. Given the extra effort for developers to use
> the detect, I think we should dicuss the design thoroughly.
>
> Besides the semantics of assertions is usually "do some checking right
> now to see if things go wrong", and I don't think it quite matches the
> semantics of an exclusive writer: "in this piece of code, I'm the only
> one who can do the write".
>
> > undoubtedly result in incorrect usage, because as soon as you somehow
> > forget to end the region, you'll get tons of false positives. This may
> > be due to control-flow that was missed etc. We had a similar problem
> > with seqlocks, and getting them to work correctly with KCSAN was
> > extremely difficult, because clear begin and end markers weren't
> > always given. I imagine introducing an interface like this will
> > ultimately result in similar problems, as much as we'd like to believe
> > this won't ever happen.
> >
>
> Well, if we use *_{BEGIN,END} approach, one solution is combining them
> with sections introducing primitives (such as preemp_disable() and
> preempt_enable()), for example, we can add
>
>         #define preempt_disable_for(var)                                \
>         do {                                                            \
>                 preempt_disable();                                      \
>                 ASSERT_EXCLUSIVE_WRITER_BEGIN(var);                     \
>         }
>
>         #define preempt_enable_for(var)                                 \
>         do {                                                            \
>                 ASSERT_EXCLUSIVE_WRITER_END(var);                       \
>                 preempt_enable();                                       \
>         }
>
>         (similar for spin lock)
>
>         #define spin_lock_for(lock, var)                                \
>         do {                                                            \
>                 spin_lock(lock);                                        \
>                 ASSERT_EXCLUSIVE_WRITER_BEGIN(var);                     \
>         }
>
>         #define spin_unlock_for(lock, var)                              \
>         do {                                                            \
>                 ASSERT_EXCLUSIVE_WRITER_END(var);                       \
>                 spin_unlock(lock);                                      \
>         }
>
> I admit that I haven't thought this thoroughly, but I think this works,
> and besides primitives like above can help the reader to understand the
> questions like: what this lock/preemption-off critical sections are
> protecting?

I can't say much yet about introducing even more macros. I'd say we
need at least a dozen use-cases or more, and to understand them; otherwise
we may end up with the wrong API that we can never take back.

> Thoughts?

Makes sense for the cases you described.

Changing KCSAN to do this is a major change. On the surface, it seems like
a refactor and exporting some existing functionality, but there are
various new corner cases, because now 2 accesses don't really have to
be concurrent anymore to detect a race (and simple properties, such as the
fact that a thread can't race with itself, need to be taken care of). The existing
ASSERT_EXCLUSIVE macros were able to leverage existing functionality
mostly as-is. So, to motivate something like this, we need at least a
dozen or so good use-cases, where careful placement of an existing
ASSERT_EXCLUSIVE would not catch what you describe.

Thanks,
-- Marco

> Regards,
> Boqun
>
> > == Improving race detection for KCSAN_ACCESS_ASSERT access types ==
> > There are several options:
> >
> > 1. Always set up a watchpoint for assert-type accesses, and ignore
> > KCSAN_SKIP_WATCH/kcsan_skip counter (see 'should_watch()'). One
> > problem with this is that it would seriously impact overall
> > performance as soon as we get a few ASSERT_EXCLUSIVE_*() in a hot path
> > somewhere. A compromise might be simply being more aggressive with
> > setting up watchpoints on assert-type accesses.
> >
> > 2. Let's say in the above example (without BEGIN/END) the total
> > duration (via udelay) of watchpoints for 'var' being set up is 4*D.
> > Why not just increase the watchpoint delay for assert-type accesses to
> > 4*D? Then, just having one ASSERT_EXCLUSIVE_WRITER(var) somewhere in
> > the region would have the same probability of catching a race.
> > (Assuming that the region's remaining execution time is on the order
> > of nanosecs.)
> >
> > I have some limited evidence that (1) is going to help, but not (2).
> > This is based on experiments trying to reproduce racy use-after-free
> > bugs that KASAN found, but with KCSAN. The problem is that it does
> > slow-down overall system performance if in a hot path like an
> > allocator. Which led me to a 3rd option.
> >
> > 3. Do option (1) but do the opposite of (2), i.e. always set up a
> > watchpoint on assert-type accesses, but *reduce* the watchpoint delay.
> >
> > I haven't yet sent a patch for any one of 1-3 because I'm hesitant
> > until we can actually show one of them would always be useful and
> > improve things. For now, the best thing is to dynamically adjust
> > udelay_{task,interrupt} and skip_watch either via Kconfig options or
> > /sys/modules/kcsan/parameters/ and not add more complexity without
> > good justification. A good stress test will also go a long way.
> >
> > There are some more (probably bad) ideas I have, but the above are the
> > best options for now.
> >
> > So, anything that somehow increases the total time that a watchpoint
> > is set up will increase the probability of detecting a race. However,
> > we're also trying to balance overall system performance, as poor
> > performance could equally affect race detection negatively (fewer
> > instructions executed, etc.). Right now any one of 1-3 might sound
> > like a decent idea, but I don't know what it will look like once we
> > have dozens of ASSERT_EXCLUSIVE_*() in places, especially if a few of
> > them are in hot paths.
> >
> > Thanks,
> > -- Marco
> >
> >
> >
> >
> >
> >
> > > Regards,
> > > Boqun
> > >
> > > > + * For example, if a per-CPU variable is only meant to be written by a single
> > > > + * CPU, but may be read from other CPUs; in this case, reads and writes must be
> > > > + * marked properly, however, if an off-CPU WRITE_ONCE() races with the owning
> > > > + * CPU's WRITE_ONCE(), would not constitute a data race but could be a harmful
> > > > + * race condition. Using this macro allows specifying this property in the code
> > > > + * and catch such bugs.
> > > > + *
> > > > + * @var variable to assert on
> > > > + */
> > > > +#define ASSERT_EXCLUSIVE_WRITER(var)                                           \
> > > > +     __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT)
> > > > +
> > > > +/**
> > > > + * ASSERT_EXCLUSIVE_ACCESS - assert no other threads are accessing @var
> > > > + *
> > > > + * Assert that no other thread is accessing @var (no readers nor writers). This
> > > > + * assertion can be used to specify properties of concurrent code, where
> > > > + * violation cannot be detected as a normal data race.
> > > > + *
> > > > + * For example, in a reference-counting algorithm where exclusive access is
> > > > + * expected after the refcount reaches 0. We can check that this property
> > > > + * actually holds as follows:
> > > > + *
> > > > + *   if (refcount_dec_and_test(&obj->refcnt)) {
> > > > + *           ASSERT_EXCLUSIVE_ACCESS(*obj);
> > > > + *           safely_dispose_of(obj);
> > > > + *   }
> > > > + *
> > > > + * @var variable to assert on
> > > > + */
> > > > +#define ASSERT_EXCLUSIVE_ACCESS(var)                                           \
> > > > +     __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT)
> > > > +
> > > >  #endif /* _LINUX_KCSAN_CHECKS_H */
> > > > --
> > > > 2.9.5
> > > >

^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [PATCH kcsan 27/32] kcsan: Add option to allow watcher interruptions
  2020-03-16 16:22             ` Marco Elver
@ 2020-03-17 17:13               ` Paul E. McKenney
  2020-03-17 17:44                 ` Marco Elver
  0 siblings, 1 reply; 50+ messages in thread
From: Paul E. McKenney @ 2020-03-17 17:13 UTC (permalink / raw)
  To: Marco Elver
  Cc: LKML, kasan-dev, kernel-team, Ingo Molnar, Andrey Konovalov,
	Alexander Potapenko, Dmitry Vyukov, Qian Cai, Boqun Feng

On Mon, Mar 16, 2020 at 05:22:34PM +0100, Marco Elver wrote:
> On Mon, 16 Mar 2020 at 16:45, Paul E. McKenney <paulmck@kernel.org> wrote:
> >
> > On Mon, Mar 16, 2020 at 02:56:38PM +0100, Marco Elver wrote:
> > > On Fri, 13 Mar 2020 at 16:28, Marco Elver <elver@google.com> wrote:
> > > >
> > > > On Thu, 12 Mar 2020 at 19:04, Paul E. McKenney <paulmck@kernel.org> wrote:
> > > > >
> > > > > On Thu, Mar 12, 2020 at 11:03:28AM -0700, Paul E. McKenney wrote:
> > > > > > On Mon, Mar 09, 2020 at 12:04:15PM -0700, paulmck@kernel.org wrote:
> > > > > > > From: Marco Elver <elver@google.com>
> > > > > > >
> > > > > > > Add option to allow interrupts while a watchpoint is set up. This can be
> > > > > > > enabled either via CONFIG_KCSAN_INTERRUPT_WATCHER or via the boot
> > > > > > > parameter 'kcsan.interrupt_watcher=1'.
> > > > > > >
> > > > > > > Note that, currently not all safe per-CPU access primitives and patterns
> > > > > > > are accounted for, which could result in false positives. For example,
> > > > > > > asm-generic/percpu.h uses plain operations, which by default are
> > > > > > > instrumented. On interrupts and subsequent accesses to the same
> > > > > > > variable, KCSAN would currently report a data race with this option.
> > > > > > >
> > > > > > > Therefore, this option should currently remain disabled by default, but
> > > > > > > may be enabled for specific test scenarios.
> > > > > > >
> > > > > > > To avoid new warnings, changes all uses of smp_processor_id() to use the
> > > > > > > raw version (as already done in kcsan_found_watchpoint()). The exact SMP
> > > > > > > processor id is for informational purposes in the report, and
> > > > > > > correctness is not affected.
> > > > > > >
> > > > > > > Signed-off-by: Marco Elver <elver@google.com>
> > > > > > > Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
> > > > > >
> > > > > > And I get silent hangs that bisect to this patch when running the
> > > > > > following rcutorture command, run in the kernel source tree on a
> > > > > > 12-hardware-thread laptop:
> > > > > >
> > > > > > bash tools/testing/selftests/rcutorture/bin/kvm.sh --cpus 12 --duration 10 --kconfig "CONFIG_DEBUG_INFO=y CONFIG_KCSAN=y CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=n CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY=n CONFIG_KCSAN_REPORT_ONCE_IN_MS=100000 CONFIG_KCSAN_VERBOSE=y CONFIG_KCSAN_INTERRUPT_WATCHER=y" --configs TREE03
> > > > > >
> > > > > > It works fine on some (but not all) of the other rcutorture test
> > > > > > scenarios.  It fails on TREE01, TREE02, TREE03, TREE09.  The common thread
> > > > > > is that these are the TREE scenarios are all PREEMPT=y.  So are RUDE01,
> > > > > > SRCU-P, TASKS01, and TASKS03, but these scenarios are not hammering
> > > > > > on Tree RCU, and thus have far less interrupt activity and the like.
> > > > > > Given that it is an interrupt-related feature being added by this commit,
> > > > > > this seems like expected (mis)behavior.
> > > > > >
> > > > > > Can you reproduce this?  If not, are there any diagnostics I can add to
> > > > > > my testing?  Or a diagnostic patch I could apply?
> > > >
> > > > I think I can reproduce it.  Let me debug some more, so far I haven't
> > > > found anything yet.
> > > >
> > > > What I do know is that it's related to reporting. Turning kcsan_report
> > > > into a noop makes the test run to completion.
> > > >
> > > > > I should hasten to add that this feature was quite helpful in recent work!
> > > >
> > > > Good to know. :-)  We can probably keep this patch, since the default
> > > > config doesn't turn this on. But I will try to see what's up with the
> > > > hangs, and hopefully find a fix.
> > >
> > > So this one turned out to be quite interesting. We can get deadlocks
> > > if we can set up multiple watchpoints per task in case it's
> > > interrupted and the interrupt sets up another watchpoint, and there
> > > are many concurrent races happening; because the other_info struct in
> > > report.c may never be released if an interrupt blocks the consumer due
> > > to waiting for other_info to become released.
> >
> > Been there, done that!  ;-)
> >
> > > Give me another day or 2 to come up with a decent fix.
> >
> > My thought is to send a pull request for the commits up to but not
> > including this patch, allowing ample development and testing time for
> > the fix.  My concern with sending this, even with a fix, is that any
> > further bugs might cast a shadow on the whole series, further slowing
> > acceptance into mainline.
> >
> > Fair enough?
> 
> That's fine. I think the features changes can stay on -rcu/kcsan-dev
> for now, but the documentation updates don't depend on them.
> If it'd be useful, the updated documentation could be moved before
> this patch to -rcu/kcsan, so we'd have
> 
>  kcsan: Add current->state to implicitly atomic accesses
>  kcsan: Add option for verbose reporting
>  kcsan: Add option to allow watcher interruptions
> -- cut --
>  kcsan: Update API documentation in kcsan-checks.h
>  kcsan: Update Documentation/dev-tools/kcsan.rst
>  kcsan: Fix a typo in a comment
> .. rest of series ..
> 
> Although I'm fine with either.

Given my churn with a recent merge window, I am more reluctant than
I might otherwise be to do that sort of rearrangement.  Sorry to be
so cowardly!

							Thanx, Paul

^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [PATCH kcsan 27/32] kcsan: Add option to allow watcher interruptions
  2020-03-17 17:13               ` Paul E. McKenney
@ 2020-03-17 17:44                 ` Marco Elver
  0 siblings, 0 replies; 50+ messages in thread
From: Marco Elver @ 2020-03-17 17:44 UTC (permalink / raw)
  To: Paul E. McKenney
  Cc: LKML, kasan-dev, kernel-team, Ingo Molnar, Andrey Konovalov,
	Alexander Potapenko, Dmitry Vyukov, Qian Cai, Boqun Feng

On Tue, 17 Mar 2020 at 18:13, Paul E. McKenney <paulmck@kernel.org> wrote:
>
> On Mon, Mar 16, 2020 at 05:22:34PM +0100, Marco Elver wrote:
> > On Mon, 16 Mar 2020 at 16:45, Paul E. McKenney <paulmck@kernel.org> wrote:
> > >
> > > On Mon, Mar 16, 2020 at 02:56:38PM +0100, Marco Elver wrote:
> > > > On Fri, 13 Mar 2020 at 16:28, Marco Elver <elver@google.com> wrote:
> > > > >
> > > > > On Thu, 12 Mar 2020 at 19:04, Paul E. McKenney <paulmck@kernel.org> wrote:
> > > > > >
> > > > > > On Thu, Mar 12, 2020 at 11:03:28AM -0700, Paul E. McKenney wrote:
> > > > > > > On Mon, Mar 09, 2020 at 12:04:15PM -0700, paulmck@kernel.org wrote:
> > > > > > > > From: Marco Elver <elver@google.com>
> > > > > > > >
> > > > > > > > Add option to allow interrupts while a watchpoint is set up. This can be
> > > > > > > > enabled either via CONFIG_KCSAN_INTERRUPT_WATCHER or via the boot
> > > > > > > > parameter 'kcsan.interrupt_watcher=1'.
> > > > > > > >
> > > > > > > > Note that, currently not all safe per-CPU access primitives and patterns
> > > > > > > > are accounted for, which could result in false positives. For example,
> > > > > > > > asm-generic/percpu.h uses plain operations, which by default are
> > > > > > > > instrumented. On interrupts and subsequent accesses to the same
> > > > > > > > variable, KCSAN would currently report a data race with this option.
> > > > > > > >
> > > > > > > > Therefore, this option should currently remain disabled by default, but
> > > > > > > > may be enabled for specific test scenarios.
> > > > > > > >
> > > > > > > > To avoid new warnings, change all uses of smp_processor_id() to use the
> > > > > > > > raw version (as already done in kcsan_found_watchpoint()). The exact SMP
> > > > > > > > processor id is for informational purposes in the report, and
> > > > > > > > correctness is not affected.
> > > > > > > >
> > > > > > > > Signed-off-by: Marco Elver <elver@google.com>
> > > > > > > > Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
> > > > > > >
> > > > > > > And I get silent hangs that bisect to this patch when running the
> > > > > > > following rcutorture command, run in the kernel source tree on a
> > > > > > > 12-hardware-thread laptop:
> > > > > > >
> > > > > > > bash tools/testing/selftests/rcutorture/bin/kvm.sh --cpus 12 --duration 10 --kconfig "CONFIG_DEBUG_INFO=y CONFIG_KCSAN=y CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=n CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY=n CONFIG_KCSAN_REPORT_ONCE_IN_MS=100000 CONFIG_KCSAN_VERBOSE=y CONFIG_KCSAN_INTERRUPT_WATCHER=y" --configs TREE03
> > > > > > >
> > > > > > > It works fine on some (but not all) of the other rcutorture test
> > > > > > > scenarios.  It fails on TREE01, TREE02, TREE03, TREE09.  The common thread
> > > > > > > is that these TREE scenarios are all PREEMPT=y.  So are RUDE01,
> > > > > > > SRCU-P, TASKS01, and TASKS03, but these scenarios are not hammering
> > > > > > > on Tree RCU, and thus have far less interrupt activity and the like.
> > > > > > > Given that it is an interrupt-related feature being added by this commit,
> > > > > > > this seems like expected (mis)behavior.
> > > > > > >
> > > > > > > Can you reproduce this?  If not, are there any diagnostics I can add to
> > > > > > > my testing?  Or a diagnostic patch I could apply?
> > > > >
> > > > > I think I can reproduce it.  Let me debug some more; so far I haven't
> > > > > found anything yet.
> > > > >
> > > > > What I do know is that it's related to reporting. Turning kcsan_report
> > > > > into a noop makes the test run to completion.
> > > > >
> > > > > > I should hasten to add that this feature was quite helpful in recent work!
> > > > >
> > > > > Good to know. :-)  We can probably keep this patch, since the default
> > > > > config doesn't turn this on. But I will try to see what's up with the
> > > > > hangs, and hopefully find a fix.
> > > >
> > > > So this one turned out to be quite interesting. We can get deadlocks
> > > > if multiple watchpoints can be set up per task (when the task is
> > > > interrupted and the interrupt sets up another watchpoint) and many
> > > > concurrent races are happening, because the other_info struct in
> > > > report.c may never be released if an interrupt blocks the consumer
> > > > while waiting for other_info to be released.
> > >
> > > Been there, done that!  ;-)
> > >
> > > > Give me another day or 2 to come up with a decent fix.
> > >
> > > My thought is to send a pull request for the commits up to but not
> > > including this patch, allowing ample development and testing time for
> > > the fix.  My concern with sending this, even with a fix, is that any
> > > further bugs might cast a shadow on the whole series, further slowing
> > > acceptance into mainline.
> > >
> > > Fair enough?
> >
> > That's fine. I think the feature changes can stay on -rcu/kcsan-dev
> > for now, but the documentation updates don't depend on them.
> > If it'd be useful, the updated documentation could be moved before
> > this patch to -rcu/kcsan, so we'd have
> >
> >  kcsan: Add current->state to implicitly atomic accesses
> >  kcsan: Add option for verbose reporting
> >  kcsan: Add option to allow watcher interruptions
> > -- cut --
> >  kcsan: Update API documentation in kcsan-checks.h
> >  kcsan: Update Documentation/dev-tools/kcsan.rst
> >  kcsan: Fix a typo in a comment
> > .. rest of series ..
> >
> > Although I'm fine with either.
>
> Given my churn with a recent merge window, I am more reluctant than
> I might otherwise be to do that sort of rearrangement.  Sorry to be
> so cowardly!

No problem. This should be fine either way.

Thank you!
-- Marco

^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [PATCH kcsan 27/32] kcsan: Add option to allow watcher interruptions
  2020-03-16 13:56         ` Marco Elver
  2020-03-16 15:45           ` Paul E. McKenney
@ 2020-03-18 17:42           ` Marco Elver
  1 sibling, 0 replies; 50+ messages in thread
From: Marco Elver @ 2020-03-18 17:42 UTC (permalink / raw)
  To: Paul E. McKenney
  Cc: LKML, kasan-dev, kernel-team, Ingo Molnar, Andrey Konovalov,
	Alexander Potapenko, Dmitry Vyukov, Qian Cai, Boqun Feng

On Mon, 16 Mar 2020 at 14:56, Marco Elver <elver@google.com> wrote:
>
> On Fri, 13 Mar 2020 at 16:28, Marco Elver <elver@google.com> wrote:
> >
> > On Thu, 12 Mar 2020 at 19:04, Paul E. McKenney <paulmck@kernel.org> wrote:
> > >
> > > On Thu, Mar 12, 2020 at 11:03:28AM -0700, Paul E. McKenney wrote:
> > > > On Mon, Mar 09, 2020 at 12:04:15PM -0700, paulmck@kernel.org wrote:
> > > > > From: Marco Elver <elver@google.com>
> > > > >
> > > > > Add option to allow interrupts while a watchpoint is set up. This can be
> > > > > enabled either via CONFIG_KCSAN_INTERRUPT_WATCHER or via the boot
> > > > > parameter 'kcsan.interrupt_watcher=1'.
> > > > >
> > > > > Note that currently not all safe per-CPU access primitives and patterns
> > > > > are accounted for, which could result in false positives. For example,
> > > > > asm-generic/percpu.h uses plain operations, which by default are
> > > > > instrumented. On interrupts and subsequent accesses to the same
> > > > > variable, KCSAN would currently report a data race with this option.
> > > > >
> > > > > Therefore, this option should currently remain disabled by default, but
> > > > > may be enabled for specific test scenarios.
> > > > >
> > > > > To avoid new warnings, change all uses of smp_processor_id() to use the
> > > > > raw version (as already done in kcsan_found_watchpoint()). The exact SMP
> > > > > processor id is for informational purposes in the report, and
> > > > > correctness is not affected.
> > > > >
> > > > > Signed-off-by: Marco Elver <elver@google.com>
> > > > > Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
> > > >
> > > > And I get silent hangs that bisect to this patch when running the
> > > > following rcutorture command, run in the kernel source tree on a
> > > > 12-hardware-thread laptop:
> > > >
> > > > bash tools/testing/selftests/rcutorture/bin/kvm.sh --cpus 12 --duration 10 --kconfig "CONFIG_DEBUG_INFO=y CONFIG_KCSAN=y CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=n CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY=n CONFIG_KCSAN_REPORT_ONCE_IN_MS=100000 CONFIG_KCSAN_VERBOSE=y CONFIG_KCSAN_INTERRUPT_WATCHER=y" --configs TREE03
> > > >
> > > > It works fine on some (but not all) of the other rcutorture test
> > > > scenarios.  It fails on TREE01, TREE02, TREE03, TREE09.  The common thread
> > > > is that these TREE scenarios are all PREEMPT=y.  So are RUDE01,
> > > > SRCU-P, TASKS01, and TASKS03, but these scenarios are not hammering
> > > > on Tree RCU, and thus have far less interrupt activity and the like.
> > > > Given that it is an interrupt-related feature being added by this commit,
> > > > this seems like expected (mis)behavior.
> > > >
> > > > Can you reproduce this?  If not, are there any diagnostics I can add to
> > > > my testing?  Or a diagnostic patch I could apply?
> >
> > I think I can reproduce it.  Let me debug some more; so far I haven't
> > found anything yet.
> >
> > What I do know is that it's related to reporting. Turning kcsan_report
> > into a noop makes the test run to completion.
> >
> > > I should hasten to add that this feature was quite helpful in recent work!
> >
> > Good to know. :-)  We can probably keep this patch, since the default
> > config doesn't turn this on. But I will try to see what's up with the
> > hangs, and hopefully find a fix.
>
> So this one turned out to be quite interesting. We can get deadlocks
> if multiple watchpoints can be set up per task (when the task is
> interrupted and the interrupt sets up another watchpoint) and many
> concurrent races are happening, because the other_info struct in
> report.c may never be released if an interrupt blocks the consumer
> while waiting for other_info to be released.
> Give me another day or 2 to come up with a decent fix.
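
To illustrate the shape of the problem, a much-simplified sketch
(made-up names, i.e. other_info_in_use and report_race(); not the
actual report.c code or its exact handshake):

  /* Much-simplified sketch, not the actual report.c logic. */
  static bool other_info_in_use;  /* stands in for the real other_info state */

  static void report_race(void)
  {
          /* Wait until the previous report's other_info is released. */
          while (READ_ONCE(other_info_in_use))
                  cpu_relax();
          WRITE_ONCE(other_info_in_use, true);

          /* ... fill in other_info and generate the report ... */

          WRITE_ONCE(other_info_in_use, false);   /* release */
  }

  /*
   * With interruptible watchers, a task can be interrupted while
   * other_info_in_use is set, and the interrupt may detect its own race
   * and also end up in report_race(): it then spins in the loop above
   * waiting for a release that can only happen once the interrupted
   * task resumes, which it never does.
   */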

The patch-series fixing this:
http://lkml.kernel.org/r/20200318173845.220793-1-elver@google.com

Please do confirm it resolves the problems in your test scenarios.

Many thanks,
-- Marco

^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [PATCH kcsan 17/32] kcsan: Introduce ASSERT_EXCLUSIVE_* macros
  2020-03-17 11:12         ` Marco Elver
@ 2020-03-19  3:23           ` Boqun Feng
  2020-03-20 14:49             ` Marco Elver
  0 siblings, 1 reply; 50+ messages in thread
From: Boqun Feng @ 2020-03-19  3:23 UTC (permalink / raw)
  To: Marco Elver
  Cc: Paul E. McKenney, LKML, kasan-dev, kernel-team, Ingo Molnar,
	Andrey Konovalov, Alexander Potapenko, Dmitry Vyukov, Qian Cai

On Tue, Mar 17, 2020 at 12:12:36PM +0100, Marco Elver wrote:
> On Sat, 14 Mar 2020 at 03:22, Boqun Feng <boqun.feng@gmail.com> wrote:
> >
> > On Fri, Mar 13, 2020 at 05:15:32PM +0100, Marco Elver wrote:
> > > On Fri, 13 Mar 2020 at 09:52, Boqun Feng <boqun.feng@gmail.com> wrote:
> > > >
> > > > Hi Marco,
> > > >
> > > > On Mon, Mar 09, 2020 at 12:04:05PM -0700, paulmck@kernel.org wrote:
> > > > > From: Marco Elver <elver@google.com>
> > > > >
> > > > > Introduces ASSERT_EXCLUSIVE_WRITER and ASSERT_EXCLUSIVE_ACCESS, which
> > > > > may be used to assert properties of synchronization logic, where
> > > > > violation cannot be detected as a normal data race.
> > > > >
> > > > > Examples of the reports that may be generated:
> > > > >
> > > > >     ==================================================================
> > > > >     BUG: KCSAN: assert: race in test_thread / test_thread
> > > > >
> > > > >     write to 0xffffffffab3d1540 of 8 bytes by task 466 on cpu 2:
> > > > >      test_thread+0x8d/0x111
> > > > >      debugfs_write.cold+0x32/0x44
> > > > >      ...
> > > > >
> > > > >     assert no writes to 0xffffffffab3d1540 of 8 bytes by task 464 on cpu 0:
> > > > >      test_thread+0xa3/0x111
> > > > >      debugfs_write.cold+0x32/0x44
> > > > >      ...
> > > > >     ==================================================================
> > > > >
> > > > >     ==================================================================
> > > > >     BUG: KCSAN: assert: race in test_thread / test_thread
> > > > >
> > > > >     assert no accesses to 0xffffffffab3d1540 of 8 bytes by task 465 on cpu 1:
> > > > >      test_thread+0xb9/0x111
> > > > >      debugfs_write.cold+0x32/0x44
> > > > >      ...
> > > > >
> > > > >     read to 0xffffffffab3d1540 of 8 bytes by task 464 on cpu 0:
> > > > >      test_thread+0x77/0x111
> > > > >      debugfs_write.cold+0x32/0x44
> > > > >      ...
> > > > >     ==================================================================
> > > > >
> > > > > Signed-off-by: Marco Elver <elver@google.com>
> > > > > Suggested-by: Paul E. McKenney <paulmck@kernel.org>
> > > > > Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
> > > > > ---
> > > > >  include/linux/kcsan-checks.h | 40 ++++++++++++++++++++++++++++++++++++++++
> > > > >  1 file changed, 40 insertions(+)
> > > > >
> > > > > diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h
> > > > > index 5dcadc2..cf69617 100644
> > > > > --- a/include/linux/kcsan-checks.h
> > > > > +++ b/include/linux/kcsan-checks.h
> > > > > @@ -96,4 +96,44 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size,
> > > > >       kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC | KCSAN_ACCESS_WRITE)
> > > > >  #endif
> > > > >
> > > > > +/**
> > > > > + * ASSERT_EXCLUSIVE_WRITER - assert no other threads are writing @var
> > > > > + *
> > > > > + * Assert that there are no other threads writing @var; other readers are
> > > > > + * allowed. This assertion can be used to specify properties of concurrent code,
> > > > > + * where violation cannot be detected as a normal data race.
> > > > > + *
> > > >
> > > > I like the idea that we can assert no other writers, however I think
> > > > assertions like ASSERT_EXCLUSIVE_WRITER() are a little limited. For
> > > > example, if we have the following code:
> > > >
> > > >         preempt_disable();
> > > >         do_sth();
> > > >         raw_cpu_write(var, 1);
> > > >         do_sth_else();
> > > >         preempt_enable();
> > > >
> > > > we can add the assert to detect another potential writer like:
> > > >
> > > >         preempt_disable();
> > > >         do_sth();
> > > >         ASSERT_EXCLUSIVE_WRITER(var);
> > > >         raw_cpu_write(var, 1);
> > > >         do_sth_else();
> > > >         preempt_enable();
> > > >
> > > > , but, if I understand how KCSAN works correctly, it only works if the
> > > > other writer happens when ASSERT_EXCLUSIVE_WRITER(var) is called,
> > > > IOW, it can only detect another writer between do_sth() and
> > > > raw_cpu_write(). But our intent is to prevent other writers for the
> > > > whole preemption-off section. With this assertion introduced, people may
> > > > end up with code like:
> > >
> > > To confirm: KCSAN will detect a race if it sets up a watchpoint on
> > > ASSERT_EXCLUSIVE_WRITER(var), and a concurrent write happens. Note
> > > that the watchpoints aren't always set up, but only periodically
> > > (discussed more below). For every watchpoint, we also inject an
> > > artificial delay. Pseudo-code:
> > >
> > > if watchpoint for access already set up {
> > >   consume watchpoint;
> > > } else if should set up watchpoint {
> > >   setup watchpoint;
> > >   udelay(...);
> > >   check watchpoint consumed;
> > >   release watchpoint;
> > > }
> > >
> >
> > Yes, I get this part.
> >
> > > >         preempt_disable();
> > > >         ASSERT_EXCLUSIVE_WRITER(var);
> > > >         do_sth();
> > > >         ASSERT_EXCLUSIVE_WRITER(var);
> > > >         raw_cpu_write(var, 1);
> > > >         ASSERT_EXCLUSIVE_WRITER(var);
> > > >         do_sth_else();
> > > >         ASSERT_EXCLUSIVE_WRITER(var);
> > > >         preempt_enable();
> > > >
> > > > and that is horrible...
> > >
> > > It is, and I would strongly discourage any such use, because it's not
> > > necessary. See below.
> > >
> > > > So how about making a pair of annotations
> > > > ASSERT_EXCLUSIVE_WRITER_BEGIN() and ASSERT_EXCLUSIVE_WRITER_END(), so
> > > > that we can write code like:
> > > >
> > > >         preempt_disable();
> > > >         ASSERT_EXCLUSIVE_WRITER_BEGIN(var);
> > > >         do_sth();
> > > >         raw_cpu_write(var, 1);
> > > >         do_sth_else();
> > > >         ASSERT_EXCLUSIVE_WRITER_END(var);
> > > >         preempt_enable();
> > > >
> > > > ASSERT_EXCLUSIVE_WRITER_BEGIN() could be a rough version of setting up
> > > > a watchpoint and ASSERT_EXCLUSIVE_WRITER_END() could be removing it. So
> > > > I think it's feasible.
> > >
> > > Keep in mind that the time from ASSERT_EXCLUSIVE_WRITER_BEGIN to END
> > > might be on the order of a few nanosec, whereas KCSAN's default
> > > watchpoint delay is 10s of microsec (default ~80 for tasks). That
> > > means we would still have to set up a delay somewhere, and the few
> > > nanosec between BEGIN and END are insignificant and don't buy us
> > > anything.
> > >
> >
> > Yeah, the delay doesn't buy us anything given the default watchpoint
> > delay, and I agree even with *_{BEGIN/END}, we still need to set up a
> > delay somewhere. Adding a delay makes the watchpoint live longer so that
> > a problem will more likely happen, but sometimes the delay won't be
> > enough, considering another writer like:
> >
> >         if (per_cpu(var, cpu) == 1)
> >                 per_cpu(var, cpu) = 0;
> >
> > in this use case, the percpu variable "var" is used for maintaining some
> > state machine, and a CPU sets a state with its own variable so that other
> > CPUs can consume it. And this other writer cannot be caught by:
> >
> >         preempt_disable();
> >         do_sth();
> >         ASSERT_EXCLUSIVE_WRITER(var);
> >         raw_cpu_write(var, 1);
> >         do_sth_else();
> >         preempt_enable();
> >
> 
> Right, the example makes sense.
> 
> That is assuming there are various other expected racy reads that are
> fine. If that's not true, ASSERT_EXCLUSIVE_ACCESS should be
> considered.
> 
> > , no matter how long the delay is set. Another example: let's say the
> > do_sth_else() above is actually an operation that queues a callback
> > which writes to "var". In one version, do_sth_else() uses call_rcu(),
> > which works, because preemption-off is treated as an RCU read-side critical
> > section, so we are fine. But if someone else changes it to queue_work()
> > for some reason, the code is just broken, and KCSAN cannot detect it, no
> > matter how long the delay is.
> >
> > To summarize, a delay is helpful to trigger a problem because it allows
> > _other_ CPUs/threads to run more code and do more memory accesses;
> > however, it's not helpful if a particular problem happens due to some
> > memory effects of the current/watched CPU/thread, whereas *_{BEGIN/END}
> > can be helpful in this case.
> 
> Makes sense.
> 
> > > Re feasibility: Right now setting up and removing watchpoints is not
> > > exposed, and doing something like this would be an extremely intrusive
> > > change. Because of that, without being able to quantify the actual
> > > usefulness of this, and having evaluated better options (see below),
> > > I'd recommend not pursuing this.
> > >
> > > > Thoughts?
> > >
> > > Firstly, what is your objective? From what I gather you want to
> > > increase the probability of detecting a race with 'var'.
> > >
> >
> > Right, I want to increase the probability.
> >
> > > I agree, and have been thinking about it, but there are other options
> > > that haven't been exhausted, before we go and make the interface more
> > > complicated.
> > >
> > > == Interface design ==
> > > The interface, as it is right now, is intuitive, and using it is hard to
> > > get wrong. Demanding begin/end markers introduces complexity that will
> >
> > Yeah, the interface is intuitive; however, it's still extra effort to
> > put those assertions in, right? Which means it doesn't come for free,
> > unlike the other detection KCSAN can do, where developers don't need
> > to add extra lines of code. Given the extra effort for developers to
> > use these assertions, I think we should discuss the design thoroughly.
> >
> > Besides, the semantics of an assertion is usually "do some checking
> > right now to see if things go wrong", and I don't think that quite
> > matches the semantics of an exclusive writer: "in this piece of code,
> > I'm the only one who can do the write".
> >
> > > undoubtedly result in incorrect usage, because as soon as you somehow
> > > forget to end the region, you'll get tons of false positives. This may
> > > be due to control-flow that was missed etc. We had a similar problem
> > > with seqlocks, and getting them to work correctly with KCSAN was
> > > extremely difficult, because clear begin and end markers weren't
> > > always given. I imagine introducing an interface like this will
> > > ultimately result in similar problems, as much as we'd like to believe
> > > this won't ever happen.
> > >
> >
> > Well, if we use the *_{BEGIN,END} approach, one solution is combining
> > them with the primitives that introduce such sections (such as
> > preempt_disable() and preempt_enable()); for example, we can add
> >
> >         #define preempt_disable_for(var)                                \
> >         do {                                                            \
> >                 preempt_disable();                                      \
> >                 ASSERT_EXCLUSIVE_WRITER_BEGIN(var);                     \
> >         } while (0)
> >
> >         #define preempt_enable_for(var)                                 \
> >         do {                                                            \
> >                 ASSERT_EXCLUSIVE_WRITER_END(var);                       \
> >                 preempt_enable();                                       \
> >         } while (0)
> >
> >         (similar for spin lock)
> >
> >         #define spin_lock_for(lock, var)                                \
> >         do {                                                            \
> >                 spin_lock(lock);                                        \
> >                 ASSERT_EXCLUSIVE_WRITER_BEGIN(var);                     \
> >         } while (0)
> >
> >         #define spin_unlock_for(lock, var)                              \
> >         do {                                                            \
> >                 ASSERT_EXCLUSIVE_WRITER_END(var);                       \
> >                 spin_unlock(lock);                                      \
> >         } while (0)
> >
> > I admit that I haven't thought this through thoroughly, but I think
> > this works, and besides, primitives like the above can help the reader
> > answer questions like: what are these lock/preemption-off critical
> > sections protecting?
> 
> I can't say anything about introducing even more macros. I'd say we
> need at least a dozen use-cases or more, and to understand them;
> otherwise we may end up with a wrong API that we can never take back.
> 

Agreed, real use-cases are needed to justify introducing those APIs.

> > Thoughts?
> 
> Makes sense for the cases you described.
> 
> Changing KCSAN to do this is a major change. On the surface, it seems
> like a refactor and exporting some existing functionality, but there
> are various new corner cases, because now 2 accesses don't really have
> to be concurrent anymore to detect a race (and simple properties, like
> the fact that a thread can't race with itself, need to be taken care
> of). The existing ASSERT_EXCLUSIVE macros were able to leverage
> existing functionality mostly as-is. So, to motivate something like
> this, we need at least a dozen or so good use-cases, where careful
> placement of an existing ASSERT_EXCLUSIVE would not catch what you
> describe.
> 

Right, I think at this point I don't object to merging this into the
kernel; using ASSERT_EXCLUSIVE_*() does provide more chances for us to
catch bugs. That said, I think it's better if we have some comments
describing the semantics (or the limitations) of the annotations to
avoid misuse (for example, using multiple ASSERT_EXCLUSIVE_WRITER()s for
one variable in a function). But that doesn't necessarily block the
merge of this feature; we can always do that later.

Also, I think it's worthwhile to do some experiments on the
*_{BEGIN,END} interfaces. If you're interested and have cycles to work
on this, please let me know; otherwise, I can have a look at it.

Thanks!

Regards,
Boqun


> Thanks,
> -- Marco
> 
> > Regards,
> > Boqun
> >
[...]

^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [PATCH kcsan 17/32] kcsan: Introduce ASSERT_EXCLUSIVE_* macros
  2020-03-19  3:23           ` Boqun Feng
@ 2020-03-20 14:49             ` Marco Elver
  0 siblings, 0 replies; 50+ messages in thread
From: Marco Elver @ 2020-03-20 14:49 UTC (permalink / raw)
  To: Boqun Feng
  Cc: Paul E. McKenney, LKML, kasan-dev, kernel-team, Ingo Molnar,
	Andrey Konovalov, Alexander Potapenko, Dmitry Vyukov, Qian Cai

On Thu, 19 Mar 2020 at 04:23, Boqun Feng <boqun.feng@gmail.com> wrote:
>
> On Tue, Mar 17, 2020 at 12:12:36PM +0100, Marco Elver wrote:
> > On Sat, 14 Mar 2020 at 03:22, Boqun Feng <boqun.feng@gmail.com> wrote:
> > >
> > > On Fri, Mar 13, 2020 at 05:15:32PM +0100, Marco Elver wrote:
> > > > On Fri, 13 Mar 2020 at 09:52, Boqun Feng <boqun.feng@gmail.com> wrote:
> > > > >
> > > > > Hi Marco,
> > > > >
> > > > > On Mon, Mar 09, 2020 at 12:04:05PM -0700, paulmck@kernel.org wrote:
> > > > > > From: Marco Elver <elver@google.com>
> > > > > >
> > > > > > Introduces ASSERT_EXCLUSIVE_WRITER and ASSERT_EXCLUSIVE_ACCESS, which
> > > > > > may be used to assert properties of synchronization logic, where
> > > > > > violation cannot be detected as a normal data race.
> > > > > >
> > > > > > Examples of the reports that may be generated:
> > > > > >
> > > > > >     ==================================================================
> > > > > >     BUG: KCSAN: assert: race in test_thread / test_thread
> > > > > >
> > > > > >     write to 0xffffffffab3d1540 of 8 bytes by task 466 on cpu 2:
> > > > > >      test_thread+0x8d/0x111
> > > > > >      debugfs_write.cold+0x32/0x44
> > > > > >      ...
> > > > > >
> > > > > >     assert no writes to 0xffffffffab3d1540 of 8 bytes by task 464 on cpu 0:
> > > > > >      test_thread+0xa3/0x111
> > > > > >      debugfs_write.cold+0x32/0x44
> > > > > >      ...
> > > > > >     ==================================================================
> > > > > >
> > > > > >     ==================================================================
> > > > > >     BUG: KCSAN: assert: race in test_thread / test_thread
> > > > > >
> > > > > >     assert no accesses to 0xffffffffab3d1540 of 8 bytes by task 465 on cpu 1:
> > > > > >      test_thread+0xb9/0x111
> > > > > >      debugfs_write.cold+0x32/0x44
> > > > > >      ...
> > > > > >
> > > > > >     read to 0xffffffffab3d1540 of 8 bytes by task 464 on cpu 0:
> > > > > >      test_thread+0x77/0x111
> > > > > >      debugfs_write.cold+0x32/0x44
> > > > > >      ...
> > > > > >     ==================================================================
> > > > > >
> > > > > > Signed-off-by: Marco Elver <elver@google.com>
> > > > > > Suggested-by: Paul E. McKenney <paulmck@kernel.org>
> > > > > > Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
> > > > > > ---
> > > > > >  include/linux/kcsan-checks.h | 40 ++++++++++++++++++++++++++++++++++++++++
> > > > > >  1 file changed, 40 insertions(+)
> > > > > >
> > > > > > diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h
> > > > > > index 5dcadc2..cf69617 100644
> > > > > > --- a/include/linux/kcsan-checks.h
> > > > > > +++ b/include/linux/kcsan-checks.h
> > > > > > @@ -96,4 +96,44 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size,
> > > > > >       kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC | KCSAN_ACCESS_WRITE)
> > > > > >  #endif
> > > > > >
> > > > > > +/**
> > > > > > + * ASSERT_EXCLUSIVE_WRITER - assert no other threads are writing @var
> > > > > > + *
> > > > > > + * Assert that there are no other threads writing @var; other readers are
> > > > > > + * allowed. This assertion can be used to specify properties of concurrent code,
> > > > > > + * where violation cannot be detected as a normal data race.
> > > > > > + *
> > > > >
> > > > > I like the idea that we can assert no other writers, however I think
> > > > > assertions like ASSERT_EXCLUSIVE_WRITER() are a little limited. For
> > > > > example, if we have the following code:
> > > > >
> > > > >         preempt_disable();
> > > > >         do_sth();
> > > > >         raw_cpu_write(var, 1);
> > > > >         do_sth_else();
> > > > >         preempt_enable();
> > > > >
> > > > > we can add the assert to detect another potential writer like:
> > > > >
> > > > >         preempt_disable();
> > > > >         do_sth();
> > > > >         ASSERT_EXCLUSIVE_WRITER(var);
> > > > >         raw_cpu_write(var, 1);
> > > > >         do_sth_else();
> > > > >         preempt_enable();
> > > > >
> > > > > , but, if I understand how KCSAN works correctly, it only works if the
> > > > > other writer happens when ASSERT_EXCLUSIVE_WRITER(var) is called,
> > > > > IOW, it can only detect another writer between do_sth() and
> > > > > raw_cpu_write(). But our intent is to prevent other writers for the
> > > > > whole preemption-off section. With this assertion introduced, people may
> > > > > end up with code like:
> > > >
> > > > To confirm: KCSAN will detect a race if it sets up a watchpoint on
> > > > ASSERT_EXCLUSIVE_WRITER(var), and a concurrent write happens. Note
> > > > that the watchpoints aren't always set up, but only periodically
> > > > (discussed more below). For every watchpoint, we also inject an
> > > > artificial delay. Pseudo-code:
> > > >
> > > > if watchpoint for access already set up {
> > > >   consume watchpoint;
> > > > } else if should set up watchpoint {
> > > >   setup watchpoint;
> > > >   udelay(...);
> > > >   check watchpoint consumed;
> > > >   release watchpoint;
> > > > }
> > > >
> > >
> > > Yes, I get this part.
> > >
> > > > >         preempt_disable();
> > > > >         ASSERT_EXCLUSIVE_WRITER(var);
> > > > >         do_sth();
> > > > >         ASSERT_EXCLUSIVE_WRITER(var);
> > > > >         raw_cpu_write(var, 1);
> > > > >         ASSERT_EXCLUSIVE_WRITER(var);
> > > > >         do_sth_else();
> > > > >         ASSERT_EXCLUSIVE_WRITER(var);
> > > > >         preempt_enable();
> > > > >
> > > > > and that is horrible...
> > > >
> > > > It is, and I would strongly discourage any such use, because it's not
> > > > necessary. See below.
> > > >
> > > > > So how about making a pair of annotations
> > > > > ASSERT_EXCLUSIVE_WRITER_BEGIN() and ASSERT_EXCLUSIVE_WRITER_END(), so
> > > > > that we can write code like:
> > > > >
> > > > >         preempt_disable();
> > > > >         ASSERT_EXCLUSIVE_WRITER_BEGIN(var);
> > > > >         do_sth();
> > > > >         raw_cpu_write(var, 1);
> > > > >         do_sth_else();
> > > > >         ASSERT_EXCLUSIVE_WRITER_END(var);
> > > > >         preempt_enable();
> > > > >
> > > > > ASSERT_EXCLUSIVE_WRITER_BEGIN() could be a rough version of setting up
> > > > > a watchpoint and ASSERT_EXCLUSIVE_WRITER_END() could be removing it. So
> > > > > I think it's feasible.
> > > >
> > > > Keep in mind that the time from ASSERT_EXCLUSIVE_WRITER_BEGIN to END
> > > > might be on the order of a few nanosec, whereas KCSAN's default
> > > > watchpoint delay is 10s of microsec (default ~80 for tasks). That
> > > > means we would still have to set up a delay somewhere, and the few
> > > > nanosec between BEGIN and END are insignificant and don't buy us
> > > > anything.
> > > >
> > >
> > > Yeah, the delay doesn't buy us anything given the default watchpoint
> > > delay, and I agree even with *_{BEGIN/END}, we still need to set up a
> > > delay somewhere. Adding a delay makes the watchpoint live longer so that
> > > a problem will more likely happen, but sometimes the delay won't be
> > > enough, considering another writer like:
> > >
> > >         if (per_cpu(var, cpu) == 1)
> > >                 per_cpu(var, cpu) = 0;
> > >
> > > in this use case, the percpu variable "var" is used for maintaining some
> > > state machine, and a CPU sets a state with its own variable so that other
> > > CPUs can consume it. And this other writer cannot be caught by:
> > >
> > >         preempt_disable();
> > >         do_sth();
> > >         ASSERT_EXCLUSIVE_WRITER(var);
> > >         raw_cpu_write(var, 1);
> > >         do_sth_else();
> > >         preempt_enable();
> > >
> >
> > Right, the example makes sense.
> >
> > That is assuming there are various other expected racy reads that are
> > fine. If that's not true, ASSERT_EXCLUSIVE_ACCESS should be
> > considered.
> >
> > > , no matter how long the delay is set. Another example: let's say the
> > > do_sth_else() above is actually an operation that queues a callback
> > > which writes to "var". In one version, do_sth_else() uses call_rcu(),
> > > which works, because preemption-off is treated as an RCU read-side critical
> > > section, so we are fine. But if someone else changes it to queue_work()
> > > for some reason, the code is just broken, and KCSAN cannot detect it, no
> > > matter how long the delay is.
> > >
> > > To summarize, a delay is helpful to trigger a problem because it allows
> > > _other_ CPUs/threads to run more code and do more memory accesses;
> > > however, it's not helpful if a particular problem happens due to some
> > > memory effects of the current/watched CPU/thread, whereas *_{BEGIN/END}
> > > can be helpful in this case.
> >
> > Makes sense.
> >
> > > > Re feasibility: Right now setting up and removing watchpoints is not
> > > > exposed, and doing something like this would be an extremely intrusive
> > > > change. Because of that, without being able to quantify the actual
> > > > usefulness of this, and having evaluated better options (see below),
> > > > I'd recommend not pursuing this.
> > > >
> > > > > Thoughts?
> > > >
> > > > Firstly, what is your objective? From what I gather you want to
> > > > increase the probability of detecting a race with 'var'.
> > > >
> > >
> > > Right, I want to increase the probability.
> > >
> > > > I agree, and have been thinking about it, but there are other options
> > > > that haven't been exhausted, before we go and make the interface more
> > > > complicated.
> > > >
> > > > == Interface design ==
> > > > The interface, as it is right now, is intuitive, and using it is hard to
> > > > get wrong. Demanding begin/end markers introduces complexity that will
> > >
> > > Yeah, the interface is intuitive; however, it's still extra effort to
> > > put those assertions in, right? Which means it doesn't come for free,
> > > unlike the other detection KCSAN can do, where developers don't need
> > > to add extra lines of code. Given the extra effort for developers to
> > > use these assertions, I think we should discuss the design thoroughly.
> > >
> > > Besides, the semantics of an assertion is usually "do some checking
> > > right now to see if things go wrong", and I don't think that quite
> > > matches the semantics of an exclusive writer: "in this piece of code,
> > > I'm the only one who can do the write".
> > >
> > > > undoubtedly result in incorrect usage, because as soon as you somehow
> > > > forget to end the region, you'll get tons of false positives. This may
> > > > be due to control-flow that was missed etc. We had a similar problem
> > > > with seqlocks, and getting them to work correctly with KCSAN was
> > > > extremely difficult, because clear begin and end markers weren't
> > > > always given. I imagine introducing an interface like this will
> > > > ultimately result in similar problems, as much as we'd like to believe
> > > > this won't ever happen.
> > > >
> > >
> > > Well, if we use the *_{BEGIN,END} approach, one solution is combining
> > > them with the primitives that introduce such sections (such as
> > > preempt_disable() and preempt_enable()); for example, we can add
> > >
> > >         #define preempt_disable_for(var)                                \
> > >         do {                                                            \
> > >                 preempt_disable();                                      \
> > >                 ASSERT_EXCLUSIVE_WRITER_BEGIN(var);                     \
> > >         } while (0)
> > >
> > >         #define preempt_enable_for(var)                                 \
> > >         do {                                                            \
> > >                 ASSERT_EXCLUSIVE_WRITER_END(var);                       \
> > >                 preempt_enable();                                       \
> > >         } while (0)
> > >
> > >         (similar for spin lock)
> > >
> > >         #define spin_lock_for(lock, var)                                \
> > >         do {                                                            \
> > >                 spin_lock(lock);                                        \
> > >                 ASSERT_EXCLUSIVE_WRITER_BEGIN(var);                     \
> > >         } while (0)
> > >
> > >         #define spin_unlock_for(lock, var)                              \
> > >         do {                                                            \
> > >                 ASSERT_EXCLUSIVE_WRITER_END(var);                       \
> > >                 spin_unlock(lock);                                      \
> > >         } while (0)
> > >
> > > I admit that I haven't thought this through thoroughly, but I think
> > > this works, and besides, primitives like the above can help the reader
> > > answer questions like: what are these lock/preemption-off critical
> > > sections protecting?
> >
> > I can't say anything about introducing even more macros. I'd say we
> > need at least a dozen use-cases or more, and to understand them;
> > otherwise we may end up with a wrong API that we can never take back.
> >
>
> Agreed, real use-cases are needed to justify introducing those APIs.
>
> > > Thoughts?
> >
> > Makes sense for the cases you described.
> >
> > Changing KCSAN to do this is a major change. On the surface, it seems
> > like a refactor and exporting some existing functionality, but there
> > are various new corner cases, because now 2 accesses don't really have
> > to be concurrent anymore to detect a race (and simple properties, like
> > the fact that a thread can't race with itself, need to be taken care
> > of). The existing ASSERT_EXCLUSIVE macros were able to leverage
> > existing functionality mostly as-is. So, to motivate something like
> > this, we need at least a dozen or so good use-cases, where careful
> > placement of an existing ASSERT_EXCLUSIVE would not catch what you
> > describe.
> >
>
> Right, I think at this point I don't object to merging this into the
> kernel; using ASSERT_EXCLUSIVE_*() does provide more chances for us to
> catch bugs. That said, I think it's better if we have some comments
> describing the semantics (or the limitations) of the annotations to
> avoid misuse (for example, using multiple ASSERT_EXCLUSIVE_WRITER()s for
> one variable in a function). But that doesn't necessarily block the
> merge of this feature; we can always do that later.
>
> Also, I think it's worthwhile to do some experiments on the
> *_{BEGIN,END} interfaces. If you're interested and have cycles to work
> on this, please let me know; otherwise, I can have a look at it.

Because I really think the BEGIN/END interface is error-prone, I was
curious if I could make this work:

         preempt_disable();
         {
                  ASSERT_EXCLUSIVE_WRITER_SCOPED(var);
                  do_sth();
                  raw_cpu_write(var, 1);
                  do_sth_else();
         }
         preempt_enable();

Since all compilers that support TSAN instrumentation support
attribute "cleanup", this works for KCSAN. Half-baked patch:
  https://github.com/melver/linux/commit/d5a510a80b9755909d8b2ccc7bcfdeac99fc0080
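
Very roughly, the mechanism looks something like this (a minimal
sketch with made-up helper names, i.e. struct scoped_assert and
begin/end_scoped_assert(); the half-baked patch above differs in the
details):

  #include <stddef.h>

  /* Made-up scoped-assert state; the real patch hooks into KCSAN itself. */
  struct scoped_assert { const volatile void *ptr; size_t size; };

  static inline struct scoped_assert
  begin_scoped_assert(const volatile void *ptr, size_t size)
  {
          /* Here the real thing would tell KCSAN to keep checking ptr. */
          return (struct scoped_assert){ .ptr = ptr, .size = size };
  }

  static inline void end_scoped_assert(struct scoped_assert *sa)
  {
          /* Compiler-inserted call when the variable leaves scope; here
           * the real thing would stop the check for sa->ptr. */
          (void)sa;
  }

  #define __SCOPED_CAT(a, b)      a##b
  #define SCOPED_CAT(a, b)        __SCOPED_CAT(a, b)

  /* Declaring the variable begins the check; attribute "cleanup" runs
   * end_scoped_assert() automatically when the enclosing scope is left,
   * so there is no END marker to forget. */
  #define ASSERT_EXCLUSIVE_WRITER_SCOPED(var)                              \
          struct scoped_assert SCOPED_CAT(__scoped_assert_, __COUNTER__)   \
                  __attribute__((cleanup(end_scoped_assert), unused)) =    \
                  begin_scoped_assert(&(var), sizeof(var))

That is, the checked region is exactly the lexical scope of the
declaration, which is what the example above relies on.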

Since it's not too intrusive, I'd be fine including this. But I think
you have to come up with the use-cases. :-)

I can send the patch once I'm done with it.

Thanks,
-- Marco


> Thanks!
>
> Regards,
> Boqun
>
>
> > Thanks,
> > -- Marco
> >
> > > Regards,
> > > Boqun
> > >
> [...]

^ permalink raw reply	[flat|nested] 50+ messages in thread

end of thread, other threads:[~2020-03-20 14:49 UTC | newest]

Thread overview: 50+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-03-09 19:03 [PATCH kcsan 0/32] KCSAN commits for v5.7 Paul E. McKenney
2020-03-09 19:03 ` [PATCH kcsan 01/32] kcsan: Prefer __always_inline for fast-path paulmck
2020-03-09 19:03 ` [PATCH kcsan 02/32] kcsan: Show full access type in report paulmck
2020-03-09 19:03 ` [PATCH kcsan 03/32] kcsan: Rate-limit reporting per data races paulmck
2020-03-09 19:03 ` [PATCH kcsan 04/32] kcsan: Make KCSAN compatible with lockdep paulmck
2020-03-09 19:03 ` [PATCH kcsan 05/32] kcsan: Address missing case with KCSAN_REPORT_VALUE_CHANGE_ONLY paulmck
2020-03-09 19:03 ` [PATCH kcsan 06/32] include/linux: Add instrumented.h infrastructure paulmck
2020-03-09 19:03 ` [PATCH kcsan 07/32] asm-generic, atomic-instrumented: Use generic instrumented.h paulmck
2020-03-09 19:03 ` [PATCH kcsan 08/32] asm-generic, kcsan: Add KCSAN instrumentation for bitops paulmck
2020-03-09 19:03 ` [PATCH kcsan 09/32] iov_iter: Use generic instrumented.h paulmck
2020-03-09 19:03 ` [PATCH kcsan 10/32] copy_to_user, copy_from_user: " paulmck
2020-03-09 19:03 ` [PATCH kcsan 11/32] kcsan: Add docbook header for data_race() paulmck
2020-03-09 19:04 ` [PATCH kcsan 12/32] kcsan: Add option to assume plain aligned writes up to word size are atomic paulmck
2020-03-09 19:04 ` [PATCH kcsan 13/32] kcsan: Clarify Kconfig option KCSAN_IGNORE_ATOMICS paulmck
2020-03-09 19:04 ` [PATCH kcsan 14/32] kcsan: Cleanup of main KCSAN Kconfig option paulmck
2020-03-09 19:04 ` [PATCH kcsan 15/32] kcsan: Fix 0-sized checks paulmck
2020-03-09 19:04 ` [PATCH kcsan 16/32] kcsan: Introduce KCSAN_ACCESS_ASSERT access type paulmck
2020-03-09 19:04 ` [PATCH kcsan 17/32] kcsan: Introduce ASSERT_EXCLUSIVE_* macros paulmck
2020-03-13  8:52   ` Boqun Feng
2020-03-13 16:15     ` Marco Elver
2020-03-14  2:22       ` Boqun Feng
2020-03-17 11:12         ` Marco Elver
2020-03-19  3:23           ` Boqun Feng
2020-03-20 14:49             ` Marco Elver
2020-03-09 19:04 ` [PATCH kcsan 18/32] kcsan: Add test to generate conflicts via debugfs paulmck
2020-03-09 19:04 ` [PATCH kcsan 19/32] kcsan: Expose core configuration parameters as module params paulmck
2020-03-09 19:04 ` [PATCH kcsan 20/32] kcsan: Fix misreporting if concurrent races on same address paulmck
2020-03-09 19:04 ` [PATCH kcsan 21/32] kcsan: Move interfaces that affects checks to kcsan-checks.h paulmck
2020-03-09 19:04 ` [PATCH kcsan 22/32] compiler.h, seqlock.h: Remove unnecessary kcsan.h includes paulmck
2020-03-09 19:04 ` [PATCH kcsan 23/32] kcsan: Introduce kcsan_value_change type paulmck
2020-03-09 19:04 ` [PATCH kcsan 24/32] kcsan: Add kcsan_set_access_mask() support paulmck
2020-03-09 19:04 ` [PATCH kcsan 25/32] kcsan: Introduce ASSERT_EXCLUSIVE_BITS(var, mask) paulmck
2020-03-09 19:04 ` [PATCH kcsan 26/32] kcsan, trace: Make KCSAN compatible with tracing paulmck
2020-03-09 19:57   ` Steven Rostedt
2020-03-09 20:27     ` Paul E. McKenney
2020-03-09 19:04 ` [PATCH kcsan 27/32] kcsan: Add option to allow watcher interruptions paulmck
2020-03-12 18:03   ` Paul E. McKenney
2020-03-12 18:04     ` Paul E. McKenney
2020-03-13 15:28       ` Marco Elver
2020-03-16 13:56         ` Marco Elver
2020-03-16 15:45           ` Paul E. McKenney
2020-03-16 16:22             ` Marco Elver
2020-03-17 17:13               ` Paul E. McKenney
2020-03-17 17:44                 ` Marco Elver
2020-03-18 17:42           ` Marco Elver
2020-03-09 19:04 ` [PATCH kcsan 28/32] kcsan: Add option for verbose reporting paulmck
2020-03-09 19:04 ` [PATCH kcsan 29/32] kcsan: Add current->state to implicitly atomic accesses paulmck
2020-03-09 19:04 ` [PATCH kcsan 30/32] kcsan: Fix a typo in a comment paulmck
2020-03-09 19:04 ` [PATCH kcsan 31/32] kcsan: Update Documentation/dev-tools/kcsan.rst paulmck
2020-03-09 19:04 ` [PATCH kcsan 32/32] kcsan: Update API documentation in kcsan-checks.h paulmck
