linux-kernel.vger.kernel.org archive mirror
* [PATCH 1/5] kcsan: Move interfaces that affect checks to kcsan-checks.h
@ 2020-02-10 18:43 Marco Elver
  2020-02-10 18:43 ` [PATCH 2/5] compiler.h, seqlock.h: Remove unnecessary kcsan.h includes Marco Elver
                   ` (3 more replies)
  0 siblings, 4 replies; 7+ messages in thread
From: Marco Elver @ 2020-02-10 18:43 UTC (permalink / raw)
  To: elver; +Cc: paulmck, andreyknvl, glider, dvyukov, kasan-dev, linux-kernel

This moves the functions that change state affecting the behaviour of
kcsan_check_access() to kcsan-checks.h. Since these are likely to be
used together with kcsan_check_access(), it makes more sense to have
them in kcsan-checks.h, so that users can avoid including all of
'include/linux/kcsan.h'.

No functional change intended.
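
As a rough illustration (hypothetical caller, not part of this patch), a
file that marks an access as expected-to-race can now get by with
kcsan-checks.h alone:

	#include <linux/kcsan-checks.h>

	static inline unsigned long read_flags(const unsigned long *flags)
	{
		/* Treat the next access in this context as atomic. */
		kcsan_atomic_next(1);
		/* Explicit check; resolvable with kcsan-checks.h alone. */
		__kcsan_check_read(flags, sizeof(*flags));
		return *flags;
	}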

Signed-off-by: Marco Elver <elver@google.com>
---
 include/linux/kcsan-checks.h | 48 ++++++++++++++++++++++++++++++++++--
 include/linux/kcsan.h        | 41 ------------------------------
 2 files changed, 46 insertions(+), 43 deletions(-)

diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h
index cf6961794e9a1..8675411c8dbcd 100644
--- a/include/linux/kcsan-checks.h
+++ b/include/linux/kcsan-checks.h
@@ -32,10 +32,54 @@
  */
 void __kcsan_check_access(const volatile void *ptr, size_t size, int type);
 
-#else
+/**
+ * kcsan_nestable_atomic_begin - begin nestable atomic region
+ *
+ * Accesses within the atomic region may appear to race with other accesses but
+ * should be considered atomic.
+ */
+void kcsan_nestable_atomic_begin(void);
+
+/**
+ * kcsan_nestable_atomic_end - end nestable atomic region
+ */
+void kcsan_nestable_atomic_end(void);
+
+/**
+ * kcsan_flat_atomic_begin - begin flat atomic region
+ *
+ * Accesses within the atomic region may appear to race with other accesses but
+ * should be considered atomic.
+ */
+void kcsan_flat_atomic_begin(void);
+
+/**
+ * kcsan_flat_atomic_end - end flat atomic region
+ */
+void kcsan_flat_atomic_end(void);
+
+/**
+ * kcsan_atomic_next - consider following accesses as atomic
+ *
+ * Force treating the next n memory accesses for the current context as atomic
+ * operations.
+ *
+ * @n number of following memory accesses to treat as atomic.
+ */
+void kcsan_atomic_next(int n);
+
+#else /* CONFIG_KCSAN */
+
 static inline void __kcsan_check_access(const volatile void *ptr, size_t size,
 					int type) { }
-#endif
+
+static inline void kcsan_nestable_atomic_begin(void)	{ }
+static inline void kcsan_nestable_atomic_end(void)	{ }
+static inline void kcsan_flat_atomic_begin(void)	{ }
+static inline void kcsan_flat_atomic_end(void)		{ }
+static inline void kcsan_atomic_next(int n)		{ }
+
+#endif /* CONFIG_KCSAN */
 
 /*
  * kcsan_*: Only calls into the runtime when the particular compilation unit has
diff --git a/include/linux/kcsan.h b/include/linux/kcsan.h
index 1019e3a2c6897..7a614ca558f65 100644
--- a/include/linux/kcsan.h
+++ b/include/linux/kcsan.h
@@ -56,52 +56,11 @@ void kcsan_disable_current(void);
  */
 void kcsan_enable_current(void);
 
-/**
- * kcsan_nestable_atomic_begin - begin nestable atomic region
- *
- * Accesses within the atomic region may appear to race with other accesses but
- * should be considered atomic.
- */
-void kcsan_nestable_atomic_begin(void);
-
-/**
- * kcsan_nestable_atomic_end - end nestable atomic region
- */
-void kcsan_nestable_atomic_end(void);
-
-/**
- * kcsan_flat_atomic_begin - begin flat atomic region
- *
- * Accesses within the atomic region may appear to race with other accesses but
- * should be considered atomic.
- */
-void kcsan_flat_atomic_begin(void);
-
-/**
- * kcsan_flat_atomic_end - end flat atomic region
- */
-void kcsan_flat_atomic_end(void);
-
-/**
- * kcsan_atomic_next - consider following accesses as atomic
- *
- * Force treating the next n memory accesses for the current context as atomic
- * operations.
- *
- * @n number of following memory accesses to treat as atomic.
- */
-void kcsan_atomic_next(int n);
-
 #else /* CONFIG_KCSAN */
 
 static inline void kcsan_init(void)			{ }
 static inline void kcsan_disable_current(void)		{ }
 static inline void kcsan_enable_current(void)		{ }
-static inline void kcsan_nestable_atomic_begin(void)	{ }
-static inline void kcsan_nestable_atomic_end(void)	{ }
-static inline void kcsan_flat_atomic_begin(void)	{ }
-static inline void kcsan_flat_atomic_end(void)		{ }
-static inline void kcsan_atomic_next(int n)		{ }
 
 #endif /* CONFIG_KCSAN */
 
-- 
2.25.0.341.g760bfbb309-goog



* [PATCH 2/5] compiler.h, seqlock.h: Remove unnecessary kcsan.h includes
  2020-02-10 18:43 [PATCH 1/5] kcsan: Move interfaces that affect checks to kcsan-checks.h Marco Elver
@ 2020-02-10 18:43 ` Marco Elver
  2020-02-10 18:43 ` [PATCH 3/5] kcsan: Introduce kcsan_value_change type Marco Elver
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 7+ messages in thread
From: Marco Elver @ 2020-02-10 18:43 UTC (permalink / raw)
  To: elver; +Cc: paulmck, andreyknvl, glider, dvyukov, kasan-dev, linux-kernel

We no longer have to include kcsan.h, since the KCSAN interfaces required
by both compiler.h and seqlock.h are now provided by kcsan-checks.h.

Signed-off-by: Marco Elver <elver@google.com>
---
 include/linux/compiler.h | 2 --
 include/linux/seqlock.h  | 2 +-
 2 files changed, 1 insertion(+), 3 deletions(-)

diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index c1bdf37571cb8..f504edebd5d71 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -313,8 +313,6 @@ unsigned long read_word_at_a_time(const void *addr)
 	__u.__val;					\
 })
 
-#include <linux/kcsan.h>
-
 /**
  * data_race - mark an expression as containing intentional data races
  *
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 239701cae3764..8b97204f35a77 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -37,7 +37,7 @@
 #include <linux/preempt.h>
 #include <linux/lockdep.h>
 #include <linux/compiler.h>
-#include <linux/kcsan.h>
+#include <linux/kcsan-checks.h>
 #include <asm/processor.h>
 
 /*
-- 
2.25.0.341.g760bfbb309-goog



* [PATCH 3/5] kcsan: Introduce kcsan_value_change type
  2020-02-10 18:43 [PATCH 1/5] kcsan: Move interfaces that affect checks to kcsan-checks.h Marco Elver
  2020-02-10 18:43 ` [PATCH 2/5] compiler.h, seqlock.h: Remove unnecessary kcsan.h includes Marco Elver
@ 2020-02-10 18:43 ` Marco Elver
  2020-02-10 18:43 ` [PATCH 4/5] kcsan: Add kcsan_set_access_mask() support Marco Elver
  2020-02-10 18:43 ` [PATCH 5/5] kcsan: Introduce ASSERT_EXCLUSIVE_BITS(var, mask) Marco Elver
  3 siblings, 0 replies; 7+ messages in thread
From: Marco Elver @ 2020-02-10 18:43 UTC (permalink / raw)
  To: elver; +Cc: paulmck, andreyknvl, glider, dvyukov, kasan-dev, linux-kernel

Introduces the kcsan_value_change type, which makes explicit whether we
observed a value-change (TRUE), or could not observe one but cannot rule
out that a value-change happened (MAYBE). The MAYBE state can either be
reported or not, depending on configuration preferences.

A follow-up patch introduces the FALSE state, which should never be
reported.

No functional change intended.
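
As a minimal sketch (illustrative only; 'classify' is a hypothetical
helper, the real logic lives in kcsan_setup_watchpoint() below), the
mapping is:

	static enum kcsan_value_change classify(u64 before, u64 after)
	{
		/* Observed a change -> TRUE; otherwise we can only say MAYBE. */
		return (before ^ after) ? KCSAN_VALUE_CHANGE_TRUE
					: KCSAN_VALUE_CHANGE_MAYBE;
	}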

Signed-off-by: Marco Elver <elver@google.com>
---
 kernel/kcsan/core.c   | 38 ++++++++++++++++++++++----------------
 kernel/kcsan/kcsan.h  | 19 ++++++++++++++++++-
 kernel/kcsan/report.c | 26 ++++++++++++++------------
 3 files changed, 54 insertions(+), 29 deletions(-)

diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index 498b1eb3c1cda..3f89801161d33 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -341,7 +341,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
 		u32 _4;
 		u64 _8;
 	} expect_value;
-	bool value_change = false;
+	enum kcsan_value_change value_change = KCSAN_VALUE_CHANGE_MAYBE;
 	unsigned long ua_flags = user_access_save();
 	unsigned long irq_flags;
 
@@ -398,6 +398,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
 	 * Read the current value, to later check and infer a race if the data
 	 * was modified via a non-instrumented access, e.g. from a device.
 	 */
+	expect_value._8 = 0;
 	switch (size) {
 	case 1:
 		expect_value._1 = READ_ONCE(*(const u8 *)ptr);
@@ -436,23 +437,36 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
 	 */
 	switch (size) {
 	case 1:
-		value_change = expect_value._1 != READ_ONCE(*(const u8 *)ptr);
+		expect_value._1 ^= READ_ONCE(*(const u8 *)ptr);
 		break;
 	case 2:
-		value_change = expect_value._2 != READ_ONCE(*(const u16 *)ptr);
+		expect_value._2 ^= READ_ONCE(*(const u16 *)ptr);
 		break;
 	case 4:
-		value_change = expect_value._4 != READ_ONCE(*(const u32 *)ptr);
+		expect_value._4 ^= READ_ONCE(*(const u32 *)ptr);
 		break;
 	case 8:
-		value_change = expect_value._8 != READ_ONCE(*(const u64 *)ptr);
+		expect_value._8 ^= READ_ONCE(*(const u64 *)ptr);
 		break;
 	default:
 		break; /* ignore; we do not diff the values */
 	}
 
+	/* Were we able to observe a value-change? */
+	if (expect_value._8 != 0)
+		value_change = KCSAN_VALUE_CHANGE_TRUE;
+
 	/* Check if this access raced with another. */
 	if (!remove_watchpoint(watchpoint)) {
+		/*
+		 * Depending on the access type, map a value_change of MAYBE to
+		 * TRUE (require reporting).
+		 */
+		if (value_change == KCSAN_VALUE_CHANGE_MAYBE && (size > 8 || is_assert)) {
+			/* Always assume a value-change. */
+			value_change = KCSAN_VALUE_CHANGE_TRUE;
+		}
+
 		/*
 		 * No need to increment 'data_races' counter, as the racing
 		 * thread already did.
@@ -461,20 +475,12 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
 		 * therefore both this thread and the racing thread may
 		 * increment this counter.
 		 */
-		if (is_assert)
+		if (is_assert && value_change == KCSAN_VALUE_CHANGE_TRUE)
 			kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES);
 
-		/*
-		 * - If we were not able to observe a value change due to size
-		 *   constraints, always assume a value change.
-		 * - If the access type is an assertion, we also always assume a
-		 *   value change to always report the race.
-		 */
-		value_change = value_change || size > 8 || is_assert;
-
 		kcsan_report(ptr, size, type, value_change, smp_processor_id(),
 			     KCSAN_REPORT_RACE_SIGNAL);
-	} else if (value_change) {
+	} else if (value_change == KCSAN_VALUE_CHANGE_TRUE) {
 		/* Inferring a race, since the value should not have changed. */
 
 		kcsan_counter_inc(KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN);
@@ -482,7 +488,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
 			kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES);
 
 		if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert)
-			kcsan_report(ptr, size, type, true,
+			kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_TRUE,
 				     smp_processor_id(),
 				     KCSAN_REPORT_RACE_UNKNOWN_ORIGIN);
 	}
diff --git a/kernel/kcsan/kcsan.h b/kernel/kcsan/kcsan.h
index 50078e7d43c32..83a79b08b550e 100644
--- a/kernel/kcsan/kcsan.h
+++ b/kernel/kcsan/kcsan.h
@@ -88,6 +88,22 @@ extern void kcsan_counter_dec(enum kcsan_counter_id id);
  */
 extern bool kcsan_skip_report_debugfs(unsigned long func_addr);
 
+/*
+ * Value-change states.
+ */
+enum kcsan_value_change {
+	/*
+	 * Did not observe a value-change, however, it is valid to report the
+	 * race, depending on preferences.
+	 */
+	KCSAN_VALUE_CHANGE_MAYBE,
+
+	/*
+	 * The value was observed to change, and the race should be reported.
+	 */
+	KCSAN_VALUE_CHANGE_TRUE,
+};
+
 enum kcsan_report_type {
 	/*
 	 * The thread that set up the watchpoint and briefly stalled was
@@ -111,6 +127,7 @@ enum kcsan_report_type {
  * Print a race report from thread that encountered the race.
  */
 extern void kcsan_report(const volatile void *ptr, size_t size, int access_type,
-			 bool value_change, int cpu_id, enum kcsan_report_type type);
+			 enum kcsan_value_change value_change, int cpu_id,
+			 enum kcsan_report_type type);
 
 #endif /* _KERNEL_KCSAN_KCSAN_H */
diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c
index e046dd26a2459..57805035868bc 100644
--- a/kernel/kcsan/report.c
+++ b/kernel/kcsan/report.c
@@ -130,26 +130,27 @@ static bool rate_limit_report(unsigned long frame1, unsigned long frame2)
  * Special rules to skip reporting.
  */
 static bool
-skip_report(bool value_change, unsigned long top_frame)
+skip_report(enum kcsan_value_change value_change, unsigned long top_frame)
 {
 	/*
-	 * The first call to skip_report always has value_change==true, since we
+	 * The first call to skip_report always has value_change==TRUE, since we
 	 * cannot know the value written of an instrumented access. For the 2nd
 	 * call there are 6 cases with CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY:
 	 *
-	 * 1. read watchpoint, conflicting write (value_change==true): report;
-	 * 2. read watchpoint, conflicting write (value_change==false): skip;
-	 * 3. write watchpoint, conflicting write (value_change==true): report;
-	 * 4. write watchpoint, conflicting write (value_change==false): skip;
-	 * 5. write watchpoint, conflicting read (value_change==false): skip;
-	 * 6. write watchpoint, conflicting read (value_change==true): report;
+	 * 1. read watchpoint, conflicting write (value_change==TRUE): report;
+	 * 2. read watchpoint, conflicting write (value_change==MAYBE): skip;
+	 * 3. write watchpoint, conflicting write (value_change==TRUE): report;
+	 * 4. write watchpoint, conflicting write (value_change==MAYBE): skip;
+	 * 5. write watchpoint, conflicting read (value_change==MAYBE): skip;
+	 * 6. write watchpoint, conflicting read (value_change==TRUE): report;
 	 *
 	 * Cases 1-4 are intuitive and expected; case 5 ensures we do not report
 	 * data races where the write may have rewritten the same value; case 6
 	 * is possible either if the size is larger than what we check value
 	 * changes for or the access type is KCSAN_ACCESS_ASSERT.
 	 */
-	if (IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY) && !value_change) {
+	if (IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY) &&
+	    value_change == KCSAN_VALUE_CHANGE_MAYBE) {
 		/*
 		 * The access is a write, but the data value did not change.
 		 *
@@ -245,7 +246,7 @@ static int sym_strcmp(void *addr1, void *addr2)
  * Returns true if a report was generated, false otherwise.
  */
 static bool print_report(const volatile void *ptr, size_t size, int access_type,
-			 bool value_change, int cpu_id,
+			 enum kcsan_value_change value_change, int cpu_id,
 			 enum kcsan_report_type type)
 {
 	unsigned long stack_entries[NUM_STACK_ENTRIES] = { 0 };
@@ -258,7 +259,7 @@ static bool print_report(const volatile void *ptr, size_t size, int access_type,
 	/*
 	 * Must check report filter rules before starting to print.
 	 */
-	if (skip_report(true, stack_entries[skipnr]))
+	if (skip_report(KCSAN_VALUE_CHANGE_TRUE, stack_entries[skipnr]))
 		return false;
 
 	if (type == KCSAN_REPORT_RACE_SIGNAL) {
@@ -459,7 +460,8 @@ static bool prepare_report(unsigned long *flags, const volatile void *ptr,
 }
 
 void kcsan_report(const volatile void *ptr, size_t size, int access_type,
-		  bool value_change, int cpu_id, enum kcsan_report_type type)
+		  enum kcsan_value_change value_change, int cpu_id,
+		  enum kcsan_report_type type)
 {
 	unsigned long flags = 0;
 
-- 
2.25.0.341.g760bfbb309-goog



* [PATCH 4/5] kcsan: Add kcsan_set_access_mask() support
  2020-02-10 18:43 [PATCH 1/5] kcsan: Move interfaces that affect checks to kcsan-checks.h Marco Elver
  2020-02-10 18:43 ` [PATCH 2/5] compiler.h, seqlock.h: Remove unnecessary kcsan.h includes Marco Elver
  2020-02-10 18:43 ` [PATCH 3/5] kcsan: Introduce kcsan_value_change type Marco Elver
@ 2020-02-10 18:43 ` Marco Elver
  2020-02-10 18:43 ` [PATCH 5/5] kcsan: Introduce ASSERT_EXCLUSIVE_BITS(var, mask) Marco Elver
  3 siblings, 0 replies; 7+ messages in thread
From: Marco Elver @ 2020-02-10 18:43 UTC (permalink / raw)
  To: elver; +Cc: paulmck, andreyknvl, glider, dvyukov, kasan-dev, linux-kernel

When setting up an access mask with kcsan_set_access_mask(), KCSAN will
only report races if concurrent changes to bits set in access_mask are
observed. Conveying access_mask via a separate call avoids introducing
overhead in the common-case fast-path.
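
A rough usage sketch (with a hypothetical 'flags' variable), essentially
what the ASSERT_EXCLUSIVE_BITS() macro introduced in the next patch
expands to:

	/* Only changes to the low byte of flags should be reported. */
	kcsan_set_access_mask(0xff);
	__kcsan_check_access(&flags, sizeof(flags), KCSAN_ACCESS_ASSERT);
	kcsan_set_access_mask(0);	/* clear the mask again */
	kcsan_atomic_next(1);		/* skip checking the access that follows */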

Signed-off-by: Marco Elver <elver@google.com>
---
 include/linux/kcsan-checks.h | 11 +++++++++
 include/linux/kcsan.h        |  5 +++++
 init/init_task.c             |  1 +
 kernel/kcsan/core.c          | 43 ++++++++++++++++++++++++++++++++----
 kernel/kcsan/kcsan.h         |  5 +++++
 kernel/kcsan/report.c        | 13 ++++++++++-
 6 files changed, 73 insertions(+), 5 deletions(-)

diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h
index 8675411c8dbcd..4ef5233ff3f04 100644
--- a/include/linux/kcsan-checks.h
+++ b/include/linux/kcsan-checks.h
@@ -68,6 +68,16 @@ void kcsan_flat_atomic_end(void);
  */
 void kcsan_atomic_next(int n);
 
+/**
+ * kcsan_set_access_mask - set access mask
+ *
+ * Set the access mask for all accesses for the current context if non-zero.
+ * Only value changes to bits set in the mask will be reported.
+ *
+ * @mask bitmask
+ */
+void kcsan_set_access_mask(unsigned long mask);
+
 #else /* CONFIG_KCSAN */
 
 static inline void __kcsan_check_access(const volatile void *ptr, size_t size,
@@ -78,6 +88,7 @@ static inline void kcsan_nestable_atomic_end(void)	{ }
 static inline void kcsan_flat_atomic_begin(void)	{ }
 static inline void kcsan_flat_atomic_end(void)		{ }
 static inline void kcsan_atomic_next(int n)		{ }
+static inline void kcsan_set_access_mask(unsigned long mask) { }
 
 #endif /* CONFIG_KCSAN */
 
diff --git a/include/linux/kcsan.h b/include/linux/kcsan.h
index 7a614ca558f65..3b84606e1e675 100644
--- a/include/linux/kcsan.h
+++ b/include/linux/kcsan.h
@@ -35,6 +35,11 @@ struct kcsan_ctx {
 	 */
 	int atomic_nest_count;
 	bool in_flat_atomic;
+
+	/*
+	 * Access mask for all accesses if non-zero.
+	 */
+	unsigned long access_mask;
 };
 
 /**
diff --git a/init/init_task.c b/init/init_task.c
index 2b4fe98b0f095..096191d177d5c 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -167,6 +167,7 @@ struct task_struct init_task
 		.atomic_next		= 0,
 		.atomic_nest_count	= 0,
 		.in_flat_atomic		= false,
+		.access_mask		= 0,
 	},
 #endif
 #ifdef CONFIG_TRACE_IRQFLAGS
diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index 3f89801161d33..589b1e7f0f253 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -39,6 +39,7 @@ static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = {
 	.atomic_next		= 0,
 	.atomic_nest_count	= 0,
 	.in_flat_atomic		= false,
+	.access_mask		= 0,
 };
 
 /*
@@ -298,6 +299,15 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr,
 
 	if (!kcsan_is_enabled())
 		return;
+
+	/*
+	 * The access_mask check relies on value-change comparison. To avoid
+	 * reporting a race where e.g. the writer set up the watchpoint, but the
+	 * reader has access_mask!=0, we have to ignore the found watchpoint.
+	 */
+	if (get_ctx()->access_mask != 0)
+		return;
+
 	/*
 	 * Consume the watchpoint as soon as possible, to minimize the chances
 	 * of !consumed. Consuming the watchpoint must always be guarded by
@@ -341,6 +351,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
 		u32 _4;
 		u64 _8;
 	} expect_value;
+	unsigned long access_mask;
 	enum kcsan_value_change value_change = KCSAN_VALUE_CHANGE_MAYBE;
 	unsigned long ua_flags = user_access_save();
 	unsigned long irq_flags;
@@ -435,18 +446,27 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
 	 * Re-read value, and check if it is as expected; if not, we infer a
 	 * racy access.
 	 */
+	access_mask = get_ctx()->access_mask;
 	switch (size) {
 	case 1:
 		expect_value._1 ^= READ_ONCE(*(const u8 *)ptr);
+		if (access_mask)
+			expect_value._1 &= (u8)access_mask;
 		break;
 	case 2:
 		expect_value._2 ^= READ_ONCE(*(const u16 *)ptr);
+		if (access_mask)
+			expect_value._2 &= (u16)access_mask;
 		break;
 	case 4:
 		expect_value._4 ^= READ_ONCE(*(const u32 *)ptr);
+		if (access_mask)
+			expect_value._4 &= (u32)access_mask;
 		break;
 	case 8:
 		expect_value._8 ^= READ_ONCE(*(const u64 *)ptr);
+		if (access_mask)
+			expect_value._8 &= (u64)access_mask;
 		break;
 	default:
 		break; /* ignore; we do not diff the values */
@@ -460,11 +480,20 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
 	if (!remove_watchpoint(watchpoint)) {
 		/*
 		 * Depending on the access type, map a value_change of MAYBE to
-		 * TRUE (require reporting).
+		 * TRUE (always report) or FALSE (never report).
 		 */
-		if (value_change == KCSAN_VALUE_CHANGE_MAYBE && (size > 8 || is_assert)) {
-			/* Always assume a value-change. */
-			value_change = KCSAN_VALUE_CHANGE_TRUE;
+		if (value_change == KCSAN_VALUE_CHANGE_MAYBE) {
+			if (access_mask != 0) {
+				/*
+				 * For access with access_mask, we require a
+				 * value-change, as it is likely that races on
+				 * ~access_mask bits are expected.
+				 */
+				value_change = KCSAN_VALUE_CHANGE_FALSE;
+			} else if (size > 8 || is_assert) {
+				/* Always assume a value-change. */
+				value_change = KCSAN_VALUE_CHANGE_TRUE;
+			}
 		}
 
 		/*
@@ -622,6 +651,12 @@ void kcsan_atomic_next(int n)
 }
 EXPORT_SYMBOL(kcsan_atomic_next);
 
+void kcsan_set_access_mask(unsigned long mask)
+{
+	get_ctx()->access_mask = mask;
+}
+EXPORT_SYMBOL(kcsan_set_access_mask);
+
 void __kcsan_check_access(const volatile void *ptr, size_t size, int type)
 {
 	check_access(ptr, size, type);
diff --git a/kernel/kcsan/kcsan.h b/kernel/kcsan/kcsan.h
index 83a79b08b550e..892de5120c1b6 100644
--- a/kernel/kcsan/kcsan.h
+++ b/kernel/kcsan/kcsan.h
@@ -98,6 +98,11 @@ enum kcsan_value_change {
 	 */
 	KCSAN_VALUE_CHANGE_MAYBE,
 
+	/*
+	 * Did not observe a value-change, and it is invalid to report the race.
+	 */
+	KCSAN_VALUE_CHANGE_FALSE,
+
 	/*
 	 * The value was observed to change, and the race should be reported.
 	 */
diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c
index 57805035868bc..70ccff816db81 100644
--- a/kernel/kcsan/report.c
+++ b/kernel/kcsan/report.c
@@ -132,6 +132,9 @@ static bool rate_limit_report(unsigned long frame1, unsigned long frame2)
 static bool
 skip_report(enum kcsan_value_change value_change, unsigned long top_frame)
 {
+	/* Should never get here if value_change==FALSE. */
+	WARN_ON_ONCE(value_change == KCSAN_VALUE_CHANGE_FALSE);
+
 	/*
 	 * The first call to skip_report always has value_change==TRUE, since we
 	 * cannot know the value written of an instrumented access. For the 2nd
@@ -475,7 +478,15 @@ void kcsan_report(const volatile void *ptr, size_t size, int access_type,
 
 	kcsan_disable_current();
 	if (prepare_report(&flags, ptr, size, access_type, cpu_id, type)) {
-		if (print_report(ptr, size, access_type, value_change, cpu_id, type) && panic_on_warn)
+		/*
+		 * Never report if value_change is FALSE, only if it is
+		 * either TRUE or MAYBE. In case of MAYBE, further filtering may
+		 * be done once we know the full stack trace in print_report().
+		 */
+		bool reported = value_change != KCSAN_VALUE_CHANGE_FALSE &&
+				print_report(ptr, size, access_type, value_change, cpu_id, type);
+
+		if (reported && panic_on_warn)
 			panic("panic_on_warn set ...\n");
 
 		release_report(&flags, type);
-- 
2.25.0.341.g760bfbb309-goog



* [PATCH 5/5] kcsan: Introduce ASSERT_EXCLUSIVE_BITS(var, mask)
  2020-02-10 18:43 [PATCH 1/5] kcsan: Move interfaces that affect checks to kcsan-checks.h Marco Elver
                   ` (2 preceding siblings ...)
  2020-02-10 18:43 ` [PATCH 4/5] kcsan: Add kcsan_set_access_mask() support Marco Elver
@ 2020-02-10 18:43 ` Marco Elver
  2020-02-10 21:07   ` John Hubbard
  3 siblings, 1 reply; 7+ messages in thread
From: Marco Elver @ 2020-02-10 18:43 UTC (permalink / raw)
  To: elver
  Cc: paulmck, andreyknvl, glider, dvyukov, kasan-dev, linux-kernel,
	Andrew Morton, David Hildenbrand, Jan Kara, John Hubbard,
	Qian Cai

This introduces ASSERT_EXCLUSIVE_BITS(var, mask).
ASSERT_EXCLUSIVE_BITS(var, mask) will cause KCSAN to assume that the
following access is safe w.r.t. data races (however, please see the
docbook comment for disclaimer here).

For more context on why this was considered necessary, please see:
  http://lkml.kernel.org/r/1580995070-25139-1-git-send-email-cai@lca.pw

In particular, data races between reads (that use @mask bits of an
access that should not be modified concurrently) and writes (that change
~@mask bits not used by the read) should ordinarily be marked. After
marking these, we would no longer be able to detect harmful races
between reads to @mask bits and writes to @mask bits.

Therefore, by using ASSERT_EXCLUSIVE_BITS(var, mask), we accomplish:

  1. No new macros introduced elsewhere; since there are numerous ways in
     which we can extract the same bits, a one-size-fits-all macro is
     less preferred.

  2. The existing code does not need to be modified (although READ_ONCE()
     may still be advisable if we cannot prove that the data race is
     always safe).

  3. We catch bugs where the exclusive bits are modified concurrently.

  4. We document properties of the current code.

Signed-off-by: Marco Elver <elver@google.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Paul E. McKenney <paulmck@kernel.org>
Cc: Qian Cai <cai@lca.pw>
---
 include/linux/kcsan-checks.h | 57 ++++++++++++++++++++++++++++++++----
 kernel/kcsan/debugfs.c       | 15 +++++++++-
 2 files changed, 65 insertions(+), 7 deletions(-)

diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h
index 4ef5233ff3f04..eae6030cd4348 100644
--- a/include/linux/kcsan-checks.h
+++ b/include/linux/kcsan-checks.h
@@ -152,9 +152,9 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size,
 #endif
 
 /**
- * ASSERT_EXCLUSIVE_WRITER - assert no other threads are writing @var
+ * ASSERT_EXCLUSIVE_WRITER - assert no concurrent writes to @var
  *
- * Assert that there are no other threads writing @var; other readers are
+ * Assert that there are no concurrent writes to @var; other readers are
  * allowed. This assertion can be used to specify properties of concurrent code,
  * where violation cannot be detected as a normal data race.
  *
@@ -171,11 +171,11 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size,
 	__kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT)
 
 /**
- * ASSERT_EXCLUSIVE_ACCESS - assert no other threads are accessing @var
+ * ASSERT_EXCLUSIVE_ACCESS - assert no concurrent accesses to @var
  *
- * Assert that no other thread is accessing @var (no readers nor writers). This
- * assertion can be used to specify properties of concurrent code, where
- * violation cannot be detected as a normal data race.
+ * Assert that there are no concurrent accesses to @var (no readers nor
+ * writers). This assertion can be used to specify properties of concurrent
+ * code, where violation cannot be detected as a normal data race.
  *
  * For example, in a reference-counting algorithm where exclusive access is
  * expected after the refcount reaches 0. We can check that this property
@@ -191,4 +191,49 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size,
 #define ASSERT_EXCLUSIVE_ACCESS(var)                                           \
 	__kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT)
 
+/**
+ * ASSERT_EXCLUSIVE_BITS - assert no concurrent writes to subset of bits in @var
+ *
+ * [Bit-granular variant of ASSERT_EXCLUSIVE_WRITER(var)]
+ *
+ * Assert that there are no concurrent writes to a subset of bits in @var;
+ * concurrent readers are permitted. Concurrent writes (or reads) to ~@mask bits
+ * are ignored. This assertion can be used to specify properties of concurrent
+ * code, where marked accesses imply violations cannot be detected as a normal
+ * data race.
+ *
+ * For example, this may be used when certain bits of @var may only be modified
+ * when holding the appropriate lock, but other bits may still be modified
+ * concurrently. Writers, where other bits may change concurrently, could use
+ * the assertion as follows:
+ *
+ *	spin_lock(&foo_lock);
+ *	ASSERT_EXCLUSIVE_BITS(flags, FOO_MASK);
+ *	old_flags = READ_ONCE(flags);
+ *	new_flags = (old_flags & ~FOO_MASK) | (new_foo << FOO_SHIFT);
+ *	if (cmpxchg(&flags, old_flags, new_flags) != old_flags) { ... }
+ *	spin_unlock(&foo_lock);
+ *
+ * Readers, could use it as follows:
+ *
+ *	ASSERT_EXCLUSIVE_BITS(flags, FOO_MASK);
+ *	foo = (READ_ONCE(flags) & FOO_MASK) >> FOO_SHIFT;
+ *
+ * NOTE: The access that immediately follows is assumed to access the masked
+ * bits only, and safe w.r.t. data races. While marking this access is optional
+ * from KCSAN's point-of-view, it may still be advisable to do so, since we
+ * cannot reason about all possible compiler optimizations when it comes to bit
+ * manipulations (on the reader and writer side).
+ *
+ * @var variable to assert on
+ * @mask only check for modifications to bits set in @mask
+ */
+#define ASSERT_EXCLUSIVE_BITS(var, mask)                                       \
+	do {                                                                   \
+		kcsan_set_access_mask(mask);                                   \
+		__kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT);\
+		kcsan_set_access_mask(0);                                      \
+		kcsan_atomic_next(1);                                          \
+	} while (0)
+
 #endif /* _LINUX_KCSAN_CHECKS_H */
diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c
index 9bbba0e57c9b3..2ff1961239778 100644
--- a/kernel/kcsan/debugfs.c
+++ b/kernel/kcsan/debugfs.c
@@ -100,8 +100,10 @@ static noinline void microbenchmark(unsigned long iters)
  * debugfs file from multiple tasks to generate real conflicts and show reports.
  */
 static long test_dummy;
+static long test_flags;
 static noinline void test_thread(unsigned long iters)
 {
+	const long CHANGE_BITS = 0xff00ff00ff00ff00L;
 	const struct kcsan_ctx ctx_save = current->kcsan_ctx;
 	cycles_t cycles;
 
@@ -109,16 +111,27 @@ static noinline void test_thread(unsigned long iters)
 	memset(&current->kcsan_ctx, 0, sizeof(current->kcsan_ctx));
 
 	pr_info("KCSAN: %s begin | iters: %lu\n", __func__, iters);
+	pr_info("test_dummy@%px, test_flags@%px\n", &test_dummy, &test_flags);
 
 	cycles = get_cycles();
 	while (iters--) {
+		/* These all should generate reports. */
 		__kcsan_check_read(&test_dummy, sizeof(test_dummy));
-		__kcsan_check_write(&test_dummy, sizeof(test_dummy));
 		ASSERT_EXCLUSIVE_WRITER(test_dummy);
 		ASSERT_EXCLUSIVE_ACCESS(test_dummy);
 
+		ASSERT_EXCLUSIVE_BITS(test_flags, ~CHANGE_BITS); /* no report */
+		__kcsan_check_read(&test_flags, sizeof(test_flags)); /* no report */
+
+		ASSERT_EXCLUSIVE_BITS(test_flags, CHANGE_BITS); /* report */
+		__kcsan_check_read(&test_flags, sizeof(test_flags)); /* no report */
+
 		/* not actually instrumented */
 		WRITE_ONCE(test_dummy, iters);  /* to observe value-change */
+		__kcsan_check_write(&test_dummy, sizeof(test_dummy));
+
+		test_flags ^= CHANGE_BITS; /* generate value-change */
+		__kcsan_check_write(&test_flags, sizeof(test_flags));
 	}
 	cycles = get_cycles() - cycles;
 
-- 
2.25.0.341.g760bfbb309-goog



* Re: [PATCH 5/5] kcsan: Introduce ASSERT_EXCLUSIVE_BITS(var, mask)
  2020-02-10 18:43 ` [PATCH 5/5] kcsan: Introduce ASSERT_EXCLUSIVE_BITS(var, mask) Marco Elver
@ 2020-02-10 21:07   ` John Hubbard
  2020-02-11 16:07     ` Marco Elver
  0 siblings, 1 reply; 7+ messages in thread
From: John Hubbard @ 2020-02-10 21:07 UTC (permalink / raw)
  To: Marco Elver
  Cc: paulmck, andreyknvl, glider, dvyukov, kasan-dev, linux-kernel,
	Andrew Morton, David Hildenbrand, Jan Kara, Qian Cai

On 2/10/20 10:43 AM, Marco Elver wrote:
> This introduces ASSERT_EXCLUSIVE_BITS(var, mask).
> ASSERT_EXCLUSIVE_BITS(var, mask) will cause KCSAN to assume that the
> following access is safe w.r.t. data races (however, please see the
> docbook comment for disclaimer here).
> 
> For more context on why this was considered necessary, please see:
>   http://lkml.kernel.org/r/1580995070-25139-1-git-send-email-cai@lca.pw
> 
> In particular, data races between reads (that use @mask bits of an
> access that should not be modified concurrently) and writes (that change
> ~@mask bits not used by the read) should ordinarily be marked. After
> marking these, we would no longer be able to detect harmful races
> between reads to @mask bits and writes to @mask bits.

I know this is "just" the commit log, but as long as I'm reviewing the
whole thing...to make the above a little clearer, see if you like this 
revised wording:

In particular, before this patch, data races between reads (that use
@mask bits of an access that should not be modified concurrently) and
writes (that change ~@mask bits not used by the readers) would have
been annotated with "data_race()". However, doing so would then hide
real problems: we would no longer be able to detect harmful races
between reads to @mask bits and writes to @mask bits.


> 
> Therefore, by using ASSERT_EXCLUSIVE_BITS(var, mask), we accomplish:
> 
>   1. No new macros introduced elsewhere; since there are numerous ways in
>      which we can extract the same bits, a one-size-fits-all macro is
>      less preferred.

This somehow confuses me a lot. Maybe say it like this:

1. Avoid a proliferation of specific macros at the call sites: by including a
   mask in the argument list, we can use the same macro in a wide variety of 
   call sites, regardless of which bits in a field each call site uses.

?

> 
>   2. The existing code does not need to be modified (although READ_ONCE()
>      may still be advisable if we cannot prove that the data race is
>      always safe).
> 
>   3. We catch bugs where the exclusive bits are modified concurrently.
> 
>   4. We document properties of the current code.
> 
> Signed-off-by: Marco Elver <elver@google.com>
> Cc: Andrew Morton <akpm@linux-foundation.org>
> Cc: David Hildenbrand <david@redhat.com>
> Cc: Jan Kara <jack@suse.cz>
> Cc: John Hubbard <jhubbard@nvidia.com>
> Cc: Paul E. McKenney <paulmck@kernel.org>
> Cc: Qian Cai <cai@lca.pw>
> ---
>  include/linux/kcsan-checks.h | 57 ++++++++++++++++++++++++++++++++----
>  kernel/kcsan/debugfs.c       | 15 +++++++++-
>  2 files changed, 65 insertions(+), 7 deletions(-)
> 
> diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h
> index 4ef5233ff3f04..eae6030cd4348 100644
> --- a/include/linux/kcsan-checks.h
> +++ b/include/linux/kcsan-checks.h
> @@ -152,9 +152,9 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size,
>  #endif
>  
>  /**
> - * ASSERT_EXCLUSIVE_WRITER - assert no other threads are writing @var
> + * ASSERT_EXCLUSIVE_WRITER - assert no concurrent writes to @var
>   *
> - * Assert that there are no other threads writing @var; other readers are
> + * Assert that there are no concurrent writes to @var; other readers are
>   * allowed. This assertion can be used to specify properties of concurrent code,
>   * where violation cannot be detected as a normal data race.
>   *
> @@ -171,11 +171,11 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size,
>  	__kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT)
>  
>  /**
> - * ASSERT_EXCLUSIVE_ACCESS - assert no other threads are accessing @var
> + * ASSERT_EXCLUSIVE_ACCESS - assert no concurrent accesses to @var
>   *
> - * Assert that no other thread is accessing @var (no readers nor writers). This
> - * assertion can be used to specify properties of concurrent code, where
> - * violation cannot be detected as a normal data race.
> + * Assert that there are no concurrent accesses to @var (no readers nor
> + * writers). This assertion can be used to specify properties of concurrent
> + * code, where violation cannot be detected as a normal data race.
>   *
>   * For example, in a reference-counting algorithm where exclusive access is
>   * expected after the refcount reaches 0. We can check that this property
> @@ -191,4 +191,49 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size,
>  #define ASSERT_EXCLUSIVE_ACCESS(var)                                           \
>  	__kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT)
>  
> +/**
> + * ASSERT_EXCLUSIVE_BITS - assert no concurrent writes to subset of bits in @var
> + *
> + * [Bit-granular variant of ASSERT_EXCLUSIVE_WRITER(var)]


No need for the square brackets, unless that's some emerging convention in the
documentation world. 


> + *
> + * Assert that there are no concurrent writes to a subset of bits in @var;
> + * concurrent readers are permitted. Concurrent writes (or reads) to ~@mask bits
> + * are ignored. This assertion can be used to specify properties of concurrent
> + * code, where marked accesses imply violations cannot be detected as a normal
> + * data race.


How about this wording:

/*
 * Assert that there are no concurrent writes to a subset of bits in @var;
 * concurrent readers are permitted. Concurrent writes (or reads) to ~@mask bits
 * are ignored. This assertion provides more detailed, bit-level information to
 * the KCSAN system than most of the other (word granularity) annotations. As
 * such, it allows KCSAN to safely overlook some bits while still continuing to
 * check the remaining bits for unsafe access patterns.
 *
 * Use this if you have some bits that are read-only, and other bits that are
 * not, within a variable.
 */

?


> + *
> + * For example, this may be used when certain bits of @var may only be modified
> + * when holding the appropriate lock, but other bits may still be modified
> + * concurrently. Writers, where other bits may change concurrently, could use
> + * the assertion as follows:
> + *
> + *	spin_lock(&foo_lock);
> + *	ASSERT_EXCLUSIVE_BITS(flags, FOO_MASK);
> + *	old_flags = READ_ONCE(flags);
> + *	new_flags = (old_flags & ~FOO_MASK) | (new_foo << FOO_SHIFT);
> + *	if (cmpxchg(&flags, old_flags, new_flags) != old_flags) { ... }
> + *	spin_unlock(&foo_lock);
> + *
> + * Readers, could use it as follows:
> + *
> + *	ASSERT_EXCLUSIVE_BITS(flags, FOO_MASK);
> + *	foo = (READ_ONCE(flags) & FOO_MASK) >> FOO_SHIFT;


In the general case (which is what this documentation covers), the
READ_ONCE() is not required. So this should either leave it out, or
explain that it's not necessarily required.
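
For instance, a plain read of the masked bits would do in the common
case (reusing FOO_MASK/FOO_SHIFT from the example quoted above):

	ASSERT_EXCLUSIVE_BITS(flags, FOO_MASK);
	foo = (flags & FOO_MASK) >> FOO_SHIFT;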


> + *
> + * NOTE: The access that immediately follows is assumed to access the masked
> + * bits only, and safe w.r.t. data races. While marking this access is optional
> + * from KCSAN's point-of-view, it may still be advisable to do so, since we
> + * cannot reason about all possible compiler optimizations when it comes to bit
> + * manipulations (on the reader and writer side).
> + *
> + * @var variable to assert on
> + * @mask only check for modifications to bits set in @mask
> + */
> +#define ASSERT_EXCLUSIVE_BITS(var, mask)                                       \


This API looks good to me.


> +	do {                                                                   \
> +		kcsan_set_access_mask(mask);                                   \
> +		__kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT);\
> +		kcsan_set_access_mask(0);                                      \
> +		kcsan_atomic_next(1);                                          \
> +	} while (0)
> +
>  #endif /* _LINUX_KCSAN_CHECKS_H */
> diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c
> index 9bbba0e57c9b3..2ff1961239778 100644
> --- a/kernel/kcsan/debugfs.c
> +++ b/kernel/kcsan/debugfs.c
> @@ -100,8 +100,10 @@ static noinline void microbenchmark(unsigned long iters)
>   * debugfs file from multiple tasks to generate real conflicts and show reports.
>   */
>  static long test_dummy;
> +static long test_flags;
>  static noinline void test_thread(unsigned long iters)
>  {
> +	const long CHANGE_BITS = 0xff00ff00ff00ff00L;
>  	const struct kcsan_ctx ctx_save = current->kcsan_ctx;
>  	cycles_t cycles;
>  
> @@ -109,16 +111,27 @@ static noinline void test_thread(unsigned long iters)
>  	memset(&current->kcsan_ctx, 0, sizeof(current->kcsan_ctx));
>  
>  	pr_info("KCSAN: %s begin | iters: %lu\n", __func__, iters);
> +	pr_info("test_dummy@%px, test_flags@%px\n", &test_dummy, &test_flags);
>  
>  	cycles = get_cycles();
>  	while (iters--) {
> +		/* These all should generate reports. */
>  		__kcsan_check_read(&test_dummy, sizeof(test_dummy));
> -		__kcsan_check_write(&test_dummy, sizeof(test_dummy));
>  		ASSERT_EXCLUSIVE_WRITER(test_dummy);
>  		ASSERT_EXCLUSIVE_ACCESS(test_dummy);
>  
> +		ASSERT_EXCLUSIVE_BITS(test_flags, ~CHANGE_BITS); /* no report */
> +		__kcsan_check_read(&test_flags, sizeof(test_flags)); /* no report */
> +
> +		ASSERT_EXCLUSIVE_BITS(test_flags, CHANGE_BITS); /* report */
> +		__kcsan_check_read(&test_flags, sizeof(test_flags)); /* no report */
> +
>  		/* not actually instrumented */
>  		WRITE_ONCE(test_dummy, iters);  /* to observe value-change */
> +		__kcsan_check_write(&test_dummy, sizeof(test_dummy));
> +
> +		test_flags ^= CHANGE_BITS; /* generate value-change */
> +		__kcsan_check_write(&test_flags, sizeof(test_flags));
>  	}
>  	cycles = get_cycles() - cycles;
>  
> 



thanks,
-- 
John Hubbard
NVIDIA


* Re: [PATCH 5/5] kcsan: Introduce ASSERT_EXCLUSIVE_BITS(var, mask)
  2020-02-10 21:07   ` John Hubbard
@ 2020-02-11 16:07     ` Marco Elver
  0 siblings, 0 replies; 7+ messages in thread
From: Marco Elver @ 2020-02-11 16:07 UTC (permalink / raw)
  To: John Hubbard
  Cc: Paul E. McKenney, Andrey Konovalov, Alexander Potapenko,
	Dmitry Vyukov, kasan-dev, LKML, Andrew Morton, David Hildenbrand,
	Jan Kara, Qian Cai

v2: https://lore.kernel.org/lkml/20200211160423.138870-5-elver@google.com/

On Mon, 10 Feb 2020 at 22:07, John Hubbard <jhubbard@nvidia.com> wrote:
>
> On 2/10/20 10:43 AM, Marco Elver wrote:
> > This introduces ASSERT_EXCLUSIVE_BITS(var, mask).
> > ASSERT_EXCLUSIVE_BITS(var, mask) will cause KCSAN to assume that the
> > following access is safe w.r.t. data races (however, please see the
> > docbook comment for disclaimer here).
> >
> > For more context on why this was considered necessary, please see:
> >   http://lkml.kernel.org/r/1580995070-25139-1-git-send-email-cai@lca.pw
> >
> > In particular, data races between reads (that use @mask bits of an
> > access that should not be modified concurrently) and writes (that change
> > ~@mask bits not used by the read) should ordinarily be marked. After
> > marking these, we would no longer be able to detect harmful races
> > between reads to @mask bits and writes to @mask bits.
>
> I know this is "just" the commit log, but as long as I'm reviewing the
> whole thing...to make the above a little clearer, see if you like this
> revised wording:
>
> In particular, before this patch, data races between reads (that use
> @mask bits of an access that should not be modified concurrently) and
> writes (that change ~@mask bits not used by the readers) would have
> been annotated with "data_race()". However, doing so would then hide
> real problems: we would no longer be able to detect harmful races
> between reads to @mask bits and writes to @mask bits.

Thanks, applied.

> >
> > Therefore, by using ASSERT_EXCLUSIVE_BITS(var, mask), we accomplish:
> >
> >   1. No new macros introduced elsewhere; since there are numerous ways in
> >      which we can extract the same bits, a one-size-fits-all macro is
> >      less preferred.
>
> This somehow confuses me a lot. Maybe say it like this:
>
> 1. Avoid a proliferation of specific macros at the call sites: by including a
>    mask in the argument list, we can use the same macro in a wide variety of
>    call sites, regardless of which bits in a field each call site uses.
>
> ?

Thanks, I took that mostly as-is.

> >
> >   2. The existing code does not need to be modified (although READ_ONCE()
> >      may still be advisable if we cannot prove that the data race is
> >      always safe).
> >
> >   3. We catch bugs where the exclusive bits are modified concurrently.
> >
> >   4. We document properties of the current code.
> >
> > Signed-off-by: Marco Elver <elver@google.com>
> > Cc: Andrew Morton <akpm@linux-foundation.org>
> > Cc: David Hildenbrand <david@redhat.com>
> > Cc: Jan Kara <jack@suse.cz>
> > Cc: John Hubbard <jhubbard@nvidia.com>
> > Cc: Paul E. McKenney <paulmck@kernel.org>
> > Cc: Qian Cai <cai@lca.pw>
> > ---
> >  include/linux/kcsan-checks.h | 57 ++++++++++++++++++++++++++++++++----
> >  kernel/kcsan/debugfs.c       | 15 +++++++++-
> >  2 files changed, 65 insertions(+), 7 deletions(-)
> >
> > diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h
> > index 4ef5233ff3f04..eae6030cd4348 100644
> > --- a/include/linux/kcsan-checks.h
> > +++ b/include/linux/kcsan-checks.h
> > @@ -152,9 +152,9 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size,
> >  #endif
> >
> >  /**
> > - * ASSERT_EXCLUSIVE_WRITER - assert no other threads are writing @var
> > + * ASSERT_EXCLUSIVE_WRITER - assert no concurrent writes to @var
> >   *
> > - * Assert that there are no other threads writing @var; other readers are
> > + * Assert that there are no concurrent writes to @var; other readers are
> >   * allowed. This assertion can be used to specify properties of concurrent code,
> >   * where violation cannot be detected as a normal data race.
> >   *
> > @@ -171,11 +171,11 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size,
> >       __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT)
> >
> >  /**
> > - * ASSERT_EXCLUSIVE_ACCESS - assert no other threads are accessing @var
> > + * ASSERT_EXCLUSIVE_ACCESS - assert no concurrent accesses to @var
> >   *
> > - * Assert that no other thread is accessing @var (no readers nor writers). This
> > - * assertion can be used to specify properties of concurrent code, where
> > - * violation cannot be detected as a normal data race.
> > + * Assert that there are no concurrent accesses to @var (no readers nor
> > + * writers). This assertion can be used to specify properties of concurrent
> > + * code, where violation cannot be detected as a normal data race.
> >   *
> >   * For example, in a reference-counting algorithm where exclusive access is
> >   * expected after the refcount reaches 0. We can check that this property
> > @@ -191,4 +191,49 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size,
> >  #define ASSERT_EXCLUSIVE_ACCESS(var)                                           \
> >       __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT)
> >
> > +/**
> > + * ASSERT_EXCLUSIVE_BITS - assert no concurrent writes to subset of bits in @var
> > + *
> > + * [Bit-granular variant of ASSERT_EXCLUSIVE_WRITER(var)]
>
>
> No need for the square brackets, unless that's some emerging convention in the
> documentation world.

Done.

>
> > + *
> > + * Assert that there are no concurrent writes to a subset of bits in @var;
> > + * concurrent readers are permitted. Concurrent writes (or reads) to ~@mask bits
> > + * are ignored. This assertion can be used to specify properties of concurrent
> > + * code, where marked accesses imply violations cannot be detected as a normal
> > + * data race.
>
>
> How about this wording:
>
> /*
>  * Assert that there are no concurrent writes to a subset of bits in @var;
>  * concurrent readers are permitted. Concurrent writes (or reads) to ~@mask bits
>  * are ignored. This assertion provides more detailed, bit-level information to
>  * the KCSAN system than most of the other (word granularity) annotations. As
>  * such, it allows KCSAN to safely overlook some bits while still continuing to
>  * check the remaining bits for unsafe access patterns.
>  *
>  * Use this if you have some bits that are read-only, and other bits that are
>  * not, within a variable.
>  */
>
> ?

I've updated it based on the information you want to convey here. I've
removed mention to KCSAN in the first paragraph, since KCSAN is an
implementation of this, but a user of the API shouldn't care too much
about that.

Hopefully it makes more sense in v2.

>
> > + *
> > + * For example, this may be used when certain bits of @var may only be modified
> > + * when holding the appropriate lock, but other bits may still be modified
> > + * concurrently. Writers, where other bits may change concurrently, could use
> > + * the assertion as follows:
> > + *
> > + *   spin_lock(&foo_lock);
> > + *   ASSERT_EXCLUSIVE_BITS(flags, FOO_MASK);
> > + *   old_flags = READ_ONCE(flags);
> > + *   new_flags = (old_flags & ~FOO_MASK) | (new_foo << FOO_SHIFT);
> > + *   if (cmpxchg(&flags, old_flags, new_flags) != old_flags) { ... }
> > + *   spin_unlock(&foo_lock);
> > + *
> > + * Readers, could use it as follows:
> > + *
> > + *   ASSERT_EXCLUSIVE_BITS(flags, FOO_MASK);
> > + *   foo = (READ_ONCE(flags) & FOO_MASK) >> FOO_SHIFT;
>
>
> In the general case (which is what this documentation covers), the
> READ_ONCE() is not required. So this should either leave it out, or
> explain that it's not necessarily required.

I've updated the example so that it shows you can omit the READ_ONCE.
However, I want to be very careful here, since I still
can't prove to myself no compiler will mess this up. In the general
case, we likely won't need the READ_ONCE, because you'd need a pretty
unfortunate compiler + architecture combo to mess this up for you. But
you never know.

Thanks,
-- Marco

>
> > + *
> > + * NOTE: The access that immediately follows is assumed to access the masked
> > + * bits only, and safe w.r.t. data races. While marking this access is optional
> > + * from KCSAN's point-of-view, it may still be advisable to do so, since we
> > + * cannot reason about all possible compiler optimizations when it comes to bit
> > + * manipulations (on the reader and writer side).
> > + *
> > + * @var variable to assert on
> > + * @mask only check for modifications to bits set in @mask
> > + */
> > +#define ASSERT_EXCLUSIVE_BITS(var, mask)                                       \
>
>
> This API looks good to me.
>
>
> > +     do {                                                                   \
> > +             kcsan_set_access_mask(mask);                                   \
> > +             __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT);\
> > +             kcsan_set_access_mask(0);                                      \
> > +             kcsan_atomic_next(1);                                          \
> > +     } while (0)
> > +
> >  #endif /* _LINUX_KCSAN_CHECKS_H */
> > diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c
> > index 9bbba0e57c9b3..2ff1961239778 100644
> > --- a/kernel/kcsan/debugfs.c
> > +++ b/kernel/kcsan/debugfs.c
> > @@ -100,8 +100,10 @@ static noinline void microbenchmark(unsigned long iters)
> >   * debugfs file from multiple tasks to generate real conflicts and show reports.
> >   */
> >  static long test_dummy;
> > +static long test_flags;
> >  static noinline void test_thread(unsigned long iters)
> >  {
> > +     const long CHANGE_BITS = 0xff00ff00ff00ff00L;
> >       const struct kcsan_ctx ctx_save = current->kcsan_ctx;
> >       cycles_t cycles;
> >
> > @@ -109,16 +111,27 @@ static noinline void test_thread(unsigned long iters)
> >       memset(&current->kcsan_ctx, 0, sizeof(current->kcsan_ctx));
> >
> >       pr_info("KCSAN: %s begin | iters: %lu\n", __func__, iters);
> > +     pr_info("test_dummy@%px, test_flags@%px\n", &test_dummy, &test_flags);
> >
> >       cycles = get_cycles();
> >       while (iters--) {
> > +             /* These all should generate reports. */
> >               __kcsan_check_read(&test_dummy, sizeof(test_dummy));
> > -             __kcsan_check_write(&test_dummy, sizeof(test_dummy));
> >               ASSERT_EXCLUSIVE_WRITER(test_dummy);
> >               ASSERT_EXCLUSIVE_ACCESS(test_dummy);
> >
> > +             ASSERT_EXCLUSIVE_BITS(test_flags, ~CHANGE_BITS); /* no report */
> > +             __kcsan_check_read(&test_flags, sizeof(test_flags)); /* no report */
> > +
> > +             ASSERT_EXCLUSIVE_BITS(test_flags, CHANGE_BITS); /* report */
> > +             __kcsan_check_read(&test_flags, sizeof(test_flags)); /* no report */
> > +
> >               /* not actually instrumented */
> >               WRITE_ONCE(test_dummy, iters);  /* to observe value-change */
> > +             __kcsan_check_write(&test_dummy, sizeof(test_dummy));
> > +
> > +             test_flags ^= CHANGE_BITS; /* generate value-change */
> > +             __kcsan_check_write(&test_flags, sizeof(test_flags));
> >       }
> >       cycles = get_cycles() - cycles;
> >
> >
>
>
>
> thanks,
> --
> John Hubbard
> NVIDIA

