* [RFC patch] spin_lock: add cross cache lines checking
@ 2012-03-05  3:20 Alex Shi
  2012-03-05  3:24 ` Alex Shi
  0 siblings, 1 reply; 17+ messages in thread
From: Alex Shi @ 2012-03-05  3:20 UTC (permalink / raw)
  To: tglx, mingo, hpa, arnd, akpm; +Cc: linux-kernel, x86, andi.kleen

Modern x86 CPUs do not lock the whole memory bus when executing 'lock'-
prefixed instructions unless the instruction's destination operand crosses
two cache lines. When it does, system performance suffers badly.

In practice, as long as the lock is not inside a 'packed' structure, gcc
places it safely on x86. Still, adding this check under
CONFIG_DEBUG_LOCK_ALLOC seems harmless.

Btw, the SPIN_BUG_ON macro is changed slightly to address a style complaint.

Inspired-by: Andi Kleen <andi.kleen@intel.com>
Signed-off-by: Alex Shi <alex.shi@intel.com>
---
diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
index 48f99f1..79d146e 100644
--- a/arch/x86/include/asm/cache.h
+++ b/arch/x86/include/asm/cache.h
@@ -7,6 +7,8 @@
 #define L1_CACHE_SHIFT	(CONFIG_X86_L1_CACHE_SHIFT)
 #define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
 
+#define L1_CACHE_SIZE_MASK	(~(L1_CACHE_BYTES - 1)UL)
+
 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
 
 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
index 1bfcfe5..244e528 100644
--- a/include/asm-generic/cache.h
+++ b/include/asm-generic/cache.h
@@ -9,4 +9,6 @@
 #define L1_CACHE_SHIFT		5
 #define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
 
+#define L1_CACHE_SIZE_MASK     (~(L1_CACHE_BYTES - 1)UL)
+
 #endif /* __ASM_GENERIC_CACHE_H */
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index 5f3eacd..554dcda 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -13,6 +13,12 @@
 #include <linux/delay.h>
 #include <linux/module.h>
 
+#define SPIN_BUG_ON(cond, lock, msg) {if (unlikely(cond)) spin_bug(lock, msg); }
+
+#define is_cross_lines(p)						\
+	(((unsigned long)(p) & L1_CACHE_SIZE_MASK) !=			\
+	(((unsigned long)(p) + sizeof(*p) - 1) & L1_CACHE_SIZE_MASK))	\
+
 void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
 			  struct lock_class_key *key)
 {
@@ -22,6 +28,8 @@ void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
 	 */
 	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
 	lockdep_init_map(&lock->dep_map, name, key, 0);
+	SPIN_BUG_ON(is_cross_lines(lock->raw_lock), lock,
+			"!!! the lock cross cache lines !!!");
 #endif
 	lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 	lock->magic = SPINLOCK_MAGIC;
@@ -40,6 +48,8 @@ void __rwlock_init(rwlock_t *lock, const char *name,
 	 */
 	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
 	lockdep_init_map(&lock->dep_map, name, key, 0);
+	SPIN_BUG_ON(is_cross_lines(lock->raw_lock), lock,
+			"!!! the lock cross cache lines !!!");
 #endif
 	lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
 	lock->magic = RWLOCK_MAGIC;
@@ -75,8 +85,6 @@ static void spin_bug(raw_spinlock_t *lock, const char *msg)
 	spin_dump(lock, msg);
 }
 
-#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
-
 static inline void
 debug_spin_lock_before(raw_spinlock_t *lock)
 {
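
For illustration only (not part of the patch): a minimal userspace sketch of
the same cross-line test. It assumes a 64-byte cache line, and the 'struct
bad' layout below is invented purely to force a lock-sized field across a
line boundary (gcc may warn about taking the address of a packed member,
which is exactly the situation being illustrated).

===
#include <stdio.h>

#define L1_CACHE_BYTES		64
#define L1_CACHE_SIZE_MASK	(~((unsigned long)L1_CACHE_BYTES - 1))

/* same idea as the patch: do the first and last byte of the object
 * starting at p fall into different cache lines? */
#define is_cross_lines(p)						\
	(((unsigned long)(p) & L1_CACHE_SIZE_MASK) !=			\
	 (((unsigned long)(p) + sizeof(*(p)) - 1) & L1_CACHE_SIZE_MASK))

struct bad {
	char		pad[62];	/* pushes 'lock' to offset 62 */
	unsigned int	lock;		/* bytes 62..65 straddle a line */
} __attribute__((packed, aligned(64)));

int main(void)
{
	static struct bad b;

	printf("lock at offset %lu, crosses line: %d\n",
	       (unsigned long)((char *)&b.lock - (char *)&b),
	       is_cross_lines(&b.lock));
	return 0;
}
===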




* Re: [RFC patch] spin_lock: add cross cache lines checking
  2012-03-05  3:20 [RFC patch] spin_lock: add cross cache lines checking Alex Shi
@ 2012-03-05  3:24 ` Alex Shi
  2012-03-05  5:43   ` [RFC patch] spindep: " Alex Shi
  0 siblings, 1 reply; 17+ messages in thread
From: Alex Shi @ 2012-03-05  3:24 UTC (permalink / raw)
  To: tglx; +Cc: mingo, hpa, arnd, akpm, linux-kernel, x86, andi.kleen

Oops.
Sorry, the patch was not tested properly! I will update it later.

On Mon, 2012-03-05 at 11:20 +0800, Alex Shi wrote:
> Modern x86 CPU won't hold whole memory bus when executing 'lock'
> prefixed instructions unless the instruction destination is crossing 2
> cache lines. If so, it is disaster of system performance.
> 
> Actually if the lock is not in the 'packed' structure, gcc places it
> safely under x86 arch. But seems add this checking in
> CONFIG_DEBUG_LOCK_ALLOC is harmless.
> 
> btw, change SPIN_BUG_ON macro a little for style complain.
> 
> Inspired-by: Andi Kleen <andi.kleen@intel.com>
> Signed-off-by: Alex Shi <alex.shi@intel.com>
> ---
> diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
> index 48f99f1..79d146e 100644
> --- a/arch/x86/include/asm/cache.h
> +++ b/arch/x86/include/asm/cache.h
> @@ -7,6 +7,8 @@
>  #define L1_CACHE_SHIFT	(CONFIG_X86_L1_CACHE_SHIFT)
>  #define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
>  
> +#define L1_CACHE_SIZE_MASK	(~(L1_CACHE_BYTES - 1)UL)
> +
>  #define __read_mostly __attribute__((__section__(".data..read_mostly")))
>  
>  #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
> diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
> index 1bfcfe5..244e528 100644
> --- a/include/asm-generic/cache.h
> +++ b/include/asm-generic/cache.h
> @@ -9,4 +9,6 @@
>  #define L1_CACHE_SHIFT		5
>  #define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
>  
> +#define L1_CACHE_SIZE_MASK     (~(L1_CACHE_BYTES - 1)UL)
> +
>  #endif /* __ASM_GENERIC_CACHE_H */
> diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
> index 5f3eacd..554dcda 100644
> --- a/lib/spinlock_debug.c
> +++ b/lib/spinlock_debug.c
> @@ -13,6 +13,12 @@
>  #include <linux/delay.h>
>  #include <linux/module.h>
>  
> +#define SPIN_BUG_ON(cond, lock, msg) {if (unlikely(cond)) spin_bug(lock, msg); }
> +
> +#define is_cross_lines(p)						\
> +	(((unsigned long)(p) & L1_CACHE_SIZE_MASK) !=			\
> +	(((unsigned long)(p) + sizeof(*p) - 1) & L1_CACHE_SIZE_MASK))	\
> +
>  void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
>  			  struct lock_class_key *key)
>  {
> @@ -22,6 +28,8 @@ void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
>  	 */
>  	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
>  	lockdep_init_map(&lock->dep_map, name, key, 0);
> +	SPIN_BUG_ON(is_cross_lines(lock->raw_lock), lock,
> +			"!!! the lock cross cache lines !!!");
>  #endif
>  	lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
>  	lock->magic = SPINLOCK_MAGIC;
> @@ -40,6 +48,8 @@ void __rwlock_init(rwlock_t *lock, const char *name,
>  	 */
>  	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
>  	lockdep_init_map(&lock->dep_map, name, key, 0);
> +	SPIN_BUG_ON(is_cross_lines(lock->raw_lock), lock,
> +			"!!! the lock cross cache lines !!!");
>  #endif
>  	lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
>  	lock->magic = RWLOCK_MAGIC;
> @@ -75,8 +85,6 @@ static void spin_bug(raw_spinlock_t *lock, const char *msg)
>  	spin_dump(lock, msg);
>  }
>  
> -#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
> -
>  static inline void
>  debug_spin_lock_before(raw_spinlock_t *lock)
>  {
> 




* Re: [RFC patch] spindep: add cross cache lines checking
  2012-03-05  3:24 ` Alex Shi
@ 2012-03-05  5:43   ` Alex Shi
  2012-03-05  5:48     ` Alex Shi
  2012-03-05  9:41     ` Arnd Bergmann
  0 siblings, 2 replies; 17+ messages in thread
From: Alex Shi @ 2012-03-05  5:43 UTC (permalink / raw)
  To: tglx, mingo; +Cc: hpa, arnd, akpm, linux-kernel, x86, andi.kleen


On Mon, 2012-03-05 at 11:24 +0800, Alex Shi wrote:
> Oops.
> Sorry, the patch is not tested well! will update it later. 

corrected version:
==========
From 28745c1970a61a1420d388660cd9dcc619cd38ba Mon Sep 17 00:00:00 2001
From: Alex Shi <alex.shi@intel.com>
Date: Mon, 5 Mar 2012 13:03:35 +0800
Subject: [PATCH] lockdep: add cross cache lines checking

Modern x86 CPUs do not lock the whole memory bus when executing 'lock'-
prefixed instructions unless the instruction's destination operand crosses
two cache lines. When it does, system performance suffers badly.

In practice, as long as the lock is not inside a 'packed' structure, gcc
places it safely on x86. Still, adding this check under
CONFIG_DEBUG_LOCK_ALLOC seems harmless.

Inspired-by: Andi Kleen <andi.kleen@intel.com>
Signed-off-by: Alex Shi <alex.shi@intel.com>
---
 arch/x86/include/asm/cache.h |    2 +
 include/asm-generic/cache.h  |    2 +
 lib/spinlock_debug.c         |   76 ++++++++++++++++++++++-------------------
 3 files changed, 45 insertions(+), 35 deletions(-)

diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
index 48f99f1..63c2316 100644
--- a/arch/x86/include/asm/cache.h
+++ b/arch/x86/include/asm/cache.h
@@ -7,6 +7,8 @@
 #define L1_CACHE_SHIFT	(CONFIG_X86_L1_CACHE_SHIFT)
 #define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
 
+#define L1_CACHE_SIZE_MASK	(~(L1_CACHE_BYTES - 1))
+
 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
 
 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
index 1bfcfe5..6f8eb29 100644
--- a/include/asm-generic/cache.h
+++ b/include/asm-generic/cache.h
@@ -9,4 +9,6 @@
 #define L1_CACHE_SHIFT		5
 #define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
 
+#define L1_CACHE_SIZE_MASK     (~(L1_CACHE_BYTES - 1))
+
 #endif /* __ASM_GENERIC_CACHE_H */
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index 5f3eacd..938a145 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -13,41 +13,9 @@
 #include <linux/delay.h>
 #include <linux/module.h>
 
-void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
-			  struct lock_class_key *key)
-{
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	/*
-	 * Make sure we are not reinitializing a held lock:
-	 */
-	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
-	lockdep_init_map(&lock->dep_map, name, key, 0);
-#endif
-	lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
-	lock->magic = SPINLOCK_MAGIC;
-	lock->owner = SPINLOCK_OWNER_INIT;
-	lock->owner_cpu = -1;
-}
-
-EXPORT_SYMBOL(__raw_spin_lock_init);
-
-void __rwlock_init(rwlock_t *lock, const char *name,
-		   struct lock_class_key *key)
-{
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	/*
-	 * Make sure we are not reinitializing a held lock:
-	 */
-	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
-	lockdep_init_map(&lock->dep_map, name, key, 0);
-#endif
-	lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
-	lock->magic = RWLOCK_MAGIC;
-	lock->owner = SPINLOCK_OWNER_INIT;
-	lock->owner_cpu = -1;
-}
-
-EXPORT_SYMBOL(__rwlock_init);
+#define is_cross_lines(p)						\
+	(((unsigned long)(p) & L1_CACHE_SIZE_MASK) !=			\
+	(((unsigned long)(p) + sizeof(*p) - 1) & L1_CACHE_SIZE_MASK))	\
 
 static void spin_dump(raw_spinlock_t *lock, const char *msg)
 {
@@ -296,3 +264,41 @@ void do_raw_write_unlock(rwlock_t *lock)
 	debug_write_unlock(lock);
 	arch_write_unlock(&lock->raw_lock);
 }
+
+void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
+			  struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	/*
+	 * Make sure we are not reinitializing a held lock:
+	 */
+	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
+	lockdep_init_map(&lock->dep_map, name, key, 0);
+	SPIN_BUG_ON(is_cross_lines(&lock->raw_lock), lock,
+			"!!! the lock cross cache lines !!!");
+#endif
+	lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
+	lock->magic = SPINLOCK_MAGIC;
+	lock->owner = SPINLOCK_OWNER_INIT;
+	lock->owner_cpu = -1;
+}
+EXPORT_SYMBOL(__raw_spin_lock_init);
+
+void __rwlock_init(rwlock_t *lock, const char *name,
+		   struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	/*
+	 * Make sure we are not reinitializing a held lock:
+	 */
+	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
+	lockdep_init_map(&lock->dep_map, name, key, 0);
+	RWLOCK_BUG_ON(is_cross_lines(&lock->raw_lock), lock,
+			"!!! the lock cross cache lines !!!");
+#endif
+	lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
+	lock->magic = RWLOCK_MAGIC;
+	lock->owner = SPINLOCK_OWNER_INIT;
+	lock->owner_cpu = -1;
+}
+EXPORT_SYMBOL(__rwlock_init);
-- 
1.6.3.3





* Re: [RFC patch] spindep: add cross cache lines checking
  2012-03-05  5:43   ` [RFC patch] spindep: " Alex Shi
@ 2012-03-05  5:48     ` Alex Shi
  2012-03-05  9:41     ` Arnd Bergmann
  1 sibling, 0 replies; 17+ messages in thread
From: Alex Shi @ 2012-03-05  5:48 UTC (permalink / raw)
  To: tglx; +Cc: mingo, hpa, arnd, akpm, linux-kernel, x86, andi.kleen

On Mon, 2012-03-05 at 13:43 +0800, Alex Shi wrote:
> On Mon, 2012-03-05 at 11:24 +0800, Alex Shi wrote:
> > Oops.
> > Sorry, the patch is not tested well! will update it later. 

Resent with Thomas's e-mail address corrected. Sorry. 
> 
> corrected version:
> ==========
> >From 28745c1970a61a1420d388660cd9dcc619cd38ba Mon Sep 17 00:00:00 2001
> From: Alex Shi <alex.shi@intel.com>
> Date: Mon, 5 Mar 2012 13:03:35 +0800
> Subject: [PATCH] lockdep: add cross cache lines checking
> 
> Modern x86 CPU won't hold whole memory bus when executing 'lock'
> prefixed instructions unless the instruction destination is crossing 2
> cache lines. If so, it is disaster of system performance.
> 
> Actually if the lock is not in the 'packed' structure, gcc places it
> safely under x86 arch. But seems add this checking in
> CONFIG_DEBUG_LOCK_ALLOC is harmless.
> 
> Inspired-by: Andi Kleen <andi.kleen@intel.com>
> Signed-off-by: Alex Shi <alex.shi@intel.com>
> ---
>  arch/x86/include/asm/cache.h |    2 +
>  include/asm-generic/cache.h  |    2 +
>  lib/spinlock_debug.c         |   76 ++++++++++++++++++++++-------------------
>  3 files changed, 45 insertions(+), 35 deletions(-)
> 
> diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
> index 48f99f1..63c2316 100644
> --- a/arch/x86/include/asm/cache.h
> +++ b/arch/x86/include/asm/cache.h
> @@ -7,6 +7,8 @@
>  #define L1_CACHE_SHIFT	(CONFIG_X86_L1_CACHE_SHIFT)
>  #define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
>  
> +#define L1_CACHE_SIZE_MASK	(~(L1_CACHE_BYTES - 1))
> +
>  #define __read_mostly __attribute__((__section__(".data..read_mostly")))
>  
>  #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
> diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
> index 1bfcfe5..6f8eb29 100644
> --- a/include/asm-generic/cache.h
> +++ b/include/asm-generic/cache.h
> @@ -9,4 +9,6 @@
>  #define L1_CACHE_SHIFT		5
>  #define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
>  
> +#define L1_CACHE_SIZE_MASK     (~(L1_CACHE_BYTES - 1))
> +
>  #endif /* __ASM_GENERIC_CACHE_H */
> diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
> index 5f3eacd..938a145 100644
> --- a/lib/spinlock_debug.c
> +++ b/lib/spinlock_debug.c
> @@ -13,41 +13,9 @@
>  #include <linux/delay.h>
>  #include <linux/module.h>
>  
> -void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
> -			  struct lock_class_key *key)
> -{
> -#ifdef CONFIG_DEBUG_LOCK_ALLOC
> -	/*
> -	 * Make sure we are not reinitializing a held lock:
> -	 */
> -	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
> -	lockdep_init_map(&lock->dep_map, name, key, 0);
> -#endif
> -	lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
> -	lock->magic = SPINLOCK_MAGIC;
> -	lock->owner = SPINLOCK_OWNER_INIT;
> -	lock->owner_cpu = -1;
> -}
> -
> -EXPORT_SYMBOL(__raw_spin_lock_init);
> -
> -void __rwlock_init(rwlock_t *lock, const char *name,
> -		   struct lock_class_key *key)
> -{
> -#ifdef CONFIG_DEBUG_LOCK_ALLOC
> -	/*
> -	 * Make sure we are not reinitializing a held lock:
> -	 */
> -	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
> -	lockdep_init_map(&lock->dep_map, name, key, 0);
> -#endif
> -	lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
> -	lock->magic = RWLOCK_MAGIC;
> -	lock->owner = SPINLOCK_OWNER_INIT;
> -	lock->owner_cpu = -1;
> -}
> -
> -EXPORT_SYMBOL(__rwlock_init);
> +#define is_cross_lines(p)						\
> +	(((unsigned long)(p) & L1_CACHE_SIZE_MASK) !=			\
> +	(((unsigned long)(p) + sizeof(*p) - 1) & L1_CACHE_SIZE_MASK))	\
>  
>  static void spin_dump(raw_spinlock_t *lock, const char *msg)
>  {
> @@ -296,3 +264,41 @@ void do_raw_write_unlock(rwlock_t *lock)
>  	debug_write_unlock(lock);
>  	arch_write_unlock(&lock->raw_lock);
>  }
> +
> +void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
> +			  struct lock_class_key *key)
> +{
> +#ifdef CONFIG_DEBUG_LOCK_ALLOC
> +	/*
> +	 * Make sure we are not reinitializing a held lock:
> +	 */
> +	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
> +	lockdep_init_map(&lock->dep_map, name, key, 0);
> +	SPIN_BUG_ON(is_cross_lines(&lock->raw_lock), lock,
> +			"!!! the lock cross cache lines !!!");
> +#endif
> +	lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
> +	lock->magic = SPINLOCK_MAGIC;
> +	lock->owner = SPINLOCK_OWNER_INIT;
> +	lock->owner_cpu = -1;
> +}
> +EXPORT_SYMBOL(__raw_spin_lock_init);
> +
> +void __rwlock_init(rwlock_t *lock, const char *name,
> +		   struct lock_class_key *key)
> +{
> +#ifdef CONFIG_DEBUG_LOCK_ALLOC
> +	/*
> +	 * Make sure we are not reinitializing a held lock:
> +	 */
> +	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
> +	lockdep_init_map(&lock->dep_map, name, key, 0);
> +	RWLOCK_BUG_ON(is_cross_lines(&lock->raw_lock), lock,
> +			"!!! the lock cross cache lines !!!");
> +#endif
> +	lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
> +	lock->magic = RWLOCK_MAGIC;
> +	lock->owner = SPINLOCK_OWNER_INIT;
> +	lock->owner_cpu = -1;
> +}
> +EXPORT_SYMBOL(__rwlock_init);




* Re: [RFC patch] spindep: add cross cache lines checking
  2012-03-05  5:43   ` [RFC patch] spindep: " Alex Shi
  2012-03-05  5:48     ` Alex Shi
@ 2012-03-05  9:41     ` Arnd Bergmann
  2012-03-05 10:43       ` Ingo Molnar
  1 sibling, 1 reply; 17+ messages in thread
From: Arnd Bergmann @ 2012-03-05  9:41 UTC (permalink / raw)
  To: Alex Shi; +Cc: tglx, mingo, hpa, akpm, linux-kernel, x86, andi.kleen

On Monday 05 March 2012, Alex Shi wrote:
> Subject: [PATCH] lockdep: add cross cache lines checking
> 
> Modern x86 CPU won't hold whole memory bus when executing 'lock'
> prefixed instructions unless the instruction destination is crossing 2
> cache lines. If so, it is disaster of system performance.
> 
> Actually if the lock is not in the 'packed' structure, gcc places it
> safely under x86 arch. But seems add this checking in
> CONFIG_DEBUG_LOCK_ALLOC is harmless.

Have you tried making this a compile-time check using __alignof__?
I would say that any spinlock in a packed data structure is
basically a bug, even more so on most other architectures besides
x86.

	Arnd
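
For reference, a standalone sketch (not from the thread) of what such a
compile-time check could catch. The raw_spinlock_t stand-in and the two
holder structs are made up for illustration; the point is that gcc reports
an alignment of 1 for a packed struct and its members, so a static assertion
against the lock type's natural alignment trips at build time.

===
#include <stdio.h>

typedef struct { unsigned int slock; } raw_spinlock_t;	/* stand-in type */

struct packed_holder {
	char		c;
	raw_spinlock_t	lock;
} __attribute__((packed));

struct normal_holder {
	char		c;
	raw_spinlock_t	lock;
};

/* the kind of build-time check being suggested */
_Static_assert(__alignof__(struct normal_holder) >= __alignof__(raw_spinlock_t),
	       "non-packed keeps the lock's natural alignment");
_Static_assert(__alignof__(struct packed_holder) < __alignof__(raw_spinlock_t),
	       "packed only guarantees byte alignment");

int main(void)
{
	struct packed_holder p;
	struct normal_holder n;

	printf("packed lock alignof: %zu, normal lock alignof: %zu\n",
	       __alignof__(p.lock), __alignof__(n.lock));
	return 0;
}
===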


* Re: [RFC patch] spindep: add cross cache lines checking
  2012-03-05  9:41     ` Arnd Bergmann
@ 2012-03-05 10:43       ` Ingo Molnar
  2012-03-06  6:13         ` Alex Shi
  0 siblings, 1 reply; 17+ messages in thread
From: Ingo Molnar @ 2012-03-05 10:43 UTC (permalink / raw)
  To: Arnd Bergmann
  Cc: Alex Shi, tglx, mingo, hpa, akpm, linux-kernel, x86, andi.kleen


* Arnd Bergmann <arnd@arndb.de> wrote:

> On Monday 05 March 2012, Alex Shi wrote:
> > Subject: [PATCH] lockdep: add cross cache lines checking
> > 
> > Modern x86 CPU won't hold whole memory bus when executing 
> > 'lock' prefixed instructions unless the instruction 
> > destination is crossing 2 cache lines. If so, it is disaster 
> > of system performance.
> > 
> > Actually if the lock is not in the 'packed' structure, gcc 
> > places it safely under x86 arch. But seems add this checking 
> > in CONFIG_DEBUG_LOCK_ALLOC is harmless.
> 
> Have you tried making this a compile-time check using 
> __alignof__? I would say that any spinlock in a packed data 
> structure is basically a bug, even more so on most other 
> architectures besides x86.

agreed.

Thanks,

	Ingo


* Re: [RFC patch] spindep: add cross cache lines checking
  2012-03-05 10:43       ` Ingo Molnar
@ 2012-03-06  6:13         ` Alex Shi
  2012-03-06  6:18           ` Alex Shi
  2012-03-06  9:32           ` Arnd Bergmann
  0 siblings, 2 replies; 17+ messages in thread
From: Alex Shi @ 2012-03-06  6:13 UTC (permalink / raw)
  To: Ingo Molnar
  Cc: Arnd Bergmann, tglx, mingo, hpa, akpm, linux-kernel, x86,
	andi.kleen, gcc-help

On Mon, 2012-03-05 at 11:43 +0100, Ingo Molnar wrote:
> * Arnd Bergmann <arnd@arndb.de> wrote:
> 
> > On Monday 05 March 2012, Alex Shi wrote:
> > > Subject: [PATCH] lockdep: add cross cache lines checking
> > > 
> > > Modern x86 CPU won't hold whole memory bus when executing 
> > > 'lock' prefixed instructions unless the instruction 
> > > destination is crossing 2 cache lines. If so, it is disaster 
> > > of system performance.
> > > 
> > > Actually if the lock is not in the 'packed' structure, gcc 
> > > places it safely under x86 arch. But seems add this checking 
> > > in CONFIG_DEBUG_LOCK_ALLOC is harmless.
> > 
> > Have you tried making this a compile-time check using 
> > __alignof__? I would say that any spinlock in a packed data 
> > structure is basically a bug, even more so on most other 
> > architectures besides x86.

I have one concern and one question here:
Concern: the lock may sit in a well-designed 'packed' struct that is
safe from the cross-line issue, yet __alignof__ will still return 1:

struct abc{
	raw_spinlock_t lock1;
	char 		a;
	char		b;
}__attribute__((packed));

Since the lock is the first member of the struct, it is usually placed correctly.

Question: I am no gcc expert. I tried some gcc options ("--param
l1-cache-line-size=1 -mno-align-double") and could not produce a
cache-line-crossing variable outside a 'packed' structure, but I still
cannot find a guarantee of why gcc avoids cross-line variables when the
structure is not 'packed'.
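
One possible reason, offered only as an assumption rather than something
confirmed in the thread: the ABI gives an int-sized lock word its natural
4-byte alignment unless packing overrides it, and a naturally aligned
object whose size divides the cache-line size can never straddle a line.
A brute-force check of that arithmetic, assuming a 64-byte line:

===
#include <assert.h>
#include <stdio.h>

#define LINE	64UL
#define SIZE	4UL	/* size and natural alignment of the lock word */

int main(void)
{
	unsigned long off;

	/* every naturally aligned offset keeps the object inside one line */
	for (off = 0; off < LINE; off += SIZE)
		assert((off & ~(LINE - 1)) ==
		       ((off + SIZE - 1) & ~(LINE - 1)));

	printf("no naturally aligned %lu-byte object crosses a %lu-byte line\n",
	       SIZE, LINE);
	return 0;
}
===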

> agreed.
> 
> Thanks,
> 
> 	Ingo




* Re: [RFC patch] spindep: add cross cache lines checking
  2012-03-06  6:13         ` Alex Shi
@ 2012-03-06  6:18           ` Alex Shi
  2012-03-06  9:32           ` Arnd Bergmann
  1 sibling, 0 replies; 17+ messages in thread
From: Alex Shi @ 2012-03-06  6:18 UTC (permalink / raw)
  To: Ingo Molnar
  Cc: Arnd Bergmann, tglx, mingo, hpa, akpm, linux-kernel, x86,
	andi.kleen, gcc-help

Resent with tglx's address corrected. Sorry!
> > > 
> > > Have you tried making this a compile-time check using 
> > > __alignof__? I would say that any spinlock in a packed data 
> > > structure is basically a bug, even more so on most other 
> > > architectures besides x86.
> 
> I have one concern and one questions here:
> concern: maybe the lock is in a well designed 'packed' struct, and it is
> safe for cross lines issue. but __alignof__ will return 1;
> 
> struct abc{
> 	raw_spinlock_t lock1;
> 	char 		a;
> 	char		b;
> }__attribute__((packed));
> 
> Since the lock is the first object of struct, usually it is well placed.
> 
> question: I am a idiot on gcc, I tried some parameters of gcc " --param
> l1-cache-line-size=1  -mno-align-double" and can not make a cross lines
> variable without 'packed' structure, but I still don't find a grantee
> why gcc can avoid the cross line variable if it's not in 'packed'
> structure? 
> 
> > agreed.
> > 
> > Thanks,
> > 
> > 	Ingo
> 




* Re: [RFC patch] spindep: add cross cache lines checking
  2012-03-06  6:13         ` Alex Shi
  2012-03-06  6:18           ` Alex Shi
@ 2012-03-06  9:32           ` Arnd Bergmann
  2012-03-07  8:23             ` Alex Shi
  1 sibling, 1 reply; 17+ messages in thread
From: Arnd Bergmann @ 2012-03-06  9:32 UTC (permalink / raw)
  To: Alex Shi
  Cc: Ingo Molnar, tglx, mingo, hpa, akpm, linux-kernel, x86,
	andi.kleen, gcc-help

On Tuesday 06 March 2012, Alex Shi wrote:
> I have one concern and one questions here:
> concern: maybe the lock is in a well designed 'packed' struct, and it is
> safe for cross lines issue. but __alignof__ will return 1;
> 
> struct abc{
>         raw_spinlock_t lock1;
>         char            a;
>         char            b;
> }__attribute__((packed));
> 
> Since the lock is the first object of struct, usually it is well placed.

No, it's actually not. The structure has an external alignment of 1, so
if you have an array of these or put it into another struct like

struct xyz {
	char x;
	struct abc a;
};

then it will be misaligned. There is no such thing as a well-designed 'packed'
struct. The only reason to use packing is to describe structures we have no
control over, such as hardware layouts and on-wire formats that have unusual
alignments, and those will never have a spinlock in them.

	Arnd


* Re: [RFC patch] spindep: add cross cache lines checking
  2012-03-06  9:32           ` Arnd Bergmann
@ 2012-03-07  8:23             ` Alex Shi
  2012-03-07 11:54               ` Arnd Bergmann
  0 siblings, 1 reply; 17+ messages in thread
From: Alex Shi @ 2012-03-07  8:23 UTC (permalink / raw)
  To: Arnd Bergmann, gcc
  Cc: Ingo Molnar, tglx, mingo, hpa, akpm, linux-kernel, x86,
	andi.kleen, gcc-help

On Tue, 2012-03-06 at 09:32 +0000, Arnd Bergmann wrote:
> On Tuesday 06 March 2012, Alex Shi wrote:
> > I have one concern and one questions here:
> > concern: maybe the lock is in a well designed 'packed' struct, and it is
> > safe for cross lines issue. but __alignof__ will return 1;
> > 
> > struct abc{
> >         raw_spinlock_t lock1;
> >         char            a;
> >         char            b;
> > }__attribute__((packed));
> > 
> > Since the lock is the first object of struct, usually it is well placed.
> 
> No, it's actually not. The structure has an external alignment of 1, so
> if you have an array of these or put it into another struct like
> 
> struct xyz {
> 	char x;
> 	struct abc a;
> };
> 
> then it will be misaligned. Thre is no such thing as a well designed 'packed'
> struct. The only reason to use packing is to describe structures we have no
> control over such as hardware layouts and on-wire formats that have unusal
> alignments, and those will never have a spinlock on them.

Understood, thanks. So is the following check what you wanted?
===
diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
index bc2994e..64828a3 100644
--- a/include/linux/rwlock.h
+++ b/include/linux/rwlock.h
@@ -21,10 +21,12 @@
 do {								\
 	static struct lock_class_key __key;			\
 								\
+	BUILD_BUG_ON(__alignof__(lock) == 1);			\
 	__rwlock_init((lock), #lock, &__key);			\
 } while (0)
 #else
 # define rwlock_init(lock)					\
+	BUILD_BUG_ON(__alignof__(lock) == 1);			\
 	do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
 #endif
 
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 7df6c17..df8a992 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -96,11 +96,13 @@
 do {								\
 	static struct lock_class_key __key;			\
 								\
+	BUILD_BUG_ON(__alignof__(lock) == 1);			\
 	__raw_spin_lock_init((lock), #lock, &__key);		\
 } while (0)
 
 #else
 # define raw_spin_lock_init(lock)				\
+	BUILD_BUG_ON(__alignof__(lock) == 1);			\
 	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
 #endif
 
===

Btw, is this an __alignof__ bug in the default gcc on my FC15, Ubuntu 11.10,
etc.?

struct sub {
        int  raw_lock;
        char a;
};
struct foo {
        struct sub z;
        int slk;
        char y;
}__attribute__((packed));

struct foo f1;

__alignof__(f1.z.raw_lock) is 4, but its address can actually end up aligned
on only one byte.
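
A runnable version of this example (behavior as reported above; worth
verifying with a local gcc), printing what gcc reports for the member
against what the packed outer struct actually guarantees:

===
#include <stdio.h>
#include <stddef.h>

struct sub {
	int	raw_lock;
	char	a;
};

struct foo {
	struct sub	z;
	int		slk;
	char		y;
} __attribute__((packed));

struct foo f1;

int main(void)
{
	printf("__alignof__(f1.z.raw_lock) = %zu\n", __alignof__(f1.z.raw_lock));
	printf("__alignof__(struct foo)    = %zu\n", __alignof__(struct foo));
	printf("offsetof(struct foo, z)    = %zu\n", offsetof(struct foo, z));
	return 0;
}
===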

 
> 
> 	Arnd




* Re: [RFC patch] spindep: add cross cache lines checking
  2012-03-07  8:23             ` Alex Shi
@ 2012-03-07 11:54               ` Arnd Bergmann
  2012-03-07 13:13                 ` Alex Shi
  2012-03-08  2:30                 ` Alex Shi
  0 siblings, 2 replies; 17+ messages in thread
From: Arnd Bergmann @ 2012-03-07 11:54 UTC (permalink / raw)
  To: Alex Shi
  Cc: gcc, Ingo Molnar, tglx, mingo, hpa, akpm, linux-kernel, x86,
	andi.kleen, gcc-help

On Wednesday 07 March 2012, Alex Shi wrote:

> Understand. thx. So is the following checking that your wanted?
> ===
> diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
> index bc2994e..64828a3 100644
> --- a/include/linux/rwlock.h
> +++ b/include/linux/rwlock.h
> @@ -21,10 +21,12 @@
>  do {								\
>  	static struct lock_class_key __key;			\
>  								\
> +	BUILD_BUG_ON(__alignof__(lock) == 1);			\
>  	__rwlock_init((lock), #lock, &__key);			\
>  } while (0)
>  #else
>  # define rwlock_init(lock)					\
> +	BUILD_BUG_ON(__alignof__(lock) == 1);			\
>  	do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
>  #endif

I think the check should be (__alignof__(lock) < __alignof__(rwlock_t)),
otherwise it will still pass when you have a structure with
attribute((packed,aligned(2))).

> 1, it is alignof bug for default gcc on my fc15 and Ubuntu 11.10 etc?
> 
> struct sub {
>         int  raw_lock;
>         char a;
> };
> struct foo {
>         struct sub z;
>         int slk;
>         char y;
> }__attribute__((packed));
> 
> struct foo f1;
> 
> __alignof__(f1.z.raw_lock) is 4, but its address actually can align on
> one byte. 

That looks like correct behavior, because the alignment of raw_lock inside of
struct sub is still 4. But it does mean that there can be cases where the
compile-time check is not sufficient, so we might want the run-time check
as well, at least under some config option.

	Arnd
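
For illustration, a sketch (not posted in the thread) of how the suggested
compile-time comparison and the earlier run-time check might fit together.
It applies __alignof__ to *(lock) so the operand is the lock object rather
than the pointer passed to the macro, which is an assumption about the
intended form:

===
/* build-time part: reject locks whose declared alignment is weaker than
 * that of a plain raw_spinlock_t (e.g. a lock member of a packed struct) */
#define raw_spin_lock_init(lock)					\
do {									\
	static struct lock_class_key __key;				\
									\
	BUILD_BUG_ON(__alignof__(*(lock)) < __alignof__(raw_spinlock_t)); \
	__raw_spin_lock_init((lock), #lock, &__key);			\
} while (0)

/* the run-time part would stay as in the earlier patch: a
 * SPIN_BUG_ON(is_cross_lines(&lock->raw_lock), ...) inside
 * __raw_spin_lock_init(), compiled only under a debug config option */
===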


* Re: [RFC patch] spindep: add cross cache lines checking
  2012-03-07 11:54               ` Arnd Bergmann
@ 2012-03-07 13:13                 ` Alex Shi
  2012-03-07 13:39                   ` Ingo Molnar
  2012-03-08  2:30                 ` Alex Shi
  1 sibling, 1 reply; 17+ messages in thread
From: Alex Shi @ 2012-03-07 13:13 UTC (permalink / raw)
  To: Arnd Bergmann
  Cc: gcc, Ingo Molnar, tglx, mingo, hpa, akpm, linux-kernel, x86,
	andi.kleen, gcc-help


> I think the check should be (__alignof__(lock) < __alignof__(rwlock_t)),
> otherwise it will still pass when you have structure with attribute((packed,aligned(2)))


reasonable!

> 
>> 1, it is alignof bug for default gcc on my fc15 and Ubuntu 11.10 etc?
>>
>> struct sub {
>>         int  raw_lock;
>>         char a;
>> };
>> struct foo {
>>         struct sub z;
>>         int slk;
>>         char y;
>> }__attribute__((packed));
>>
>> struct foo f1;
>>
>> __alignof__(f1.z.raw_lock) is 4, but its address actually can align on
>> one byte. 
> 
> That looks like correct behavior, because the alignment of raw_lock inside of
> struct sub is still 4. But it does mean that there can be cases where the
> compile-time check is not sufficient, so we might want the run-time check
> as well, at least under some config option.


what's your opinion of this, Ingo?


* Re: [RFC patch] spindep: add cross cache lines checking
  2012-03-07 13:13                 ` Alex Shi
@ 2012-03-07 13:39                   ` Ingo Molnar
  2012-03-08  2:21                     ` Alex Shi
  0 siblings, 1 reply; 17+ messages in thread
From: Ingo Molnar @ 2012-03-07 13:39 UTC (permalink / raw)
  To: Alex Shi
  Cc: Arnd Bergmann, gcc, tglx, mingo, hpa, akpm, linux-kernel, x86,
	andi.kleen, gcc-help


* Alex Shi <alex.shi@intel.com> wrote:

> > I think the check should be (__alignof__(lock) < 
> > __alignof__(rwlock_t)), otherwise it will still pass when 
> > you have structure with attribute((packed,aligned(2)))
> 
> reasonable!
> 
> >> 1, it is alignof bug for default gcc on my fc15 and Ubuntu 11.10 etc?
> >>
> >> struct sub {
> >>         int  raw_lock;
> >>         char a;
> >> };
> >> struct foo {
> >>         struct sub z;
> >>         int slk;
> >>         char y;
> >> }__attribute__((packed));
> >>
> >> struct foo f1;
> >>
> >> __alignof__(f1.z.raw_lock) is 4, but its address actually can align on
> >> one byte. 
> > 
> > That looks like correct behavior, because the alignment of 
> > raw_lock inside of struct sub is still 4. But it does mean 
> > that there can be cases where the compile-time check is not 
> > sufficient, so we might want the run-time check as well, at 
> > least under some config option.
> 
> what's your opinion of this, Ingo?

Dunno. How many real bugs have you found via this patch?

Thanks,

	Ingo


* Re: [RFC patch] spindep: add cross cache lines checking
  2012-03-07 13:39                   ` Ingo Molnar
@ 2012-03-08  2:21                     ` Alex Shi
  2012-03-08  7:13                       ` Ingo Molnar
  0 siblings, 1 reply; 17+ messages in thread
From: Alex Shi @ 2012-03-08  2:21 UTC (permalink / raw)
  To: Ingo Molnar
  Cc: Arnd Bergmann, gcc, tglx, mingo, hpa, akpm, linux-kernel, x86,
	andi.kleen, gcc-help

On Wed, 2012-03-07 at 14:39 +0100, Ingo Molnar wrote:
> * Alex Shi <alex.shi@intel.com> wrote:
> 
> > > I think the check should be (__alignof__(lock) < 
> > > __alignof__(rwlock_t)), otherwise it will still pass when 
> > > you have structure with attribute((packed,aligned(2)))
> > 
> > reasonable!
> > 
> > >> 1, it is alignof bug for default gcc on my fc15 and Ubuntu 11.10 etc?
> > >>
> > >> struct sub {
> > >>         int  raw_lock;
> > >>         char a;
> > >> };
> > >> struct foo {
> > >>         struct sub z;
> > >>         int slk;
> > >>         char y;
> > >> }__attribute__((packed));
> > >>
> > >> struct foo f1;
> > >>
> > >> __alignof__(f1.z.raw_lock) is 4, but its address actually can align on
> > >> one byte. 
> > > 
> > > That looks like correct behavior, because the alignment of 
> > > raw_lock inside of struct sub is still 4. But it does mean 
> > > that there can be cases where the compile-time check is not 
> > > sufficient, so we might want the run-time check as well, at 
> > > least under some config option.
> > 
> > what's your opinion of this, Ingo?
> 
> Dunno. How many real bugs have you found via this patch?

None. I guess stupid code gets shot down in lkml review. But if the patch
goes in, it would help block stupid code during development.
> 
> Thanks,
> 
> 	Ingo




* Re: [RFC patch] spindep: add cross cache lines checking
  2012-03-07 11:54               ` Arnd Bergmann
  2012-03-07 13:13                 ` Alex Shi
@ 2012-03-08  2:30                 ` Alex Shi
  1 sibling, 0 replies; 17+ messages in thread
From: Alex Shi @ 2012-03-08  2:30 UTC (permalink / raw)
  To: Arnd Bergmann
  Cc: gcc, Ingo Molnar, tglx, mingo, hpa, akpm, linux-kernel, x86,
	andi.kleen, gcc-help

> > 1, it is alignof bug for default gcc on my fc15 and Ubuntu 11.10 etc?
> > 
> > struct sub {
> >         int  raw_lock;
> >         char a;
> > };
> > struct foo {
> >         struct sub z;
> >         int slk;
> >         char y;
> > }__attribute__((packed));
> > 
> > struct foo f1;
> > 
> > __alignof__(f1.z.raw_lock) is 4, but its address actually can align on
> > one byte. 
> 
> That looks like correct behavior, because the alignment of raw_lock inside of
> struct sub is still 4. But it does mean that there can be cases where the
> compile-time check is not sufficient, so we might want the run-time check
> as well, at least under some config option.

According to gcc's documentation, it seems __alignof__ should return 1 when
the object can end up aligned on a char boundary; that would make it useful
for the intended purpose here. Any comments from the gcc folks?
====
http://gcc.gnu.org/onlinedocs/gcc/Alignment.html
The keyword __alignof__ allows you to inquire about how an object is
aligned, or the minimum alignment usually required by a type. Its syntax
is just like sizeof.





* Re: [RFC patch] spindep: add cross cache lines checking
  2012-03-08  2:21                     ` Alex Shi
@ 2012-03-08  7:13                       ` Ingo Molnar
  2012-03-09  1:20                         ` Alex Shi
  0 siblings, 1 reply; 17+ messages in thread
From: Ingo Molnar @ 2012-03-08  7:13 UTC (permalink / raw)
  To: Alex Shi
  Cc: Arnd Bergmann, gcc, tglx, mingo, hpa, akpm, linux-kernel, x86,
	andi.kleen, gcc-help


* Alex Shi <alex.shi@intel.com> wrote:

> On Wed, 2012-03-07 at 14:39 +0100, Ingo Molnar wrote:
> > * Alex Shi <alex.shi@intel.com> wrote:
> > 
> > > > I think the check should be (__alignof__(lock) < 
> > > > __alignof__(rwlock_t)), otherwise it will still pass when 
> > > > you have structure with attribute((packed,aligned(2)))
> > > 
> > > reasonable!
> > > 
> > > >> 1, it is alignof bug for default gcc on my fc15 and Ubuntu 11.10 etc?
> > > >>
> > > >> struct sub {
> > > >>         int  raw_lock;
> > > >>         char a;
> > > >> };
> > > >> struct foo {
> > > >>         struct sub z;
> > > >>         int slk;
> > > >>         char y;
> > > >> }__attribute__((packed));
> > > >>
> > > >> struct foo f1;
> > > >>
> > > >> __alignof__(f1.z.raw_lock) is 4, but its address actually can align on
> > > >> one byte. 
> > > > 
> > > > That looks like correct behavior, because the alignment of 
> > > > raw_lock inside of struct sub is still 4. But it does mean 
> > > > that there can be cases where the compile-time check is not 
> > > > sufficient, so we might want the run-time check as well, at 
> > > > least under some config option.
> > > 
> > > what's your opinion of this, Ingo?
> > 
> > Dunno. How many real bugs have you found via this patch?
> 
> None. Guess stupid code was shot in lkml reviewing. But if the 
> patch in, it is helpful to block stupid code in developing.

The question is, if in the last 10 years not a single such case 
made it through to today's 15 million lines of kernel code, why 
should we add the check now?

If it was a simple build time check then maybe, but judging by 
the discussion it does not seem so simple, does it?

Thanks,

	Ingo


* Re: [RFC patch] spindep: add cross cache lines checking
  2012-03-08  7:13                       ` Ingo Molnar
@ 2012-03-09  1:20                         ` Alex Shi
  0 siblings, 0 replies; 17+ messages in thread
From: Alex Shi @ 2012-03-09  1:20 UTC (permalink / raw)
  To: Ingo Molnar
  Cc: Arnd Bergmann, gcc, tglx, mingo, hpa, akpm, linux-kernel, x86,
	andi.kleen, gcc-help

On Thu, 2012-03-08 at 08:13 +0100, Ingo Molnar wrote:
> * Alex Shi <alex.shi@intel.com> wrote:
> 
> > On Wed, 2012-03-07 at 14:39 +0100, Ingo Molnar wrote:
> > > * Alex Shi <alex.shi@intel.com> wrote:
> > > 
> > > > > I think the check should be (__alignof__(lock) < 
> > > > > __alignof__(rwlock_t)), otherwise it will still pass when 
> > > > > you have structure with attribute((packed,aligned(2)))
> > > > 
> > > > reasonable!
> > > > 
> > > > >> 1, it is alignof bug for default gcc on my fc15 and Ubuntu 11.10 etc?
> > > > >>
> > > > >> struct sub {
> > > > >>         int  raw_lock;
> > > > >>         char a;
> > > > >> };
> > > > >> struct foo {
> > > > >>         struct sub z;
> > > > >>         int slk;
> > > > >>         char y;
> > > > >> }__attribute__((packed));
> > > > >>
> > > > >> struct foo f1;
> > > > >>
> > > > >> __alignof__(f1.z.raw_lock) is 4, but its address actually can align on
> > > > >> one byte. 
> > > > > 
> > > > > That looks like correct behavior, because the alignment of 
> > > > > raw_lock inside of struct sub is still 4. But it does mean 
> > > > > that there can be cases where the compile-time check is not 
> > > > > sufficient, so we might want the run-time check as well, at 
> > > > > least under some config option.
> > > > 
> > > > what's your opinion of this, Ingo?
> > > 
> > > Dunno. How many real bugs have you found via this patch?
> > 
> > None. Guess stupid code was shot in lkml reviewing. But if the 
> > patch in, it is helpful to block stupid code in developing.
> 
> The question is, if in the last 10 years not a single such case 
> made it through to today's 15 million lines of kernel code, why 
> should we add the check now?
> 
> If it was a simple build time check then maybe, but judging by 
> the discussion it does not seem so simple, does it?

Oh, it may be better to have, but I don't mind if it gets dropped. Even if
alignof worked as we expect, it still could not cover all the problems;
currently we depend heavily on gcc's behavior.

Anyway, thanks for the review! 


> 
> Thanks,
> 
> 	Ingo



