From: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
To: Russell King - ARM Linux <linux@arm.linux.org.uk>
Cc: Jamie Lokier <jamie@shareable.org>,
	Catalin Marinas <catalin.marinas@arm.com>,
	linux-arm-kernel@lists.arm.linux.org.uk,
	linux-kernel@vger.kernel.org
Subject: Re: Broken ARM atomic ops wrt memory barriers (was : [PATCH] Add cmpxchg support for ARMv6+ systems)
Date: Tue, 26 May 2009 11:36:54 -0400
Message-ID: <20090526153654.GA17096@Krystal>
In-Reply-To: <20090526145950.GB26713@n2100.arm.linux.org.uk>

* Russell King - ARM Linux (linux@arm.linux.org.uk) wrote:
> Here's an updated version of the patch:
> 
> adds a data memory barrier before and after these operations:
> - test_and_xxx_bit()
> - atomic_xxx_return()
> - atomic_cmpxchg()
> - __xchg
> 
> Also:
> - converts smp_mb__*_atomic_*() to use smp_mb()
> - ensures __kuser_memory_barrier uses the right instruction on ARMv7
> 
> diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
> index 6116e48..15f8a09 100644
> --- a/arch/arm/include/asm/assembler.h
> +++ b/arch/arm/include/asm/assembler.h
> @@ -114,3 +114,16 @@
>  	.align	3;				\
>  	.long	9999b,9001f;			\
>  	.previous
> +
> +/*
> + * SMP data memory barrier
> + */
> +	.macro	smp_dmb
> +#ifdef CONFIG_SMP
> +#if __LINUX_ARM_ARCH__ >= 7
> +	dmb
> +#elif __LINUX_ARM_ARCH__ == 6
> +	mcr	p15, 0, r0, c7, c10, 5	@ dmb
> +#endif
> +#endif
> +	.endm
> diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
> index ee99723..16b52f3 100644
> --- a/arch/arm/include/asm/atomic.h
> +++ b/arch/arm/include/asm/atomic.h
> @@ -44,11 +44,29 @@ static inline void atomic_set(atomic_t *v, int i)
>  	: "cc");
>  }
>  
> +static inline void atomic_add(int i, atomic_t *v)
> +{
> +	unsigned long tmp;
> +	int result;
> +
> +	__asm__ __volatile__("@ atomic_add\n"
> +"1:	ldrex	%0, [%2]\n"
> +"	add	%0, %0, %3\n"
> +"	strex	%1, %0, [%2]\n"
> +"	teq	%1, #0\n"
> +"	bne	1b"
> +	: "=&r" (result), "=&r" (tmp)
> +	: "r" (&v->counter), "Ir" (i)
> +	: "cc");
> +}
> +
>  static inline int atomic_add_return(int i, atomic_t *v)
>  {
>  	unsigned long tmp;
>  	int result;
>  
> +	smp_mb();
> +
>  	__asm__ __volatile__("@ atomic_add_return\n"
>  "1:	ldrex	%0, [%2]\n"
>  "	add	%0, %0, %3\n"
> @@ -59,14 +77,34 @@ static inline int atomic_add_return(int i, atomic_t *v)
>  	: "r" (&v->counter), "Ir" (i)
>  	: "cc");
>  
> +	smp_mb();
> +
>  	return result;
>  }
>  
> +static inline void atomic_sub(int i, atomic_t *v)
> +{
> +	unsigned long tmp;
> +	int result;
> +
> +	__asm__ __volatile__("@ atomic_sub\n"
> +"1:	ldrex	%0, [%2]\n"
> +"	sub	%0, %0, %3\n"
> +"	strex	%1, %0, [%2]\n"
> +"	teq	%1, #0\n"
> +"	bne	1b"
> +	: "=&r" (result), "=&r" (tmp)
> +	: "r" (&v->counter), "Ir" (i)
> +	: "cc");
> +}
> +
>  static inline int atomic_sub_return(int i, atomic_t *v)
>  {
>  	unsigned long tmp;
>  	int result;
>  
> +	smp_mb();
> +
>  	__asm__ __volatile__("@ atomic_sub_return\n"
>  "1:	ldrex	%0, [%2]\n"
>  "	sub	%0, %0, %3\n"
> @@ -77,6 +115,8 @@ static inline int atomic_sub_return(int i, atomic_t *v)
>  	: "r" (&v->counter), "Ir" (i)
>  	: "cc");
>  
> +	smp_mb();
> +
>  	return result;
>  }
>  
> @@ -84,6 +124,8 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
>  {
>  	unsigned long oldval, res;
>  
> +	smp_mb();
> +
>  	do {
>  		__asm__ __volatile__("@ atomic_cmpxchg\n"
>  		"ldrex	%1, [%2]\n"
> @@ -95,6 +137,8 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
>  		    : "cc");
>  	} while (res);
>  
> +	smp_mb();
> +
>  	return oldval;
>  }
>  
> @@ -135,6 +179,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
>  
>  	return val;
>  }
> +#define atomic_add(i, v)	(void) atomic_add_return(i, v)
>  
>  static inline int atomic_sub_return(int i, atomic_t *v)
>  {
> @@ -148,6 +193,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)
>  
>  	return val;
>  }
> +#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)
>  
>  static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
>  {
> @@ -187,10 +233,8 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
>  }
>  #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
>  
> -#define atomic_add(i, v)	(void) atomic_add_return(i, v)
> -#define atomic_inc(v)		(void) atomic_add_return(1, v)
> -#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)
> -#define atomic_dec(v)		(void) atomic_sub_return(1, v)
> +#define atomic_inc(v)		atomic_add(1, v)
> +#define atomic_dec(v)		atomic_sub(1, v)
>  
>  #define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
>  #define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
> @@ -200,11 +244,10 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
>  
>  #define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
>  
> -/* Atomic operations are already serializing on ARM */
> -#define smp_mb__before_atomic_dec()	barrier()
> -#define smp_mb__after_atomic_dec()	barrier()
> -#define smp_mb__before_atomic_inc()	barrier()
> -#define smp_mb__after_atomic_inc()	barrier()
> +#define smp_mb__before_atomic_dec()	smp_mb()
> +#define smp_mb__after_atomic_dec()	smp_mb()
> +#define smp_mb__before_atomic_inc()	smp_mb()
> +#define smp_mb__after_atomic_inc()	smp_mb()
>  
>  #include <asm-generic/atomic.h>
>  #endif
> diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
> index bd4dc8e..e9889c2 100644
> --- a/arch/arm/include/asm/system.h
> +++ b/arch/arm/include/asm/system.h
> @@ -248,6 +248,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
>  	unsigned int tmp;
>  #endif
>  
> +	smp_mb();
> +
>  	switch (size) {
>  #if __LINUX_ARM_ARCH__ >= 6
>  	case 1:
> @@ -258,7 +260,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
>  		"	bne	1b"
>  			: "=&r" (ret), "=&r" (tmp)
>  			: "r" (x), "r" (ptr)
> -			: "memory", "cc");
> +			: "cc");

I would not remove the "memory" clobber here. It is only a compiler
barrier anyway, and I doubt removing it makes anything faster (given the
smp_mb() now added around these operations), but it certainly makes the
code harder to understand.
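
For example (a minimal sketch, not part of the patch), an asm statement
with a "memory" clobber is exactly how barrier() is implemented; dropping
the clobber lets GCC cache values in registers and reorder memory
accesses around the asm:

/*
 * Minimal sketch: the "memory" clobber tells GCC that the asm may read
 * or write memory, so it may not cache values in registers across it
 * nor move surrounding memory accesses past it.  It only constrains the
 * compiler; the CPU may still reorder, which is what dmb/smp_mb() is for.
 */
static int data, flag;

static void producer(void)
{
	data = 42;
	__asm__ __volatile__("" : : : "memory");  /* compiler barrier only */
	flag = 1;  /* GCC must emit this store after the store to data */
}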

>  		break;
>  	case 4:
>  		asm volatile("@	__xchg4\n"
> @@ -268,7 +270,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
>  		"	bne	1b"
>  			: "=&r" (ret), "=&r" (tmp)
>  			: "r" (x), "r" (ptr)
> -			: "memory", "cc");

Same.

> +			: "cc");
>  		break;
>  #elif defined(swp_is_buggy)
>  #ifdef CONFIG_SMP
> @@ -293,20 +295,21 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
>  		"	swpb	%0, %1, [%2]"
>  			: "=&r" (ret)
>  			: "r" (x), "r" (ptr)
> -			: "memory", "cc");

Same.

> +			: "cc");
>  		break;
>  	case 4:
>  		asm volatile("@	__xchg4\n"
>  		"	swp	%0, %1, [%2]"
>  			: "=&r" (ret)
>  			: "r" (x), "r" (ptr)
> -			: "memory", "cc");

Same.

> +			: "cc");
>  		break;
>  #endif
>  	default:
>  		__bad_xchg(ptr, size), ret = 0;
>  		break;
>  	}
> +	smp_mb();
>  
>  	return ret;
>  }
> diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
> index d662a2f..83b1da6 100644
> --- a/arch/arm/kernel/entry-armv.S
> +++ b/arch/arm/kernel/entry-armv.S
> @@ -815,10 +815,7 @@ __kuser_helper_start:
>   */
>  
>  __kuser_memory_barrier:				@ 0xffff0fa0
> -
> -#if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_SMP)
> -	mcr	p15, 0, r0, c7, c10, 5	@ dmb
> -#endif
> +	smp_dmb
>  	usr_ret	lr
>  
>  	.align	5
> diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h
> index 2e787d4..c7f2627 100644
> --- a/arch/arm/lib/bitops.h
> +++ b/arch/arm/lib/bitops.h
> @@ -18,12 +18,14 @@
>  	mov	r2, #1
>  	add	r1, r1, r0, lsr #3	@ Get byte offset
>  	mov	r3, r2, lsl r3		@ create mask
> +	smp_dmb
>  1:	ldrexb	r2, [r1]
>  	ands	r0, r2, r3		@ save old value of bit
>  	\instr	r2, r2, r3			@ toggle bit
>  	strexb	ip, r2, [r1]
>  	cmp	ip, #0
>  	bne	1b
> +	smp_dmb
>  	cmp	r0, #0
>  	movne	r0, #1
>  2:	mov	pc, lr
> 

The rest looks good.

Note that we still have not checked whether a full dmb is really needed
both before and after each operation. For instance, lwsync (before) +
isync (after) is enough on powerpc. I suppose this is because the CPU is
not free to reorder reads when there is a data dependency.

So adding a full dmb on both sides, as we do here, is certainly safe,
but it might be slower than the optimal solution.
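
For reference, here is a rough sketch of the powerpc-style placement I
mean (from memory; the real arch/powerpc code differs in its SMP and
errata macros): a lighter lwsync ahead of the ll/sc loop and an isync
behind it, rather than a full barrier on each side:

/*
 * Rough sketch of the powerpc pattern, not the actual kernel code:
 * lwsync orders all prior accesses against the atomic sequence, and
 * isync prevents later accesses from being speculated past the
 * conditional branch that ends the ll/sc loop.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	int result;

	__asm__ __volatile__(
"	lwsync\n"
"1:	lwarx	%0, 0, %2\n"
"	add	%0, %0, %1\n"
"	stwcx.	%0, 0, %2\n"
"	bne-	1b\n"
"	isync"
	: "=&r" (result)
	: "r" (i), "r" (&v->counter)
	: "cc", "memory");

	return result;
}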

If we determine that some reorderings are not possible on ARM, we might
eventually turn rmb() into a simple barrier() and make the atomic op
barriers a bit more lightweight.

But let's first make it work.

Thanks,

Mathieu


-- 
Mathieu Desnoyers
OpenPGP key fingerprint: 8CD5 52C3 8E3C 4140 715F  BA06 3F25 A8FE 3BAE 9A68
