From: tip-bot for Peter Zijlstra <tipbot@zytor.com>
To: linux-tip-commits@vger.kernel.org
Cc: linux-kernel@vger.kernel.org, hpa@zytor.com, mingo@kernel.org,
torvalds@linux-foundation.org, peterz@infradead.org,
paulmck@linux.vnet.ibm.com, macro@codesourcery.com,
ralf@linux-mips.org, tglx@linutronix.de
Subject: [tip:locking/arch] locking,arch,mips: Fold atomic_ops
Date: Thu, 14 Aug 2014 10:21:25 -0700
Message-ID: <tip-ef31563e950c60bb41b97c2b61c32de874f3c949@git.kernel.org>
In-Reply-To: <20140508135852.521548500@infradead.org>
Commit-ID: ef31563e950c60bb41b97c2b61c32de874f3c949
Gitweb: http://git.kernel.org/tip/ef31563e950c60bb41b97c2b61c32de874f3c949
Author: Peter Zijlstra <peterz@infradead.org>
AuthorDate: Wed, 26 Mar 2014 17:56:43 +0100
Committer: Ingo Molnar <mingo@kernel.org>
CommitDate: Thu, 14 Aug 2014 12:48:09 +0200
locking,arch,mips: Fold atomic_ops
Many of the atomic op implementations are the same except for one
instruction; fold the lot into a few CPP macros and reduce LoC.
This also prepares for easy addition of new ops; a stand-alone sketch
of the generated pattern follows the diffstat below.
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Maciej W. Rozycki <macro@codesourcery.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Link: http://lkml.kernel.org/r/20140508135852.521548500@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
arch/mips/include/asm/atomic.h | 557 ++++++++++++++---------------------------
1 file changed, 187 insertions(+), 370 deletions(-)
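
Before the diff proper, the shape of the change in miniature: the patch
replaces each open-coded atomic_add/atomic_sub pair with generator
macros. The stand-alone sketch below mirrors only the macro structure;
the demo_* names are hypothetical, and a plain non-atomic body stands
in for the kernel's LL/SC asm and IRQ-disable fallback, so treat it as
an illustration, not kernel code.

/* Stand-alone illustration of the ATOMIC_OP / ATOMIC_OP_RETURN /
 * ATOMIC_OPS generator pattern introduced by this patch.  Hypothetical
 * names throughout; the body is deliberately non-atomic because only
 * the macro structure is at issue here. */
#include <stdio.h>

typedef struct { int counter; } demo_atomic_t;

#define DEMO_ATOMIC_OP(op, c_op)					\
static inline void demo_atomic_##op(int i, demo_atomic_t *v)		\
{									\
	v->counter c_op i;	/* kernel: LL/SC loop or IRQ-off */	\
}

#define DEMO_ATOMIC_OP_RETURN(op, c_op)					\
static inline int demo_atomic_##op##_return(int i, demo_atomic_t *v)	\
{									\
	v->counter c_op i;						\
	return v->counter;						\
}

#define DEMO_ATOMIC_OPS(op, c_op)					\
	DEMO_ATOMIC_OP(op, c_op)					\
	DEMO_ATOMIC_OP_RETURN(op, c_op)

DEMO_ATOMIC_OPS(add, +=)	/* one invocation replaces ~60 lines */
DEMO_ATOMIC_OPS(sub, -=)

#undef DEMO_ATOMIC_OPS
#undef DEMO_ATOMIC_OP_RETURN
#undef DEMO_ATOMIC_OP

int main(void)
{
	demo_atomic_t v = { .counter = 40 };

	demo_atomic_add(2, &v);
	printf("%d\n", demo_atomic_sub_return(2, &v));	/* prints 40 */
	return 0;
}

Built with any C compiler (e.g. gcc -Wall demo.c), the two
DEMO_ATOMIC_OPS() invocations generate four functions; that is the
economy the patch buys, since adding a new op becomes one line per
width instead of another hand-copied LL/SC function.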
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 37b2bef..476fe3b 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -40,195 +40,103 @@
*/
#define atomic_set(v, i) ((v)->counter = (i))
-/*
- * atomic_add - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v.
- */
-static __inline__ void atomic_add(int i, atomic_t * v)
-{
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- int temp;
-
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- "1: ll %0, %1 # atomic_add \n"
- " addu %0, %2 \n"
- " sc %0, %1 \n"
- " beqzl %0, 1b \n"
- " .set mips0 \n"
- : "=&r" (temp), "+m" (v->counter)
- : "Ir" (i));
- } else if (kernel_uses_llsc) {
- int temp;
-
- do {
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- " ll %0, %1 # atomic_add \n"
- " addu %0, %2 \n"
- " sc %0, %1 \n"
- " .set mips0 \n"
- : "=&r" (temp), "+m" (v->counter)
- : "Ir" (i));
- } while (unlikely(!temp));
- } else {
- unsigned long flags;
-
- raw_local_irq_save(flags);
- v->counter += i;
- raw_local_irq_restore(flags);
- }
-}
-
-/*
- * atomic_sub - subtract the atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v.
- */
-static __inline__ void atomic_sub(int i, atomic_t * v)
-{
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- int temp;
-
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- "1: ll %0, %1 # atomic_sub \n"
- " subu %0, %2 \n"
- " sc %0, %1 \n"
- " beqzl %0, 1b \n"
- " .set mips0 \n"
- : "=&r" (temp), "+m" (v->counter)
- : "Ir" (i));
- } else if (kernel_uses_llsc) {
- int temp;
-
- do {
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- " ll %0, %1 # atomic_sub \n"
- " subu %0, %2 \n"
- " sc %0, %1 \n"
- " .set mips0 \n"
- : "=&r" (temp), "+m" (v->counter)
- : "Ir" (i));
- } while (unlikely(!temp));
- } else {
- unsigned long flags;
-
- raw_local_irq_save(flags);
- v->counter -= i;
- raw_local_irq_restore(flags);
- }
-}
-
-/*
- * Same as above, but return the result value
- */
-static __inline__ int atomic_add_return(int i, atomic_t * v)
-{
- int result;
-
- smp_mb__before_llsc();
-
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- int temp;
-
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- "1: ll %1, %2 # atomic_add_return \n"
- " addu %0, %1, %3 \n"
- " sc %0, %2 \n"
- " beqzl %0, 1b \n"
- " addu %0, %1, %3 \n"
- " .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "+m" (v->counter)
- : "Ir" (i));
- } else if (kernel_uses_llsc) {
- int temp;
-
- do {
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- " ll %1, %2 # atomic_add_return \n"
- " addu %0, %1, %3 \n"
- " sc %0, %2 \n"
- " .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "+m" (v->counter)
- : "Ir" (i));
- } while (unlikely(!result));
-
- result = temp + i;
- } else {
- unsigned long flags;
-
- raw_local_irq_save(flags);
- result = v->counter;
- result += i;
- v->counter = result;
- raw_local_irq_restore(flags);
- }
-
- smp_llsc_mb();
-
- return result;
+#define ATOMIC_OP(op, c_op, asm_op) \
+static __inline__ void atomic_##op(int i, atomic_t * v) \
+{ \
+ if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+ int temp; \
+ \
+ __asm__ __volatile__( \
+ " .set arch=r4000 \n" \
+ "1: ll %0, %1 # atomic_" #op " \n" \
+ " " #asm_op " %0, %2 \n" \
+ " sc %0, %1 \n" \
+ " beqzl %0, 1b \n" \
+ " .set mips0 \n" \
+ : "=&r" (temp), "+m" (v->counter) \
+ : "Ir" (i)); \
+ } else if (kernel_uses_llsc) { \
+ int temp; \
+ \
+ do { \
+ __asm__ __volatile__( \
+ " .set arch=r4000 \n" \
+ " ll %0, %1 # atomic_" #op "\n" \
+ " " #asm_op " %0, %2 \n" \
+ " sc %0, %1 \n" \
+ " .set mips0 \n" \
+ : "=&r" (temp), "+m" (v->counter) \
+ : "Ir" (i)); \
+ } while (unlikely(!temp)); \
+ } else { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ v->counter c_op i; \
+ raw_local_irq_restore(flags); \
+ } \
+} \
+
+#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
+static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
+{ \
+ int result; \
+ \
+ smp_mb__before_llsc(); \
+ \
+ if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+ int temp; \
+ \
+ __asm__ __volatile__( \
+ " .set arch=r4000 \n" \
+ "1: ll %1, %2 # atomic_" #op "_return \n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " sc %0, %2 \n" \
+ " beqzl %0, 1b \n" \
+ " addu %0, %1, %3 \n" \
+ " .set mips0 \n" \
+ : "=&r" (result), "=&r" (temp), "+m" (v->counter) \
+ : "Ir" (i)); \
+ } else if (kernel_uses_llsc) { \
+ int temp; \
+ \
+ do { \
+ __asm__ __volatile__( \
+ " .set arch=r4000 \n" \
+ " ll %1, %2 # atomic_" #op "_return \n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " sc %0, %2 \n" \
+ " .set mips0 \n" \
+ : "=&r" (result), "=&r" (temp), "+m" (v->counter) \
+ : "Ir" (i)); \
+ } while (unlikely(!result)); \
+ \
+ result = temp + i; \
+ } else { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ result = v->counter; \
+ result c_op i; \
+ v->counter = result; \
+ raw_local_irq_restore(flags); \
+ } \
+ \
+ smp_llsc_mb(); \
+ \
+ return result; \
}
-static __inline__ int atomic_sub_return(int i, atomic_t * v)
-{
- int result;
+#define ATOMIC_OPS(op, c_op, asm_op) \
+ ATOMIC_OP(op, c_op, asm_op) \
+ ATOMIC_OP_RETURN(op, c_op, asm_op)
- smp_mb__before_llsc();
+ATOMIC_OPS(add, +=, addu)
+ATOMIC_OPS(sub, -=, subu)
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- int temp;
-
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- "1: ll %1, %2 # atomic_sub_return \n"
- " subu %0, %1, %3 \n"
- " sc %0, %2 \n"
- " beqzl %0, 1b \n"
- " subu %0, %1, %3 \n"
- " .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter)
- : "memory");
-
- result = temp - i;
- } else if (kernel_uses_llsc) {
- int temp;
-
- do {
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- " ll %1, %2 # atomic_sub_return \n"
- " subu %0, %1, %3 \n"
- " sc %0, %2 \n"
- " .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "+m" (v->counter)
- : "Ir" (i));
- } while (unlikely(!result));
-
- result = temp - i;
- } else {
- unsigned long flags;
-
- raw_local_irq_save(flags);
- result = v->counter;
- result -= i;
- v->counter = result;
- raw_local_irq_restore(flags);
- }
-
- smp_llsc_mb();
-
- return result;
-}
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
/*
* atomic_sub_if_positive - conditionally subtract integer from atomic variable
@@ -407,195 +315,104 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
*/
#define atomic64_set(v, i) ((v)->counter = (i))
-/*
- * atomic64_add - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic64_t
- *
- * Atomically adds @i to @v.
- */
-static __inline__ void atomic64_add(long i, atomic64_t * v)
-{
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- long temp;
-
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- "1: lld %0, %1 # atomic64_add \n"
- " daddu %0, %2 \n"
- " scd %0, %1 \n"
- " beqzl %0, 1b \n"
- " .set mips0 \n"
- : "=&r" (temp), "+m" (v->counter)
- : "Ir" (i));
- } else if (kernel_uses_llsc) {
- long temp;
-
- do {
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- " lld %0, %1 # atomic64_add \n"
- " daddu %0, %2 \n"
- " scd %0, %1 \n"
- " .set mips0 \n"
- : "=&r" (temp), "+m" (v->counter)
- : "Ir" (i));
- } while (unlikely(!temp));
- } else {
- unsigned long flags;
-
- raw_local_irq_save(flags);
- v->counter += i;
- raw_local_irq_restore(flags);
- }
+#define ATOMIC64_OP(op, c_op, asm_op) \
+static __inline__ void atomic64_##op(long i, atomic64_t * v) \
+{ \
+ if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+ long temp; \
+ \
+ __asm__ __volatile__( \
+ " .set arch=r4000 \n" \
+ "1: lld %0, %1 # atomic64_" #op " \n" \
+ " " #asm_op " %0, %2 \n" \
+ " scd %0, %1 \n" \
+ " beqzl %0, 1b \n" \
+ " .set mips0 \n" \
+ : "=&r" (temp), "+m" (v->counter) \
+ : "Ir" (i)); \
+ } else if (kernel_uses_llsc) { \
+ long temp; \
+ \
+ do { \
+ __asm__ __volatile__( \
+ " .set arch=r4000 \n" \
+ " lld %0, %1 # atomic64_" #op "\n" \
+ " " #asm_op " %0, %2 \n" \
+ " scd %0, %1 \n" \
+ " .set mips0 \n" \
+ : "=&r" (temp), "+m" (v->counter) \
+ : "Ir" (i)); \
+ } while (unlikely(!temp)); \
+ } else { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ v->counter c_op i; \
+ raw_local_irq_restore(flags); \
+ } \
+} \
+
+#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
+static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
+{ \
+ long result; \
+ \
+ smp_mb__before_llsc(); \
+ \
+ if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+ long temp; \
+ \
+ __asm__ __volatile__( \
+ " .set arch=r4000 \n" \
+ "1: lld %1, %2 # atomic64_" #op "_return\n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " scd %0, %2 \n" \
+ " beqzl %0, 1b \n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " .set mips0 \n" \
+ : "=&r" (result), "=&r" (temp), "+m" (v->counter) \
+ : "Ir" (i)); \
+ } else if (kernel_uses_llsc) { \
+ long temp; \
+ \
+ do { \
+ __asm__ __volatile__( \
+ " .set arch=r4000 \n" \
+ " lld %1, %2 # atomic64_" #op "_return\n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " scd %0, %2 \n" \
+ " .set mips0 \n" \
+ : "=&r" (result), "=&r" (temp), "=m" (v->counter) \
+ : "Ir" (i), "m" (v->counter) \
+ : "memory"); \
+ } while (unlikely(!result)); \
+ \
+ result = temp + i; \
+ } else { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ result = v->counter; \
+ result c_op i; \
+ v->counter = result; \
+ raw_local_irq_restore(flags); \
+ } \
+ \
+ smp_llsc_mb(); \
+ \
+ return result; \
}
-/*
- * atomic64_sub - subtract the atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic64_t
- *
- * Atomically subtracts @i from @v.
- */
-static __inline__ void atomic64_sub(long i, atomic64_t * v)
-{
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- long temp;
-
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- "1: lld %0, %1 # atomic64_sub \n"
- " dsubu %0, %2 \n"
- " scd %0, %1 \n"
- " beqzl %0, 1b \n"
- " .set mips0 \n"
- : "=&r" (temp), "+m" (v->counter)
- : "Ir" (i));
- } else if (kernel_uses_llsc) {
- long temp;
-
- do {
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- " lld %0, %1 # atomic64_sub \n"
- " dsubu %0, %2 \n"
- " scd %0, %1 \n"
- " .set mips0 \n"
- : "=&r" (temp), "+m" (v->counter)
- : "Ir" (i));
- } while (unlikely(!temp));
- } else {
- unsigned long flags;
-
- raw_local_irq_save(flags);
- v->counter -= i;
- raw_local_irq_restore(flags);
- }
-}
-
-/*
- * Same as above, but return the result value
- */
-static __inline__ long atomic64_add_return(long i, atomic64_t * v)
-{
- long result;
+#define ATOMIC64_OPS(op, c_op, asm_op) \
+ ATOMIC64_OP(op, c_op, asm_op) \
+ ATOMIC64_OP_RETURN(op, c_op, asm_op)
- smp_mb__before_llsc();
+ATOMIC64_OPS(add, +=, daddu)
+ATOMIC64_OPS(sub, -=, dsubu)
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- long temp;
-
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- "1: lld %1, %2 # atomic64_add_return \n"
- " daddu %0, %1, %3 \n"
- " scd %0, %2 \n"
- " beqzl %0, 1b \n"
- " daddu %0, %1, %3 \n"
- " .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "+m" (v->counter)
- : "Ir" (i));
- } else if (kernel_uses_llsc) {
- long temp;
-
- do {
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- " lld %1, %2 # atomic64_add_return \n"
- " daddu %0, %1, %3 \n"
- " scd %0, %2 \n"
- " .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter)
- : "memory");
- } while (unlikely(!result));
-
- result = temp + i;
- } else {
- unsigned long flags;
-
- raw_local_irq_save(flags);
- result = v->counter;
- result += i;
- v->counter = result;
- raw_local_irq_restore(flags);
- }
-
- smp_llsc_mb();
-
- return result;
-}
-
-static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
-{
- long result;
-
- smp_mb__before_llsc();
-
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- long temp;
-
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- "1: lld %1, %2 # atomic64_sub_return \n"
- " dsubu %0, %1, %3 \n"
- " scd %0, %2 \n"
- " beqzl %0, 1b \n"
- " dsubu %0, %1, %3 \n"
- " .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter)
- : "memory");
- } else if (kernel_uses_llsc) {
- long temp;
-
- do {
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- " lld %1, %2 # atomic64_sub_return \n"
- " dsubu %0, %1, %3 \n"
- " scd %0, %2 \n"
- " .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter)
- : "memory");
- } while (unlikely(!result));
-
- result = temp - i;
- } else {
- unsigned long flags;
-
- raw_local_irq_save(flags);
- result = v->counter;
- result -= i;
- v->counter = result;
- raw_local_irq_restore(flags);
- }
-
- smp_llsc_mb();
-
- return result;
-}
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
/*
* atomic64_sub_if_positive - conditionally subtract integer from atomic variable
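
The excerpt ends in the unchanged atomic64_sub_if_positive context. One
closing note on the LL/SC idiom the macros above generate: ll loads the
word and tags its address, and sc stores only if no conflicting write
intervened, leaving 1 (success) or 0 (failure) in its source register;
hence the "beqzl %0, 1b" branch and the do/while retry on a zero
result. A rough portable analogue, using the GCC/Clang __atomic
builtins with names of our own choosing (an illustrative sketch, not
kernel code):

/* Portable analogue of the retry loop the MIPS macros generate: keep
 * re-trying the read-modify-write until the conditional store
 * succeeds. */
#include <stdio.h>

static int demo_add_return(int i, int *counter)
{
	int old = __atomic_load_n(counter, __ATOMIC_RELAXED);
	int new;

	do {
		new = old + i;	/* the single op-specific instruction */
	} while (!__atomic_compare_exchange_n(counter, &old, new,
					      1 /* weak, like sc */,
					      __ATOMIC_SEQ_CST,
					      __ATOMIC_RELAXED));
	return new;		/* cf. the trailing "addu %0, %1, %3" */
}

int main(void)
{
	int c = 5;

	printf("%d\n", demo_add_return(3, &c));	/* prints 8 */
	return 0;
}

The weak compare-and-swap mirrors sc, which may fail spuriously, so the
loop, like the kernel's, must tolerate retries; on failure the builtin
refreshes old with the current value before the next attempt.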