From: Peter Zijlstra <peterz@infradead.org>
To: torvalds@linux-foundation.org, mingo@kernel.org,
	tglx@linutronix.de, will.deacon@arm.com,
	paulmck@linux.vnet.ibm.com, boqun.feng@gmail.com,
	waiman.long@hpe.com, fweisbec@gmail.com
Cc: linux-kernel@vger.kernel.org, linux-arch@vger.kernel.org,
	rth@twiddle.net, vgupta@synopsys.com, linux@arm.linux.org.uk,
	egtvedt@samfundet.no, realmz6@gmail.com,
	ysato@users.sourceforge.jp, rkuo@codeaurora.org,
	tony.luck@intel.com, geert@linux-m68k.org,
	james.hogan@imgtec.com, ralf@linux-mips.org, dhowells@redhat.com,
	jejb@parisc-linux.org, mpe@ellerman.id.au,
	schwidefsky@de.ibm.com, dalias@libc.org, davem@davemloft.net,
	cmetcalf@mellanox.com, jcmvbkbc@gmail.com, arnd@arndb.de,
	peterz@infradead.org, dbueso@suse.de, fengguang.wu@intel.com
Subject: [PATCH -v2 02/33] locking,arc: Implement atomic_fetch_{add,sub,and,andnot,or,xor}()
Date: Tue, 31 May 2016 12:19:27 +0200
Message-ID: <20160531102641.582308264@infradead.org>
In-Reply-To: <20160531101925.702692792@infradead.org>

[-- Attachment #1: peterz-atomic-fetch-arc.patch --]
[-- Type: text/plain, Size: 4617 bytes --]

Implement the FETCH-OP atomic primitives. These are very similar to the
existing OP-RETURN primitives we already have, except that they return
the value of the atomic variable _before_ modification.

This is especially useful for irreversible operations such as bitops,
because the state prior to modification cannot be reconstructed from
the result alone.
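
As a brief illustration (not part of the patch itself), a hypothetical
test-and-clear style helper shows why returning the pre-modification
value matters; the helper name is made up for this sketch:

/*
 * Hypothetical sketch only: atomic_andnot() discards the old value,
 * so we could not tell whether the flag had been set.  The fetch
 * variant returns the value _before_ the clear, which makes a
 * test-and-clear operation possible.
 */
static inline bool example_test_and_clear_flag(int flag, atomic_t *v)
{
	int old = atomic_fetch_andnot(BIT(flag), v);

	return old & BIT(flag);		/* was the flag set beforehand? */
}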

Acked-by: Vineet Gupta <vgupta@synopsys.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 arch/arc/include/asm/atomic.h |  103 ++++++++++++++++++++++++++++++++++++++----
 1 file changed, 94 insertions(+), 9 deletions(-)

--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -104,6 +104,37 @@ static inline int atomic_##op##_return(i
 	return val;							\
 }
 
+#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
+static inline int atomic_fetch_##op(int i, atomic_t *v)			\
+{									\
+	unsigned int val, orig;						\
+	SCOND_FAIL_RETRY_VAR_DEF                                        \
+									\
+	/*								\
+	 * Explicit full memory barrier needed before/after as		\
+	 * LLOCK/SCOND themselves don't provide any such semantics	\
+	 */								\
+	smp_mb();							\
+									\
+	__asm__ __volatile__(						\
+	"1:	llock   %[orig], [%[ctr]]		\n"		\
+	"	" #asm_op " %[val], %[orig], %[i]	\n"		\
+	"	scond   %[val], [%[ctr]]		\n"		\
+	"						\n"		\
+	SCOND_FAIL_RETRY_ASM						\
+									\
+	: [val]	"=&r"	(val),						\
+	  [orig] "=&r" (orig)						\
+	  SCOND_FAIL_RETRY_VARS						\
+	: [ctr]	"r"	(&v->counter),					\
+	  [i]	"ir"	(i)						\
+	: "cc");							\
+									\
+	smp_mb();							\
+									\
+	return orig;							\
+}
+
 #else	/* !CONFIG_ARC_HAS_LLSC */
 
 #ifndef CONFIG_SMP
@@ -166,21 +197,46 @@ static inline int atomic_##op##_return(i
 	return temp;							\
 }
 
+#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
+static inline int atomic_fetch_##op(int i, atomic_t *v)			\
+{									\
+	unsigned long flags;						\
+	unsigned long orig;						\
+									\
+	/*								\
+	 * spin lock/unlock provides the needed smp_mb() before/after	\
+	 */								\
+	atomic_ops_lock(flags);						\
+	orig = v->counter;						\
+	v->counter c_op i;						\
+	atomic_ops_unlock(flags);					\
+									\
+	return orig;							\
+}
+
 #endif /* !CONFIG_ARC_HAS_LLSC */
 
 #define ATOMIC_OPS(op, c_op, asm_op)					\
 	ATOMIC_OP(op, c_op, asm_op)					\
-	ATOMIC_OP_RETURN(op, c_op, asm_op)
+	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
+	ATOMIC_FETCH_OP(op, c_op, asm_op)
 
 ATOMIC_OPS(add, +=, add)
 ATOMIC_OPS(sub, -=, sub)
 
 #define atomic_andnot atomic_andnot
 
-ATOMIC_OP(and, &=, and)
-ATOMIC_OP(andnot, &= ~, bic)
-ATOMIC_OP(or, |=, or)
-ATOMIC_OP(xor, ^=, xor)
+#define atomic_fetch_or atomic_fetch_or
+
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, c_op, asm_op)					\
+	ATOMIC_OP(op, c_op, asm_op)					\
+	ATOMIC_FETCH_OP(op, c_op, asm_op)
+
+ATOMIC_OPS(and, &=, and)
+ATOMIC_OPS(andnot, &= ~, bic)
+ATOMIC_OPS(or, |=, or)
+ATOMIC_OPS(xor, ^=, xor)
 
 #undef SCOND_FAIL_RETRY_VAR_DEF
 #undef SCOND_FAIL_RETRY_ASM
@@ -245,22 +301,51 @@ static inline int atomic_##op##_return(i
 	return temp;							\
 }
 
+#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
+static inline int atomic_fetch_##op(int i, atomic_t *v)			\
+{									\
+	unsigned int temp = i;						\
+									\
+	/* Explicit full memory barrier needed before/after */		\
+	smp_mb();							\
+									\
+	__asm__ __volatile__(						\
+	"	mov r2, %0\n"						\
+	"	mov r3, %1\n"						\
+	"       .word %2\n"						\
+	"	mov %0, r2"						\
+	: "+r"(temp)							\
+	: "r"(&v->counter), "i"(asm_op)					\
+	: "r2", "r3", "memory");					\
+									\
+	smp_mb();							\
+									\
+	return temp;							\
+}
+
 #define ATOMIC_OPS(op, c_op, asm_op)					\
 	ATOMIC_OP(op, c_op, asm_op)					\
-	ATOMIC_OP_RETURN(op, c_op, asm_op)
+	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
+	ATOMIC_FETCH_OP(op, c_op, asm_op)
 
 ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
 #define atomic_sub(i, v) atomic_add(-(i), (v))
 #define atomic_sub_return(i, v) atomic_add_return(-(i), (v))
 
-ATOMIC_OP(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, c_op, asm_op)					\
+	ATOMIC_OP(op, c_op, asm_op)					\
+	ATOMIC_FETCH_OP(op, c_op, asm_op)
+
+ATOMIC_OPS(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
 #define atomic_andnot(mask, v) atomic_and(~(mask), (v))
-ATOMIC_OP(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
-ATOMIC_OP(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)
+ATOMIC_OPS(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
+ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)
 
 #endif /* CONFIG_ARC_PLAT_EZNPS */
 
 #undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 

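For reference, a rough sketch (not taken verbatim from the tree) of what
the LLOCK/SCOND flavour of ATOMIC_FETCH_OP above expands to for
atomic_fetch_add(), with the SCOND_FAIL_RETRY_* hooks reduced to the
plain retry branch used when the backoff workaround is not configured:

static inline int atomic_fetch_add(int i, atomic_t *v)
{
	unsigned int val, orig;

	/* full barrier before: LLOCK/SCOND provide no ordering themselves */
	smp_mb();

	__asm__ __volatile__(
	"1:	llock   %[orig], [%[ctr]]	\n"	/* orig = *ctr, locked   */
	"	add     %[val], %[orig], %[i]	\n"	/* val  = orig + i       */
	"	scond   %[val], [%[ctr]]	\n"	/* try to store val      */
	"	bnz     1b			\n"	/* retry if scond failed */
	: [val]  "=&r" (val),
	  [orig] "=&r" (orig)
	: [ctr]  "r"  (&v->counter),
	  [i]    "ir" (i)
	: "cc");

	/* full barrier after */
	smp_mb();

	return orig;	/* the value _before_ the add */
}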

Thread overview: 108+ messages
2016-05-31 10:19 [PATCH -v2 00/33] implement atomic_fetch_$op Peter Zijlstra
2016-05-31 10:19 ` [PATCH -v2 01/33] locking,alpha: Implement atomic{,64}_fetch_{add,sub,and,andnot,or,xor}() Peter Zijlstra
2016-05-31 10:19 ` Peter Zijlstra [this message]
2016-05-31 10:19 ` [PATCH -v2 03/33] locking,arm: Implement atomic{,64}_fetch_{add,sub,and,andnot,or,xor}{,_relaxed,_acquire,_release}() Peter Zijlstra
2016-05-31 10:19 ` [PATCH -v2 04/33] locking,arm64: " Peter Zijlstra
2016-05-31 10:19 ` [PATCH -v2 05/33] arm64: atomic: generate LSE non-return cases using common macros Peter Zijlstra
2016-05-31 10:19 ` [PATCH -v2 06/33] locking,arm64: Implement atomic{,64}_fetch_{add,sub,and,andnot,or,xor}{,_relaxed,_acquire,_release}() for LSE instructions Peter Zijlstra
2016-05-31 10:19 ` [PATCH -v2 07/33] locking,avr32: Implement atomic_fetch_{add,sub,and,or,xor}() Peter Zijlstra
2016-05-31 10:19 ` [PATCH -v2 08/33] locking,blackfin: " Peter Zijlstra
2016-05-31 10:19 ` [PATCH -v2 09/33] locking,frv: Implement atomic{,64}_fetch_{add,sub,and,or,xor}() Peter Zijlstra
2016-05-31 10:19 ` [PATCH -v2 10/33] locking,h8300: Implement atomic_fetch_{add,sub,and,or,xor}() Peter Zijlstra
2016-05-31 10:19 ` [PATCH -v2 11/33] locking,hexagon: " Peter Zijlstra
2016-05-31 10:19 ` [PATCH -v2 12/33] locking,ia64: Implement atomic{,64}_fetch_{add,sub,and,or,xor}() Peter Zijlstra
2016-05-31 10:19 ` [PATCH -v2 13/33] locking,m32r: Implement atomic_fetch_{add,sub,and,or,xor}() Peter Zijlstra
2016-05-31 10:19 ` [PATCH -v2 14/33] locking,m68k: " Peter Zijlstra
2016-06-16 10:08   ` Geert Uytterhoeven
2016-06-16 10:13     ` Peter Zijlstra
2016-06-16 12:43       ` Andreas Schwab
2016-06-16 12:49         ` Peter Zijlstra
2016-06-16 12:53           ` Andreas Schwab
2016-06-16 14:35             ` Peter Zijlstra
2016-06-16 14:37               ` Andreas Schwab
2016-06-16 14:56                 ` Peter Zijlstra
2016-06-16 15:04                   ` Andreas Schwab
2016-06-16 17:44                     ` Peter Zijlstra
2016-06-16 19:18                       ` Andreas Schwab
2016-06-16 19:55                       ` Geert Uytterhoeven
2016-06-17 15:40         ` Peter Zijlstra
2016-06-20 17:47           ` Andreas Schwab
2016-06-21  4:27             ` Finn Thain
2016-05-31 10:19 ` [PATCH -v2 15/33] locking,metag: " Peter Zijlstra
2016-05-31 10:19 ` [PATCH -v2 16/33] locking,mips: Implement atomic{,64}_fetch_{add,sub,and,or,xor}() Peter Zijlstra
2016-05-31 10:19 ` [PATCH -v2 17/33] locking,mn10300: Implement atomic_fetch_{add,sub,and,or,xor}() Peter Zijlstra
2016-05-31 10:19 ` [PATCH -v2 18/33] locking,parisc: Implement atomic{,64}_fetch_{add,sub,and,or,xor}() Peter Zijlstra
2016-05-31 10:19 ` [PATCH -v2 19/33] locking,powerpc: Implement atomic{,64}_fetch_{add,sub,and,or,xor}{,_relaxed,_acquire,_release}() Peter Zijlstra
2016-06-01  3:11   ` Boqun Feng
2016-06-01  6:10     ` Boqun Feng
2016-06-01  8:46       ` Peter Zijlstra
2016-05-31 10:19 ` [PATCH -v2 20/33] locking,s390: Implement atomic{,64}_fetch_{add,sub,and,or,xor}() Peter Zijlstra
2016-05-31 10:19 ` [PATCH -v2 21/33] locking,sh: Implement atomic_fetch_{add,sub,and,or,xor}() Peter Zijlstra
2016-05-31 10:19 ` [PATCH -v2 22/33] locking,sparc: Implement atomic{,64}_fetch_{add,sub,and,or,xor}() Peter Zijlstra
2016-05-31 17:50   ` David Miller
2016-05-31 10:19 ` [PATCH -v2 23/33] locking,tile: " Peter Zijlstra
2016-05-31 10:19 ` [PATCH -v2 24/33] locking,x86: " Peter Zijlstra
2016-05-31 10:19 ` [PATCH -v2 25/33] locking,xtensa: Implement atomic_fetch_{add,sub,and,or,xor}() Peter Zijlstra
2016-05-31 10:19 ` [PATCH -v2 26/33] locking: Fix atomic64_relaxed bits Peter Zijlstra
2016-05-31 10:19 ` [PATCH -v2 27/33] locking: Implement atomic{,64,_long}_fetch_{add,sub,and,andnot,or,xor}{,_relaxed,_acquire,_release}() Peter Zijlstra
2016-05-31 10:19 ` [PATCH -v2 28/33] locking: Remove linux/atomic.h:atomic_fetch_or Peter Zijlstra
2016-05-31 10:19 ` [PATCH -v2 29/33] locking: Remove the deprecated atomic_{set,clear}_mask() functions Peter Zijlstra
2016-05-31 10:19 ` [PATCH -v2 30/33] locking,alpha: Convert to _relaxed atomics Peter Zijlstra
2016-05-31 10:19 ` [PATCH -v2 31/33] locking,mips: " Peter Zijlstra
2016-05-31 10:19 ` [PATCH -v2 32/33] locking,qrwlock: Employ atomic_fetch_add_acquire() Peter Zijlstra
2016-05-31 10:19 ` [PATCH -v2 33/33] locking,rwsem: Employ atomic_long_fetch_add() Peter Zijlstra
2016-06-01 14:06 ` [PATCH -v2 00/33] implement atomic_fetch_$op Will Deacon
2016-06-02  9:27 ` Vineet Gupta
2016-06-02  9:33   ` Peter Zijlstra
2016-06-08 12:43     ` Peter Zijlstra
2016-06-08 12:55       ` Ingo Molnar
2016-06-08 13:32         ` Peter Zijlstra
2016-06-08 14:24           ` Vineet Gupta
2016-06-08 14:38             ` Peter Zijlstra
