From: tip-bot for Mark Rutland <tipbot@zytor.com>
To: linux-tip-commits@vger.kernel.org
Cc: linux-kernel@vger.kernel.org, catalin.marinas@arm.com,
	peterz@infradead.org, hpa@zytor.com, mark.rutland@arm.com,
	mingo@kernel.org, will.deacon@arm.com,
	torvalds@linux-foundation.org, tglx@linutronix.de
Subject: [tip:locking/core] locking/atomic, arm64: Use s64 for atomic64
Date: Mon, 3 Jun 2019 06:38:19 -0700	[thread overview]
Message-ID: <tip-16f18688af7ea6c65f6daa3efb4661415e2e6041@git.kernel.org> (raw)
In-Reply-To: <20190522132250.26499-8-mark.rutland@arm.com>

Commit-ID:  16f18688af7ea6c65f6daa3efb4661415e2e6041
Gitweb:     https://git.kernel.org/tip/16f18688af7ea6c65f6daa3efb4661415e2e6041
Author:     Mark Rutland <mark.rutland@arm.com>
AuthorDate: Wed, 22 May 2019 14:22:39 +0100
Committer:  Ingo Molnar <mingo@kernel.org>
CommitDate: Mon, 3 Jun 2019 12:32:56 +0200

locking/atomic, arm64: Use s64 for atomic64

As a step towards making the atomic64 API use consistent types treewide,
let's have the arm64 atomic64 implementation use s64 as the underlying
type for atomic64_t, rather than long, matching the generated headers.
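
For illustration only (this snippet is not part of the patch), the net
effect on one of the generated prototypes, e.g. arch_atomic64_add(), is
roughly:

	/* before: value operand typed as long */
	static inline void arch_atomic64_add(long i, atomic64_t *v);

	/* after: s64 used throughout, matching the generated headers */
	static inline void arch_atomic64_add(s64 i, atomic64_t *v);

The actual definitions are produced by the ATOMIC64_OP*() macros in the
diff below.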

As atomic64_read() depends on the generic definition of atomic64_t, it
still returns long. This will be converted in a subsequent patch.
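
For context, a sketch (not part of this patch) of why the return type is
still long: the generic atomic64_t is assumed here to still carry a long
counter at this point in the series, so a READ_ONCE()-based read accessor
keeps evaluating to long until the later treewide conversion:

	/* generic definition; converted to s64 later in this series */
	typedef struct {
		long counter;
	} atomic64_t;

	/* so something like this still yields a long here */
	#define arch_atomic64_read(v)	READ_ONCE((v)->counter)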

Note that in arch_atomic64_dec_if_positive(), the x0 variable is left as
long, as this variable is also used to hold the pointer to the
atomic64_t.
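
An abbreviated sketch of the LSE variant (the asm body is elided; the
corresponding hunk is the last one in the diff below) illustrates the
dual use of x0:

	static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
	{
		/*
		 * x0 carries the atomic64_t pointer into the instruction
		 * sequence and the s64 result out of it, so it stays long.
		 */
		register long x0 asm ("x0") = (long)v;

		/* ... LL/SC or LSE instruction sequence elided ... */

		return x0;
	}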

Otherwise, there should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Cc: aou@eecs.berkeley.edu
Cc: arnd@arndb.de
Cc: bp@alien8.de
Cc: davem@davemloft.net
Cc: fenghua.yu@intel.com
Cc: heiko.carstens@de.ibm.com
Cc: herbert@gondor.apana.org.au
Cc: ink@jurassic.park.msu.ru
Cc: jhogan@kernel.org
Cc: linux@armlinux.org.uk
Cc: mattst88@gmail.com
Cc: mpe@ellerman.id.au
Cc: palmer@sifive.com
Cc: paul.burton@mips.com
Cc: paulus@samba.org
Cc: ralf@linux-mips.org
Cc: rth@twiddle.net
Cc: tony.luck@intel.com
Cc: vgupta@synopsys.com
Link: https://lkml.kernel.org/r/20190522132250.26499-8-mark.rutland@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 arch/arm64/include/asm/atomic_ll_sc.h | 20 ++++++++++----------
 arch/arm64/include/asm/atomic_lse.h   | 34 +++++++++++++++++-----------------
 2 files changed, 27 insertions(+), 27 deletions(-)

diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
index e321293e0c89..f3b12d7f431f 100644
--- a/arch/arm64/include/asm/atomic_ll_sc.h
+++ b/arch/arm64/include/asm/atomic_ll_sc.h
@@ -133,9 +133,9 @@ ATOMIC_OPS(xor, eor)
 
 #define ATOMIC64_OP(op, asm_op)						\
 __LL_SC_INLINE void							\
-__LL_SC_PREFIX(arch_atomic64_##op(long i, atomic64_t *v))		\
+__LL_SC_PREFIX(arch_atomic64_##op(s64 i, atomic64_t *v))		\
 {									\
-	long result;							\
+	s64 result;							\
 	unsigned long tmp;						\
 									\
 	asm volatile("// atomic64_" #op "\n"				\
@@ -150,10 +150,10 @@ __LL_SC_PREFIX(arch_atomic64_##op(long i, atomic64_t *v))		\
 __LL_SC_EXPORT(arch_atomic64_##op);
 
 #define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op)		\
-__LL_SC_INLINE long							\
-__LL_SC_PREFIX(arch_atomic64_##op##_return##name(long i, atomic64_t *v))\
+__LL_SC_INLINE s64							\
+__LL_SC_PREFIX(arch_atomic64_##op##_return##name(s64 i, atomic64_t *v))\
 {									\
-	long result;							\
+	s64 result;							\
 	unsigned long tmp;						\
 									\
 	asm volatile("// atomic64_" #op "_return" #name "\n"		\
@@ -172,10 +172,10 @@ __LL_SC_PREFIX(arch_atomic64_##op##_return##name(long i, atomic64_t *v))\
 __LL_SC_EXPORT(arch_atomic64_##op##_return##name);
 
 #define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op)		\
-__LL_SC_INLINE long							\
-__LL_SC_PREFIX(arch_atomic64_fetch_##op##name(long i, atomic64_t *v))	\
+__LL_SC_INLINE s64							\
+__LL_SC_PREFIX(arch_atomic64_fetch_##op##name(s64 i, atomic64_t *v))	\
 {									\
-	long result, val;						\
+	s64 result, val;						\
 	unsigned long tmp;						\
 									\
 	asm volatile("// atomic64_fetch_" #op #name "\n"		\
@@ -225,10 +225,10 @@ ATOMIC64_OPS(xor, eor)
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
 
-__LL_SC_INLINE long
+__LL_SC_INLINE s64
 __LL_SC_PREFIX(arch_atomic64_dec_if_positive(atomic64_t *v))
 {
-	long result;
+	s64 result;
 	unsigned long tmp;
 
 	asm volatile("// atomic64_dec_if_positive\n"
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index 9256a3921e4b..c53832b08af7 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -224,9 +224,9 @@ ATOMIC_FETCH_OP_SUB(        , al, "memory")
 
 #define __LL_SC_ATOMIC64(op)	__LL_SC_CALL(arch_atomic64_##op)
 #define ATOMIC64_OP(op, asm_op)						\
-static inline void arch_atomic64_##op(long i, atomic64_t *v)		\
+static inline void arch_atomic64_##op(s64 i, atomic64_t *v)		\
 {									\
-	register long x0 asm ("x0") = i;				\
+	register s64 x0 asm ("x0") = i;					\
 	register atomic64_t *x1 asm ("x1") = v;				\
 									\
 	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(op),	\
@@ -244,9 +244,9 @@ ATOMIC64_OP(add, stadd)
 #undef ATOMIC64_OP
 
 #define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...)			\
-static inline long arch_atomic64_fetch_##op##name(long i, atomic64_t *v)\
+static inline s64 arch_atomic64_fetch_##op##name(s64 i, atomic64_t *v)	\
 {									\
-	register long x0 asm ("x0") = i;				\
+	register s64 x0 asm ("x0") = i;					\
 	register atomic64_t *x1 asm ("x1") = v;				\
 									\
 	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
@@ -276,9 +276,9 @@ ATOMIC64_FETCH_OPS(add, ldadd)
 #undef ATOMIC64_FETCH_OPS
 
 #define ATOMIC64_OP_ADD_RETURN(name, mb, cl...)				\
-static inline long arch_atomic64_add_return##name(long i, atomic64_t *v)\
+static inline s64 arch_atomic64_add_return##name(s64 i, atomic64_t *v)	\
 {									\
-	register long x0 asm ("x0") = i;				\
+	register s64 x0 asm ("x0") = i;					\
 	register atomic64_t *x1 asm ("x1") = v;				\
 									\
 	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
@@ -302,9 +302,9 @@ ATOMIC64_OP_ADD_RETURN(        , al, "memory")
 
 #undef ATOMIC64_OP_ADD_RETURN
 
-static inline void arch_atomic64_and(long i, atomic64_t *v)
+static inline void arch_atomic64_and(s64 i, atomic64_t *v)
 {
-	register long x0 asm ("x0") = i;
+	register s64 x0 asm ("x0") = i;
 	register atomic64_t *x1 asm ("x1") = v;
 
 	asm volatile(ARM64_LSE_ATOMIC_INSN(
@@ -320,9 +320,9 @@ static inline void arch_atomic64_and(long i, atomic64_t *v)
 }
 
 #define ATOMIC64_FETCH_OP_AND(name, mb, cl...)				\
-static inline long arch_atomic64_fetch_and##name(long i, atomic64_t *v)	\
+static inline s64 arch_atomic64_fetch_and##name(s64 i, atomic64_t *v)	\
 {									\
-	register long x0 asm ("x0") = i;				\
+	register s64 x0 asm ("x0") = i;					\
 	register atomic64_t *x1 asm ("x1") = v;				\
 									\
 	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
@@ -346,9 +346,9 @@ ATOMIC64_FETCH_OP_AND(        , al, "memory")
 
 #undef ATOMIC64_FETCH_OP_AND
 
-static inline void arch_atomic64_sub(long i, atomic64_t *v)
+static inline void arch_atomic64_sub(s64 i, atomic64_t *v)
 {
-	register long x0 asm ("x0") = i;
+	register s64 x0 asm ("x0") = i;
 	register atomic64_t *x1 asm ("x1") = v;
 
 	asm volatile(ARM64_LSE_ATOMIC_INSN(
@@ -364,9 +364,9 @@ static inline void arch_atomic64_sub(long i, atomic64_t *v)
 }
 
 #define ATOMIC64_OP_SUB_RETURN(name, mb, cl...)				\
-static inline long arch_atomic64_sub_return##name(long i, atomic64_t *v)\
+static inline s64 arch_atomic64_sub_return##name(s64 i, atomic64_t *v)	\
 {									\
-	register long x0 asm ("x0") = i;				\
+	register s64 x0 asm ("x0") = i;					\
 	register atomic64_t *x1 asm ("x1") = v;				\
 									\
 	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
@@ -392,9 +392,9 @@ ATOMIC64_OP_SUB_RETURN(        , al, "memory")
 #undef ATOMIC64_OP_SUB_RETURN
 
 #define ATOMIC64_FETCH_OP_SUB(name, mb, cl...)				\
-static inline long arch_atomic64_fetch_sub##name(long i, atomic64_t *v)	\
+static inline s64 arch_atomic64_fetch_sub##name(s64 i, atomic64_t *v)	\
 {									\
-	register long x0 asm ("x0") = i;				\
+	register s64 x0 asm ("x0") = i;					\
 	register atomic64_t *x1 asm ("x1") = v;				\
 									\
 	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
@@ -418,7 +418,7 @@ ATOMIC64_FETCH_OP_SUB(        , al, "memory")
 
 #undef ATOMIC64_FETCH_OP_SUB
 
-static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
 {
 	register long x0 asm ("x0") = (long)v;
 


Thread overview: 60+ messages
2019-05-22 13:22 [PATCH 00/18] locking/atomic: atomic64 type cleanup Mark Rutland
2019-05-22 13:22 ` [PATCH 01/18] locking/atomic: crypto: nx: prepare for atomic64_read() conversion Mark Rutland
2019-06-03 13:34   ` [tip:locking/core] locking/atomic, crypto/nx: Prepare " tip-bot for Mark Rutland
2019-05-22 13:22 ` [PATCH 02/18] locking/atomic: s390/pci: prepare " Mark Rutland
2019-06-03 13:34   ` [tip:locking/core] locking/atomic, s390/pci: Prepare " tip-bot for Mark Rutland
2019-05-22 13:22 ` [PATCH 03/18] locking/atomic: generic: use s64 for atomic64 Mark Rutland
2019-05-22 21:16   ` Arnd Bergmann
2019-06-03 13:35   ` [tip:locking/core] locking/atomic: Use " tip-bot for Mark Rutland
2019-05-22 13:22 ` [PATCH 04/18] locking/atomic: alpha: use " Mark Rutland
2019-06-03 13:36   ` [tip:locking/core] locking/atomic, alpha: Use " tip-bot for Mark Rutland
2019-05-22 13:22 ` [PATCH 05/18] locking/atomic: arc: use " Mark Rutland
2019-05-23 23:10   ` Vineet Gupta
2019-06-03 13:36   ` [tip:locking/core] locking/atomic, arc: Use " tip-bot for Mark Rutland
2019-05-22 13:22 ` [PATCH 06/18] locking/atomic: arm: use " Mark Rutland
2019-06-03 13:37   ` [tip:locking/core] locking/atomic, arm: Use " tip-bot for Mark Rutland
2019-05-22 13:22 ` [PATCH 07/18] locking/atomic: arm64: use " Mark Rutland
2019-06-03 13:38   ` tip-bot for Mark Rutland [this message]
2019-05-22 13:22 ` [PATCH 08/18] locking/atomic: ia64: " Mark Rutland
2019-06-03 13:39   ` [tip:locking/core] locking/atomic, ia64: Use " tip-bot for Mark Rutland
2019-05-22 13:22 ` [PATCH 09/18] locking/atomic: mips: use " Mark Rutland
2019-06-03 13:39   ` [tip:locking/core] locking/atomic, mips: Use " tip-bot for Mark Rutland
2019-05-22 13:22 ` [PATCH 10/18] locking/atomic: powerpc: use " Mark Rutland
2019-05-23 13:27   ` Michael Ellerman
2019-06-03 13:40   ` [tip:locking/core] locking/atomic, powerpc: Use " tip-bot for Mark Rutland
2019-05-22 13:22 ` [PATCH 11/18] locking/atomic: riscv: fix atomic64_sub_if_positive() offset argument Mark Rutland
2019-05-22 19:06   ` Palmer Dabbelt
2019-06-03 13:41   ` [tip:locking/core] locking/atomic, riscv: Fix " tip-bot for Mark Rutland
2019-05-22 13:22 ` [PATCH 12/18] locking/atomic: riscv: use s64 for atomic64 Mark Rutland
2019-05-22 19:06   ` Palmer Dabbelt
2019-05-23 10:23     ` Mark Rutland
2019-06-03 13:41   ` [tip:locking/core] locking/atomic, riscv: Use " tip-bot for Mark Rutland
2019-05-22 13:22 ` [PATCH 13/18] locking/atomic: s390: use " Mark Rutland
2019-06-03 13:42   ` [tip:locking/core] locking/atomic, s390: Use " tip-bot for Mark Rutland
2019-05-22 13:22 ` [PATCH 14/18] locking/atomic: sparc: use " Mark Rutland
2019-06-03 13:43   ` [tip:locking/core] locking/atomic, sparc: Use " tip-bot for Mark Rutland
2019-05-22 13:22 ` [PATCH 15/18] locking/atomic: x86: use " Mark Rutland
2019-06-03 13:44   ` [tip:locking/core] locking/atomic, x86: Use " tip-bot for Mark Rutland
2019-05-22 13:22 ` [PATCH 16/18] locking/atomic: use s64 for atomic64_t on 64-bit Mark Rutland
2019-06-03 13:44   ` [tip:locking/core] locking/atomic: Use " tip-bot for Mark Rutland
2019-05-22 13:22 ` [PATCH 17/18] locking/atomic: crypto: nx: remove redundant casts Mark Rutland
2019-06-03 13:45   ` [tip:locking/core] locking/atomic, crypto/nx: Remove " tip-bot for Mark Rutland
2019-05-22 13:22 ` [PATCH 18/18] locking/atomic: s390/pci: remove " Mark Rutland
2019-06-03 13:46   ` [tip:locking/core] locking/atomic, s390/pci: Remove " tip-bot for Mark Rutland
2019-05-22 21:18 ` [PATCH 00/18] locking/atomic: atomic64 type cleanup Arnd Bergmann
2019-05-23 10:28   ` Mark Rutland
2019-05-23  8:30 ` Andrea Parri
2019-05-23 10:19   ` Mark Rutland
2019-05-23 11:20     ` Andrea Parri
2019-05-24 10:37     ` Peter Zijlstra
2019-05-24 11:18       ` Peter Zijlstra
2019-05-24 11:38         ` Greg KH
2019-05-24 11:42         ` Will Deacon
2019-05-24 11:52           ` Peter Zijlstra
2019-05-24 22:43             ` Andrea Parri
2019-05-28 10:47               ` Peter Zijlstra
2019-05-28 11:15                 ` Andrea Parri
2019-06-03 13:46             ` [tip:locking/core] Documentation/atomic_t.txt: Clarify pure non-rmw usage tip-bot for Peter Zijlstra
2019-06-06  8:44               ` Andrea Parri
2019-06-06  9:04                 ` Peter Zijlstra
2019-06-06  9:11                   ` Andrea Parri
