From: Michael Ellerman <mpe@ellerman.id.au>
To: Mark Rutland <mark.rutland@arm.com>,
linux-kernel@vger.kernel.org, peterz@infradead.org,
will.deacon@arm.com
Cc: aou@eecs.berkeley.edu, arnd@arndb.de, bp@alien8.de,
catalin.marinas@arm.com, davem@davemloft.net,
fenghua.yu@intel.com, heiko.carstens@de.ibm.com,
herbert@gondor.apana.org.au, ink@jurassic.park.msu.ru,
jhogan@kernel.org, linux@armlinux.org.uk, mark.rutland@arm.com,
mattst88@gmail.com, mingo@kernel.org, palmer@sifive.com,
paul.burton@mips.com, paulus@samba.org, ralf@linux-mips.org,
rth@twiddle.net, stable@vger.kernel.org, tglx@linutronix.de,
tony.luck@intel.com, vgupta@synopsys.com
Subject: Re: [PATCH 10/18] locking/atomic: powerpc: use s64 for atomic64
Date: Thu, 23 May 2019 23:27:54 +1000
Message-ID: <87ef4pqp0l.fsf@concordia.ellerman.id.au>
In-Reply-To: <20190522132250.26499-11-mark.rutland@arm.com>

Mark Rutland <mark.rutland@arm.com> writes:
> As a step towards making the atomic64 API use consistent types treewide,
> let's have the powerpc atomic64 implementation use s64 as the underlying
> type for atomic64_t, rather than long, matching the generated headers.
>
> As atomic64_read() depends on the generic definition of atomic64_t, this
> still returns long on 64-bit. This will be converted in a subsequent
> patch.
>
> Otherwise, there should be no functional change as a result of this
> patch.
>
> Signed-off-by: Mark Rutland <mark.rutland@arm.com>
> Cc: Michael Ellerman <mpe@ellerman.id.au>
> Cc: Paul Mackerras <paulus@samba.org>
> Cc: Peter Zijlstra <peterz@infradead.org>
> Cc: Will Deacon <will.deacon@arm.com>
> ---
> arch/powerpc/include/asm/atomic.h | 44 +++++++++++++++++++--------------------
> 1 file changed, 22 insertions(+), 22 deletions(-)
Conversion looks good to me.
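
For anyone skimming: on ppc64 both long and s64 are 64 bits wide, so this
series is about type consistency with the generated headers rather than a
width change. At this point in the series the generic typedef still uses
long for the counter on 64-bit, which is why atomic64_read() keeps
returning long until atomic64_t itself is converted by a later patch.
Roughly (a sketch from memory, not a verbatim quote of the tree):

  /* include/linux/types.h, before the later atomic64_t conversion */
  #ifdef CONFIG_64BIT
  typedef struct {
          long counter;   /* becomes s64 once atomic64_t is converted */
  } atomic64_t;
  #endif
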
Reviewed-by: Michael Ellerman <mpe@ellerman.id.au> (powerpc)
cheers
> diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
> index 52eafaf74054..31c231ea56b7 100644
> --- a/arch/powerpc/include/asm/atomic.h
> +++ b/arch/powerpc/include/asm/atomic.h
> @@ -297,24 +297,24 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
>
> #define ATOMIC64_INIT(i) { (i) }
>
> -static __inline__ long atomic64_read(const atomic64_t *v)
> +static __inline__ s64 atomic64_read(const atomic64_t *v)
> {
> - long t;
> + s64 t;
>
> __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
>
> return t;
> }
>
> -static __inline__ void atomic64_set(atomic64_t *v, long i)
> +static __inline__ void atomic64_set(atomic64_t *v, s64 i)
> {
> __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
> }
>
> #define ATOMIC64_OP(op, asm_op) \
> -static __inline__ void atomic64_##op(long a, atomic64_t *v) \
> +static __inline__ void atomic64_##op(s64 a, atomic64_t *v) \
> { \
> - long t; \
> + s64 t; \
> \
> __asm__ __volatile__( \
> "1: ldarx %0,0,%3 # atomic64_" #op "\n" \
> @@ -327,10 +327,10 @@ static __inline__ void atomic64_##op(long a, atomic64_t *v) \
> }
>
> #define ATOMIC64_OP_RETURN_RELAXED(op, asm_op) \
> -static inline long \
> -atomic64_##op##_return_relaxed(long a, atomic64_t *v) \
> +static inline s64 \
> +atomic64_##op##_return_relaxed(s64 a, atomic64_t *v) \
> { \
> - long t; \
> + s64 t; \
> \
> __asm__ __volatile__( \
> "1: ldarx %0,0,%3 # atomic64_" #op "_return_relaxed\n" \
> @@ -345,10 +345,10 @@ atomic64_##op##_return_relaxed(long a, atomic64_t *v) \
> }
>
> #define ATOMIC64_FETCH_OP_RELAXED(op, asm_op) \
> -static inline long \
> -atomic64_fetch_##op##_relaxed(long a, atomic64_t *v) \
> +static inline s64 \
> +atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v) \
> { \
> - long res, t; \
> + s64 res, t; \
> \
> __asm__ __volatile__( \
> "1: ldarx %0,0,%4 # atomic64_fetch_" #op "_relaxed\n" \
> @@ -396,7 +396,7 @@ ATOMIC64_OPS(xor, xor)
>
> static __inline__ void atomic64_inc(atomic64_t *v)
> {
> - long t;
> + s64 t;
>
> __asm__ __volatile__(
> "1: ldarx %0,0,%2 # atomic64_inc\n\
> @@ -409,9 +409,9 @@ static __inline__ void atomic64_inc(atomic64_t *v)
> }
> #define atomic64_inc atomic64_inc
>
> -static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
> +static __inline__ s64 atomic64_inc_return_relaxed(atomic64_t *v)
> {
> - long t;
> + s64 t;
>
> __asm__ __volatile__(
> "1: ldarx %0,0,%2 # atomic64_inc_return_relaxed\n"
> @@ -427,7 +427,7 @@ static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
>
> static __inline__ void atomic64_dec(atomic64_t *v)
> {
> - long t;
> + s64 t;
>
> __asm__ __volatile__(
> "1: ldarx %0,0,%2 # atomic64_dec\n\
> @@ -440,9 +440,9 @@ static __inline__ void atomic64_dec(atomic64_t *v)
> }
> #define atomic64_dec atomic64_dec
>
> -static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
> +static __inline__ s64 atomic64_dec_return_relaxed(atomic64_t *v)
> {
> - long t;
> + s64 t;
>
> __asm__ __volatile__(
> "1: ldarx %0,0,%2 # atomic64_dec_return_relaxed\n"
> @@ -463,9 +463,9 @@ static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
> * Atomically test *v and decrement if it is greater than 0.
> * The function returns the old value of *v minus 1.
> */
> -static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
> +static __inline__ s64 atomic64_dec_if_positive(atomic64_t *v)
> {
> - long t;
> + s64 t;
>
> __asm__ __volatile__(
> PPC_ATOMIC_ENTRY_BARRIER
> @@ -502,9 +502,9 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
> * Atomically adds @a to @v, so long as it was not @u.
> * Returns the old value of @v.
> */
> -static __inline__ long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
> +static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
> {
> - long t;
> + s64 t;
>
> __asm__ __volatile__ (
> PPC_ATOMIC_ENTRY_BARRIER
> @@ -534,7 +534,7 @@ static __inline__ long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
> */
> static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
> {
> - long t1, t2;
> + s64 t1, t2;
>
> __asm__ __volatile__ (
> PPC_ATOMIC_ENTRY_BARRIER
> --
> 2.11.0