From: Will Deacon <will@kernel.org>
To: linux-arm-kernel@lists.infradead.org
Cc: mark.rutland@arm.com, peterz@infradead.org,
	catalin.marinas@arm.com, ndesaulniers@google.com,
	robin.murphy@arm.com, Ard.Biesheuvel@arm.com,
	andrew.murray@arm.com, natechancellor@gmail.com,
	Will Deacon <will@kernel.org>
Subject: [PATCH v5 04/10] arm64: avoid using hard-coded registers for LSE atomics
Date: Thu, 29 Aug 2019 16:48:28 +0100	[thread overview]
Message-ID: <20190829154834.26547-5-will@kernel.org> (raw)
In-Reply-To: <20190829154834.26547-1-will@kernel.org>

From: Andrew Murray <andrew.murray@arm.com>

Now that we have removed the out-of-line ll/sc atomics, we can give
the compiler the freedom to choose its own register allocation.

Remove the hard-coded use of x30 and instead let the compiler
allocate a temporary register for the scratch value.
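
For example, the 32-bit add_return case changes roughly as follows (a
simplified sketch of the pattern in the diff below, with the macro's
barrier suffix (#mb) and extra clobber arguments (cl) omitted):

  Before, with w30 hard-coded as the scratch register and x30 listed
  as a clobber:

	asm volatile(
	"	ldadd	%w[i], w30, %[v]\n"
	"	add	%w[i], %w[i], w30"
	: [i] "+r" (i), [v] "+Q" (v->counter)
	: "r" (v)
	: "x30");

  After, with the compiler free to allocate the temporary:

	u32 tmp;

	asm volatile(
	"	ldadd	%w[i], %w[tmp], %[v]\n"
	"	add	%w[i], %w[i], %w[tmp]"
	: [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)
	: "r" (v));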

Signed-off-by: Andrew Murray <andrew.murray@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
---
 arch/arm64/include/asm/atomic_lse.h | 70 ++++++++++++++++++++++---------------
 1 file changed, 41 insertions(+), 29 deletions(-)

diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index 7dce5e1f074e..c6bd87d2915b 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -55,12 +55,14 @@ ATOMIC_FETCH_OPS(add, ldadd)
 #define ATOMIC_OP_ADD_RETURN(name, mb, cl...)				\
 static inline int __lse_atomic_add_return##name(int i, atomic_t *v)	\
 {									\
+	u32 tmp;							\
+									\
 	asm volatile(							\
-	"	ldadd" #mb "	%w[i], w30, %[v]\n"			\
-	"	add	%w[i], %w[i], w30"				\
-	: [i] "+r" (i), [v] "+Q" (v->counter)				\
+	"	ldadd" #mb "	%w[i], %w[tmp], %[v]\n"			\
+	"	add	%w[i], %w[i], %w[tmp]"				\
+	: [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)	\
 	: "r" (v)							\
-	: "x30", ##cl);							\
+	: cl);								\
 									\
 	return i;							\
 }
@@ -113,13 +115,15 @@ static inline void __lse_atomic_sub(int i, atomic_t *v)
 #define ATOMIC_OP_SUB_RETURN(name, mb, cl...)				\
 static inline int __lse_atomic_sub_return##name(int i, atomic_t *v)	\
 {									\
+	u32 tmp;							\
+									\
 	asm volatile(							\
 	"	neg	%w[i], %w[i]\n"					\
-	"	ldadd" #mb "	%w[i], w30, %[v]\n"			\
-	"	add	%w[i], %w[i], w30"				\
-	: [i] "+&r" (i), [v] "+Q" (v->counter)				\
+	"	ldadd" #mb "	%w[i], %w[tmp], %[v]\n"			\
+	"	add	%w[i], %w[i], %w[tmp]"				\
+	: [i] "+&r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)	\
 	: "r" (v)							\
-	: "x30", ##cl);							\
+	: cl);								\
 									\
 	return i;							\
 }
@@ -196,12 +200,14 @@ ATOMIC64_FETCH_OPS(add, ldadd)
 #define ATOMIC64_OP_ADD_RETURN(name, mb, cl...)				\
 static inline long __lse_atomic64_add_return##name(s64 i, atomic64_t *v)\
 {									\
+	unsigned long tmp;						\
+									\
 	asm volatile(							\
-	"	ldadd" #mb "	%[i], x30, %[v]\n"			\
-	"	add	%[i], %[i], x30"				\
-	: [i] "+r" (i), [v] "+Q" (v->counter)				\
+	"	ldadd" #mb "	%[i], %x[tmp], %[v]\n"			\
+	"	add	%[i], %[i], %x[tmp]"				\
+	: [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)	\
 	: "r" (v)							\
-	: "x30", ##cl);							\
+	: cl);								\
 									\
 	return i;							\
 }
@@ -254,13 +260,15 @@ static inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
 #define ATOMIC64_OP_SUB_RETURN(name, mb, cl...)				\
 static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v)	\
 {									\
+	unsigned long tmp;						\
+									\
 	asm volatile(							\
 	"	neg	%[i], %[i]\n"					\
-	"	ldadd" #mb "	%[i], x30, %[v]\n"			\
-	"	add	%[i], %[i], x30"				\
-	: [i] "+&r" (i), [v] "+Q" (v->counter)				\
+	"	ldadd" #mb "	%[i], %x[tmp], %[v]\n"			\
+	"	add	%[i], %[i], %x[tmp]"				\
+	: [i] "+&r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)	\
 	: "r" (v)							\
-	: "x30", ##cl);							\
+	: cl);								\
 									\
 	return i;							\
 }
@@ -294,18 +302,20 @@ ATOMIC64_FETCH_OP_SUB(        , al, "memory")
 
 static inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
 {
+	unsigned long tmp;
+
 	asm volatile(
-	"1:	ldr	x30, %[v]\n"
-	"	subs	%[ret], x30, #1\n"
+	"1:	ldr	%x[tmp], %[v]\n"
+	"	subs	%[ret], %x[tmp], #1\n"
 	"	b.lt	2f\n"
-	"	casal	x30, %[ret], %[v]\n"
-	"	sub	x30, x30, #1\n"
-	"	sub	x30, x30, %[ret]\n"
-	"	cbnz	x30, 1b\n"
+	"	casal	%x[tmp], %[ret], %[v]\n"
+	"	sub	%x[tmp], %x[tmp], #1\n"
+	"	sub	%x[tmp], %x[tmp], %[ret]\n"
+	"	cbnz	%x[tmp], 1b\n"
 	"2:"
-	: [ret] "+&r" (v), [v] "+Q" (v->counter)
+	: [ret] "+&r" (v), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)
 	:
-	: "x30", "cc", "memory");
+	: "cc", "memory");
 
 	return (long)v;
 }
@@ -318,14 +328,16 @@ static inline u##sz __lse__cmpxchg_case_##name##sz(volatile void *ptr,	\
 	register unsigned long x0 asm ("x0") = (unsigned long)ptr;	\
 	register u##sz x1 asm ("x1") = old;				\
 	register u##sz x2 asm ("x2") = new;				\
+	unsigned long tmp;						\
 									\
 	asm volatile(							\
-	"	mov	" #w "30, %" #w "[old]\n"			\
-	"	cas" #mb #sfx "\t" #w "30, %" #w "[new], %[v]\n"	\
-	"	mov	%" #w "[ret], " #w "30"				\
-	: [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr)		\
+	"	mov	%" #w "[tmp], %" #w "[old]\n"			\
+	"	cas" #mb #sfx "\t%" #w "[tmp], %" #w "[new], %[v]\n"	\
+	"	mov	%" #w "[ret], %" #w "[tmp]"			\
+	: [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr),		\
+	  [tmp] "=&r" (tmp)						\
 	: [old] "r" (x1), [new] "r" (x2)				\
-	: "x30", ##cl);							\
+	: cl);								\
 									\
 	return x0;							\
 }
-- 
2.11.0


