From: Mark Rutland <mark.rutland@arm.com>
To: linux-arm-kernel@lists.infradead.org
Cc: catalin.marinas@arm.com, mark.rutland@arm.com, will@kernel.org
Subject: [PATCH 2/2] arm64: atomic: always inline the assembly
Date: Wed, 17 Aug 2022 16:59:14 +0100
Message-ID: <20220817155914.3975112-3-mark.rutland@arm.com>
In-Reply-To: <20220817155914.3975112-1-mark.rutland@arm.com>

The __lse_*() and __ll_sc_*() atomic implementations are marked as
inline rather than __always_inline, permitting a compiler to generate
out-of-line versions, which may be instrumented.
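
As an illustration (a minimal sketch, assuming typical GCC/clang
behaviour; the function names here are hypothetical, not taken from
this patch):

  /*
   * With plain "inline", the compiler may still emit an out-of-line
   * copy (e.g. at -O0, or when it decides inlining is unprofitable).
   * That copy is a regular function, and so may be instrumented.
   */
  static inline int example_op(int i)
  {
  	return i + 1;
  }

  /*
   * With __always_inline, the body is folded into every caller, so
   * no separate, instrumentable copy exists.
   */
  static __always_inline int example_op_forced(int i)
  {
  	return i + 1;
  }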

We marked the atomic wrappers as __always_inline in commit:

  c35a824c31834d94 ("arm64: make atomic helpers __always_inline")

... but did not think to do the same for the underlying implementations.

If the compiler were to out-of-line an LSE or LL/SC atomic, this could
break noinstr code. Ensure this doesn't happen by marking the underlying
implementations as __always_inline.
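
For example (a hypothetical noinstr caller, for illustration only):

  static noinstr void example_entry_path(atomic_t *v)
  {
  	/*
  	 * arch_atomic_inc() expands to __lse_atomic_add() or
  	 * __ll_sc_atomic_add(). If either were emitted out-of-line,
  	 * this noinstr path would call potentially instrumented code.
  	 */
  	arch_atomic_inc(v);
  }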

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
---
 arch/arm64/include/asm/atomic_ll_sc.h | 18 +++++------
 arch/arm64/include/asm/atomic_lse.h   | 46 +++++++++++++++++----------
 2 files changed, 38 insertions(+), 26 deletions(-)

diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
index 906e2d8c254ca..0890e4f568fb7 100644
--- a/arch/arm64/include/asm/atomic_ll_sc.h
+++ b/arch/arm64/include/asm/atomic_ll_sc.h
@@ -23,7 +23,7 @@
  */
 
 #define ATOMIC_OP(op, asm_op, constraint)				\
-static inline void							\
+static __always_inline void						\
 __ll_sc_atomic_##op(int i, atomic_t *v)					\
 {									\
 	unsigned long tmp;						\
@@ -40,7 +40,7 @@ __ll_sc_atomic_##op(int i, atomic_t *v)					\
 }
 
 #define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
-static inline int							\
+static __always_inline int						\
 __ll_sc_atomic_##op##_return##name(int i, atomic_t *v)			\
 {									\
 	unsigned long tmp;						\
@@ -61,7 +61,7 @@ __ll_sc_atomic_##op##_return##name(int i, atomic_t *v)			\
 }
 
 #define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint) \
-static inline int							\
+static __always_inline int						\
 __ll_sc_atomic_fetch_##op##name(int i, atomic_t *v)			\
 {									\
 	unsigned long tmp;						\
@@ -119,7 +119,7 @@ ATOMIC_OPS(andnot, bic, )
 #undef ATOMIC_OP
 
 #define ATOMIC64_OP(op, asm_op, constraint)				\
-static inline void							\
+static __always_inline void						\
 __ll_sc_atomic64_##op(s64 i, atomic64_t *v)				\
 {									\
 	s64 result;							\
@@ -136,7 +136,7 @@ __ll_sc_atomic64_##op(s64 i, atomic64_t *v)				\
 }
 
 #define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
-static inline long							\
+static __always_inline long						\
 __ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v)		\
 {									\
 	s64 result;							\
@@ -157,7 +157,7 @@ __ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v)		\
 }
 
 #define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint)\
-static inline long							\
+static __always_inline long						\
 __ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v)			\
 {									\
 	s64 result, val;						\
@@ -214,7 +214,7 @@ ATOMIC64_OPS(andnot, bic, )
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
 
-static inline s64
+static __always_inline s64
 __ll_sc_atomic64_dec_if_positive(atomic64_t *v)
 {
 	s64 result;
@@ -237,7 +237,7 @@ __ll_sc_atomic64_dec_if_positive(atomic64_t *v)
 }
 
 #define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl, constraint)	\
-static inline u##sz							\
+static __always_inline u##sz						\
 __ll_sc__cmpxchg_case_##name##sz(volatile void *ptr,			\
 					 unsigned long old,		\
 					 u##sz new)			\
@@ -295,7 +295,7 @@ __CMPXCHG_CASE( ,  ,  mb_, 64, dmb ish,  , l, "memory", L)
 #undef __CMPXCHG_CASE
 
 #define __CMPXCHG_DBL(name, mb, rel, cl)				\
-static inline long							\
+static __always_inline long						\
 __ll_sc__cmpxchg_double##name(unsigned long old1,			\
 				      unsigned long old2,		\
 				      unsigned long new1,		\
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index 5d460f6b7675e..52075e93de6c0 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -11,7 +11,8 @@
 #define __ASM_ATOMIC_LSE_H
 
 #define ATOMIC_OP(op, asm_op)						\
-static inline void __lse_atomic_##op(int i, atomic_t *v)		\
+static __always_inline void						\
+__lse_atomic_##op(int i, atomic_t *v)					\
 {									\
 	asm volatile(							\
 	__LSE_PREAMBLE							\
@@ -25,7 +26,7 @@ ATOMIC_OP(or, stset)
 ATOMIC_OP(xor, steor)
 ATOMIC_OP(add, stadd)
 
-static inline void __lse_atomic_sub(int i, atomic_t *v)
+static __always_inline void __lse_atomic_sub(int i, atomic_t *v)
 {
 	__lse_atomic_add(-i, v);
 }
@@ -33,7 +34,8 @@ static inline void __lse_atomic_sub(int i, atomic_t *v)
 #undef ATOMIC_OP
 
 #define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...)			\
-static inline int __lse_atomic_fetch_##op##name(int i, atomic_t *v)	\
+static __always_inline int						\
+__lse_atomic_fetch_##op##name(int i, atomic_t *v)			\
 {									\
 	int old;							\
 									\
@@ -63,7 +65,8 @@ ATOMIC_FETCH_OPS(add, ldadd)
 #undef ATOMIC_FETCH_OPS
 
 #define ATOMIC_FETCH_OP_SUB(name)					\
-static inline int __lse_atomic_fetch_sub##name(int i, atomic_t *v)	\
+static __always_inline int						\
+__lse_atomic_fetch_sub##name(int i, atomic_t *v)			\
 {									\
 	return __lse_atomic_fetch_add##name(-i, v);			\
 }
@@ -76,12 +79,14 @@ ATOMIC_FETCH_OP_SUB(        )
 #undef ATOMIC_FETCH_OP_SUB
 
 #define ATOMIC_OP_ADD_SUB_RETURN(name)					\
-static inline int __lse_atomic_add_return##name(int i, atomic_t *v)	\
+static __always_inline int						\
+__lse_atomic_add_return##name(int i, atomic_t *v)			\
 {									\
 	return __lse_atomic_fetch_add##name(i, v) + i;			\
 }									\
 									\
-static inline int __lse_atomic_sub_return##name(int i, atomic_t *v)	\
+static __always_inline int						\
+__lse_atomic_sub_return##name(int i, atomic_t *v)			\
 {									\
 	return __lse_atomic_fetch_sub(i, v) - i;			\
 }
@@ -93,13 +98,14 @@ ATOMIC_OP_ADD_SUB_RETURN(        )
 
 #undef ATOMIC_OP_ADD_SUB_RETURN
 
-static inline void __lse_atomic_and(int i, atomic_t *v)
+static __always_inline void __lse_atomic_and(int i, atomic_t *v)
 {
 	return __lse_atomic_andnot(~i, v);
 }
 
 #define ATOMIC_FETCH_OP_AND(name, mb, cl...)				\
-static inline int __lse_atomic_fetch_and##name(int i, atomic_t *v)	\
+static __always_inline int						\
+__lse_atomic_fetch_and##name(int i, atomic_t *v)			\
 {									\
 	return __lse_atomic_fetch_andnot##name(~i, v);			\
 }
@@ -112,7 +118,8 @@ ATOMIC_FETCH_OP_AND(        , al, "memory")
 #undef ATOMIC_FETCH_OP_AND
 
 #define ATOMIC64_OP(op, asm_op)						\
-static inline void __lse_atomic64_##op(s64 i, atomic64_t *v)		\
+static __always_inline void						\
+__lse_atomic64_##op(s64 i, atomic64_t *v)				\
 {									\
 	asm volatile(							\
 	__LSE_PREAMBLE							\
@@ -126,7 +133,7 @@ ATOMIC64_OP(or, stset)
 ATOMIC64_OP(xor, steor)
 ATOMIC64_OP(add, stadd)
 
-static inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
+static __always_inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
 {
 	__lse_atomic64_add(-i, v);
 }
@@ -134,7 +141,8 @@ static inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
 #undef ATOMIC64_OP
 
 #define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...)			\
-static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\
+static __always_inline long						\
+__lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)			\
 {									\
 	s64 old;							\
 									\
@@ -164,7 +172,8 @@ ATOMIC64_FETCH_OPS(add, ldadd)
 #undef ATOMIC64_FETCH_OPS
 
 #define ATOMIC64_FETCH_OP_SUB(name)					\
-static inline long __lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v)	\
+static __always_inline long						\
+__lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v)			\
 {									\
 	return __lse_atomic64_fetch_add##name(-i, v);			\
 }
@@ -177,12 +186,14 @@ ATOMIC64_FETCH_OP_SUB(        )
 #undef ATOMIC64_FETCH_OP_SUB
 
 #define ATOMIC64_OP_ADD_SUB_RETURN(name)				\
-static inline long __lse_atomic64_add_return##name(s64 i, atomic64_t *v)\
+static __always_inline long						\
+__lse_atomic64_add_return##name(s64 i, atomic64_t *v)			\
 {									\
 	return __lse_atomic64_fetch_add##name(i, v) + i;		\
 }									\
 									\
-static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v)\
+static __always_inline long						\
+__lse_atomic64_sub_return##name(s64 i, atomic64_t *v)			\
 {									\
 	return __lse_atomic64_fetch_sub##name(i, v) - i;		\
 }
@@ -194,13 +205,14 @@ ATOMIC64_OP_ADD_SUB_RETURN(        )
 
 #undef ATOMIC64_OP_ADD_SUB_RETURN
 
-static inline void __lse_atomic64_and(s64 i, atomic64_t *v)
+static __always_inline void __lse_atomic64_and(s64 i, atomic64_t *v)
 {
 	return __lse_atomic64_andnot(~i, v);
 }
 
 #define ATOMIC64_FETCH_OP_AND(name, mb, cl...)				\
-static inline long __lse_atomic64_fetch_and##name(s64 i, atomic64_t *v)	\
+static __always_inline long						\
+__lse_atomic64_fetch_and##name(s64 i, atomic64_t *v)			\
 {									\
 	return __lse_atomic64_fetch_andnot##name(~i, v);		\
 }
@@ -212,7 +224,7 @@ ATOMIC64_FETCH_OP_AND(        , al, "memory")
 
 #undef ATOMIC64_FETCH_OP_AND
 
-static inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
+static __always_inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
 {
 	unsigned long tmp;
 
-- 
2.30.2



Thread overview: 4+ messages
2022-08-17 15:59 [PATCH 0/2] arm64: atomics: improvements Mark Rutland
2022-08-17 15:59 ` [PATCH 1/2] arm64: atomics: remove LL/SC trampolines Mark Rutland
2022-08-17 15:59 ` [PATCH 2/2] arm64: atomic: always inline the assembly Mark Rutland [this message]
2022-09-09 18:07 ` [PATCH 0/2] arm64: atomics: improvements Catalin Marinas
