All of lore.kernel.org
 help / color / mirror / Atom feed
From: guoren@kernel.org
To: tj@kernel.org, cl@linux.com, palmer@dabbelt.com, will@kernel.org,
	catalin.marinas@arm.com, peterz@infradead.org, arnd@arndb.de
Cc: linux-arch@vger.kernel.org, linux-kernel@vger.kernel.org,
	linux-riscv@lists.infradead.org,
	Guo Ren <guoren@linux.alibaba.com>, Guo Ren <guoren@kernel.org>
Subject: [RFC PATCH 3/4] riscv: percpu: Implement this_cpu operations
Date: Mon,  8 Aug 2022 04:05:59 -0400	[thread overview]
Message-ID: <20220808080600.3346843-4-guoren@kernel.org> (raw)
In-Reply-To: <20220808080600.3346843-1-guoren@kernel.org>

From: Guo Ren <guoren@linux.alibaba.com>

This patch provides riscv-specific implementations for the this_cpu
operations. We use atomic AMO operations where appropriate (32-bit and
64-bit widths).

Use the AMO instructions listed below for the percpu operations; all
others fall back to the generic implementations:
 - amoadd.w/d
 - amoand.w/d
 - amoor.w/d
 - amoswap.w/d

Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Signed-off-by: Guo Ren <guoren@kernel.org>
---
 arch/riscv/include/asm/percpu.h | 104 ++++++++++++++++++++++++++++++++
 1 file changed, 104 insertions(+)
 create mode 100644 arch/riscv/include/asm/percpu.h

diff --git a/arch/riscv/include/asm/percpu.h b/arch/riscv/include/asm/percpu.h
new file mode 100644
index 000000000000..f41d339c41f3
--- /dev/null
+++ b/arch/riscv/include/asm/percpu.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _ASM_RISCV_PERCPU_H
+#define _ASM_RISCV_PERCPU_H
+
+#include <asm/cmpxchg.h>
+
+#define __PERCPU_OP_CASE(asm_type, name, sz, asm_op)			\
+static inline void							\
+__percpu_##name##_case_##sz(void *ptr, ulong val)			\
+{									\
+	__asm__ __volatile__ (						\
+		"	amo" #asm_op "." #asm_type " zero, %1, (%0)"	\
+		:							\
+		: "r" (ptr), "r" (val)					\
+		: "memory");						\
+}
+
+#define __PERCPU_RET_OP_CASE(asm_type, name, sz, asm_op, c_op)		\
+static inline u##sz							\
+__percpu_##name##_return_case_##sz(void *ptr, ulong val)		\
+{									\
+	u##sz ret;							\
+	__asm__ __volatile__ (						\
+		"	amo" #asm_op "." #asm_type " %0, %2, (%1)"	\
+		: "=r" (ret)						\
+		: "r" (ptr), "r" (val)					\
+		: "memory");						\
+									\
+	return ret c_op val;						\
+}
+
+#ifdef CONFIG_64BIT
+#define PERCPU_OP(name, asm_op)						\
+	__PERCPU_OP_CASE(w, name, 32, asm_op)				\
+	__PERCPU_OP_CASE(d, name, 64, asm_op)
+
+#define PERCPU_RET_OP(name, asm_op, c_op)				\
+	__PERCPU_RET_OP_CASE(w, name, 32, asm_op, c_op)			\
+	__PERCPU_RET_OP_CASE(d, name, 64, asm_op, c_op)
+#else  /* CONFIG_32BIT */
+#define PERCPU_OP(name, asm_op)						\
+	__PERCPU_OP_CASE(w, name, 32, asm_op)
+
+#define PERCPU_RET_OP(name, asm_op, c_op)				\
+	__PERCPU_RET_OP_CASE(w, name, 32, asm_op, c_op)
+#endif /* CONFIG_64BIT */
+
+PERCPU_OP(add, add)
+PERCPU_OP(and, and)
+PERCPU_OP(or, or)
+PERCPU_RET_OP(add, add, +)
+
+#undef __PERCPU_OP_CASE
+#undef __PERCPU_RET_OP_CASE
+#undef PERCPU_OP
+#undef PERCPU_RET_OP
+
+#define _pcp_protect(op, pcp, ...)					\
+({									\
+	preempt_disable_notrace();					\
+	op(raw_cpu_ptr(&(pcp)), __VA_ARGS__);				\
+	preempt_enable_notrace();					\
+})
+
+#define _pcp_protect_return(op, pcp, args...)				\
+({									\
+	typeof(pcp) __retval;						\
+	preempt_disable_notrace();					\
+	if (__native_word(pcp)) 					\
+		__retval = (typeof(pcp))op(raw_cpu_ptr(&(pcp)), ##args);\
+	else								\
+		BUILD_BUG();						\
+	preempt_enable_notrace();					\
+	__retval;							\
+})
+
+#define this_cpu_add_4(pcp, val)	\
+	_pcp_protect(__percpu_add_case_32, pcp, val)
+#define this_cpu_add_return_4(pcp, val)	\
+	_pcp_protect_return(__percpu_add_return_case_32, pcp, val)
+#define this_cpu_and_4(pcp, val)	\
+	_pcp_protect(__percpu_and_case_32, pcp, val)
+#define this_cpu_or_4(pcp, val)		\
+	_pcp_protect(__percpu_or_case_32, pcp, val)
+#define this_cpu_xchg_4(pcp, val)	\
+	_pcp_protect_return(xchg_relaxed, pcp, val)
+
+#ifdef CONFIG_64BIT
+#define this_cpu_add_8(pcp, val)	\
+	_pcp_protect(__percpu_add_case_64, pcp, val)
+#define this_cpu_add_return_8(pcp, val)	\
+	_pcp_protect_return(__percpu_add_return_case_64, pcp, val)
+#define this_cpu_and_8(pcp, val)	\
+	_pcp_protect(__percpu_and_case_64, pcp, val)
+#define this_cpu_or_8(pcp, val)		\
+	_pcp_protect(__percpu_or_case_64, pcp, val)
+#define this_cpu_xchg_8(pcp, val)	\
+	_pcp_protect_return(xchg_relaxed, pcp, val)
+#endif /* CONFIG_64BIT */
+
+#include <asm-generic/percpu.h>
+
+#endif /* _ASM_RISCV_PERCPU_H */
-- 
2.36.1


WARNING: multiple messages have this Message-ID (diff)
From: guoren@kernel.org
To: tj@kernel.org, cl@linux.com, palmer@dabbelt.com, will@kernel.org,
	catalin.marinas@arm.com, peterz@infradead.org, arnd@arndb.de
Cc: linux-arch@vger.kernel.org, linux-kernel@vger.kernel.org,
	linux-riscv@lists.infradead.org,
	Guo Ren <guoren@linux.alibaba.com>, Guo Ren <guoren@kernel.org>
Subject: [RFC PATCH 3/4] riscv: percpu: Implement this_cpu operations
Date: Mon,  8 Aug 2022 04:05:59 -0400	[thread overview]
Message-ID: <20220808080600.3346843-4-guoren@kernel.org> (raw)
In-Reply-To: <20220808080600.3346843-1-guoren@kernel.org>

From: Guo Ren <guoren@linux.alibaba.com>

This patch provides riscv-specific implementations for the this_cpu
operations. We use atomic AMO operations where appropriate (32-bit and
64-bit widths).

Use the AMO instructions listed below for the percpu operations; all
others fall back to the generic implementations:
 - amoadd.w/d
 - amoand.w/d
 - amoor.w/d
 - amoswap.w/d

Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Signed-off-by: Guo Ren <guoren@kernel.org>
---
 arch/riscv/include/asm/percpu.h | 104 ++++++++++++++++++++++++++++++++
 1 file changed, 104 insertions(+)
 create mode 100644 arch/riscv/include/asm/percpu.h

diff --git a/arch/riscv/include/asm/percpu.h b/arch/riscv/include/asm/percpu.h
new file mode 100644
index 000000000000..f41d339c41f3
--- /dev/null
+++ b/arch/riscv/include/asm/percpu.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _ASM_RISCV_PERCPU_H
+#define _ASM_RISCV_PERCPU_H
+
+#include <asm/cmpxchg.h>
+
+#define __PERCPU_OP_CASE(asm_type, name, sz, asm_op)			\
+static inline void							\
+__percpu_##name##_case_##sz(void *ptr, ulong val)			\
+{									\
+	__asm__ __volatile__ (						\
+		"	amo" #asm_op "." #asm_type " zero, %1, (%0)"	\
+		:							\
+		: "r" (ptr), "r" (val)					\
+		: "memory");						\
+}
+
+#define __PERCPU_RET_OP_CASE(asm_type, name, sz, asm_op, c_op)		\
+static inline u##sz							\
+__percpu_##name##_return_case_##sz(void *ptr, ulong val)		\
+{									\
+	u##sz ret;							\
+	__asm__ __volatile__ (						\
+		"	amo" #asm_op "." #asm_type " %0, %2, (%1)"	\
+		: "=r" (ret)						\
+		: "r" (ptr), "r" (val)					\
+		: "memory");						\
+									\
+	return ret c_op val;						\
+}
+
+#ifdef CONFIG_64BIT
+#define PERCPU_OP(name, asm_op)						\
+	__PERCPU_OP_CASE(w, name, 32, asm_op)				\
+	__PERCPU_OP_CASE(d, name, 64, asm_op)
+
+#define PERCPU_RET_OP(name, asm_op, c_op)				\
+	__PERCPU_RET_OP_CASE(w, name, 32, asm_op, c_op)			\
+	__PERCPU_RET_OP_CASE(d, name, 64, asm_op, c_op)
+#else  /* CONFIG_32BIT */
+#define PERCPU_OP(name, asm_op)						\
+	__PERCPU_OP_CASE(w, name, 32, asm_op)
+
+#define PERCPU_RET_OP(name, asm_op, c_op)				\
+	__PERCPU_RET_OP_CASE(w, name, 32, asm_op, c_op)
+#endif /* CONFIG_64BIT */
+
+PERCPU_OP(add, add)
+PERCPU_OP(and, and)
+PERCPU_OP(or, or)
+PERCPU_RET_OP(add, add, +)
+
+#undef __PERCPU_OP_CASE
+#undef __PERCPU_RET_OP_CASE
+#undef PERCPU_OP
+#undef PERCPU_RET_OP
+
+#define _pcp_protect(op, pcp, ...)					\
+({									\
+	preempt_disable_notrace();					\
+	op(raw_cpu_ptr(&(pcp)), __VA_ARGS__);				\
+	preempt_enable_notrace();					\
+})
+
+#define _pcp_protect_return(op, pcp, args...)				\
+({									\
+	typeof(pcp) __retval;						\
+	preempt_disable_notrace();					\
+	if (__native_word(pcp)) 					\
+		__retval = (typeof(pcp))op(raw_cpu_ptr(&(pcp)), ##args);\
+	else								\
+		BUILD_BUG();						\
+	preempt_enable_notrace();					\
+	__retval;							\
+})
+
+#define this_cpu_add_4(pcp, val)	\
+	_pcp_protect(__percpu_add_case_32, pcp, val)
+#define this_cpu_add_return_4(pcp, val)	\
+	_pcp_protect_return(__percpu_add_return_case_32, pcp, val)
+#define this_cpu_and_4(pcp, val)	\
+	_pcp_protect(__percpu_and_case_32, pcp, val)
+#define this_cpu_or_4(pcp, val)		\
+	_pcp_protect(__percpu_or_case_32, pcp, val)
+#define this_cpu_xchg_4(pcp, val)	\
+	_pcp_protect_return(xchg_relaxed, pcp, val)
+
+#ifdef CONFIG_64BIT
+#define this_cpu_add_8(pcp, val)	\
+	_pcp_protect(__percpu_add_case_64, pcp, val)
+#define this_cpu_add_return_8(pcp, val)	\
+	_pcp_protect_return(__percpu_add_return_case_64, pcp, val)
+#define this_cpu_and_8(pcp, val)	\
+	_pcp_protect(__percpu_and_case_64, pcp, val)
+#define this_cpu_or_8(pcp, val)		\
+	_pcp_protect(__percpu_or_case_64, pcp, val)
+#define this_cpu_xchg_8(pcp, val)	\
+	_pcp_protect_return(xchg_relaxed, pcp, val)
+#endif /* CONFIG_64BIT */
+
+#include <asm-generic/percpu.h>
+
+#endif /* _ASM_RISCV_PERCPU_H */
-- 
2.36.1


_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv

  parent reply	other threads:[~2022-08-08  8:06 UTC|newest]

Thread overview: 14+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-08-08  8:05 [RFC PATCH 0/4] riscv: Add basic percpu operations guoren
2022-08-08  8:05 ` guoren
2022-08-08  8:05 ` [RFC PATCH 1/4] vmstat: percpu: Rename HAVE_CMPXCHG_LOCAL to HAVE_CMPXCHG_PERCPU_BYTE guoren
2022-08-08  8:05   ` guoren
2022-08-08  9:31   ` Christoph Lameter
2022-08-08  9:31     ` Christoph Lameter
2022-08-09  2:58     ` Guo Ren
2022-08-09  2:58       ` Guo Ren
2022-08-08  8:05 ` [RFC PATCH 2/4] arm64: percpu: Use generic PERCPU_RW_OPS guoren
2022-08-08  8:05   ` guoren
2022-08-08  8:05 ` guoren [this message]
2022-08-08  8:05   ` [RFC PATCH 3/4] riscv: percpu: Implement this_cpu operations guoren
2022-08-08  8:06 ` [RFC PATCH 4/4] riscv: cmpxchg: Remove unused cmpxchg(64)_local guoren
2022-08-08  8:06   ` guoren

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20220808080600.3346843-4-guoren@kernel.org \
    --to=guoren@kernel.org \
    --cc=arnd@arndb.de \
    --cc=catalin.marinas@arm.com \
    --cc=cl@linux.com \
    --cc=guoren@linux.alibaba.com \
    --cc=linux-arch@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-riscv@lists.infradead.org \
    --cc=palmer@dabbelt.com \
    --cc=peterz@infradead.org \
    --cc=tj@kernel.org \
    --cc=will@kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.