* [RFC PATCH 0/4] riscv: Add basic percpu operations
@ 2022-08-08 8:05 ` guoren
0 siblings, 0 replies; 14+ messages in thread
From: guoren @ 2022-08-08 8:05 UTC (permalink / raw)
To: tj, cl, palmer, will, catalin.marinas, peterz, arnd
Cc: linux-arch, linux-kernel, linux-riscv, Guo Ren
From: Guo Ren <guoren@linux.alibaba.com>
The series tries to add basic percpu operations for riscv. HAVE_CMPXCHG_LOCAL
is easily confused with cmpxchg(64)_local, so make the name more
accurate (HAVE_CMPXCHG_PERCPU_BYTE). Last, remove RISC-V's
cmpxchg(64)_local definition because it is unused.
Guo Ren (4):
vmstat: percpu: Rename HAVE_CMPXCHG_LOCAL to HAVE_CMPXCHG_PERCPU_BYTE
arm64: percpu: Use generic PERCPU_RW_OPS
riscv: percpu: Implement this_cpu operations
riscv: cmpxchg: Remove unused cmpxchg(64)_local
.../locking/cmpxchg-local/arch-support.txt | 6 +-
arch/Kconfig | 2 +-
arch/arm64/Kconfig | 2 +-
arch/arm64/include/asm/percpu.h | 33 ------
arch/riscv/include/asm/cmpxchg.h | 9 --
arch/riscv/include/asm/percpu.h | 104 ++++++++++++++++++
arch/s390/Kconfig | 2 +-
arch/x86/Kconfig | 2 +-
mm/vmstat.c | 4 +-
9 files changed, 113 insertions(+), 51 deletions(-)
create mode 100644 arch/riscv/include/asm/percpu.h
--
2.36.1
_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv
^ permalink raw reply [flat|nested] 14+ messages in thread
* [RFC PATCH 1/4] vmstat: percpu: Rename HAVE_CMPXCHG_LOCAL to HAVE_CMPXCHG_PERCPU_BYTE
2022-08-08 8:05 ` guoren
@ 2022-08-08 8:05 ` guoren
-1 siblings, 0 replies; 14+ messages in thread
From: guoren @ 2022-08-08 8:05 UTC (permalink / raw)
To: tj, cl, palmer, will, catalin.marinas, peterz, arnd
Cc: linux-arch, linux-kernel, linux-riscv, Guo Ren, Guo Ren
From: Guo Ren <guoren@linux.alibaba.com>
The name HAVE_CMPXCHG_LOCAL is easily confused with cmpxchg_local, but
vmstat actually needs this_cpu_cmpxchg_1. Renaming it would clarify the
meaning, and maybe we could remove the cmpxchg(64)_local API (only used
by drivers/iommu/intel) in the future.
Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Signed-off-by: Guo Ren <guoren@kernel.org>
---
.../features/locking/cmpxchg-local/arch-support.txt | 6 +++---
arch/Kconfig | 2 +-
arch/arm64/Kconfig | 2 +-
arch/s390/Kconfig | 2 +-
arch/x86/Kconfig | 2 +-
mm/vmstat.c | 4 ++--
6 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/Documentation/features/locking/cmpxchg-local/arch-support.txt b/Documentation/features/locking/cmpxchg-local/arch-support.txt
index 8b1a8d9e1c79..4d4c5c2fa66d 100644
--- a/Documentation/features/locking/cmpxchg-local/arch-support.txt
+++ b/Documentation/features/locking/cmpxchg-local/arch-support.txt
@@ -1,7 +1,7 @@
#
-# Feature name: cmpxchg-local
-# Kconfig: HAVE_CMPXCHG_LOCAL
-# description: arch supports the this_cpu_cmpxchg() API
+# Feature name: cmpxchg-percpu-byte
+# Kconfig: HAVE_CMPXCHG_PERCPU_BYTE
+# description: arch supports the this_cpu_cmpxchg_1() API
#
-----------------------
| arch |status|
diff --git a/arch/Kconfig b/arch/Kconfig
index f330410da63a..81800cdfe161 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -471,7 +471,7 @@ config HAVE_ALIGNED_STRUCT_PAGE
on a struct page for better performance. However selecting this
might increase the size of a struct page by a word.
-config HAVE_CMPXCHG_LOCAL
+config HAVE_CMPXCHG_PERCPU_BYTE
bool
config HAVE_CMPXCHG_DOUBLE
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 571cc234d0b3..24a82bdc766a 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -175,7 +175,7 @@ config ARM64
select HAVE_EBPF_JIT
select HAVE_C_RECORDMCOUNT
select HAVE_CMPXCHG_DOUBLE
- select HAVE_CMPXCHG_LOCAL
+ select HAVE_CMPXCHG_PERCPU_BYTE
select HAVE_CONTEXT_TRACKING_USER
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 318fce77601d..ac03af800bf7 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -151,7 +151,7 @@ config S390
select HAVE_ARCH_VMAP_STACK
select HAVE_ASM_MODVERSIONS
select HAVE_CMPXCHG_DOUBLE
- select HAVE_CMPXCHG_LOCAL
+ select HAVE_CMPXCHG_PERCPU_BYTE
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS
select HAVE_DYNAMIC_FTRACE
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index f9920f1341c8..5f4f6df7b89f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -184,7 +184,7 @@ config X86
select HAVE_ARCH_WITHIN_STACK_FRAMES
select HAVE_ASM_MODVERSIONS
select HAVE_CMPXCHG_DOUBLE
- select HAVE_CMPXCHG_LOCAL
+ select HAVE_CMPXCHG_PERCPU_BYTE
select HAVE_CONTEXT_TRACKING_USER if X86_64
select HAVE_CONTEXT_TRACKING_USER_OFFSTACK if HAVE_CONTEXT_TRACKING_USER
select HAVE_C_RECORDMCOUNT
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 373d2730fcf2..b2fc6d28d3b2 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -554,9 +554,9 @@ void __dec_node_page_state(struct page *page, enum node_stat_item item)
}
EXPORT_SYMBOL(__dec_node_page_state);
-#ifdef CONFIG_HAVE_CMPXCHG_LOCAL
+#ifdef CONFIG_HAVE_CMPXCHG_PERCPU_BYTE
/*
- * If we have cmpxchg_local support then we do not need to incur the overhead
+ * If we have this_cpu_cmpxchg_1 arch support then we do not need to incur the overhead
* that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
*
* mod_state() modifies the zone counter state through atomic per cpu
--
2.36.1
^ permalink raw reply related [flat|nested] 14+ messages in thread
* [RFC PATCH 1/4] vmstat: percpu: Rename HAVE_CMPXCHG_LOCAL to HAVE_CMPXCHG_PERCPU_BYTE
@ 2022-08-08 8:05 ` guoren
0 siblings, 0 replies; 14+ messages in thread
From: guoren @ 2022-08-08 8:05 UTC (permalink / raw)
To: tj, cl, palmer, will, catalin.marinas, peterz, arnd
Cc: linux-arch, linux-kernel, linux-riscv, Guo Ren, Guo Ren
From: Guo Ren <guoren@linux.alibaba.com>
The name HAVE_CMPXCHG_LOCAL is easily confused with cmpxchg_local, but
vmstat actually needs this_cpu_cmpxchg_1. Renaming it would clarify the
meaning, and maybe we could remove the cmpxchg(64)_local API (only used
by drivers/iommu/intel) in the future.
Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Signed-off-by: Guo Ren <guoren@kernel.org>
---
.../features/locking/cmpxchg-local/arch-support.txt | 6 +++---
arch/Kconfig | 2 +-
arch/arm64/Kconfig | 2 +-
arch/s390/Kconfig | 2 +-
arch/x86/Kconfig | 2 +-
mm/vmstat.c | 4 ++--
6 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/Documentation/features/locking/cmpxchg-local/arch-support.txt b/Documentation/features/locking/cmpxchg-local/arch-support.txt
index 8b1a8d9e1c79..4d4c5c2fa66d 100644
--- a/Documentation/features/locking/cmpxchg-local/arch-support.txt
+++ b/Documentation/features/locking/cmpxchg-local/arch-support.txt
@@ -1,7 +1,7 @@
#
-# Feature name: cmpxchg-local
-# Kconfig: HAVE_CMPXCHG_LOCAL
-# description: arch supports the this_cpu_cmpxchg() API
+# Feature name: cmpxchg-percpu-byte
+# Kconfig: HAVE_CMPXCHG_PERCPU_BYTE
+# description: arch supports the this_cpu_cmpxchg_1() API
#
-----------------------
| arch |status|
diff --git a/arch/Kconfig b/arch/Kconfig
index f330410da63a..81800cdfe161 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -471,7 +471,7 @@ config HAVE_ALIGNED_STRUCT_PAGE
on a struct page for better performance. However selecting this
might increase the size of a struct page by a word.
-config HAVE_CMPXCHG_LOCAL
+config HAVE_CMPXCHG_PERCPU_BYTE
bool
config HAVE_CMPXCHG_DOUBLE
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 571cc234d0b3..24a82bdc766a 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -175,7 +175,7 @@ config ARM64
select HAVE_EBPF_JIT
select HAVE_C_RECORDMCOUNT
select HAVE_CMPXCHG_DOUBLE
- select HAVE_CMPXCHG_LOCAL
+ select HAVE_CMPXCHG_PERCPU_BYTE
select HAVE_CONTEXT_TRACKING_USER
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 318fce77601d..ac03af800bf7 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -151,7 +151,7 @@ config S390
select HAVE_ARCH_VMAP_STACK
select HAVE_ASM_MODVERSIONS
select HAVE_CMPXCHG_DOUBLE
- select HAVE_CMPXCHG_LOCAL
+ select HAVE_CMPXCHG_PERCPU_BYTE
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS
select HAVE_DYNAMIC_FTRACE
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index f9920f1341c8..5f4f6df7b89f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -184,7 +184,7 @@ config X86
select HAVE_ARCH_WITHIN_STACK_FRAMES
select HAVE_ASM_MODVERSIONS
select HAVE_CMPXCHG_DOUBLE
- select HAVE_CMPXCHG_LOCAL
+ select HAVE_CMPXCHG_PERCPU_BYTE
select HAVE_CONTEXT_TRACKING_USER if X86_64
select HAVE_CONTEXT_TRACKING_USER_OFFSTACK if HAVE_CONTEXT_TRACKING_USER
select HAVE_C_RECORDMCOUNT
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 373d2730fcf2..b2fc6d28d3b2 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -554,9 +554,9 @@ void __dec_node_page_state(struct page *page, enum node_stat_item item)
}
EXPORT_SYMBOL(__dec_node_page_state);
-#ifdef CONFIG_HAVE_CMPXCHG_LOCAL
+#ifdef CONFIG_HAVE_CMPXCHG_PERCPU_BYTE
/*
- * If we have cmpxchg_local support then we do not need to incur the overhead
+ * If we have this_cpu_cmpxchg_1 arch support then we do not need to incur the overhead
* that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
*
* mod_state() modifies the zone counter state through atomic per cpu
--
2.36.1
_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv
^ permalink raw reply related [flat|nested] 14+ messages in thread
* Re: [RFC PATCH 1/4] vmstat: percpu: Rename HAVE_CMPXCHG_LOCAL to HAVE_CMPXCHG_PERCPU_BYTE
2022-08-08 8:05 ` guoren
@ 2022-08-08 9:31 ` Christoph Lameter
-1 siblings, 0 replies; 14+ messages in thread
From: Christoph Lameter @ 2022-08-08 9:31 UTC (permalink / raw)
To: Guo Ren
Cc: tj, palmer, will, catalin.marinas, peterz, arnd, linux-arch,
linux-kernel, linux-riscv, Guo Ren
On Mon, 8 Aug 2022, guoren@kernel.org wrote:
> The name HAVE_CMPXCHG_LOCAL is confused with using cmpxchg_local, but
> vmstat needs this_cpu_cmpxchg_1. Rename would clarify the meaning, and
> maybe we could remove cmpxchg(64)_local API (Only drivers/iommu/intel
> used) in the future.
HAVE_CMPXCHG_LOCAL indicates that cmpxchg_local() is available.
The term LOCAL is important because that has traditionally signified an
operation that has an atomic nature that only works on the local core.
cmpxchg local is used in slub too in the form of this_cpu_cmpxchg_double.
But there is the other naming using this_cpu.....
Maybe rename to
HAVE_THIS_CPU_CMPXCHG ?
and clean up all the other mentions of "local" in the source too?
There is also a local.h header around somewhere
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [RFC PATCH 1/4] vmstat: percpu: Rename HAVE_CMPXCHG_LOCAL to HAVE_CMPXCHG_PERCPU_BYTE
@ 2022-08-08 9:31 ` Christoph Lameter
0 siblings, 0 replies; 14+ messages in thread
From: Christoph Lameter @ 2022-08-08 9:31 UTC (permalink / raw)
To: Guo Ren
Cc: tj, palmer, will, catalin.marinas, peterz, arnd, linux-arch,
linux-kernel, linux-riscv, Guo Ren
On Mon, 8 Aug 2022, guoren@kernel.org wrote:
> The name HAVE_CMPXCHG_LOCAL is confused with using cmpxchg_local, but
> vmstat needs this_cpu_cmpxchg_1. Rename would clarify the meaning, and
> maybe we could remove cmpxchg(64)_local API (Only drivers/iommu/intel
> used) in the future.
HAVE_CMPXCHG_LOCAL indicates that cmpxchg_local() is available.
The term LOCAL is important because that has traditionally signified an
operation that has an atomic nature that only works on the local core.
cmpxchg local is used in slub too in the form of this_cpu_cmpxchg_double.
But there is the other naming using this_cpu.....
Maybe rename to
HAVE_THIS_CPU_CMPXCHG ?
and clean up all the other mentions of "local" in the source too?
There is also a local.h header around somewhere
_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [RFC PATCH 1/4] vmstat: percpu: Rename HAVE_CMPXCHG_LOCAL to HAVE_CMPXCHG_PERCPU_BYTE
2022-08-08 9:31 ` Christoph Lameter
@ 2022-08-09 2:58 ` Guo Ren
-1 siblings, 0 replies; 14+ messages in thread
From: Guo Ren @ 2022-08-09 2:58 UTC (permalink / raw)
To: Christoph Lameter
Cc: tj, palmer, will, catalin.marinas, peterz, arnd, linux-arch,
linux-kernel, linux-riscv, Guo Ren
On Mon, Aug 8, 2022 at 5:31 PM Christoph Lameter <cl@gentwo.de> wrote:
>
> On Mon, 8 Aug 2022, guoren@kernel.org wrote:
>
> > The name HAVE_CMPXCHG_LOCAL is confused with using cmpxchg_local, but
> > vmstat needs this_cpu_cmpxchg_1. Rename would clarify the meaning, and
> > maybe we could remove cmpxchg(64)_local API (Only drivers/iommu/intel
> > used) in the future.
>
> HAVE_CMPXCHG_LOCAL indicates that cmpxchg_local() is available.
>
> The term LOCAL is important because that has traditionally signified an
> operation that has an atomic nature that only works on the local core.
>
> cmpxchg local is used in slub too in the form of this_cpu_cmpxchg_double.
1. raw_cpu_generic_cmpxchg_double doesn't use cmpxchg(64)_local.
2. x86 and s390 implement this_cpu_cmpxchg_double with direct asm
code, no relationship to cmpxchg local.
3. Only arm64 using cmpxchg_double_local internal, but we could remove
the relationship from generic cmpxchg_double_local. It's a fake usage.
So maybe it's time to remove cmpxchg(64)_local in Linux and replace
them by this_cpu_cmpxchg & cmpxchg_relaxed.
>
> But there is the other naming using this_cpu.....
>
> Maybe rename to
>
> HAVE_THIS_CPU_CMPXCHG ?
I think we should keep 1/BYTE as a suffix because riscv only
implements 4bytes & 8bytes size cmpxchg. But vmstat needs 1Byte.
>
> and clean up all the other mentions of "local" in the source too?
Good point, I would try. How we deal with drivers/iommu/intel/iommu.c:
tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
Change "cmpxchg64_local -> cmpxchg64_relaxed" would make them happy? I
think they are cmpxchg_local & cmpxchg_sync users.
>
> There is also a local.h header around somewhere
Yes, thx for mentioning, I missed that. The alpha, loongarch, MIPS,
PowerPC and x86 make local_cmpxchg -> cmpxchg_local. Most of them are
copy-paste guys, not real users.
--
Best Regards
Guo Ren
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [RFC PATCH 1/4] vmstat: percpu: Rename HAVE_CMPXCHG_LOCAL to HAVE_CMPXCHG_PERCPU_BYTE
@ 2022-08-09 2:58 ` Guo Ren
0 siblings, 0 replies; 14+ messages in thread
From: Guo Ren @ 2022-08-09 2:58 UTC (permalink / raw)
To: Christoph Lameter
Cc: tj, palmer, will, catalin.marinas, peterz, arnd, linux-arch,
linux-kernel, linux-riscv, Guo Ren
On Mon, Aug 8, 2022 at 5:31 PM Christoph Lameter <cl@gentwo.de> wrote:
>
> On Mon, 8 Aug 2022, guoren@kernel.org wrote:
>
> > The name HAVE_CMPXCHG_LOCAL is confused with using cmpxchg_local, but
> > vmstat needs this_cpu_cmpxchg_1. Rename would clarify the meaning, and
> > maybe we could remove cmpxchg(64)_local API (Only drivers/iommu/intel
> > used) in the future.
>
> HAVE_CMPXCHG_LOCAL indicates that cmpxchg_local() is available.
>
> The term LOCAL is important because that has traditionally signified an
> operation that has an atomic nature that only works on the local core.
>
> cmpxchg local is used in slub too in the form of this_cpu_cmpxchg_double.
1. raw_cpu_generic_cmpxchg_double doesn't use cmpxchg(64)_local.
2. x86 and s390 implement this_cpu_cmpxchg_double with direct asm
code, no relationship to cmpxchg local.
3. Only arm64 using cmpxchg_double_local internal, but we could remove
the relationship from generic cmpxchg_double_local. It's a fake usage.
So maybe it's time to remove cmpxchg(64)_local in Linux and replace
them by this_cpu_cmpxchg & cmpxchg_relaxed.
>
> But there is the other naming using this_cpu.....
>
> Maybe rename to
>
> HAVE_THIS_CPU_CMPXCHG ?
I think we should keep 1/BYTE as a suffix because riscv only
implements 4bytes & 8bytes size cmpxchg. But vmstat needs 1Byte.
>
> and clean up all the other mentions of "local" in the source too?
Good point, I would try. How we deal with drivers/iommu/intel/iommu.c:
tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
Change "cmpxchg64_local -> cmpxchg64_relaxed" would make them happy? I
think they are cmpxchg_local & cmpxchg_sync users.
>
> There is also a local.h header around somewhere
Yes, thx for mentioning, I missed that. The alpha, loongarch, MIPS,
PowerPC and x86 make local_cmpxchg -> cmpxchg_local. Most of them are
copy-paste guys, not real users.
--
Best Regards
Guo Ren
_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv
^ permalink raw reply [flat|nested] 14+ messages in thread
* [RFC PATCH 2/4] arm64: percpu: Use generic PERCPU_RW_OPS
2022-08-08 8:05 ` guoren
@ 2022-08-08 8:05 ` guoren
-1 siblings, 0 replies; 14+ messages in thread
From: guoren @ 2022-08-08 8:05 UTC (permalink / raw)
To: tj, cl, palmer, will, catalin.marinas, peterz, arnd
Cc: linux-arch, linux-kernel, linux-riscv, Guo Ren, Guo Ren
From: Guo Ren <guoren@linux.alibaba.com>
The generic percpu implementation also uses READ_ONCE()/WRITE_ONCE(),
and the generic version even provides a better __native_word() check.
Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Signed-off-by: Guo Ren <guoren@kernel.org>
---
arch/arm64/include/asm/percpu.h | 33 ---------------------------------
1 file changed, 33 deletions(-)
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index b9ba19dbdb69..a58de20d742a 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -52,17 +52,6 @@ static inline unsigned long __kern_my_cpu_offset(void)
#define __my_cpu_offset __kern_my_cpu_offset()
#endif
-#define PERCPU_RW_OPS(sz) \
-static inline unsigned long __percpu_read_##sz(void *ptr) \
-{ \
- return READ_ONCE(*(u##sz *)ptr); \
-} \
- \
-static inline void __percpu_write_##sz(void *ptr, unsigned long val) \
-{ \
- WRITE_ONCE(*(u##sz *)ptr, (u##sz)val); \
-}
-
#define __PERCPU_OP_CASE(w, sfx, name, sz, op_llsc, op_lse) \
static inline void \
__percpu_##name##_case_##sz(void *ptr, unsigned long val) \
@@ -120,10 +109,6 @@ __percpu_##name##_return_case_##sz(void *ptr, unsigned long val) \
__PERCPU_RET_OP_CASE(w, , name, 32, op_llsc, op_lse) \
__PERCPU_RET_OP_CASE( , , name, 64, op_llsc, op_lse)
-PERCPU_RW_OPS(8)
-PERCPU_RW_OPS(16)
-PERCPU_RW_OPS(32)
-PERCPU_RW_OPS(64)
PERCPU_OP(add, add, stadd)
PERCPU_OP(andnot, bic, stclr)
PERCPU_OP(or, orr, stset)
@@ -168,24 +153,6 @@ PERCPU_RET_OP(add, add, ldadd)
__retval; \
})
-#define this_cpu_read_1(pcp) \
- _pcp_protect_return(__percpu_read_8, pcp)
-#define this_cpu_read_2(pcp) \
- _pcp_protect_return(__percpu_read_16, pcp)
-#define this_cpu_read_4(pcp) \
- _pcp_protect_return(__percpu_read_32, pcp)
-#define this_cpu_read_8(pcp) \
- _pcp_protect_return(__percpu_read_64, pcp)
-
-#define this_cpu_write_1(pcp, val) \
- _pcp_protect(__percpu_write_8, pcp, (unsigned long)val)
-#define this_cpu_write_2(pcp, val) \
- _pcp_protect(__percpu_write_16, pcp, (unsigned long)val)
-#define this_cpu_write_4(pcp, val) \
- _pcp_protect(__percpu_write_32, pcp, (unsigned long)val)
-#define this_cpu_write_8(pcp, val) \
- _pcp_protect(__percpu_write_64, pcp, (unsigned long)val)
-
#define this_cpu_add_1(pcp, val) \
_pcp_protect(__percpu_add_case_8, pcp, val)
#define this_cpu_add_2(pcp, val) \
--
2.36.1
^ permalink raw reply related [flat|nested] 14+ messages in thread
* [RFC PATCH 2/4] arm64: percpu: Use generic PERCPU_RW_OPS
@ 2022-08-08 8:05 ` guoren
0 siblings, 0 replies; 14+ messages in thread
From: guoren @ 2022-08-08 8:05 UTC (permalink / raw)
To: tj, cl, palmer, will, catalin.marinas, peterz, arnd
Cc: linux-arch, linux-kernel, linux-riscv, Guo Ren, Guo Ren
From: Guo Ren <guoren@linux.alibaba.com>
The generic percpu implementation also uses READ_ONCE()/WRITE_ONCE(),
and the generic version even provides a better __native_word() check.
Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Signed-off-by: Guo Ren <guoren@kernel.org>
---
arch/arm64/include/asm/percpu.h | 33 ---------------------------------
1 file changed, 33 deletions(-)
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index b9ba19dbdb69..a58de20d742a 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -52,17 +52,6 @@ static inline unsigned long __kern_my_cpu_offset(void)
#define __my_cpu_offset __kern_my_cpu_offset()
#endif
-#define PERCPU_RW_OPS(sz) \
-static inline unsigned long __percpu_read_##sz(void *ptr) \
-{ \
- return READ_ONCE(*(u##sz *)ptr); \
-} \
- \
-static inline void __percpu_write_##sz(void *ptr, unsigned long val) \
-{ \
- WRITE_ONCE(*(u##sz *)ptr, (u##sz)val); \
-}
-
#define __PERCPU_OP_CASE(w, sfx, name, sz, op_llsc, op_lse) \
static inline void \
__percpu_##name##_case_##sz(void *ptr, unsigned long val) \
@@ -120,10 +109,6 @@ __percpu_##name##_return_case_##sz(void *ptr, unsigned long val) \
__PERCPU_RET_OP_CASE(w, , name, 32, op_llsc, op_lse) \
__PERCPU_RET_OP_CASE( , , name, 64, op_llsc, op_lse)
-PERCPU_RW_OPS(8)
-PERCPU_RW_OPS(16)
-PERCPU_RW_OPS(32)
-PERCPU_RW_OPS(64)
PERCPU_OP(add, add, stadd)
PERCPU_OP(andnot, bic, stclr)
PERCPU_OP(or, orr, stset)
@@ -168,24 +153,6 @@ PERCPU_RET_OP(add, add, ldadd)
__retval; \
})
-#define this_cpu_read_1(pcp) \
- _pcp_protect_return(__percpu_read_8, pcp)
-#define this_cpu_read_2(pcp) \
- _pcp_protect_return(__percpu_read_16, pcp)
-#define this_cpu_read_4(pcp) \
- _pcp_protect_return(__percpu_read_32, pcp)
-#define this_cpu_read_8(pcp) \
- _pcp_protect_return(__percpu_read_64, pcp)
-
-#define this_cpu_write_1(pcp, val) \
- _pcp_protect(__percpu_write_8, pcp, (unsigned long)val)
-#define this_cpu_write_2(pcp, val) \
- _pcp_protect(__percpu_write_16, pcp, (unsigned long)val)
-#define this_cpu_write_4(pcp, val) \
- _pcp_protect(__percpu_write_32, pcp, (unsigned long)val)
-#define this_cpu_write_8(pcp, val) \
- _pcp_protect(__percpu_write_64, pcp, (unsigned long)val)
-
#define this_cpu_add_1(pcp, val) \
_pcp_protect(__percpu_add_case_8, pcp, val)
#define this_cpu_add_2(pcp, val) \
--
2.36.1
_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv
^ permalink raw reply related [flat|nested] 14+ messages in thread
* [RFC PATCH 3/4] riscv: percpu: Implement this_cpu operations
2022-08-08 8:05 ` guoren
@ 2022-08-08 8:05 ` guoren
-1 siblings, 0 replies; 14+ messages in thread
From: guoren @ 2022-08-08 8:05 UTC (permalink / raw)
To: tj, cl, palmer, will, catalin.marinas, peterz, arnd
Cc: linux-arch, linux-kernel, linux-riscv, Guo Ren, Guo Ren
From: Guo Ren <guoren@linux.alibaba.com>
This patch provides riscv specific implementations for the this_cpu
operations. We use atomic operations as appropriate (32 & 64 width).
Use AMO instructions listed below for percpu, others are generic:
- amoadd.w/d
- amoand.w/d
- amoor.w/d
- amoswap.w/d
Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Signed-off-by: Guo Ren <guoren@kernel.org>
---
arch/riscv/include/asm/percpu.h | 104 ++++++++++++++++++++++++++++++++
1 file changed, 104 insertions(+)
create mode 100644 arch/riscv/include/asm/percpu.h
diff --git a/arch/riscv/include/asm/percpu.h b/arch/riscv/include/asm/percpu.h
new file mode 100644
index 000000000000..f41d339c41f3
--- /dev/null
+++ b/arch/riscv/include/asm/percpu.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _ASM_RISCV_PERCPU_H
+#define _ASM_RISCV_PERCPU_H
+
+#include <asm/cmpxchg.h>
+
+#define __PERCPU_OP_CASE(asm_type, name, sz, asm_op) \
+static inline void \
+__percpu_##name##_case_##sz(void *ptr, ulong val) \
+{ \
+ __asm__ __volatile__ ( \
+ " amo" #asm_op "." #asm_type " zero, %1, (%0)" \
+ : \
+ : "r" (ptr), "r" (val) \
+ : "memory"); \
+}
+
+#define __PERCPU_RET_OP_CASE(asm_type, name, sz, asm_op, c_op) \
+static inline u##sz \
+__percpu_##name##_return_case_##sz(void *ptr, ulong val) \
+{ \
+ u##sz ret; \
+ __asm__ __volatile__ ( \
+ " amo" #asm_op "." #asm_type " %0, %2, (%1)" \
+ : "=r" (ret) \
+ : "r" (ptr), "r" (val) \
+ : "memory"); \
+ \
+ return ret c_op val; \
+}
+
+#ifdef CONFIG_64BIT
+#define PERCPU_OP(name, asm_op) \
+ __PERCPU_OP_CASE(w, name, 32, asm_op) \
+ __PERCPU_OP_CASE(d, name, 64, asm_op)
+
+#define PERCPU_RET_OP(name, asm_op, c_op) \
+ __PERCPU_RET_OP_CASE(w, name, 32, asm_op, c_op) \
+ __PERCPU_RET_OP_CASE(d, name, 64, asm_op, c_op)
+#else /* CONFIG_32BIT */
+#define PERCPU_OP(name, asm_op) \
+ __PERCPU_OP_CASE(w, name, 32, asm_op)
+
+#define PERCPU_RET_OP(name, asm_op, c_op) \
+ __PERCPU_RET_OP_CASE(w, name, 32, asm_op, c_op)
+#endif /* CONFIG_64BIT */
+
+PERCPU_OP(add, add)
+PERCPU_OP(and, and)
+PERCPU_OP(or, or)
+PERCPU_RET_OP(add, add, +)
+
+#undef __PERCPU_OP_CASE
+#undef __PERCPU_RET_OP_CASE
+#undef PERCPU_OP
+#undef PERCPU_RET_OP
+
+#define _pcp_protect(op, pcp, ...) \
+({ \
+ preempt_disable_notrace(); \
+ op(raw_cpu_ptr(&(pcp)), __VA_ARGS__); \
+ preempt_enable_notrace(); \
+})
+
+#define _pcp_protect_return(op, pcp, args...) \
+({ \
+ typeof(pcp) __retval; \
+ preempt_disable_notrace(); \
+ if (__native_word(pcp)) \
+ __retval = (typeof(pcp))op(raw_cpu_ptr(&(pcp)), ##args);\
+ else \
+ BUILD_BUG(); \
+ preempt_enable_notrace(); \
+ __retval; \
+})
+
+#define this_cpu_add_4(pcp, val) \
+ _pcp_protect(__percpu_add_case_32, pcp, val)
+#define this_cpu_add_return_4(pcp, val) \
+ _pcp_protect_return(__percpu_add_return_case_32, pcp, val)
+#define this_cpu_and_4(pcp, val) \
+ _pcp_protect(__percpu_and_case_32, pcp, val)
+#define this_cpu_or_4(pcp, val) \
+ _pcp_protect(__percpu_or_case_32, pcp, val)
+#define this_cpu_xchg_4(pcp, val) \
+ _pcp_protect_return(xchg_relaxed, pcp, val)
+
+#ifdef CONFIG_64BIT
+#define this_cpu_add_8(pcp, val) \
+ _pcp_protect(__percpu_add_case_64, pcp, val)
+#define this_cpu_add_return_8(pcp, val) \
+ _pcp_protect_return(__percpu_add_return_case_64, pcp, val)
+#define this_cpu_and_8(pcp, val) \
+ _pcp_protect(__percpu_and_case_64, pcp, val)
+#define this_cpu_or_8(pcp, val) \
+ _pcp_protect(__percpu_or_case_64, pcp, val)
+#define this_cpu_xchg_8(pcp, val) \
+ _pcp_protect_return(xchg_relaxed, pcp, val)
+#endif /* CONFIG_64BIT */
+
+#include <asm-generic/percpu.h>
+
+#endif /* _ASM_RISCV_PERCPU_H */
--
2.36.1
^ permalink raw reply related [flat|nested] 14+ messages in thread
* [RFC PATCH 3/4] riscv: percpu: Implement this_cpu operations
@ 2022-08-08 8:05 ` guoren
0 siblings, 0 replies; 14+ messages in thread
From: guoren @ 2022-08-08 8:05 UTC (permalink / raw)
To: tj, cl, palmer, will, catalin.marinas, peterz, arnd
Cc: linux-arch, linux-kernel, linux-riscv, Guo Ren, Guo Ren
From: Guo Ren <guoren@linux.alibaba.com>
This patch provides riscv specific implementations for the this_cpu
operations. We use atomic operations as appropriate (32 & 64 width).
Use AMO instructions listed below for percpu, others are generic:
- amoadd.w/d
- amoand.w/d
- amoor.w/d
- amoswap.w/d
Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Signed-off-by: Guo Ren <guoren@kernel.org>
---
arch/riscv/include/asm/percpu.h | 104 ++++++++++++++++++++++++++++++++
1 file changed, 104 insertions(+)
create mode 100644 arch/riscv/include/asm/percpu.h
diff --git a/arch/riscv/include/asm/percpu.h b/arch/riscv/include/asm/percpu.h
new file mode 100644
index 000000000000..f41d339c41f3
--- /dev/null
+++ b/arch/riscv/include/asm/percpu.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _ASM_RISCV_PERCPU_H
+#define _ASM_RISCV_PERCPU_H
+
+#include <asm/cmpxchg.h>
+
+#define __PERCPU_OP_CASE(asm_type, name, sz, asm_op) \
+static inline void \
+__percpu_##name##_case_##sz(void *ptr, ulong val) \
+{ \
+ __asm__ __volatile__ ( \
+ " amo" #asm_op "." #asm_type " zero, %1, (%0)" \
+ : \
+ : "r" (ptr), "r" (val) \
+ : "memory"); \
+}
+
+#define __PERCPU_RET_OP_CASE(asm_type, name, sz, asm_op, c_op) \
+static inline u##sz \
+__percpu_##name##_return_case_##sz(void *ptr, ulong val) \
+{ \
+ u##sz ret; \
+ __asm__ __volatile__ ( \
+ " amo" #asm_op "." #asm_type " %0, %2, (%1)" \
+ : "=r" (ret) \
+ : "r" (ptr), "r" (val) \
+ : "memory"); \
+ \
+ return ret c_op val; \
+}
+
+#ifdef CONFIG_64BIT
+#define PERCPU_OP(name, asm_op) \
+ __PERCPU_OP_CASE(w, name, 32, asm_op) \
+ __PERCPU_OP_CASE(d, name, 64, asm_op)
+
+#define PERCPU_RET_OP(name, asm_op, c_op) \
+ __PERCPU_RET_OP_CASE(w, name, 32, asm_op, c_op) \
+ __PERCPU_RET_OP_CASE(d, name, 64, asm_op, c_op)
+#else /* CONFIG_32BIT */
+#define PERCPU_OP(name, asm_op) \
+ __PERCPU_OP_CASE(w, name, 32, asm_op)
+
+#define PERCPU_RET_OP(name, asm_op, c_op) \
+ __PERCPU_RET_OP_CASE(w, name, 32, asm_op, c_op)
+#endif /* CONFIG_64BIT */
+
+PERCPU_OP(add, add)
+PERCPU_OP(and, and)
+PERCPU_OP(or, or)
+PERCPU_RET_OP(add, add, +)
+
+#undef __PERCPU_OP_CASE
+#undef __PERCPU_RET_OP_CASE
+#undef PERCPU_OP
+#undef PERCPU_RET_OP
+
+#define _pcp_protect(op, pcp, ...) \
+({ \
+ preempt_disable_notrace(); \
+ op(raw_cpu_ptr(&(pcp)), __VA_ARGS__); \
+ preempt_enable_notrace(); \
+})
+
+#define _pcp_protect_return(op, pcp, args...) \
+({ \
+ typeof(pcp) __retval; \
+ preempt_disable_notrace(); \
+ if (__native_word(pcp)) \
+ __retval = (typeof(pcp))op(raw_cpu_ptr(&(pcp)), ##args);\
+ else \
+ BUILD_BUG(); \
+ preempt_enable_notrace(); \
+ __retval; \
+})
+
+#define this_cpu_add_4(pcp, val) \
+ _pcp_protect(__percpu_add_case_32, pcp, val)
+#define this_cpu_add_return_4(pcp, val) \
+ _pcp_protect_return(__percpu_add_return_case_32, pcp, val)
+#define this_cpu_and_4(pcp, val) \
+ _pcp_protect(__percpu_and_case_32, pcp, val)
+#define this_cpu_or_4(pcp, val) \
+ _pcp_protect(__percpu_or_case_32, pcp, val)
+#define this_cpu_xchg_4(pcp, val) \
+ _pcp_protect_return(xchg_relaxed, pcp, val)
+
+#ifdef CONFIG_64BIT
+#define this_cpu_add_8(pcp, val) \
+ _pcp_protect(__percpu_add_case_64, pcp, val)
+#define this_cpu_add_return_8(pcp, val) \
+ _pcp_protect_return(__percpu_add_return_case_64, pcp, val)
+#define this_cpu_and_8(pcp, val) \
+ _pcp_protect(__percpu_and_case_64, pcp, val)
+#define this_cpu_or_8(pcp, val) \
+ _pcp_protect(__percpu_or_case_64, pcp, val)
+#define this_cpu_xchg_8(pcp, val) \
+ _pcp_protect_return(xchg_relaxed, pcp, val)
+#endif /* CONFIG_64BIT */
+
+#include <asm-generic/percpu.h>
+
+#endif /* _ASM_RISCV_PERCPU_H */
--
2.36.1
_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv
^ permalink raw reply related [flat|nested] 14+ messages in thread
* [RFC PATCH 4/4] riscv: cmpxchg: Remove unused cmpxchg(64)_local
2022-08-08 8:05 ` guoren
@ 2022-08-08 8:06 ` guoren
-1 siblings, 0 replies; 14+ messages in thread
From: guoren @ 2022-08-08 8:06 UTC (permalink / raw)
To: tj, cl, palmer, will, catalin.marinas, peterz, arnd
Cc: linux-arch, linux-kernel, linux-riscv, Guo Ren, Guo Ren
From: Guo Ren <guoren@linux.alibaba.com>
Only cmpxchg64_local is used in drivers/iommu/intel/iommu.c, and
cmpxchg_local has been deprecated in the common part. So cmpxchg_local
is unnecessary for riscv.
Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Signed-off-by: Guo Ren <guoren@kernel.org>
---
arch/riscv/include/asm/cmpxchg.h | 9 ---------
1 file changed, 9 deletions(-)
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index 12debce235e5..0407680b13ad 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -345,19 +345,10 @@
_o_, _n_, sizeof(*(ptr))); \
})
-#define arch_cmpxchg_local(ptr, o, n) \
- (__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr))))
-
#define arch_cmpxchg64(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
arch_cmpxchg((ptr), (o), (n)); \
})
-#define arch_cmpxchg64_local(ptr, o, n) \
-({ \
- BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- arch_cmpxchg_relaxed((ptr), (o), (n)); \
-})
-
#endif /* _ASM_RISCV_CMPXCHG_H */
--
2.36.1
^ permalink raw reply related [flat|nested] 14+ messages in thread
* [RFC PATCH 4/4] riscv: cmpxchg: Remove unused cmpxchg(64)_local
@ 2022-08-08 8:06 ` guoren
0 siblings, 0 replies; 14+ messages in thread
From: guoren @ 2022-08-08 8:06 UTC (permalink / raw)
To: tj, cl, palmer, will, catalin.marinas, peterz, arnd
Cc: linux-arch, linux-kernel, linux-riscv, Guo Ren, Guo Ren
From: Guo Ren <guoren@linux.alibaba.com>
Only cmpxchg64_local is used in drivers/iommu/intel/iommu.c, and
cmpxchg_local has been deprecated in the common part. So cmpxchg_local
is unnecessary for riscv.
Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Signed-off-by: Guo Ren <guoren@kernel.org>
---
arch/riscv/include/asm/cmpxchg.h | 9 ---------
1 file changed, 9 deletions(-)
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index 12debce235e5..0407680b13ad 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -345,19 +345,10 @@
_o_, _n_, sizeof(*(ptr))); \
})
-#define arch_cmpxchg_local(ptr, o, n) \
- (__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr))))
-
#define arch_cmpxchg64(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
arch_cmpxchg((ptr), (o), (n)); \
})
-#define arch_cmpxchg64_local(ptr, o, n) \
-({ \
- BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- arch_cmpxchg_relaxed((ptr), (o), (n)); \
-})
-
#endif /* _ASM_RISCV_CMPXCHG_H */
--
2.36.1
_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv
^ permalink raw reply related [flat|nested] 14+ messages in thread