* misc riscv cleanups
@ 2019-08-21 14:58 Christoph Hellwig
2019-08-21 14:58 ` [PATCH 1/6] riscv: refactor the IPI code Christoph Hellwig
` (5 more replies)
0 siblings, 6 replies; 26+ messages in thread
From: Christoph Hellwig @ 2019-08-21 14:58 UTC (permalink / raw)
To: Palmer Dabbelt, Paul Walmsley; +Cc: Atish Patra, linux-riscv
Hi all,
this series has a couple small cleanups and micro-optimizations
that resulted from the nommu work. Pushing them out ASAP to avoid
conflicts with the tlbflush work from Atish.
_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv
^ permalink raw reply [flat|nested] 26+ messages in thread
* [PATCH 1/6] riscv: refactor the IPI code
2019-08-21 14:58 misc riscv cleanups Christoph Hellwig
@ 2019-08-21 14:58 ` Christoph Hellwig
2019-08-24 1:03 ` Atish Patra
2019-09-05 8:44 ` Paul Walmsley
2019-08-21 14:58 ` [PATCH 2/6] riscv: cleanup send_ipi_mask Christoph Hellwig
` (4 subsequent siblings)
5 siblings, 2 replies; 26+ messages in thread
From: Christoph Hellwig @ 2019-08-21 14:58 UTC (permalink / raw)
To: Palmer Dabbelt, Paul Walmsley; +Cc: Atish Patra, linux-riscv
This prepares for adding native non-SBI IPI code.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
arch/riscv/kernel/smp.c | 55 +++++++++++++++++++++++------------------
1 file changed, 31 insertions(+), 24 deletions(-)
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index 5a9834503a2f..8cd730239613 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -78,13 +78,38 @@ static void ipi_stop(void)
wait_for_interrupt();
}
+static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
+{
+ int cpuid, hartid;
+ struct cpumask hartid_mask;
+
+ cpumask_clear(&hartid_mask);
+ mb();
+ for_each_cpu(cpuid, mask) {
+ set_bit(op, &ipi_data[cpuid].bits);
+ hartid = cpuid_to_hartid_map(cpuid);
+ cpumask_set_cpu(hartid, &hartid_mask);
+ }
+ mb();
+ sbi_send_ipi(cpumask_bits(&hartid_mask));
+}
+
+static void send_ipi_single(int cpu, enum ipi_message_type op)
+{
+ send_ipi_mask(cpumask_of(cpu), op);
+}
+
+static inline void clear_ipi(void)
+{
+ csr_clear(CSR_SIP, SIE_SSIE);
+}
+
void riscv_software_interrupt(void)
{
unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
unsigned long *stats = ipi_data[smp_processor_id()].stats;
- /* Clear pending IPI */
- csr_clear(CSR_SIP, SIE_SSIE);
+ clear_ipi();
while (true) {
unsigned long ops;
@@ -118,23 +143,6 @@ void riscv_software_interrupt(void)
}
}
-static void
-send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
-{
- int cpuid, hartid;
- struct cpumask hartid_mask;
-
- cpumask_clear(&hartid_mask);
- mb();
- for_each_cpu(cpuid, to_whom) {
- set_bit(operation, &ipi_data[cpuid].bits);
- hartid = cpuid_to_hartid_map(cpuid);
- cpumask_set_cpu(hartid, &hartid_mask);
- }
- mb();
- sbi_send_ipi(cpumask_bits(&hartid_mask));
-}
-
static const char * const ipi_names[] = {
[IPI_RESCHEDULE] = "Rescheduling interrupts",
[IPI_CALL_FUNC] = "Function call interrupts",
@@ -156,12 +164,12 @@ void show_ipi_stats(struct seq_file *p, int prec)
void arch_send_call_function_ipi_mask(struct cpumask *mask)
{
- send_ipi_message(mask, IPI_CALL_FUNC);
+ send_ipi_mask(mask, IPI_CALL_FUNC);
}
void arch_send_call_function_single_ipi(int cpu)
{
- send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
+ send_ipi_single(cpu, IPI_CALL_FUNC);
}
void smp_send_stop(void)
@@ -176,7 +184,7 @@ void smp_send_stop(void)
if (system_state <= SYSTEM_RUNNING)
pr_crit("SMP: stopping secondary CPUs\n");
- send_ipi_message(&mask, IPI_CPU_STOP);
+ send_ipi_mask(&mask, IPI_CPU_STOP);
}
/* Wait up to one second for other CPUs to stop */
@@ -191,6 +199,5 @@ void smp_send_stop(void)
void smp_send_reschedule(int cpu)
{
- send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
+ send_ipi_single(cpu, IPI_RESCHEDULE);
}
-
--
2.20.1
_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv
^ permalink raw reply related [flat|nested] 26+ messages in thread
* [PATCH 2/6] riscv: cleanup send_ipi_mask
2019-08-21 14:58 misc riscv cleanups Christoph Hellwig
2019-08-21 14:58 ` [PATCH 1/6] riscv: refactor the IPI code Christoph Hellwig
@ 2019-08-21 14:58 ` Christoph Hellwig
2019-08-24 0:11 ` Atish Patra
2019-09-05 8:46 ` Paul Walmsley
2019-08-21 14:58 ` [PATCH 3/6] riscv: optimize send_ipi_single Christoph Hellwig
` (3 subsequent siblings)
5 siblings, 2 replies; 26+ messages in thread
From: Christoph Hellwig @ 2019-08-21 14:58 UTC (permalink / raw)
To: Palmer Dabbelt, Paul Walmsley; +Cc: Atish Patra, linux-riscv
Use the special barriers for atomic bitops to make the intention
a little more clear, and use riscv_cpuid_to_hartid_mask instead of
open coding it.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
arch/riscv/kernel/smp.c | 16 +++++++---------
1 file changed, 7 insertions(+), 9 deletions(-)
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index 8cd730239613..2e21669aa068 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -80,17 +80,15 @@ static void ipi_stop(void)
static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
{
- int cpuid, hartid;
struct cpumask hartid_mask;
+ int cpu;
- cpumask_clear(&hartid_mask);
- mb();
- for_each_cpu(cpuid, mask) {
- set_bit(op, &ipi_data[cpuid].bits);
- hartid = cpuid_to_hartid_map(cpuid);
- cpumask_set_cpu(hartid, &hartid_mask);
- }
- mb();
+ smp_mb__before_atomic();
+ for_each_cpu(cpu, mask)
+ set_bit(op, &ipi_data[cpu].bits);
+ smp_mb__after_atomic();
+
+ riscv_cpuid_to_hartid_mask(mask, &hartid_mask);
sbi_send_ipi(cpumask_bits(&hartid_mask));
}
--
2.20.1
_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv
^ permalink raw reply related [flat|nested] 26+ messages in thread
* [PATCH 3/6] riscv: optimize send_ipi_single
2019-08-21 14:58 misc riscv cleanups Christoph Hellwig
2019-08-21 14:58 ` [PATCH 1/6] riscv: refactor the IPI code Christoph Hellwig
2019-08-21 14:58 ` [PATCH 2/6] riscv: cleanup send_ipi_mask Christoph Hellwig
@ 2019-08-21 14:58 ` Christoph Hellwig
2019-08-24 0:26 ` Atish Patra
2019-09-05 8:48 ` Paul Walmsley
2019-08-21 14:58 ` [PATCH 4/6] riscv: cleanup riscv_cpuid_to_hartid_mask Christoph Hellwig
` (2 subsequent siblings)
5 siblings, 2 replies; 26+ messages in thread
From: Christoph Hellwig @ 2019-08-21 14:58 UTC (permalink / raw)
To: Palmer Dabbelt, Paul Walmsley; +Cc: Atish Patra, linux-riscv
Don't go through send_ipi_mask, but just set the op bit and then pass a
simply generated hartid mask directly to sbi_send_ipi.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
arch/riscv/kernel/smp.c | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index 2e21669aa068..a3715d621f60 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -94,7 +94,13 @@ static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
static void send_ipi_single(int cpu, enum ipi_message_type op)
{
- send_ipi_mask(cpumask_of(cpu), op);
+ int hartid = cpuid_to_hartid_map(cpu);
+
+ smp_mb__before_atomic();
+ set_bit(op, &ipi_data[cpu].bits);
+ smp_mb__after_atomic();
+
+ sbi_send_ipi(cpumask_bits(cpumask_of(hartid)));
}
static inline void clear_ipi(void)
--
2.20.1
_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv
^ permalink raw reply related [flat|nested] 26+ messages in thread
* [PATCH 4/6] riscv: cleanup riscv_cpuid_to_hartid_mask
2019-08-21 14:58 misc riscv cleanups Christoph Hellwig
` (2 preceding siblings ...)
2019-08-21 14:58 ` [PATCH 3/6] riscv: optimize send_ipi_single Christoph Hellwig
@ 2019-08-21 14:58 ` Christoph Hellwig
2019-08-24 0:03 ` Atish Patra
2019-09-05 8:50 ` Paul Walmsley
2019-08-21 14:58 ` [PATCH 5/6] riscv: don't use the rdtime(h) pseudo-instructions Christoph Hellwig
2019-08-21 14:58 ` [PATCH 6/6] riscv: move the TLB flush logic out of line Christoph Hellwig
5 siblings, 2 replies; 26+ messages in thread
From: Christoph Hellwig @ 2019-08-21 14:58 UTC (permalink / raw)
To: Palmer Dabbelt, Paul Walmsley; +Cc: Atish Patra, linux-riscv
Move the initial clearing of the mask from the callers to
riscv_cpuid_to_hartid_mask, and remove the unused !CONFIG_SMP stub.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
arch/riscv/include/asm/smp.h | 6 ------
arch/riscv/include/asm/tlbflush.h | 1 -
arch/riscv/kernel/smp.c | 1 +
arch/riscv/mm/cacheflush.c | 1 -
4 files changed, 1 insertion(+), 8 deletions(-)
diff --git a/arch/riscv/include/asm/smp.h b/arch/riscv/include/asm/smp.h
index c6ed4d691def..a83451d73a4e 100644
--- a/arch/riscv/include/asm/smp.h
+++ b/arch/riscv/include/asm/smp.h
@@ -61,11 +61,5 @@ static inline unsigned long cpuid_to_hartid_map(int cpu)
return boot_cpu_hartid;
}
-static inline void riscv_cpuid_to_hartid_mask(const struct cpumask *in,
- struct cpumask *out)
-{
- cpumask_set_cpu(cpuid_to_hartid_map(0), out);
-}
-
#endif /* CONFIG_SMP */
#endif /* _ASM_RISCV_SMP_H */
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index 4d9bbe8438bf..df31fe2ed09c 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -47,7 +47,6 @@ static inline void remote_sfence_vma(struct cpumask *cmask, unsigned long start,
{
struct cpumask hmask;
- cpumask_clear(&hmask);
riscv_cpuid_to_hartid_mask(cmask, &hmask);
sbi_remote_sfence_vma(hmask.bits, start, size);
}
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index a3715d621f60..3836760d7aaf 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -56,6 +56,7 @@ void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out)
{
int cpu;
+ cpumask_clear(out);
for_each_cpu(cpu, in)
cpumask_set_cpu(cpuid_to_hartid_map(cpu), out);
}
diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
index 9ebcff8ba263..3f15938dec89 100644
--- a/arch/riscv/mm/cacheflush.c
+++ b/arch/riscv/mm/cacheflush.c
@@ -47,7 +47,6 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
local |= cpumask_empty(&others);
if (mm != current->active_mm || !local) {
- cpumask_clear(&hmask);
riscv_cpuid_to_hartid_mask(&others, &hmask);
sbi_remote_fence_i(hmask.bits);
} else {
--
2.20.1
_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv
^ permalink raw reply related [flat|nested] 26+ messages in thread
* [PATCH 5/6] riscv: don't use the rdtime(h) pseudo-instructions
2019-08-21 14:58 misc riscv cleanups Christoph Hellwig
` (3 preceding siblings ...)
2019-08-21 14:58 ` [PATCH 4/6] riscv: cleanup riscv_cpuid_to_hartid_mask Christoph Hellwig
@ 2019-08-21 14:58 ` Christoph Hellwig
2019-08-24 0:37 ` Atish Patra
2019-09-05 8:55 ` Paul Walmsley
2019-08-21 14:58 ` [PATCH 6/6] riscv: move the TLB flush logic out of line Christoph Hellwig
5 siblings, 2 replies; 26+ messages in thread
From: Christoph Hellwig @ 2019-08-21 14:58 UTC (permalink / raw)
To: Palmer Dabbelt, Paul Walmsley; +Cc: Atish Patra, linux-riscv
If we just use the CSRs that these map to directly the code is simpler
and doesn't require extra inline assembly code. Also fix up the top-level
comment in timer-riscv.c to not talk about the cycle count or mention
details of the clocksource interface, of which this file is just a
consumer.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
arch/riscv/include/asm/timex.h | 44 +++++++++++++++----------------
drivers/clocksource/timer-riscv.c | 17 +++---------
2 files changed, 25 insertions(+), 36 deletions(-)
diff --git a/arch/riscv/include/asm/timex.h b/arch/riscv/include/asm/timex.h
index 6a703ec9d796..c7ef131b9e4c 100644
--- a/arch/riscv/include/asm/timex.h
+++ b/arch/riscv/include/asm/timex.h
@@ -6,43 +6,41 @@
#ifndef _ASM_RISCV_TIMEX_H
#define _ASM_RISCV_TIMEX_H
-#include <asm/param.h>
+#include <asm/csr.h>
typedef unsigned long cycles_t;
-static inline cycles_t get_cycles_inline(void)
+static inline cycles_t get_cycles(void)
{
- cycles_t n;
-
- __asm__ __volatile__ (
- "rdtime %0"
- : "=r" (n));
- return n;
+ return csr_read(CSR_TIME);
}
-#define get_cycles get_cycles_inline
+#define get_cycles get_cycles
#ifdef CONFIG_64BIT
-static inline uint64_t get_cycles64(void)
+static inline u64 get_cycles64(void)
+{
+ return get_cycles();
+}
+#else /* CONFIG_64BIT */
+static inline u32 get_cycles_hi(void)
{
- return get_cycles();
+ return csr_read(CSR_TIMEH);
}
-#else
-static inline uint64_t get_cycles64(void)
+
+static inline u64 get_cycles64(void)
{
- u32 lo, hi, tmp;
- __asm__ __volatile__ (
- "1:\n"
- "rdtimeh %0\n"
- "rdtime %1\n"
- "rdtimeh %2\n"
- "bne %0, %2, 1b"
- : "=&r" (hi), "=&r" (lo), "=&r" (tmp));
+ u32 hi, lo;
+
+ do {
+ hi = get_cycles_hi();
+ lo = get_cycles();
+ } while (hi != get_cycles_hi());
+
return ((u64)hi << 32) | lo;
}
-#endif
+#endif /* CONFIG_64BIT */
#define ARCH_HAS_READ_CURRENT_TIMER
-
static inline int read_current_timer(unsigned long *timer_val)
{
*timer_val = get_cycles();
diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
index 09e031176bc6..470c7ef02ea4 100644
--- a/drivers/clocksource/timer-riscv.c
+++ b/drivers/clocksource/timer-riscv.c
@@ -2,6 +2,10 @@
/*
* Copyright (C) 2012 Regents of the University of California
* Copyright (C) 2017 SiFive
+ *
+ * All RISC-V systems have a timer attached to every hart. These timers can be
+ * read from the "time" and "timeh" CSRs, and can use the SBI to setup
+ * events.
*/
#include <linux/clocksource.h>
#include <linux/clockchips.h>
@@ -12,19 +16,6 @@
#include <asm/smp.h>
#include <asm/sbi.h>
-/*
- * All RISC-V systems have a timer attached to every hart. These timers can be
- * read by the 'rdcycle' pseudo instruction, and can use the SBI to setup
- * events. In order to abstract the architecture-specific timer reading and
- * setting functions away from the clock event insertion code, we provide
- * function pointers to the clockevent subsystem that perform two basic
- * operations: rdtime() reads the timer on the current CPU, and
- * next_event(delta) sets the next timer event to 'delta' cycles in the future.
- * As the timers are inherently a per-cpu resource, these callbacks perform
- * operations on the current hart. There is guaranteed to be exactly one timer
- * per hart on all RISC-V systems.
- */
-
static int riscv_clock_next_event(unsigned long delta,
struct clock_event_device *ce)
{
--
2.20.1
_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv
^ permalink raw reply related [flat|nested] 26+ messages in thread
* [PATCH 6/6] riscv: move the TLB flush logic out of line
2019-08-21 14:58 misc riscv cleanups Christoph Hellwig
` (4 preceding siblings ...)
2019-08-21 14:58 ` [PATCH 5/6] riscv: don't use the rdtime(h) pseudo-instructions Christoph Hellwig
@ 2019-08-21 14:58 ` Christoph Hellwig
2019-08-24 0:03 ` Atish Patra
2019-09-05 8:58 ` Paul Walmsley
5 siblings, 2 replies; 26+ messages in thread
From: Christoph Hellwig @ 2019-08-21 14:58 UTC (permalink / raw)
To: Palmer Dabbelt, Paul Walmsley; +Cc: Atish Patra, linux-riscv
The TLB flush logic is going to become more complex. Start moving
it out of line.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
arch/riscv/include/asm/tlbflush.h | 37 ++++++-------------------------
arch/riscv/mm/Makefile | 3 +++
arch/riscv/mm/tlbflush.c | 35 +++++++++++++++++++++++++++++
3 files changed, 45 insertions(+), 30 deletions(-)
create mode 100644 arch/riscv/mm/tlbflush.c
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index df31fe2ed09c..075a784c66c5 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -25,8 +25,13 @@ static inline void local_flush_tlb_page(unsigned long addr)
__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory");
}
-#ifndef CONFIG_SMP
-
+#ifdef CONFIG_SMP
+void flush_tlb_all(void);
+void flush_tlb_mm(struct mm_struct *mm);
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+#else /* CONFIG_SMP */
#define flush_tlb_all() local_flush_tlb_all()
#define flush_tlb_page(vma, addr) local_flush_tlb_page(addr)
@@ -37,34 +42,6 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
}
#define flush_tlb_mm(mm) flush_tlb_all()
-
-#else /* CONFIG_SMP */
-
-#include <asm/sbi.h>
-
-static inline void remote_sfence_vma(struct cpumask *cmask, unsigned long start,
- unsigned long size)
-{
- struct cpumask hmask;
-
- riscv_cpuid_to_hartid_mask(cmask, &hmask);
- sbi_remote_sfence_vma(hmask.bits, start, size);
-}
-
-#define flush_tlb_all() sbi_remote_sfence_vma(NULL, 0, -1)
-
-#define flush_tlb_range(vma, start, end) \
- remote_sfence_vma(mm_cpumask((vma)->vm_mm), start, (end) - (start))
-
-static inline void flush_tlb_page(struct vm_area_struct *vma,
- unsigned long addr)
-{
- flush_tlb_range(vma, addr, addr + PAGE_SIZE);
-}
-
-#define flush_tlb_mm(mm) \
- remote_sfence_vma(mm_cpumask(mm), 0, -1)
-
#endif /* CONFIG_SMP */
/* Flush a range of kernel pages */
diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
index 74055e1d6f21..9d9a17335686 100644
--- a/arch/riscv/mm/Makefile
+++ b/arch/riscv/mm/Makefile
@@ -13,4 +13,7 @@ obj-y += cacheflush.o
obj-y += context.o
obj-y += sifive_l2_cache.o
+ifeq ($(CONFIG_MMU),y)
+obj-$(CONFIG_SMP) += tlbflush.o
+endif
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
new file mode 100644
index 000000000000..df93b26f1b9d
--- /dev/null
+++ b/arch/riscv/mm/tlbflush.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <asm/sbi.h>
+
+void flush_tlb_all(void)
+{
+ sbi_remote_sfence_vma(NULL, 0, -1);
+}
+
+static void __sbi_tlb_flush_range(struct cpumask *cmask, unsigned long start,
+ unsigned long size)
+{
+ struct cpumask hmask;
+
+ riscv_cpuid_to_hartid_mask(cmask, &hmask);
+ sbi_remote_sfence_vma(hmask.bits, start, size);
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+ __sbi_tlb_flush_range(mm_cpumask(mm), 0, -1);
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
+{
+ __sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), addr, PAGE_SIZE);
+}
+
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ __sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), start, end - start);
+}
--
2.20.1
_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv
^ permalink raw reply related [flat|nested] 26+ messages in thread
* Re: [PATCH 4/6] riscv: cleanup riscv_cpuid_to_hartid_mask
2019-08-21 14:58 ` [PATCH 4/6] riscv: cleanup riscv_cpuid_to_hartid_mask Christoph Hellwig
@ 2019-08-24 0:03 ` Atish Patra
2019-09-05 8:50 ` Paul Walmsley
1 sibling, 0 replies; 26+ messages in thread
From: Atish Patra @ 2019-08-24 0:03 UTC (permalink / raw)
To: hch, paul.walmsley, palmer; +Cc: linux-riscv
On Wed, 2019-08-21 at 23:58 +0900, Christoph Hellwig wrote:
> Move the initial clearing of the mask from the callers to
> riscv_cpuid_to_hartid_mask, and remove the unused !CONFIG_SMP stub.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
> arch/riscv/include/asm/smp.h | 6 ------
> arch/riscv/include/asm/tlbflush.h | 1 -
> arch/riscv/kernel/smp.c | 1 +
> arch/riscv/mm/cacheflush.c | 1 -
> 4 files changed, 1 insertion(+), 8 deletions(-)
>
> diff --git a/arch/riscv/include/asm/smp.h
> b/arch/riscv/include/asm/smp.h
> index c6ed4d691def..a83451d73a4e 100644
> --- a/arch/riscv/include/asm/smp.h
> +++ b/arch/riscv/include/asm/smp.h
> @@ -61,11 +61,5 @@ static inline unsigned long
> cpuid_to_hartid_map(int cpu)
> return boot_cpu_hartid;
> }
>
> -static inline void riscv_cpuid_to_hartid_mask(const struct cpumask
> *in,
> - struct cpumask *out)
> -{
> - cpumask_set_cpu(cpuid_to_hartid_map(0), out);
> -}
> -
> #endif /* CONFIG_SMP */
> #endif /* _ASM_RISCV_SMP_H */
> diff --git a/arch/riscv/include/asm/tlbflush.h
> b/arch/riscv/include/asm/tlbflush.h
> index 4d9bbe8438bf..df31fe2ed09c 100644
> --- a/arch/riscv/include/asm/tlbflush.h
> +++ b/arch/riscv/include/asm/tlbflush.h
> @@ -47,7 +47,6 @@ static inline void remote_sfence_vma(struct cpumask
> *cmask, unsigned long start,
> {
> struct cpumask hmask;
>
> - cpumask_clear(&hmask);
> riscv_cpuid_to_hartid_mask(cmask, &hmask);
> sbi_remote_sfence_vma(hmask.bits, start, size);
> }
> diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
> index a3715d621f60..3836760d7aaf 100644
> --- a/arch/riscv/kernel/smp.c
> +++ b/arch/riscv/kernel/smp.c
> @@ -56,6 +56,7 @@ void riscv_cpuid_to_hartid_mask(const struct
> cpumask *in, struct cpumask *out)
> {
> int cpu;
>
> + cpumask_clear(out);
> for_each_cpu(cpu, in)
> cpumask_set_cpu(cpuid_to_hartid_map(cpu), out);
> }
> diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
> index 9ebcff8ba263..3f15938dec89 100644
> --- a/arch/riscv/mm/cacheflush.c
> +++ b/arch/riscv/mm/cacheflush.c
> @@ -47,7 +47,6 @@ void flush_icache_mm(struct mm_struct *mm, bool
> local)
> cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
> local |= cpumask_empty(&others);
> if (mm != current->active_mm || !local) {
> - cpumask_clear(&hmask);
> riscv_cpuid_to_hartid_mask(&others, &hmask);
> sbi_remote_fence_i(hmask.bits);
> } else {
Reviewed-by: Atish Patra <atish.patra@wdc.com>
--
Regards,
Atish
_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [PATCH 6/6] riscv: move the TLB flush logic out of line
2019-08-21 14:58 ` [PATCH 6/6] riscv: move the TLB flush logic out of line Christoph Hellwig
@ 2019-08-24 0:03 ` Atish Patra
2019-09-05 8:58 ` Paul Walmsley
1 sibling, 0 replies; 26+ messages in thread
From: Atish Patra @ 2019-08-24 0:03 UTC (permalink / raw)
To: hch, paul.walmsley, palmer; +Cc: linux-riscv
On Wed, 2019-08-21 at 23:58 +0900, Christoph Hellwig wrote:
> The TLB flush logic is going to become more complex. Start moving
> it out of line.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
> arch/riscv/include/asm/tlbflush.h | 37 ++++++-----------------------
> --
> arch/riscv/mm/Makefile | 3 +++
> arch/riscv/mm/tlbflush.c | 35 +++++++++++++++++++++++++++++
> 3 files changed, 45 insertions(+), 30 deletions(-)
> create mode 100644 arch/riscv/mm/tlbflush.c
>
> diff --git a/arch/riscv/include/asm/tlbflush.h
> b/arch/riscv/include/asm/tlbflush.h
> index df31fe2ed09c..075a784c66c5 100644
> --- a/arch/riscv/include/asm/tlbflush.h
> +++ b/arch/riscv/include/asm/tlbflush.h
> @@ -25,8 +25,13 @@ static inline void local_flush_tlb_page(unsigned
> long addr)
> __asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) :
> "memory");
> }
>
> -#ifndef CONFIG_SMP
> -
> +#ifdef CONFIG_SMP
> +void flush_tlb_all(void);
> +void flush_tlb_mm(struct mm_struct *mm);
> +void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
> +void flush_tlb_range(struct vm_area_struct *vma, unsigned long
> start,
> + unsigned long end);
> +#else /* CONFIG_SMP */
> #define flush_tlb_all() local_flush_tlb_all()
> #define flush_tlb_page(vma, addr) local_flush_tlb_page(addr)
>
> @@ -37,34 +42,6 @@ static inline void flush_tlb_range(struct
> vm_area_struct *vma,
> }
>
> #define flush_tlb_mm(mm) flush_tlb_all()
> -
> -#else /* CONFIG_SMP */
> -
> -#include <asm/sbi.h>
> -
> -static inline void remote_sfence_vma(struct cpumask *cmask, unsigned
> long start,
> - unsigned long size)
> -{
> - struct cpumask hmask;
> -
> - riscv_cpuid_to_hartid_mask(cmask, &hmask);
> - sbi_remote_sfence_vma(hmask.bits, start, size);
> -}
> -
> -#define flush_tlb_all() sbi_remote_sfence_vma(NULL, 0, -1)
> -
> -#define flush_tlb_range(vma, start, end) \
> - remote_sfence_vma(mm_cpumask((vma)->vm_mm), start, (end) -
> (start))
> -
> -static inline void flush_tlb_page(struct vm_area_struct *vma,
> - unsigned long addr)
> -{
> - flush_tlb_range(vma, addr, addr + PAGE_SIZE);
> -}
> -
> -#define flush_tlb_mm(mm) \
> - remote_sfence_vma(mm_cpumask(mm), 0, -1)
> -
> #endif /* CONFIG_SMP */
>
> /* Flush a range of kernel pages */
> diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
> index 74055e1d6f21..9d9a17335686 100644
> --- a/arch/riscv/mm/Makefile
> +++ b/arch/riscv/mm/Makefile
> @@ -13,4 +13,7 @@ obj-y += cacheflush.o
> obj-y += context.o
> obj-y += sifive_l2_cache.o
>
> +ifeq ($(CONFIG_MMU),y)
> +obj-$(CONFIG_SMP) += tlbflush.o
> +endif
> obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
> diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
> new file mode 100644
> index 000000000000..df93b26f1b9d
> --- /dev/null
> +++ b/arch/riscv/mm/tlbflush.c
> @@ -0,0 +1,35 @@
> +// SPDX-License-Identifier: GPL-2.0
> +
> +#include <linux/mm.h>
> +#include <linux/smp.h>
> +#include <asm/sbi.h>
> +
> +void flush_tlb_all(void)
> +{
> + sbi_remote_sfence_vma(NULL, 0, -1);
> +}
> +
> +static void __sbi_tlb_flush_range(struct cpumask *cmask, unsigned
> long start,
> + unsigned long size)
> +{
> + struct cpumask hmask;
> +
> + riscv_cpuid_to_hartid_mask(cmask, &hmask);
> + sbi_remote_sfence_vma(hmask.bits, start, size);
> +}
> +
> +void flush_tlb_mm(struct mm_struct *mm)
> +{
> + __sbi_tlb_flush_range(mm_cpumask(mm), 0, -1);
> +}
> +
> +void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
> +{
> + __sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), addr, PAGE_SIZE);
> +}
> +
> +void flush_tlb_range(struct vm_area_struct *vma, unsigned long
> start,
> + unsigned long end)
> +{
> + __sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), start, end -
> start);
> +}
Reviewed-by: Atish Patra <atish.patra@wdc.com>
--
Regards,
Atish
_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [PATCH 2/6] riscv: cleanup send_ipi_mask
2019-08-21 14:58 ` [PATCH 2/6] riscv: cleanup send_ipi_mask Christoph Hellwig
@ 2019-08-24 0:11 ` Atish Patra
2019-08-26 11:28 ` hch
2019-09-05 8:46 ` Paul Walmsley
1 sibling, 1 reply; 26+ messages in thread
From: Atish Patra @ 2019-08-24 0:11 UTC (permalink / raw)
To: hch, paul.walmsley, palmer; +Cc: linux-riscv
On Wed, 2019-08-21 at 23:58 +0900, Christoph Hellwig wrote:
> Use the special barriers for atomic bitops to make the intention
> a little more clear, and use riscv_cpuid_to_hartid_mask instead of
> open coding it.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
> arch/riscv/kernel/smp.c | 16 +++++++---------
> 1 file changed, 7 insertions(+), 9 deletions(-)
>
> diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
> index 8cd730239613..2e21669aa068 100644
> --- a/arch/riscv/kernel/smp.c
> +++ b/arch/riscv/kernel/smp.c
> @@ -80,17 +80,15 @@ static void ipi_stop(void)
>
> static void send_ipi_mask(const struct cpumask *mask, enum
> ipi_message_type op)
> {
> - int cpuid, hartid;
> struct cpumask hartid_mask;
> + int cpu;
>
> - cpumask_clear(&hartid_mask);
> - mb();
> - for_each_cpu(cpuid, mask) {
> - set_bit(op, &ipi_data[cpuid].bits);
> - hartid = cpuid_to_hartid_map(cpuid);
> - cpumask_set_cpu(hartid, &hartid_mask);
> - }
> - mb();
> + smp_mb__before_atomic();
> + for_each_cpu(cpu, mask)
> + set_bit(op, &ipi_data[cpu].bits);
> + smp_mb__after_atomic();
> +
> + riscv_cpuid_to_hartid_mask(mask, &hartid_mask);
Isn't that less optimized than previous one ?
This will iterate all the cpus set in mask twice during every ipi sent.
For now, we won't see any different. As we have more number of cpus in
RISC-V (hopefully one day ;) ;)), this may affect the performance.
> sbi_send_ipi(cpumask_bits(&hartid_mask));
> }
>
--
Regards,
Atish
_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [PATCH 3/6] riscv: optimize send_ipi_single
2019-08-21 14:58 ` [PATCH 3/6] riscv: optimize send_ipi_single Christoph Hellwig
@ 2019-08-24 0:26 ` Atish Patra
2019-08-26 11:29 ` hch
2019-09-05 8:48 ` Paul Walmsley
1 sibling, 1 reply; 26+ messages in thread
From: Atish Patra @ 2019-08-24 0:26 UTC (permalink / raw)
To: hch, paul.walmsley, palmer; +Cc: linux-riscv
On Wed, 2019-08-21 at 23:58 +0900, Christoph Hellwig wrote:
> Don't go through send_ipi_mask, but just set the op bit an then pass
> a
> simple generate hartid mask directly to sbi_send_ipi.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
> arch/riscv/kernel/smp.c | 8 +++++++-
> 1 file changed, 7 insertions(+), 1 deletion(-)
>
> diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
> index 2e21669aa068..a3715d621f60 100644
> --- a/arch/riscv/kernel/smp.c
> +++ b/arch/riscv/kernel/smp.c
> @@ -94,7 +94,13 @@ static void send_ipi_mask(const struct cpumask
> *mask, enum ipi_message_type op)
>
> static void send_ipi_single(int cpu, enum ipi_message_type op)
> {
> - send_ipi_mask(cpumask_of(cpu), op);
> + int hartid = cpuid_to_hartid_map(cpu);
> +
> + smp_mb__before_atomic();
> + set_bit(op, &ipi_data[cpu].bits);
> + smp_mb__after_atomic();
> +
> + sbi_send_ipi(cpumask_bits(cpumask_of(hartid)));
The only cost save I see is you don't have to cpumask_clear anymore.
Is there any other cost save ? If not is it worth duplicating the code
?
May be I am being too pedantic here.. :) :)
> }
>
> static inline void clear_ipi(void)
--
Regards,
Atish
_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [PATCH 5/6] riscv: don't use the rdtime(h) pseudo-instructions
2019-08-21 14:58 ` [PATCH 5/6] riscv: don't use the rdtime(h) pseudo-instructions Christoph Hellwig
@ 2019-08-24 0:37 ` Atish Patra
2019-08-24 0:43 ` Atish Patra
2019-08-26 11:30 ` hch
2019-09-05 8:55 ` Paul Walmsley
1 sibling, 2 replies; 26+ messages in thread
From: Atish Patra @ 2019-08-24 0:37 UTC (permalink / raw)
To: hch, paul.walmsley, palmer; +Cc: linux-riscv
On Wed, 2019-08-21 at 23:58 +0900, Christoph Hellwig wrote:
> If we just use the CSRs that these map to directly the code is
> simpler
> and doesn't require extra inline assembly code. Also fix up the top-
> level
> comment in timer-riscv.c to not talk about the cycle count or mention
> details of the clocksource interface, of which this file is just a
> consumer.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
> arch/riscv/include/asm/timex.h | 44 +++++++++++++++------------
> ----
> drivers/clocksource/timer-riscv.c | 17 +++---------
> 2 files changed, 25 insertions(+), 36 deletions(-)
>
> diff --git a/arch/riscv/include/asm/timex.h
> b/arch/riscv/include/asm/timex.h
> index 6a703ec9d796..c7ef131b9e4c 100644
> --- a/arch/riscv/include/asm/timex.h
> +++ b/arch/riscv/include/asm/timex.h
> @@ -6,43 +6,41 @@
> #ifndef _ASM_RISCV_TIMEX_H
> #define _ASM_RISCV_TIMEX_H
>
> -#include <asm/param.h>
> +#include <asm/csr.h>
>
> typedef unsigned long cycles_t;
>
> -static inline cycles_t get_cycles_inline(void)
> +static inline cycles_t get_cycles(void)
> {
> - cycles_t n;
> -
> - __asm__ __volatile__ (
> - "rdtime %0"
> - : "=r" (n));
> - return n;
> + return csr_read(CSR_TIME);
Does this work correctly in QEMU ? I was looking at the qemu code and
it looks like it returns cpu_get_host_ticks which seems wrong to me.
https://github.com/qemu/qemu/blob/master/target/riscv/csr.c#L213
> }
> -#define get_cycles get_cycles_inline
> +#define get_cycles get_cycles
>
> #ifdef CONFIG_64BIT
> -static inline uint64_t get_cycles64(void)
> +static inline u64 get_cycles64(void)
> +{
> + return get_cycles();
> +}
> +#else /* CONFIG_64BIT */
> +static inline u32 get_cycles_hi(void)
> {
> - return get_cycles();
> + return csr_read(CSR_TIMEH);
> }
> -#else
> -static inline uint64_t get_cycles64(void)
> +
> +static inline u64 get_cycles64(void)
> {
> - u32 lo, hi, tmp;
> - __asm__ __volatile__ (
> - "1:\n"
> - "rdtimeh %0\n"
> - "rdtime %1\n"
> - "rdtimeh %2\n"
> - "bne %0, %2, 1b"
> - : "=&r" (hi), "=&r" (lo), "=&r" (tmp));
> + u32 hi, lo;
> +
> + do {
> + hi = get_cycles_hi();
> + lo = get_cycles();
> + } while (hi != get_cycles_hi());
> +
> return ((u64)hi << 32) | lo;
> }
> -#endif
> +#endif /* CONFIG_64BIT */
>
> #define ARCH_HAS_READ_CURRENT_TIMER
> -
> static inline int read_current_timer(unsigned long *timer_val)
> {
> *timer_val = get_cycles();
> diff --git a/drivers/clocksource/timer-riscv.c
> b/drivers/clocksource/timer-riscv.c
> index 09e031176bc6..470c7ef02ea4 100644
> --- a/drivers/clocksource/timer-riscv.c
> +++ b/drivers/clocksource/timer-riscv.c
> @@ -2,6 +2,10 @@
> /*
> * Copyright (C) 2012 Regents of the University of California
> * Copyright (C) 2017 SiFive
> + *
> + * All RISC-V systems have a timer attached to every hart. These
> timers can be
> + * read from the "time" and "timeh" CSRs, and can use the SBI to
> setup
> + * events.
> */
> #include <linux/clocksource.h>
> #include <linux/clockchips.h>
> @@ -12,19 +16,6 @@
> #include <asm/smp.h>
> #include <asm/sbi.h>
>
> -/*
> - * All RISC-V systems have a timer attached to every hart. These
> timers can be
> - * read by the 'rdcycle' pseudo instruction, and can use the SBI to
> setup
> - * events. In order to abstract the architecture-specific timer
> reading and
> - * setting functions away from the clock event insertion code, we
> provide
> - * function pointers to the clockevent subsystem that perform two
> basic
> - * operations: rdtime() reads the timer on the current CPU, and
> - * next_event(delta) sets the next timer event to 'delta' cycles in
> the future.
> - * As the timers are inherently a per-cpu resource, these callbacks
> perform
> - * operations on the current hart. There is guaranteed to be
> exactly one timer
> - * per hart on all RISC-V systems.
> - */
> -
> static int riscv_clock_next_event(unsigned long delta,
> struct clock_event_device *ce)
> {
--
Regards,
Atish
_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [PATCH 5/6] riscv: don't use the rdtime(h) pseudo-instructions
2019-08-24 0:37 ` Atish Patra
@ 2019-08-24 0:43 ` Atish Patra
2019-08-26 11:30 ` hch
1 sibling, 0 replies; 26+ messages in thread
From: Atish Patra @ 2019-08-24 0:43 UTC (permalink / raw)
To: hch, paul.walmsley, palmer; +Cc: linux-riscv
On Fri, 2019-08-23 at 17:37 -0700, Atish Patra wrote:
> On Wed, 2019-08-21 at 23:58 +0900, Christoph Hellwig wrote:
> > If we just use the CSRs that these map to directly the code is
> > simpler
> > and doesn't require extra inline assembly code. Also fix up the
> > top-
> > level
> > comment in timer-riscv.c to not talk about the cycle count or
> > mention
> > details of the clocksource interface, of which this file is just a
> > consumer.
> >
> > Signed-off-by: Christoph Hellwig <hch@lst.de>
> > ---
> > arch/riscv/include/asm/timex.h | 44 +++++++++++++++------------
> > ----
> > drivers/clocksource/timer-riscv.c | 17 +++---------
> > 2 files changed, 25 insertions(+), 36 deletions(-)
> >
> > diff --git a/arch/riscv/include/asm/timex.h
> > b/arch/riscv/include/asm/timex.h
> > index 6a703ec9d796..c7ef131b9e4c 100644
> > --- a/arch/riscv/include/asm/timex.h
> > +++ b/arch/riscv/include/asm/timex.h
> > @@ -6,43 +6,41 @@
> > #ifndef _ASM_RISCV_TIMEX_H
> > #define _ASM_RISCV_TIMEX_H
> >
> > -#include <asm/param.h>
> > +#include <asm/csr.h>
> >
> > typedef unsigned long cycles_t;
> >
> > -static inline cycles_t get_cycles_inline(void)
> > +static inline cycles_t get_cycles(void)
> > {
> > - cycles_t n;
> > -
> > - __asm__ __volatile__ (
> > - "rdtime %0"
> > - : "=r" (n));
> > - return n;
> > + return csr_read(CSR_TIME);
>
> Does this work correctly in QEMU ? I was looking at the qemu code and
> it looks like it returns cpu_get_host_ticks which seems wrong to me.
>
> https://github.com/qemu/qemu/blob/master/target/riscv/csr.c#L213
>
>
Never mind. I missed the CONFIG_USER_ONLY and got confused.
csr_read will also trap and get the correct value.
Regards,
Atish
> > }
> > -#define get_cycles get_cycles_inline
> > +#define get_cycles get_cycles
> >
> > #ifdef CONFIG_64BIT
> > -static inline uint64_t get_cycles64(void)
> > +static inline u64 get_cycles64(void)
> > +{
> > + return get_cycles();
> > +}
> > +#else /* CONFIG_64BIT */
> > +static inline u32 get_cycles_hi(void)
> > {
> > - return get_cycles();
> > + return csr_read(CSR_TIMEH);
> > }
> > -#else
> > -static inline uint64_t get_cycles64(void)
> > +
> > +static inline u64 get_cycles64(void)
> > {
> > - u32 lo, hi, tmp;
> > - __asm__ __volatile__ (
> > - "1:\n"
> > - "rdtimeh %0\n"
> > - "rdtime %1\n"
> > - "rdtimeh %2\n"
> > - "bne %0, %2, 1b"
> > - : "=&r" (hi), "=&r" (lo), "=&r" (tmp));
> > + u32 hi, lo;
> > +
> > + do {
> > + hi = get_cycles_hi();
> > + lo = get_cycles();
> > + } while (hi != get_cycles_hi());
> > +
> > return ((u64)hi << 32) | lo;
> > }
> > -#endif
> > +#endif /* CONFIG_64BIT */
> >
> > #define ARCH_HAS_READ_CURRENT_TIMER
> > -
> > static inline int read_current_timer(unsigned long *timer_val)
> > {
> > *timer_val = get_cycles();
> > diff --git a/drivers/clocksource/timer-riscv.c
> > b/drivers/clocksource/timer-riscv.c
> > index 09e031176bc6..470c7ef02ea4 100644
> > --- a/drivers/clocksource/timer-riscv.c
> > +++ b/drivers/clocksource/timer-riscv.c
> > @@ -2,6 +2,10 @@
> > /*
> > * Copyright (C) 2012 Regents of the University of California
> > * Copyright (C) 2017 SiFive
> > + *
> > + * All RISC-V systems have a timer attached to every hart. These
> > timers can be
> > + * read from the "time" and "timeh" CSRs, and can use the SBI to
> > setup
> > + * events.
> > */
> > #include <linux/clocksource.h>
> > #include <linux/clockchips.h>
> > @@ -12,19 +16,6 @@
> > #include <asm/smp.h>
> > #include <asm/sbi.h>
> >
> > -/*
> > - * All RISC-V systems have a timer attached to every hart. These
> > timers can be
> > - * read by the 'rdcycle' pseudo instruction, and can use the SBI
> > to
> > setup
> > - * events. In order to abstract the architecture-specific timer
> > reading and
> > - * setting functions away from the clock event insertion code, we
> > provide
> > - * function pointers to the clockevent subsystem that perform two
> > basic
> > - * operations: rdtime() reads the timer on the current CPU, and
> > - * next_event(delta) sets the next timer event to 'delta' cycles
> > in
> > the future.
> > - * As the timers are inherently a per-cpu resource, these
> > callbacks
> > perform
> > - * operations on the current hart. There is guaranteed to be
> > exactly one timer
> > - * per hart on all RISC-V systems.
> > - */
> > -
> > static int riscv_clock_next_event(unsigned long delta,
> > struct clock_event_device *ce)
> > {
--
Regards,
Atish
_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [PATCH 1/6] riscv: refactor the IPI code
2019-08-21 14:58 ` [PATCH 1/6] riscv: refactor the IPI code Christoph Hellwig
@ 2019-08-24 1:03 ` Atish Patra
2019-09-05 8:44 ` Paul Walmsley
1 sibling, 0 replies; 26+ messages in thread
From: Atish Patra @ 2019-08-24 1:03 UTC (permalink / raw)
To: hch, paul.walmsley, palmer; +Cc: linux-riscv
On Wed, 2019-08-21 at 23:58 +0900, Christoph Hellwig wrote:
> This prepares for adding native non-SBI IPI code.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
> arch/riscv/kernel/smp.c | 55 +++++++++++++++++++++++--------------
> ----
> 1 file changed, 31 insertions(+), 24 deletions(-)
>
> diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
> index 5a9834503a2f..8cd730239613 100644
> --- a/arch/riscv/kernel/smp.c
> +++ b/arch/riscv/kernel/smp.c
> @@ -78,13 +78,38 @@ static void ipi_stop(void)
> wait_for_interrupt();
> }
>
> +static void send_ipi_mask(const struct cpumask *mask, enum
> ipi_message_type op)
> +{
> + int cpuid, hartid;
> + struct cpumask hartid_mask;
> +
> + cpumask_clear(&hartid_mask);
> + mb();
> + for_each_cpu(cpuid, mask) {
> + set_bit(op, &ipi_data[cpuid].bits);
> + hartid = cpuid_to_hartid_map(cpuid);
> + cpumask_set_cpu(hartid, &hartid_mask);
> + }
> + mb();
> + sbi_send_ipi(cpumask_bits(&hartid_mask));
> +}
> +
> +static void send_ipi_single(int cpu, enum ipi_message_type op)
> +{
> + send_ipi_mask(cpumask_of(cpu), op);
> +}
> +
> +static inline void clear_ipi(void)
> +{
> + csr_clear(CSR_SIP, SIE_SSIE);
> +}
> +
> void riscv_software_interrupt(void)
> {
> unsigned long *pending_ipis =
> &ipi_data[smp_processor_id()].bits;
> unsigned long *stats = ipi_data[smp_processor_id()].stats;
>
> - /* Clear pending IPI */
> - csr_clear(CSR_SIP, SIE_SSIE);
> + clear_ipi();
>
> while (true) {
> unsigned long ops;
> @@ -118,23 +143,6 @@ void riscv_software_interrupt(void)
> }
> }
>
> -static void
> -send_ipi_message(const struct cpumask *to_whom, enum
> ipi_message_type operation)
> -{
> - int cpuid, hartid;
> - struct cpumask hartid_mask;
> -
> - cpumask_clear(&hartid_mask);
> - mb();
> - for_each_cpu(cpuid, to_whom) {
> - set_bit(operation, &ipi_data[cpuid].bits);
> - hartid = cpuid_to_hartid_map(cpuid);
> - cpumask_set_cpu(hartid, &hartid_mask);
> - }
> - mb();
> - sbi_send_ipi(cpumask_bits(&hartid_mask));
> -}
> -
> static const char * const ipi_names[] = {
> [IPI_RESCHEDULE] = "Rescheduling interrupts",
> [IPI_CALL_FUNC] = "Function call interrupts",
> @@ -156,12 +164,12 @@ void show_ipi_stats(struct seq_file *p, int
> prec)
>
> void arch_send_call_function_ipi_mask(struct cpumask *mask)
> {
> - send_ipi_message(mask, IPI_CALL_FUNC);
> + send_ipi_mask(mask, IPI_CALL_FUNC);
> }
>
> void arch_send_call_function_single_ipi(int cpu)
> {
> - send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
> + send_ipi_single(cpu, IPI_CALL_FUNC);
> }
>
> void smp_send_stop(void)
> @@ -176,7 +184,7 @@ void smp_send_stop(void)
>
> if (system_state <= SYSTEM_RUNNING)
> pr_crit("SMP: stopping secondary CPUs\n");
> - send_ipi_message(&mask, IPI_CPU_STOP);
> + send_ipi_mask(&mask, IPI_CPU_STOP);
> }
>
> /* Wait up to one second for other CPUs to stop */
> @@ -191,6 +199,5 @@ void smp_send_stop(void)
>
> void smp_send_reschedule(int cpu)
> {
> - send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
> + send_ipi_single(cpu, IPI_RESCHEDULE);
> }
> -
Reviewed-by: Atish Patra <atish.patra@wdc.com>
--
Regards,
Atish
_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [PATCH 2/6] riscv: cleanup send_ipi_mask
2019-08-24 0:11 ` Atish Patra
@ 2019-08-26 11:28 ` hch
2019-08-27 18:45 ` Atish Patra
0 siblings, 1 reply; 26+ messages in thread
From: hch @ 2019-08-26 11:28 UTC (permalink / raw)
To: Atish Patra; +Cc: linux-riscv, palmer, hch, paul.walmsley
On Sat, Aug 24, 2019 at 12:11:15AM +0000, Atish Patra wrote:
> Isn't that less optimized than the previous one?
>
> This will iterate over all the cpus set in the mask twice during every ipi sent.
> For now, we won't see any difference. As we get a larger number of cpus in
> RISC-V (hopefully one day ;) ;)), this may affect the performance.
By then we are hopefully done with using the SBI IPI code :) The native
IPI code this refactor is preparing for won't need the hartid
translation for example. The point of this patch isn't really to
micro-optimize, but to make the code clear and obvious.
_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [PATCH 3/6] riscv: optimize send_ipi_single
2019-08-24 0:26 ` Atish Patra
@ 2019-08-26 11:29 ` hch
2019-08-27 18:48 ` Atish Patra
0 siblings, 1 reply; 26+ messages in thread
From: hch @ 2019-08-26 11:29 UTC (permalink / raw)
To: Atish Patra; +Cc: linux-riscv, palmer, hch, paul.walmsley
On Sat, Aug 24, 2019 at 12:26:26AM +0000, Atish Patra wrote:
> > static void send_ipi_single(int cpu, enum ipi_message_type op)
> > {
> > - send_ipi_mask(cpumask_of(cpu), op);
> > + int hartid = cpuid_to_hartid_map(cpu);
> > +
> > + smp_mb__before_atomic();
> > + set_bit(op, &ipi_data[cpu].bits);
> > + smp_mb__after_atomic();
> > +
> > + sbi_send_ipi(cpumask_bits(cpumask_of(hartid)));
>
> The only cost saving I see is that you don't have to cpumask_clear anymore.
> Is there any other cost saving? If not, is it worth duplicating the
> code?
>
> Maybe I am being too pedantic here.. :) :)
It avoids the additional potentially huge cpumask, and generally makes
the code a lot more obvious. This might not really be needed, but
helps with sharing the code nicely with the native IPI path.
_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [PATCH 5/6] riscv: don't use the rdtime(h) pseudo-instructions
2019-08-24 0:37 ` Atish Patra
2019-08-24 0:43 ` Atish Patra
@ 2019-08-26 11:30 ` hch
2019-08-27 18:50 ` Atish Patra
1 sibling, 1 reply; 26+ messages in thread
From: hch @ 2019-08-26 11:30 UTC (permalink / raw)
To: Atish Patra; +Cc: linux-riscv, palmer, hch, paul.walmsley
On Sat, Aug 24, 2019 at 12:37:02AM +0000, Atish Patra wrote:
> > -static inline cycles_t get_cycles_inline(void)
> > +static inline cycles_t get_cycles(void)
> > {
> > - cycles_t n;
> > -
> > - __asm__ __volatile__ (
> > - "rdtime %0"
> > - : "=r" (n));
> > - return n;
> > + return csr_read(CSR_TIME);
>
> Does this work correctly in QEMU ? I was looking at the qemu code and
> it looks like it returns cpu_get_host_ticks which seems wrong to me.
It better should. rdtime is just a pseudo-instruction that the
assembler translates to a CSR read. (in other words something totally
pointless, no idea why it even is in the spec).
_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [PATCH 2/6] riscv: cleanup send_ipi_mask
2019-08-26 11:28 ` hch
@ 2019-08-27 18:45 ` Atish Patra
0 siblings, 0 replies; 26+ messages in thread
From: Atish Patra @ 2019-08-27 18:45 UTC (permalink / raw)
To: hch; +Cc: linux-riscv, palmer, paul.walmsley
On Mon, 2019-08-26 at 13:28 +0200, hch@lst.de wrote:
> On Sat, Aug 24, 2019 at 12:11:15AM +0000, Atish Patra wrote:
> > Isn't that less optimized than the previous one?
> >
> > This will iterate over all the cpus set in the mask twice during every
> > ipi sent.
> > For now, we won't see any difference. As we get a larger number of
> > cpus in
> > RISC-V (hopefully one day ;) ;)), this may affect the performance.
>
> By then we are hopefully done with using the SBI IPI code :) The
> native
> IPI code this refactor is preparing for won't need the hartid
> translation for example. The point of this patch isn't really to
> micro-optimize, but to make the code clear and obvious.
ok. Sounds good to me.
Reviewed-by: Atish Patra <atish.patra@wdc.com>
--
Regards,
Atish
_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [PATCH 3/6] riscv: optimize send_ipi_single
2019-08-26 11:29 ` hch
@ 2019-08-27 18:48 ` Atish Patra
0 siblings, 0 replies; 26+ messages in thread
From: Atish Patra @ 2019-08-27 18:48 UTC (permalink / raw)
To: hch; +Cc: linux-riscv, palmer, paul.walmsley
On Mon, 2019-08-26 at 13:29 +0200, hch@lst.de wrote:
> On Sat, Aug 24, 2019 at 12:26:26AM +0000, Atish Patra wrote:
> > > static void send_ipi_single(int cpu, enum ipi_message_type op)
> > > {
> > > - send_ipi_mask(cpumask_of(cpu), op);
> > > + int hartid = cpuid_to_hartid_map(cpu);
> > > +
> > > + smp_mb__before_atomic();
> > > + set_bit(op, &ipi_data[cpu].bits);
> > > + smp_mb__after_atomic();
> > > +
> > > + sbi_send_ipi(cpumask_bits(cpumask_of(hartid)));
> >
> > The only cost save I see is you don't have to cpumask_clear
> > anymore.
> > Is there any other cost save ? If not is it worth duplicating the
> > code
> > ?
> >
> > May be I am being too pedantic here.. :) :)
>
> It avoids the additional potentially huge cpumask, and generally
> makes
> the code a lot more obvious. This might not really be needed, but
> helps with sharing the code nicely with the native IPI path.
ok.
Reviewed-by: Atish Patra <atish.patra@wdc.com>
--
Regards,
Atish
_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [PATCH 5/6] riscv: don't use the rdtime(h) pseudo-instructions
2019-08-26 11:30 ` hch
@ 2019-08-27 18:50 ` Atish Patra
0 siblings, 0 replies; 26+ messages in thread
From: Atish Patra @ 2019-08-27 18:50 UTC (permalink / raw)
To: hch; +Cc: linux-riscv, palmer, paul.walmsley
On Mon, 2019-08-26 at 13:30 +0200, hch@lst.de wrote:
> On Sat, Aug 24, 2019 at 12:37:02AM +0000, Atish Patra wrote:
> > > -static inline cycles_t get_cycles_inline(void)
> > > +static inline cycles_t get_cycles(void)
> > > {
> > > - cycles_t n;
> > > -
> > > - __asm__ __volatile__ (
> > > - "rdtime %0"
> > > - : "=r" (n));
> > > - return n;
> > > + return csr_read(CSR_TIME);
> >
> > Does this work correctly in QEMU ? I was looking at the qemu code
> > and
> > it looks like it returns cpu_get_host_ticks which seems wrong to
> > me.
>
> It better should. rdtime is just a pseudo-instruction that the
> assembler translates to a CSR read. (in other words something
> totally
> pointless, no idea why it even is in the spec).
Yes. I did not look at the usermode macro carefully in the qemu code.
Reviewed-by: Atish Patra <atish.patra@wdc.com>
--
Regards,
Atish
_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [PATCH 1/6] riscv: refactor the IPI code
2019-08-21 14:58 ` [PATCH 1/6] riscv: refactor the IPI code Christoph Hellwig
2019-08-24 1:03 ` Atish Patra
@ 2019-09-05 8:44 ` Paul Walmsley
1 sibling, 0 replies; 26+ messages in thread
From: Paul Walmsley @ 2019-09-05 8:44 UTC (permalink / raw)
To: Christoph Hellwig; +Cc: Atish Patra, linux-riscv, Palmer Dabbelt
On Wed, 21 Aug 2019, Christoph Hellwig wrote:
> This prepares for adding native non-SBI IPI code.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
Thanks - dropped the previous version from the v5.4-rc1 queue, and queued
this one in its place, with Atish's Reviewed-by:.
- Paul
_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [PATCH 2/6] riscv: cleanup send_ipi_mask
2019-08-21 14:58 ` [PATCH 2/6] riscv: cleanup send_ipi_mask Christoph Hellwig
2019-08-24 0:11 ` Atish Patra
@ 2019-09-05 8:46 ` Paul Walmsley
1 sibling, 0 replies; 26+ messages in thread
From: Paul Walmsley @ 2019-09-05 8:46 UTC (permalink / raw)
To: Christoph Hellwig; +Cc: Atish Patra, linux-riscv, Palmer Dabbelt
On Wed, 21 Aug 2019, Christoph Hellwig wrote:
> Use the special barriers for atomic bitops to make the intention
> a little more clear, and use riscv_cpuid_to_hartid_mask instead of
> open coding it.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
Thanks, queued for v5.4-rc1 with Atish's Reviewed-by:.
- Paul
_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [PATCH 3/6] riscv: optimize send_ipi_single
2019-08-21 14:58 ` [PATCH 3/6] riscv: optimize send_ipi_single Christoph Hellwig
2019-08-24 0:26 ` Atish Patra
@ 2019-09-05 8:48 ` Paul Walmsley
1 sibling, 0 replies; 26+ messages in thread
From: Paul Walmsley @ 2019-09-05 8:48 UTC (permalink / raw)
To: Christoph Hellwig; +Cc: Atish Patra, linux-riscv, Palmer Dabbelt
On Wed, 21 Aug 2019, Christoph Hellwig wrote:
> Don't go through send_ipi_mask, but just set the op bit an then pass a
> simple generate hartid mask directly to sbi_send_ipi.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
Thanks, I fixed some minor issues in the patch description and queued
the following for v5.4-rc1.
- Paul
From: Christoph Hellwig <hch@lst.de>
Date: Wed, 21 Aug 2019 23:58:34 +0900
Subject: [PATCH] riscv: optimize send_ipi_single
Don't go through send_ipi_mask, but just set the op bit and then pass
a simple generated hartid mask directly to sbi_send_ipi.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Atish Patra <atish.patra@wdc.com>
[paul.walmsley@sifive.com: minor patch description fixes]
Signed-off-by: Paul Walmsley <paul.walmsley@sifive.com>
---
arch/riscv/kernel/smp.c | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index 2e21669aa068..a3715d621f60 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -94,7 +94,13 @@ static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
static void send_ipi_single(int cpu, enum ipi_message_type op)
{
- send_ipi_mask(cpumask_of(cpu), op);
+ int hartid = cpuid_to_hartid_map(cpu);
+
+ smp_mb__before_atomic();
+ set_bit(op, &ipi_data[cpu].bits);
+ smp_mb__after_atomic();
+
+ sbi_send_ipi(cpumask_bits(cpumask_of(hartid)));
}
static inline void clear_ipi(void)
--
2.23.0
_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv
^ permalink raw reply related [flat|nested] 26+ messages in thread
* Re: [PATCH 4/6] riscv: cleanup riscv_cpuid_to_hartid_mask
2019-08-21 14:58 ` [PATCH 4/6] riscv: cleanup riscv_cpuid_to_hartid_mask Christoph Hellwig
2019-08-24 0:03 ` Atish Patra
@ 2019-09-05 8:50 ` Paul Walmsley
1 sibling, 0 replies; 26+ messages in thread
From: Paul Walmsley @ 2019-09-05 8:50 UTC (permalink / raw)
To: Christoph Hellwig; +Cc: Atish Patra, linux-riscv, Palmer Dabbelt
On Wed, 21 Aug 2019, Christoph Hellwig wrote:
> Move the initial clearing of the mask from the callers to
> riscv_cpuid_to_hartid_mask, and remove the unused !CONFIG_SMP stub.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
Thanks, queued for v5.4-rc1 with Atish's Reviewed-by:.
- Paul
_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [PATCH 5/6] riscv: don't use the rdtime(h) pseudo-instructions
2019-08-21 14:58 ` [PATCH 5/6] riscv: don't use the rdtime(h) pseudo-instructions Christoph Hellwig
2019-08-24 0:37 ` Atish Patra
@ 2019-09-05 8:55 ` Paul Walmsley
1 sibling, 0 replies; 26+ messages in thread
From: Paul Walmsley @ 2019-09-05 8:55 UTC (permalink / raw)
To: Christoph Hellwig; +Cc: Atish Patra, linux-riscv, Palmer Dabbelt
On Wed, 21 Aug 2019, Christoph Hellwig wrote:
> If we just use the CSRs that these map to directly the code is simpler
> and doesn't require extra inline assembly code. Also fix up the top-level
> comment in timer-riscv.c to not talk about the cycle count or mention
> details of the clocksource interface, of which this file is just a
> consumer.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
Thanks, queued for v5.4-rc1 with Atish's Reviewed-by:.
- Paul
_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [PATCH 6/6] riscv: move the TLB flush logic out of line
2019-08-21 14:58 ` [PATCH 6/6] riscv: move the TLB flush logic out of line Christoph Hellwig
2019-08-24 0:03 ` Atish Patra
@ 2019-09-05 8:58 ` Paul Walmsley
1 sibling, 0 replies; 26+ messages in thread
From: Paul Walmsley @ 2019-09-05 8:58 UTC (permalink / raw)
To: Christoph Hellwig; +Cc: Atish Patra, linux-riscv, Palmer Dabbelt
On Wed, 21 Aug 2019, Christoph Hellwig wrote:
> The TLB flush logic is going to become more complex. Start moving
> it out of line.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
Thanks. "checkpatch.pl --strict" reported several whitespace issues with
this patch. I fixed those up here and queued the following for v5.4-rc1.
- Paul
From: Christoph Hellwig <hch@lst.de>
Date: Wed, 21 Aug 2019 23:58:37 +0900
Subject: [PATCH] riscv: move the TLB flush logic out of line
The TLB flush logic is going to become more complex. Start moving
it out of line.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Atish Patra <atish.patra@wdc.com>
[paul.walmsley@sifive.com: fixed checkpatch whitespace warnings]
Signed-off-by: Paul Walmsley <paul.walmsley@sifive.com>
---
arch/riscv/include/asm/tlbflush.h | 37 ++++++-------------------------
arch/riscv/mm/Makefile | 3 +++
arch/riscv/mm/tlbflush.c | 35 +++++++++++++++++++++++++++++
3 files changed, 45 insertions(+), 30 deletions(-)
create mode 100644 arch/riscv/mm/tlbflush.c
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index df31fe2ed09c..37ae4e367ad2 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -25,8 +25,13 @@ static inline void local_flush_tlb_page(unsigned long addr)
__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory");
}
-#ifndef CONFIG_SMP
-
+#ifdef CONFIG_SMP
+void flush_tlb_all(void);
+void flush_tlb_mm(struct mm_struct *mm);
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+#else /* CONFIG_SMP */
#define flush_tlb_all() local_flush_tlb_all()
#define flush_tlb_page(vma, addr) local_flush_tlb_page(addr)
@@ -37,34 +42,6 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
}
#define flush_tlb_mm(mm) flush_tlb_all()
-
-#else /* CONFIG_SMP */
-
-#include <asm/sbi.h>
-
-static inline void remote_sfence_vma(struct cpumask *cmask, unsigned long start,
- unsigned long size)
-{
- struct cpumask hmask;
-
- riscv_cpuid_to_hartid_mask(cmask, &hmask);
- sbi_remote_sfence_vma(hmask.bits, start, size);
-}
-
-#define flush_tlb_all() sbi_remote_sfence_vma(NULL, 0, -1)
-
-#define flush_tlb_range(vma, start, end) \
- remote_sfence_vma(mm_cpumask((vma)->vm_mm), start, (end) - (start))
-
-static inline void flush_tlb_page(struct vm_area_struct *vma,
- unsigned long addr)
-{
- flush_tlb_range(vma, addr, addr + PAGE_SIZE);
-}
-
-#define flush_tlb_mm(mm) \
- remote_sfence_vma(mm_cpumask(mm), 0, -1)
-
#endif /* CONFIG_SMP */
/* Flush a range of kernel pages */
diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
index 74055e1d6f21..9d9a17335686 100644
--- a/arch/riscv/mm/Makefile
+++ b/arch/riscv/mm/Makefile
@@ -13,4 +13,7 @@ obj-y += cacheflush.o
obj-y += context.o
obj-y += sifive_l2_cache.o
+ifeq ($(CONFIG_MMU),y)
+obj-$(CONFIG_SMP) += tlbflush.o
+endif
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
new file mode 100644
index 000000000000..24cd33d2c48f
--- /dev/null
+++ b/arch/riscv/mm/tlbflush.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <asm/sbi.h>
+
+void flush_tlb_all(void)
+{
+ sbi_remote_sfence_vma(NULL, 0, -1);
+}
+
+static void __sbi_tlb_flush_range(struct cpumask *cmask, unsigned long start,
+ unsigned long size)
+{
+ struct cpumask hmask;
+
+ riscv_cpuid_to_hartid_mask(cmask, &hmask);
+ sbi_remote_sfence_vma(hmask.bits, start, size);
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+ __sbi_tlb_flush_range(mm_cpumask(mm), 0, -1);
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
+{
+ __sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), addr, PAGE_SIZE);
+}
+
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ __sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), start, end - start);
+}
--
2.23.0
_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv
^ permalink raw reply related [flat|nested] 26+ messages in thread
end of thread, other threads:[~2019-09-05 8:58 UTC | newest]
Thread overview: 26+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-08-21 14:58 misc riscv cleanups Christoph Hellwig
2019-08-21 14:58 ` [PATCH 1/6] riscv: refactor the IPI code Christoph Hellwig
2019-08-24 1:03 ` Atish Patra
2019-09-05 8:44 ` Paul Walmsley
2019-08-21 14:58 ` [PATCH 2/6] riscv: cleanup send_ipi_mask Christoph Hellwig
2019-08-24 0:11 ` Atish Patra
2019-08-26 11:28 ` hch
2019-08-27 18:45 ` Atish Patra
2019-09-05 8:46 ` Paul Walmsley
2019-08-21 14:58 ` [PATCH 3/6] riscv: optimize send_ipi_single Christoph Hellwig
2019-08-24 0:26 ` Atish Patra
2019-08-26 11:29 ` hch
2019-08-27 18:48 ` Atish Patra
2019-09-05 8:48 ` Paul Walmsley
2019-08-21 14:58 ` [PATCH 4/6] riscv: cleanup riscv_cpuid_to_hartid_mask Christoph Hellwig
2019-08-24 0:03 ` Atish Patra
2019-09-05 8:50 ` Paul Walmsley
2019-08-21 14:58 ` [PATCH 5/6] riscv: don't use the rdtime(h) pseudo-instructions Christoph Hellwig
2019-08-24 0:37 ` Atish Patra
2019-08-24 0:43 ` Atish Patra
2019-08-26 11:30 ` hch
2019-08-27 18:50 ` Atish Patra
2019-09-05 8:55 ` Paul Walmsley
2019-08-21 14:58 ` [PATCH 6/6] riscv: move the TLB flush logic out of line Christoph Hellwig
2019-08-24 0:03 ` Atish Patra
2019-09-05 8:58 ` Paul Walmsley
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).