From: Alexandru Elisei <alexandru.elisei@arm.com>
To: maz@kernel.org, james.morse@arm.com, suzuki.poulose@arm.com,
	linux-arm-kernel@lists.infradead.org,
	kvmarm@lists.cs.columbia.edu, will@kernel.org,
	linux-kernel@vger.kernel.org
Subject: [RFC PATCH v4 30/39] KVM: arm64: nVHE: Context switch SPE state if VCPU has SPE
Date: Wed, 25 Aug 2021 17:18:06 +0100
Message-ID: <20210825161815.266051-31-alexandru.elisei@arm.com>
In-Reply-To: <20210825161815.266051-1-alexandru.elisei@arm.com>

For non-VHE systems, make the SPE register state part of the context that
is saved and restored at each world switch. The SPE buffer management
interrupt will be handled in a later patch.
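
The ordering of the new calls in __kvm_vcpu_run() is what makes this safe:
the host's SPE state is saved before the EL1&0 translation regime is switched
to the guest's, and restored only after the host sysregs (and thus the TTBRs)
are back. As a rough sketch, not part of the patch, condensing the switch.c
hunks below with everything unrelated to SPE elided:

	if (kvm_vcpu_has_spe(vcpu))
		__spe_save_host_state_nvhe(vcpu, host_ctxt);
	/* ... load guest Stage 2, activate traps ... */
	if (kvm_vcpu_has_spe(vcpu))
		__spe_restore_guest_state_nvhe(vcpu, guest_ctxt);
	/* ... enter the guest ... */
	if (kvm_vcpu_has_spe(vcpu))
		__spe_save_guest_state_nvhe(vcpu, guest_ctxt);
	/* ... restore host sysregs, including the TTBRs ... */
	if (kvm_vcpu_has_spe(vcpu))
		__spe_restore_host_state_nvhe(vcpu, host_ctxt);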

Signed-off-by: Alexandru Elisei <alexandru.elisei@arm.com>
---
 arch/arm64/include/asm/kvm_hyp.h        | 19 ++++++
 arch/arm64/kvm/hyp/include/hyp/spe-sr.h | 32 +++++++++
 arch/arm64/kvm/hyp/nvhe/Makefile        |  1 +
 arch/arm64/kvm/hyp/nvhe/debug-sr.c      |  6 +-
 arch/arm64/kvm/hyp/nvhe/spe-sr.c        | 87 +++++++++++++++++++++++++
 arch/arm64/kvm/hyp/nvhe/switch.c        | 29 +++++++--
 6 files changed, 165 insertions(+), 9 deletions(-)
 create mode 100644 arch/arm64/kvm/hyp/include/hyp/spe-sr.h
 create mode 100644 arch/arm64/kvm/hyp/nvhe/spe-sr.c

diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index 48619c2c0dc6..06e77a739458 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -88,6 +88,25 @@ void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu,
 				    struct kvm_cpu_context *host_ctxt);
 void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu,
 				       struct kvm_cpu_context *host_ctxt);
+#ifdef CONFIG_KVM_ARM_SPE
+void __spe_save_host_state_nvhe(struct kvm_vcpu *vcpu,
+				struct kvm_cpu_context *host_ctxt);
+void __spe_save_guest_state_nvhe(struct kvm_vcpu *vcpu,
+				 struct kvm_cpu_context *guest_ctxt);
+void __spe_restore_host_state_nvhe(struct kvm_vcpu *vcpu,
+				   struct kvm_cpu_context *host_ctxt);
+void __spe_restore_guest_state_nvhe(struct kvm_vcpu *vcpu,
+				    struct kvm_cpu_context *guest_ctxt);
+#else
+static inline void __spe_save_host_state_nvhe(struct kvm_vcpu *vcpu,
+					struct kvm_cpu_context *host_ctxt) {}
+static inline void __spe_save_guest_state_nvhe(struct kvm_vcpu *vcpu,
+					struct kvm_cpu_context *guest_ctxt) {}
+static inline void __spe_restore_host_state_nvhe(struct kvm_vcpu *vcpu,
+					struct kvm_cpu_context *host_ctxt) {}
+static inline void __spe_restore_guest_state_nvhe(struct kvm_vcpu *vcpu,
+					struct kvm_cpu_context *guest_ctxt) {}
+#endif
 #endif
 
 void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
diff --git a/arch/arm64/kvm/hyp/include/hyp/spe-sr.h b/arch/arm64/kvm/hyp/include/hyp/spe-sr.h
new file mode 100644
index 000000000000..d5f8f3ffc7d4
--- /dev/null
+++ b/arch/arm64/kvm/hyp/include/hyp/spe-sr.h
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 - ARM Ltd
+ * Author: Alexandru Elisei <alexandru.elisei@arm.com>
+ */
+
+#ifndef __ARM64_KVM_HYP_SPE_SR_H__
+#define __ARM64_KVM_HYP_SPE_SR_H__
+
+#include <linux/kvm_host.h>
+
+#include <asm/kvm_asm.h>
+
+static inline void __spe_save_common_state(struct kvm_cpu_context *ctxt)
+{
+	ctxt_sys_reg(ctxt, PMSICR_EL1) = read_sysreg_s(SYS_PMSICR_EL1);
+	ctxt_sys_reg(ctxt, PMSIRR_EL1) = read_sysreg_s(SYS_PMSIRR_EL1);
+	ctxt_sys_reg(ctxt, PMSFCR_EL1) = read_sysreg_s(SYS_PMSFCR_EL1);
+	ctxt_sys_reg(ctxt, PMSEVFR_EL1) = read_sysreg_s(SYS_PMSEVFR_EL1);
+	ctxt_sys_reg(ctxt, PMSLATFR_EL1) = read_sysreg_s(SYS_PMSLATFR_EL1);
+}
+
+static inline void __spe_restore_common_state(struct kvm_cpu_context *ctxt)
+{
+	write_sysreg_s(ctxt_sys_reg(ctxt, PMSICR_EL1), SYS_PMSICR_EL1);
+	write_sysreg_s(ctxt_sys_reg(ctxt, PMSIRR_EL1), SYS_PMSIRR_EL1);
+	write_sysreg_s(ctxt_sys_reg(ctxt, PMSFCR_EL1), SYS_PMSFCR_EL1);
+	write_sysreg_s(ctxt_sys_reg(ctxt, PMSEVFR_EL1), SYS_PMSEVFR_EL1);
+	write_sysreg_s(ctxt_sys_reg(ctxt, PMSLATFR_EL1), SYS_PMSLATFR_EL1);
+}
+
+#endif /* __ARM64_KVM_HYP_SPE_SR_H__ */
diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile
index 5df6193fc430..37dca45d85d5 100644
--- a/arch/arm64/kvm/hyp/nvhe/Makefile
+++ b/arch/arm64/kvm/hyp/nvhe/Makefile
@@ -15,6 +15,7 @@ lib-objs := $(addprefix ../../../lib/, $(lib-objs))
 obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o \
 	 hyp-main.o hyp-smp.o psci-relay.o early_alloc.o stub.o page_alloc.o \
 	 cache.o setup.o mm.o mem_protect.o
+obj-$(CONFIG_KVM_ARM_SPE) += spe-sr.o
 obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
 	 ../fpsimd.o ../hyp-entry.o ../exception.o ../pgtable.o
 obj-y += $(lib-objs)
diff --git a/arch/arm64/kvm/hyp/nvhe/debug-sr.c b/arch/arm64/kvm/hyp/nvhe/debug-sr.c
index 186b90b5fd20..1622615954b2 100644
--- a/arch/arm64/kvm/hyp/nvhe/debug-sr.c
+++ b/arch/arm64/kvm/hyp/nvhe/debug-sr.c
@@ -85,7 +85,8 @@ void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu,
 				    struct kvm_cpu_context *host_ctxt)
 {
 	/* Disable and flush SPE data generation */
-	if (vcpu->arch.flags & KVM_ARM64_DEBUG_SAVE_SPE_BUFFER)
+	if (!kvm_vcpu_has_spe(vcpu) &&
+	    vcpu->arch.flags & KVM_ARM64_DEBUG_SAVE_SPE_BUFFER)
 		__debug_save_spe(__ctxt_sys_reg(host_ctxt, PMSCR_EL1));
 	/* Disable and flush Self-Hosted Trace generation */
 	if (vcpu->arch.flags & KVM_ARM64_DEBUG_SAVE_TRBE_BUFFER)
@@ -100,7 +101,8 @@ void __debug_switch_to_guest(struct kvm_vcpu *vcpu)
 void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu,
 				       struct kvm_cpu_context *host_ctxt)
 {
-	if (vcpu->arch.flags & KVM_ARM64_DEBUG_SAVE_SPE_BUFFER)
+	if (!kvm_vcpu_has_spe(vcpu) &&
+	    vcpu->arch.flags & KVM_ARM64_DEBUG_SAVE_SPE_BUFFER)
 		__debug_restore_spe(ctxt_sys_reg(host_ctxt, PMSCR_EL1));
 	if (vcpu->arch.flags & KVM_ARM64_DEBUG_SAVE_TRBE_BUFFER)
 		__debug_restore_trace(vcpu->arch.host_debug_state.trfcr_el1);
diff --git a/arch/arm64/kvm/hyp/nvhe/spe-sr.c b/arch/arm64/kvm/hyp/nvhe/spe-sr.c
new file mode 100644
index 000000000000..46e47c9fd08f
--- /dev/null
+++ b/arch/arm64/kvm/hyp/nvhe/spe-sr.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 - ARM Ltd
+ * Author: Alexandru Elisei <alexandru.elisei@arm.com>
+ */
+
+#include <linux/kvm_host.h>
+
+#include <asm/kvm_hyp.h>
+
+#include <hyp/spe-sr.h>
+
+/*
+ * The owning exception level remains unchanged at EL1 during the world
+ * switch, which means that profiling is disabled for as long as we execute at
+ * EL2. KVM does not need to explicitly disable profiling, like it does when
+ * the VCPU does not have SPE and we change the buffer owning exception level,
+ * nor does it need to do any synchronization around sysreg save/restore.
+ */
+
+void __spe_save_host_state_nvhe(struct kvm_vcpu *vcpu,
+				struct kvm_cpu_context *host_ctxt)
+{
+	u64 pmblimitr;
+
+	pmblimitr = read_sysreg_s(SYS_PMBLIMITR_EL1);
+	if (pmblimitr & BIT(SYS_PMBLIMITR_EL1_E_SHIFT)) {
+		psb_csync();
+		dsb(nsh);
+		/*
+		 * The buffer performs indirect writes to system registers; a
+		 * context synchronization event is needed before the new
+		 * PMBPTR_EL1 value is visible to subsequent direct reads.
+		 */
+		isb();
+	}
+
+	ctxt_sys_reg(host_ctxt, PMBPTR_EL1) = read_sysreg_s(SYS_PMBPTR_EL1);
+	ctxt_sys_reg(host_ctxt, PMBSR_EL1) = read_sysreg_s(SYS_PMBSR_EL1);
+	ctxt_sys_reg(host_ctxt, PMBLIMITR_EL1) = pmblimitr;
+	ctxt_sys_reg(host_ctxt, PMSCR_EL1) = read_sysreg_s(SYS_PMSCR_EL1);
+	ctxt_sys_reg(host_ctxt, PMSCR_EL2) = read_sysreg_el2(SYS_PMSCR);
+
+	__spe_save_common_state(host_ctxt);
+}
+
+void __spe_save_guest_state_nvhe(struct kvm_vcpu *vcpu,
+				 struct kvm_cpu_context *guest_ctxt)
+{
+	if (read_sysreg_s(SYS_PMBLIMITR_EL1) & BIT(SYS_PMBLIMITR_EL1_E_SHIFT)) {
+		psb_csync();
+		dsb(nsh);
+		/* Ensure hardware updates to PMBPTR_EL1 are visible. */
+		isb();
+	}
+
+	ctxt_sys_reg(guest_ctxt, PMBPTR_EL1) = read_sysreg_s(SYS_PMBPTR_EL1);
+	ctxt_sys_reg(guest_ctxt, PMBSR_EL1) = read_sysreg_s(SYS_PMBSR_EL1);
+	/* PMBLIMITR_EL1 is updated only on a trapped write. */
+	ctxt_sys_reg(guest_ctxt, PMSCR_EL1) = read_sysreg_s(SYS_PMSCR_EL1);
+
+	__spe_save_common_state(guest_ctxt);
+}
+
+void __spe_restore_host_state_nvhe(struct kvm_vcpu *vcpu,
+				   struct kvm_cpu_context *host_ctxt)
+{
+	__spe_restore_common_state(host_ctxt);
+
+	write_sysreg_s(ctxt_sys_reg(host_ctxt, PMBPTR_EL1), SYS_PMBPTR_EL1);
+	write_sysreg_s(ctxt_sys_reg(host_ctxt, PMBSR_EL1), SYS_PMBSR_EL1);
+	write_sysreg_s(ctxt_sys_reg(host_ctxt, PMBLIMITR_EL1), SYS_PMBLIMITR_EL1);
+	write_sysreg_s(ctxt_sys_reg(host_ctxt, PMSCR_EL1), SYS_PMSCR_EL1);
+	write_sysreg_el2(ctxt_sys_reg(host_ctxt, PMSCR_EL2), SYS_PMSCR);
+}
+
+void __spe_restore_guest_state_nvhe(struct kvm_vcpu *vcpu,
+				    struct kvm_cpu_context *guest_ctxt)
+{
+	__spe_restore_common_state(guest_ctxt);
+
+	write_sysreg_s(ctxt_sys_reg(guest_ctxt, PMBPTR_EL1), SYS_PMBPTR_EL1);
+	write_sysreg_s(ctxt_sys_reg(guest_ctxt, PMBSR_EL1), SYS_PMBSR_EL1);
+	write_sysreg_s(ctxt_sys_reg(guest_ctxt, PMBLIMITR_EL1), SYS_PMBLIMITR_EL1);
+	write_sysreg_s(ctxt_sys_reg(guest_ctxt, PMSCR_EL1), SYS_PMSCR_EL1);
+	write_sysreg_el2(0, SYS_PMSCR);
+}
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index 04d654e71a6e..62ef2a5789ba 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -194,12 +194,16 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 
 	__sysreg_save_state_nvhe(host_ctxt);
 	/*
-	 * We must flush and disable the SPE buffer for nVHE, as
-	 * the translation regime(EL1&0) is going to be loaded with
-	 * that of the guest. And we must do this before we change the
-	 * translation regime to EL2 (via MDCR_EL2_E2PB == 0) and
-	 * before we load guest Stage1.
+	 * If the VCPU has the SPE feature bit set, then we save the host's SPE
+	 * context.
+	 *
+	 * Otherwise, we only flush and disable the SPE buffer for nVHE, as the
+	 * translation regime (EL1&0) is going to be loaded with that of the
+	 * guest. This must be done before we change the translation regime to
+	 * EL2 (via MDCR_EL2_E2PB == 0) and before we load the guest Stage 1.
 	 */
+	if (kvm_vcpu_has_spe(vcpu))
+		__spe_save_host_state_nvhe(vcpu, host_ctxt);
 	__debug_save_host_buffers_nvhe(vcpu, host_ctxt);
 
 	__kvm_adjust_pc(vcpu);
@@ -218,6 +222,9 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 	__load_guest_stage2(kern_hyp_va(vcpu->arch.hw_mmu));
 	__activate_traps(vcpu);
 
+	if (kvm_vcpu_has_spe(vcpu))
+		__spe_restore_guest_state_nvhe(vcpu, guest_ctxt);
+
 	__hyp_vgic_restore_state(vcpu);
 	__timer_enable_traps(vcpu);
 
@@ -232,6 +239,10 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 
 	__sysreg_save_state_nvhe(guest_ctxt);
 	__sysreg32_save_state(vcpu);
+
+	if (kvm_vcpu_has_spe(vcpu))
+		__spe_save_guest_state_nvhe(vcpu, guest_ctxt);
+
 	__timer_disable_traps(vcpu);
 	__hyp_vgic_save_state(vcpu);
 
@@ -244,10 +255,14 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 		__fpsimd_save_fpexc32(vcpu);
 
 	__debug_switch_to_host(vcpu);
+
 	/*
-	 * This must come after restoring the host sysregs, since a non-VHE
-	 * system may enable SPE here and make use of the TTBRs.
+	 * Restoring the host context must come after restoring the host
+	 * sysregs, since a non-VHE system may enable SPE here and make use of
+	 * the TTBRs.
 	 */
+	if (kvm_vcpu_has_spe(vcpu))
+		__spe_restore_host_state_nvhe(vcpu, host_ctxt);
 	__debug_restore_host_buffers_nvhe(vcpu, host_ctxt);
 
 	if (pmu_switch_needed)
-- 
2.33.0

