From: Fuad Tabba <tabba@google.com>
To: kvmarm@lists.cs.columbia.edu
Cc: maz@kernel.org, will@kernel.org, james.morse@arm.com,
	alexandru.elisei@arm.com, suzuki.poulose@arm.com,
	mark.rutland@arm.com, christoffer.dall@arm.com,
	pbonzini@redhat.com, drjones@redhat.com, qperret@google.com,
	kvm@vger.kernel.org, linux-arm-kernel@lists.infradead.org,
	kernel-team@android.com, tabba@google.com
Subject: [PATCH v2 11/13] KVM: arm64: Trap access to pVM restricted features
Date: Tue, 15 Jun 2021 14:39:48 +0100	[thread overview]
Message-ID: <20210615133950.693489-12-tabba@google.com> (raw)
In-Reply-To: <20210615133950.693489-1-tabba@google.com>

Trap accesses to restricted features for VMs running in protected
mode.

Accesses to feature registers are emulated, and only supported
features are exposed to protected VMs.

Accesses to restricted registers as well as restricted
instructions are trapped, and an undefined exception is injected
into the protected guest.

This only affects the functionality of protected VMs. It should
not affect non-protected VMs when KVM is running in protected
mode.

Signed-off-by: Fuad Tabba <tabba@google.com>
---
 arch/arm64/kvm/hyp/include/hyp/switch.h |   3 +
 arch/arm64/kvm/hyp/nvhe/switch.c        | 116 +++++++++++++++++++++---
 2 files changed, 105 insertions(+), 14 deletions(-)
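
For illustration (not part of this patch): when a protected guest
accesses a restricted feature, the handler conceptually refuses the
access by injecting an undefined instruction exception and reporting
the exit as handled. A minimal sketch of that flow, assuming a
hypothetical inject_undef64() helper (the real handler,
kvm_handle_pvm_restricted(), is added earlier in this series):

	/*
	 * Minimal sketch, not part of this patch: refuse a restricted
	 * access by queueing an UNDEF for the protected guest.
	 * inject_undef64() is a hypothetical helper for illustration.
	 */
	static int example_handle_restricted(struct kvm_vcpu *vcpu)
	{
		inject_undef64(vcpu);	/* guest takes an UNDEF at EL1 */

		return 1;		/* exit handled; resume the guest */
	}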

diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index f5d3d1da0aec..d9f087ed6e02 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -33,6 +33,9 @@
 extern struct exception_table_entry __start___kvm_ex_table;
 extern struct exception_table_entry __stop___kvm_ex_table;
 
+int kvm_handle_pvm_sys64(struct kvm_vcpu *vcpu);
+int kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu);
+
 /* Check whether the FP regs were dirtied while in the host-side run loop: */
 static inline bool update_fp_enabled(struct kvm_vcpu *vcpu)
 {
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index 967a3ad74fbd..8505201e1cfb 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -34,12 +34,74 @@ DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
 DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
 DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);
 
+/*
+ * Set EL2 configuration registers to trap restricted register accesses and
+ * instructions for protected VMs.
+ *
+ * Should be called right before vcpu entry to restrict its impact only to the
+ * protected guest.
+ */
+static void __activate_traps_pvm(struct kvm_vcpu *vcpu)
+{
+	u64 hcr;
+	u64 mdcr;
+	u64 cptr;
+
+	if (!kvm_vm_is_protected(kern_hyp_va(vcpu->kvm)))
+		return;
+
+	hcr = read_sysreg(hcr_el2);
+	mdcr = read_sysreg(mdcr_el2);
+	cptr = read_sysreg(cptr_el2);
+
+	hcr |= HCR_TID3 |			/* Feature Registers */
+	       HCR_TLOR |			/* LOR */
+	       HCR_RW |	HCR_TID0 |		/* AArch64 EL1 only */
+	       HCR_TERR |			/* RAS */
+	       HCR_TID5 |			/* Memory Tagging */
+	       HCR_TACR | HCR_TIDCP | HCR_TID1; /* Implementation defined */
+
+	hcr &= ~(HCR_DCT | HCR_ATA |	/* Memory Tagging */
+		 HCR_FIEN |		/* RAS */
+		 HCR_AMVOFFEN);		/* Disable AMU register virtualization */
+
+
+	/* Debug and Trace */
+	mdcr |= MDCR_EL2_TDRA | MDCR_EL2_TDA | MDCR_EL2_TDE |
+		MDCR_EL2_TDOSA | MDCR_EL2_TDCC | MDCR_EL2_TTRF |
+		MDCR_EL2_TPM | MDCR_EL2_TPMCR |
+		MDCR_EL2_TPMS; /* SPE */
+
+	mdcr &= ~(MDCR_EL2_HPME | MDCR_EL2_MTPME |		/* PMU */
+		  (MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT));	/* SPE */
+
+	cptr |= CPTR_EL2_TTA |	/* Trace */
+		CPTR_EL2_TAM |	/* AMU */
+		CPTR_EL2_TZ;	/* SVE */
+
+	/*
+	 * Clear res0 bits and set res1 bits to trap potential new features.
+	 * It's not guaranteed, but new features are typically added with the
+	 * trapping value being the inverse of the reserved one.
+	 */
+	hcr &= ~HCR_RES0;
+	mdcr &= ~MDCR_EL2_RES0;
+	cptr &= ~CPTR_NVHE_EL2_RES0;
+	cptr |= CPTR_NVHE_EL2_RES1;
+
+	/*  __deactivate_traps() restores these registers. */
+	write_sysreg(hcr, hcr_el2);
+	write_sysreg(mdcr, mdcr_el2);
+	write_sysreg(cptr, cptr_el2);
+}
+
 static void __activate_traps(struct kvm_vcpu *vcpu)
 {
 	u64 val;
 
 	___activate_traps(vcpu);
 	__activate_traps_common(vcpu);
+	__activate_traps_pvm(vcpu);
 
 	val = CPTR_EL2_DEFAULT;
 	val |= CPTR_EL2_TTA | CPTR_EL2_TAM;
@@ -165,30 +227,56 @@ static void __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
 		write_sysreg(pmu->events_host, pmcntenset_el0);
 }
 
+/**
+ * Handle system register accesses for protected VMs.
+ *
+ * Return 1 if handled, or 0 if not.
+ */
+static int handle_pvm_sys64(struct kvm_vcpu *vcpu)
+{
+	if (kvm_vm_is_protected(kern_hyp_va(vcpu->kvm)))
+		return kvm_handle_pvm_sys64(vcpu);
+	else
+		return 0;
+}
+
+/**
+ * Handle restricted feature accesses for protected VMs.
+ *
+ * Return 1 if handled, or 0 if not.
+ */
+static int handle_pvm_restricted(struct kvm_vcpu *vcpu)
+{
+	if (kvm_vm_is_protected(kern_hyp_va(vcpu->kvm)))
+		return kvm_handle_pvm_restricted(vcpu);
+	else
+		return 0;
+}
+
 typedef int (*exit_handle_fn)(struct kvm_vcpu *);
 
 static exit_handle_fn hyp_exit_handlers[] = {
-	[0 ... ESR_ELx_EC_MAX]		= NULL,
+	[0 ... ESR_ELx_EC_MAX]		= handle_pvm_restricted,
 	[ESR_ELx_EC_WFx]		= NULL,
-	[ESR_ELx_EC_CP15_32]		= NULL,
-	[ESR_ELx_EC_CP15_64]		= NULL,
-	[ESR_ELx_EC_CP14_MR]		= NULL,
-	[ESR_ELx_EC_CP14_LS]		= NULL,
-	[ESR_ELx_EC_CP14_64]		= NULL,
+	[ESR_ELx_EC_CP15_32]		= handle_pvm_restricted,
+	[ESR_ELx_EC_CP15_64]		= handle_pvm_restricted,
+	[ESR_ELx_EC_CP14_MR]		= handle_pvm_restricted,
+	[ESR_ELx_EC_CP14_LS]		= handle_pvm_restricted,
+	[ESR_ELx_EC_CP14_64]		= handle_pvm_restricted,
 	[ESR_ELx_EC_HVC32]		= NULL,
 	[ESR_ELx_EC_SMC32]		= NULL,
 	[ESR_ELx_EC_HVC64]		= NULL,
 	[ESR_ELx_EC_SMC64]		= NULL,
-	[ESR_ELx_EC_SYS64]		= NULL,
-	[ESR_ELx_EC_SVE]		= NULL,
+	[ESR_ELx_EC_SYS64]		= handle_pvm_sys64,
+	[ESR_ELx_EC_SVE]		= handle_pvm_restricted,
 	[ESR_ELx_EC_IABT_LOW]		= NULL,
 	[ESR_ELx_EC_DABT_LOW]		= NULL,
-	[ESR_ELx_EC_SOFTSTP_LOW]	= NULL,
-	[ESR_ELx_EC_WATCHPT_LOW]	= NULL,
-	[ESR_ELx_EC_BREAKPT_LOW]	= NULL,
-	[ESR_ELx_EC_BKPT32]		= NULL,
-	[ESR_ELx_EC_BRK64]		= NULL,
-	[ESR_ELx_EC_FP_ASIMD]		= NULL,
+	[ESR_ELx_EC_SOFTSTP_LOW]	= handle_pvm_restricted,
+	[ESR_ELx_EC_WATCHPT_LOW]	= handle_pvm_restricted,
+	[ESR_ELx_EC_BREAKPT_LOW]	= handle_pvm_restricted,
+	[ESR_ELx_EC_BKPT32]		= handle_pvm_restricted,
+	[ESR_ELx_EC_BRK64]		= handle_pvm_restricted,
+	[ESR_ELx_EC_FP_ASIMD]		= handle_pvm_restricted,
 	[ESR_ELx_EC_PAC]		= NULL,
 };
 
-- 
2.32.0.272.g935e593368-goog
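
For context on how the table above is consumed: hyp_exit_handlers[] is
indexed by the exception class (EC) field of ESR_EL2 for the current
guest exit. A minimal sketch of the lookup, using the existing
kvm_vcpu_trap_get_class() accessor (illustrative only; the series wires
this into the nVHE exit path rather than exactly as shown):

	/*
	 * Illustrative sketch, not code from this series: consult the
	 * EC-indexed table. A handler returns 1 once it has fully
	 * handled the exit; a NULL entry (or a handler returning 0)
	 * means the exit falls back to the generic handling path.
	 */
	static bool try_pvm_exit_handler(struct kvm_vcpu *vcpu)
	{
		exit_handle_fn fn =
			hyp_exit_handlers[kvm_vcpu_trap_get_class(vcpu)];

		return fn && fn(vcpu) == 1;
	}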


Thread overview: 96+ messages (cross-list duplicates folded)

2021-06-15 13:39 [PATCH v2 00/13] KVM: arm64: Fixed features for protected VMs Fuad Tabba
2021-06-15 13:39 ` [PATCH v2 01/13] KVM: arm64: Remove trailing whitespace in comments Fuad Tabba
2021-07-01 12:55   ` Will Deacon
2021-07-01 13:24     ` Fuad Tabba
2021-06-15 13:39 ` [PATCH v2 02/13] KVM: arm64: MDCR_EL2 is a 64-bit register Fuad Tabba
2021-07-01 12:53   ` Will Deacon
2021-07-01 13:24     ` Fuad Tabba
2021-06-15 13:39 ` [PATCH v2 03/13] KVM: arm64: Fix names of config register fields Fuad Tabba
2021-07-01 13:01   ` Will Deacon
2021-07-01 13:44     ` Fuad Tabba
2021-06-15 13:39 ` [PATCH v2 04/13] KVM: arm64: Refactor sys_regs.h,c for nVHE reuse Fuad Tabba
2021-07-01 13:09   ` Will Deacon
2021-07-01 14:04     ` Fuad Tabba
2021-06-15 13:39 ` [PATCH v2 05/13] KVM: arm64: Restore mdcr_el2 from vcpu Fuad Tabba
2021-07-01 13:17   ` Will Deacon
2021-07-01 14:05     ` Fuad Tabba
2021-06-15 13:39 ` [PATCH v2 06/13] KVM: arm64: Add feature register flag definitions Fuad Tabba
2021-07-01 13:22   ` Will Deacon
2021-07-01 14:31     ` Fuad Tabba
2021-06-15 13:39 ` [PATCH v2 07/13] KVM: arm64: Add config register bit definitions Fuad Tabba
2021-07-01 13:33   ` Will Deacon
2021-07-01 14:52     ` Fuad Tabba
2021-06-15 13:39 ` [PATCH v2 08/13] KVM: arm64: Guest exit handlers for nVHE hyp Fuad Tabba
2021-07-01 13:48   ` Will Deacon
2021-07-01 14:58     ` Fuad Tabba
2021-06-15 13:39 ` [PATCH v2 09/13] KVM: arm64: Add trap handlers for protected VMs Fuad Tabba
2021-07-01 14:08   ` Will Deacon
2021-07-14 20:01     ` Andrew Jones
2021-06-15 13:39 ` [PATCH v2 10/13] KVM: arm64: Move sanitized copies of CPU features Fuad Tabba
2021-06-15 13:39 ` [PATCH v2 11/13] KVM: arm64: Trap access to pVM restricted features Fuad Tabba [this message]
2021-06-15 13:39 ` [PATCH v2 12/13] KVM: arm64: Handle protected guests at 32 bits Fuad Tabba
2021-06-15 13:39 ` [PATCH v2 13/13] KVM: arm64: Check vcpu features at pVM creation Fuad Tabba
