From: Andre Przywara <andre.przywara@arm.com>
To: Marc Zyngier <marc.zyngier@arm.com>,
	Christoffer Dall <christoffer.dall@arm.com>
Cc: linux-arm-kernel@lists.infradead.org,
	kvmarm@lists.cs.columbia.edu, kvm@vger.kernel.org
Subject: [PATCH 1/2] KVM: arm/arm64: Add save/restore support for firmware workaround state
Date: Mon,  7 Jan 2019 12:05:36 +0000	[thread overview]
Message-ID: <20190107120537.184252-2-andre.przywara@arm.com> (raw)
In-Reply-To: <20190107120537.184252-1-andre.przywara@arm.com>

KVM implements the firmware interface for mitigating cache speculation
vulnerabilities. Guests may use this interface to ensure mitigation is
active.
When migrating such a guest to a host with a different level of support
for those workarounds, the migration might need to fail, to ensure that
critical guests don't lose their protection.

Introduce a way for userland to save and restore the workaround state.
On restore we check that the value written does not downgrade our
mitigation level.

Signed-off-by: Andre Przywara <andre.przywara@arm.com>
---
 arch/arm/include/asm/kvm_emulate.h   |  10 ++
 arch/arm/include/uapi/asm/kvm.h      |   9 ++
 arch/arm64/include/asm/kvm_emulate.h |  14 +++
 arch/arm64/include/uapi/asm/kvm.h    |   9 ++
 virt/kvm/arm/psci.c                  | 138 ++++++++++++++++++++++++++-
 5 files changed, 178 insertions(+), 2 deletions(-)
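
For illustration only, and not part of the patch: a minimal userland
sketch of how a VMM could save and restore these firmware registers
around migration, using the standard KVM_GET_ONE_REG/KVM_SET_ONE_REG
ioctls. The vcpu_fd parameters, helper names and the bare-bones error
handling are made up for this example.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>		/* also pulls in the asm/kvm.h definitions below */

static int get_fw_reg(int vcpu_fd, uint64_t id, uint64_t *val)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)(uintptr_t)val };

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}

static int set_fw_reg(int vcpu_fd, uint64_t id, uint64_t val)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)(uintptr_t)&val };

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}

void save_restore_example(int src_vcpu_fd, int dst_vcpu_fd)
{
	uint64_t wa1, wa2;

	/* On the source host: read the current workaround state. */
	get_fw_reg(src_vcpu_fd, KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1, &wa1);
	get_fw_reg(src_vcpu_fd, KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2, &wa2);

	/*
	 * On the target host: write it back. An -EINVAL return means the
	 * target cannot provide the saved mitigation level, so the
	 * migration should be aborted.
	 */
	if (set_fw_reg(dst_vcpu_fd, KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1, wa1) ||
	    set_fw_reg(dst_vcpu_fd, KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2, wa2))
		; /* fail the migration */
}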

diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index 77121b713bef..2255c50debab 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -275,6 +275,16 @@ static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
 	return vcpu_cp15(vcpu, c0_MPIDR) & MPIDR_HWID_BITMASK;
 }
 
+static inline bool kvm_arm_get_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu)
+{
+	return false;
+}
+
+static inline void kvm_arm_set_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu,
+						      bool flag)
+{
+}
+
 static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
 {
 	*vcpu_cpsr(vcpu) |= PSR_E_BIT;
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index 4602464ebdfb..02c93b1d8f6d 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -214,6 +214,15 @@ struct kvm_vcpu_events {
 #define KVM_REG_ARM_FW_REG(r)		(KVM_REG_ARM | KVM_REG_SIZE_U64 | \
 					 KVM_REG_ARM_FW | ((r) & 0xffff))
 #define KVM_REG_ARM_PSCI_VERSION	KVM_REG_ARM_FW_REG(0)
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1	KVM_REG_ARM_FW_REG(1)
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL	0
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL	1
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2	KVM_REG_ARM_FW_REG(2)
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_MASK	0x3
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL	0
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL	1
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNAFFECTED	2
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED	4
 
 /* Device Control API: ARM VGIC */
 #define KVM_DEV_ARM_VGIC_GRP_ADDR	0
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 506386a3edde..a44f07f68da4 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -336,6 +336,20 @@ static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
 	return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
 }
 
+static inline bool kvm_arm_get_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG;
+}
+
+static inline void kvm_arm_set_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu,
+						      bool flag)
+{
+	if (flag)
+		vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
+	else
+		vcpu->arch.workaround_flags &= ~VCPU_WORKAROUND_2_FLAG;
+}
+
 static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
 {
 	if (vcpu_mode_is_32bit(vcpu)) {
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 97c3478ee6e7..4a19ef199a99 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -225,6 +225,15 @@ struct kvm_vcpu_events {
 #define KVM_REG_ARM_FW_REG(r)		(KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
 					 KVM_REG_ARM_FW | ((r) & 0xffff))
 #define KVM_REG_ARM_PSCI_VERSION	KVM_REG_ARM_FW_REG(0)
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1	KVM_REG_ARM_FW_REG(1)
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL	0
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL	1
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2	KVM_REG_ARM_FW_REG(2)
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_MASK	0x3
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL	0
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL	1
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNAFFECTED	2
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED	4
 
 /* Device Control API: ARM VGIC */
 #define KVM_DEV_ARM_VGIC_GRP_ADDR	0
diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c
index 9b73d3ad918a..4c671908ef62 100644
--- a/virt/kvm/arm/psci.c
+++ b/virt/kvm/arm/psci.c
@@ -445,12 +445,18 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
 
 int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
 {
-	return 1;		/* PSCI version */
+	return 3;		/* PSCI version and two workaround registers */
 }
 
 int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
 {
-	if (put_user(KVM_REG_ARM_PSCI_VERSION, uindices))
+	if (put_user(KVM_REG_ARM_PSCI_VERSION, uindices++))
+		return -EFAULT;
+
+	if (put_user(KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1, uindices++))
+		return -EFAULT;
+
+	if (put_user(KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2, uindices++))
 		return -EFAULT;
 
 	return 0;
@@ -469,6 +475,45 @@ int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 		return 0;
 	}
 
+	if (reg->id == KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1) {
+		void __user *uaddr = (void __user *)(long)reg->addr;
+		u64 val = 0;
+
+		if (kvm_arm_harden_branch_predictor())
+			val = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL;
+
+		if (copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)))
+			return -EFAULT;
+
+		return 0;
+	}
+
+	if (reg->id == KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2) {
+		void __user *uaddr = (void __user *)(long)reg->addr;
+		u64 val = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
+
+		switch (kvm_arm_have_ssbd()) {
+		case KVM_SSBD_FORCE_DISABLE:
+		case KVM_SSBD_UNKNOWN:
+			break;
+		case KVM_SSBD_KERNEL:
+			val |= KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL;
+			break;
+		case KVM_SSBD_FORCE_ENABLE:
+		case KVM_SSBD_MITIGATED:
+			val |= KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNAFFECTED;
+			break;
+		}
+
+		if (kvm_arm_get_vcpu_workaround_2_flag(vcpu))
+			val |= KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED;
+
+		if (copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)))
+			return -EFAULT;
+
+		return 0;
+	}
+
 	return -EINVAL;
 }
 
@@ -499,5 +544,94 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 		}
 	}
 
+	if (reg->id == KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1) {
+		void __user *uaddr = (void __user *)(long)reg->addr;
+		u64 val;
+
+		if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)))
+			return -EFAULT;
+
+		/* Make sure we support WORKAROUND_1 if userland asks for it. */
+		if ((val & KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL) &&
+		    !kvm_arm_harden_branch_predictor())
+			return -EINVAL;
+
+		/* Any other bit is reserved. */
+		if (val & ~KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL)
+			return -EINVAL;
+
+		return 0;
+	}
+
+	if (reg->id == KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2) {
+		void __user *uaddr = (void __user *)(long)reg->addr;
+		unsigned int wa_state;
+		bool wa_flag;
+		u64 val;
+
+		if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)))
+			return -EFAULT;
+
+		/* Reject any unknown bits. */
+		if (val & ~(KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_MASK|
+			    KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED))
+			return -EINVAL;
+
+		/*
+		 * The value passed from userland has to be compatible with
+		 * our own workaround status. We also have to consider the
+		 * requested per-VCPU state for some combinations:
+		 * --------------+-----------+-----------------+---------------
+		 * \ user value  |           |                 |
+		 *  ------------ | SSBD_NONE |   SSBD_KERNEL   |  SSBD_ALWAYS
+		 *  this kernel \|           |                 |
+		 * --------------+-----------+-----------------+---------------
+		 * UNKNOWN       |     OK    |   -EINVAL       |   -EINVAL
+		 * FORCE_DISABLE |           |                 |
+		 * --------------+-----------+-----------------+---------------
+		 * KERNEL        |     OK    | copy VCPU state | set VCPU state
+		 * --------------+-----------+-----------------+---------------
+		 * FORCE_ENABLE  |     OK    |      OK         |      OK
+		 * MITIGATED     |           |                 |
+		 * --------------+-----------+-----------------+---------------
+		 */
+
+		wa_state = val & KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_MASK;
+		switch (wa_state) {
+		case  KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL:
+			/* We can always support no mitigation (1st column). */
+			return 0;
+		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL:
+		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNAFFECTED:
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		switch (kvm_arm_have_ssbd()) {
+		case KVM_SSBD_UNKNOWN:
+		case KVM_SSBD_FORCE_DISABLE:
+		default:
+			/* ... but some mitigation was requested (1st line). */
+			return -EINVAL;
+		case KVM_SSBD_FORCE_ENABLE:
+		case KVM_SSBD_MITIGATED:
+			/* Always-on is always compatible (3rd line). */
+			return 0;
+		case KVM_SSBD_KERNEL:		/* 2nd line */
+			/* Mirror the requested per-VCPU ENABLED bit. */
+			wa_flag = val & KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED;
+
+			/* Force on when always-on is requested. */
+			if (wa_state == KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNAFFECTED)
+				wa_flag = true;
+			break;
+		}
+
+		kvm_arm_set_vcpu_workaround_2_flag(vcpu, wa_flag);
+
+		return 0;
+	}
+
 	return -EINVAL;
 }
-- 
2.17.1
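
For reference, and not part of the patch itself: a sketch of how
userland might decode the WORKAROUND_2 register value, using the bit
layout defined in the uapi headers above (the state lives in bits [1:0],
selected by the _MASK, and the per-VCPU ENABLED flag is bit 2). The
function name is made up for illustration.

#include <stdint.h>
#include <stdio.h>
#include <linux/kvm.h>		/* pulls in the asm/kvm.h definitions above */

static void print_wa2_state(uint64_t val)
{
	switch (val & KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_MASK) {
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL:
		printf("SSBD workaround not available\n");
		break;
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL:
		printf("SSBD workaround available, per-VCPU control\n");
		break;
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNAFFECTED:
		printf("host unaffected or mitigation always on\n");
		break;
	}

	if (val & KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED)
		printf("mitigation currently enabled for this VCPU\n");
}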

Thread overview: 50+ messages
2019-01-07 12:05 [PATCH 0/2] KVM: arm/arm64: Add VCPU workarounds firmware register Andre Przywara
2019-01-07 12:05 ` [PATCH 1/2] KVM: arm/arm64: Add save/restore support for firmware workaround state Andre Przywara [this message]
2019-01-07 13:17   ` Steven Price
2019-01-21 17:04     ` Andre Przywara
2019-02-22 12:26     ` Andre Przywara
2019-01-22 15:17   ` Dave Martin
2019-01-25 14:46     ` Andre Przywara
2019-01-29 21:32       ` Dave Martin
2019-01-30 11:39         ` Andre Przywara
2019-01-30 12:07           ` Dave Martin
2019-02-15  9:58           ` Andre Przywara
2019-02-15 11:42             ` Marc Zyngier
2019-02-15 17:26               ` Dave Martin
2019-02-18  9:07                 ` Marc Zyngier
2019-02-18 10:28                   ` Dave Martin
2019-02-18 10:59                     ` Marc Zyngier
2019-02-18 11:29                   ` André Przywara
2019-02-18 14:15                     ` Marc Zyngier
2019-01-07 12:05 ` [PATCH 2/2] KVM: doc: add API documentation on the KVM_REG_ARM_WORKAROUNDS register Andre Przywara
2019-01-22 10:17 ` [PATCH 0/2] KVM: arm/arm64: Add VCPU workarounds firmware register Dave Martin
2019-01-22 10:41   ` Andre Przywara
2019-01-22 11:11   ` Marc Zyngier
2019-01-22 13:56     ` Dave Martin
2019-01-22 14:51       ` Marc Zyngier
2019-01-22 15:28         ` Dave Martin
