From: Will Deacon <will@kernel.org>
To: kvmarm@lists.cs.columbia.edu
Cc: Marc Zyngier <maz@kernel.org>,
	kernel-team@android.com, kvm@vger.kernel.org,
	Andy Lutomirski <luto@amacapital.net>,
	linux-arm-kernel@lists.infradead.org,
	Michael Roth <michael.roth@amd.com>,
	Catalin Marinas <catalin.marinas@arm.com>,
	Chao Peng <chao.p.peng@linux.intel.com>,
	Will Deacon <will@kernel.org>
Subject: [PATCH 77/89] KVM: arm64: Handle PSCI for protected VMs in EL2
Date: Thu, 19 May 2022 14:41:52 +0100
Message-ID: <20220519134204.5379-78-will@kernel.org>
In-Reply-To: <20220519134204.5379-1-will@kernel.org>

From: Fuad Tabba <tabba@google.com>

Add PSCI 1.1 support for protected VMs at EL2.

PSCI_VERSION, PSCI_FEATURES, AFFINITY_INFO, CPU_ON and CPU_OFF are now
handled at EL2, which tracks each vcpu's power state (OFF, ON_PENDING
or ON) in its shadow state and returns to the host only to wake up or
schedule out the target vcpu. CPU_SUSPEND, SYSTEM_OFF, SYSTEM_RESET and
SYSTEM_RESET2 are forwarded to the host, with only the registers it
needs exposed on exit.

Signed-off-by: Fuad Tabba <tabba@google.com>
---
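A note for reviewers, kept below the '---' so that it stays out of the
commit message: the table below summarises how this patch splits PSCI
handling between the hypervisor and the host, as reconstructed from
pkvm_handle_psci() further down.

  Function                  Handled at EL2 (hyp)          Host's role
  ------------------------  ----------------------------  --------------------
  PSCI_VERSION              yes (returns PSCI 1.1)        -
  PSCI_FEATURES             yes                           -
  CPU_ON                    state machine + reset state   wake up target vcpu
  CPU_OFF                   state machine                 finish powering off
  AFFINITY_INFO             yes                           -
  CPU_SUSPEND               forwarded                     full handling
  SYSTEM_OFF/RESET/RESET2   forwarded                     full handling
  anything else             NOT_SUPPORTED                 -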
 arch/arm64/kvm/hyp/include/nvhe/pkvm.h |  13 +
 arch/arm64/kvm/hyp/nvhe/hyp-main.c     |  69 +++++-
 arch/arm64/kvm/hyp/nvhe/pkvm.c         | 320 ++++++++++++++++++++++++-
 3 files changed, 399 insertions(+), 3 deletions(-)

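Also for reviewers: the heart of the patch is the per-vcpu power-state
machine (OFF -> ON_PENDING -> ON -> OFF). The following is a minimal,
stand-alone C sketch of those transitions; the PWR_* names and the C11
atomics are illustrative stand-ins for the PSCI_0_2_AFFINITY_LEVEL_*
constants and the hyp cmpxchg, not kernel code:

#include <stdatomic.h>
#include <stdbool.h>

/* Stand-ins for PSCI_0_2_AFFINITY_LEVEL_{OFF,ON_PENDING,ON}. */
enum vcpu_power { PWR_OFF, PWR_ON_PENDING, PWR_ON };

struct vcpu_state {
	_Atomic enum vcpu_power power;
};

/*
 * CPU_ON: OFF -> ON_PENDING. Only one caller can win the compare-and-
 * exchange, so two vcpus racing to power on the same target cannot both
 * succeed (cf. the cmpxchg in pvm_psci_vcpu_on()).
 */
static bool power_on(struct vcpu_state *target)
{
	enum vcpu_power expected = PWR_OFF;

	return atomic_compare_exchange_strong(&target->power, &expected,
					      PWR_ON_PENDING);
}

/*
 * First entry after a successful CPU_ON: ON_PENDING -> ON, performed by
 * the target vcpu itself (cf. pkvm_reset_vcpu() via flush_shadow_state()).
 */
static void finish_power_on(struct vcpu_state *self)
{
	atomic_store(&self->power, PWR_ON);
}

/* CPU_OFF: ON -> OFF, again only ever performed by the vcpu itself. */
static void power_off(struct vcpu_state *self)
{
	atomic_store(&self->power, PWR_OFF);
}

int main(void)
{
	struct vcpu_state t = { PWR_OFF };

	/* The first power-on wins; a racing second one finds it not OFF. */
	if (!power_on(&t) || power_on(&t))
		return 1;

	finish_power_on(&t);	/* the target runs and completes its reset */
	power_off(&t);		/* ...and later turns itself off again */

	return power_on(&t) ? 0 : 1;	/* OFF again, so CPU_ON succeeds */
}

The real code additionally distinguishes ALREADY_ON from ON_PENDING in
the error path and treats any other state as PSCI_RET_INTERNAL_FAILURE;
the sketch only captures the race-free transitions.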
diff --git a/arch/arm64/kvm/hyp/include/nvhe/pkvm.h b/arch/arm64/kvm/hyp/include/nvhe/pkvm.h
index 33d34cc639ea..c1987115b217 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/pkvm.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/pkvm.h
@@ -28,6 +28,15 @@ struct kvm_shadow_vcpu_state {
 	/* Tracks exit code for the protected guest. */
 	u32 exit_code;
 
+	/*
+	 * Tracks the power state of a protected vcpu.
+	 * Can be in one of three states:
+	 * PSCI_0_2_AFFINITY_LEVEL_ON
+	 * PSCI_0_2_AFFINITY_LEVEL_OFF
+	 * PSCI_0_2_AFFINITY_LEVEL_ON_PENDING
+	 */
+	int power_state;
+
 	/*
 	 * Points to the per-cpu pointer of the cpu where it's loaded, or NULL
 	 * if not loaded.
@@ -101,6 +110,10 @@ bool kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu, u64 *exit_code);
 void kvm_reset_pvm_sys_regs(struct kvm_vcpu *vcpu);
 int kvm_check_pvm_sysreg_table(void);
 
+void pkvm_reset_vcpu(struct kvm_shadow_vcpu_state *shadow_state);
+
 bool kvm_handle_pvm_hvc64(struct kvm_vcpu *vcpu, u64 *exit_code);
 
+struct kvm_shadow_vcpu_state *pkvm_mpidr_to_vcpu_state(struct kvm_shadow_vm *vm, unsigned long mpidr);
+
 #endif /* __ARM64_KVM_NVHE_PKVM_H__ */
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index 26c8709f5494..c4778c7d8c4b 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -21,6 +21,7 @@
 #include <nvhe/trap_handler.h>
 
 #include <linux/irqchip/arm-gic-v3.h>
+#include <uapi/linux/psci.h>
 
 #include "../../sys_regs.h"
 
@@ -46,8 +47,37 @@ static void handle_pvm_entry_wfx(struct kvm_vcpu *host_vcpu, struct kvm_vcpu *sh
 
 static void handle_pvm_entry_hvc64(struct kvm_vcpu *host_vcpu, struct kvm_vcpu *shadow_vcpu)
 {
+	u32 psci_fn = smccc_get_function(shadow_vcpu);
 	u64 ret = READ_ONCE(host_vcpu->arch.ctxt.regs.regs[0]);
 
+	switch (psci_fn) {
+	case PSCI_0_2_FN_CPU_ON:
+	case PSCI_0_2_FN64_CPU_ON:
+		/*
+		 * Check whether the cpu_on request to the host was successful.
+		 * If not, reset the vcpu state from ON_PENDING to OFF.
+		 * This could happen if this vcpu attempted to turn on a target
+		 * vcpu while that vcpu was in the process of turning itself
+		 * off.
+		 */
+		if (ret != PSCI_RET_SUCCESS) {
+			unsigned long cpu_id = smccc_get_arg1(shadow_vcpu);
+			struct kvm_shadow_vm *shadow_vm;
+			struct kvm_shadow_vcpu_state *target_vcpu_state;
+
+			shadow_vm = get_shadow_vm(shadow_vcpu);
+			target_vcpu_state = pkvm_mpidr_to_vcpu_state(shadow_vm, cpu_id);
+
+			if (target_vcpu_state && READ_ONCE(target_vcpu_state->power_state) == PSCI_0_2_AFFINITY_LEVEL_ON_PENDING)
+				WRITE_ONCE(target_vcpu_state->power_state, PSCI_0_2_AFFINITY_LEVEL_OFF);
+
+			ret = PSCI_RET_INTERNAL_FAILURE;
+		}
+		break;
+	default:
+		break;
+	}
+
 	vcpu_set_reg(shadow_vcpu, 0, ret);
 }
 
@@ -206,13 +236,45 @@ static void handle_pvm_exit_sys64(struct kvm_vcpu *host_vcpu, struct kvm_vcpu *s
 
 static void handle_pvm_exit_hvc64(struct kvm_vcpu *host_vcpu, struct kvm_vcpu *shadow_vcpu)
 {
-	int i;
+	int n, i;
+
+	switch (smccc_get_function(shadow_vcpu)) {
+	/*
+	 * CPU_ON takes 3 arguments; however, to wake up the target vcpu, the
+	 * host only needs the target's cpu_id, which is passed as the first
+	 * argument. The reset state is processed entirely at hyp.
+	 */
+	case PSCI_0_2_FN_CPU_ON:
+	case PSCI_0_2_FN64_CPU_ON:
+		n = 2;
+		break;
+
+	case PSCI_0_2_FN_CPU_OFF:
+	case PSCI_0_2_FN_SYSTEM_OFF:
+	case PSCI_0_2_FN_SYSTEM_RESET:
+	case PSCI_0_2_FN_CPU_SUSPEND:
+	case PSCI_0_2_FN64_CPU_SUSPEND:
+		n = 1;
+		break;
+
+	case PSCI_1_1_FN_SYSTEM_RESET2:
+	case PSCI_1_1_FN64_SYSTEM_RESET2:
+		n = 3;
+		break;
+
+	/*
+	 * The rest are either blocked or handled by HYP, so we should
+	 * really never be here.
+	 */
+	default:
+		BUG();
+	}
 
 	WRITE_ONCE(host_vcpu->arch.fault.esr_el2,
 		   shadow_vcpu->arch.fault.esr_el2);
 
 	/* Pass the hvc function id (r0) as well as any potential arguments. */
-	for (i = 0; i < 8; i++)
+	for (i = 0; i < n; i++)
 		WRITE_ONCE(host_vcpu->arch.ctxt.regs.regs[i],
 			   vcpu_get_reg(shadow_vcpu, i));
 }
@@ -430,6 +492,9 @@ static void flush_shadow_state(struct kvm_shadow_vcpu_state *shadow_state)
 	shadow_entry_exit_handler_fn ec_handler;
 	u8 esr_ec;
 
+	if (READ_ONCE(shadow_state->power_state) == PSCI_0_2_AFFINITY_LEVEL_ON_PENDING)
+		pkvm_reset_vcpu(shadow_state);
+
 	/*
 	 * If we deal with a non-protected guest and the state is potentially
 	 * dirty (from a host perspective), copy the state back into the shadow.
diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
index 92e60ebeced5..d44f524c936b 100644
--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
+++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -8,6 +8,7 @@
 #include <linux/mm.h>
 
 #include <kvm/arm_hypercalls.h>
+#include <kvm/arm_psci.h>
 
 #include <asm/kvm_emulate.h>
 
@@ -425,6 +426,27 @@ static int init_ptrauth(struct kvm_vcpu *shadow_vcpu)
 	return ret;
 }
 
+static int init_shadow_psci(struct kvm_shadow_vm *vm,
+			    struct kvm_shadow_vcpu_state *shadow_vcpu_state,
+			    struct kvm_vcpu *host_vcpu)
+{
+	struct kvm_vcpu *shadow_vcpu = &shadow_vcpu_state->shadow_vcpu;
+	struct vcpu_reset_state *reset_state = &shadow_vcpu->arch.reset_state;
+
+	if (test_bit(KVM_ARM_VCPU_POWER_OFF, shadow_vcpu->arch.features)) {
+		reset_state->reset = false;
+		shadow_vcpu_state->power_state = PSCI_0_2_AFFINITY_LEVEL_OFF;
+		return 0;
+	}
+
+	reset_state->pc = READ_ONCE(host_vcpu->arch.ctxt.regs.pc);
+	reset_state->r0 = READ_ONCE(host_vcpu->arch.ctxt.regs.regs[0]);
+	reset_state->reset = true;
+	shadow_vcpu_state->power_state = PSCI_0_2_AFFINITY_LEVEL_ON_PENDING;
+
+	return 0;
+}
+
 static int init_shadow_structs(struct kvm *kvm, struct kvm_shadow_vm *vm,
 			       struct kvm_vcpu **vcpu_array,
 			       int *last_ran,
@@ -485,6 +507,11 @@ static int init_shadow_structs(struct kvm *kvm, struct kvm_shadow_vm *vm,
 
 		pkvm_vcpu_init_traps(shadow_vcpu, host_vcpu);
 		kvm_reset_pvm_sys_regs(shadow_vcpu);
+
+		/* Must be done after resetting sys registers. */
+		ret = init_shadow_psci(vm, shadow_vcpu_state, host_vcpu);
+		if (ret)
+			return ret;
 	}
 
 	return 0;
@@ -800,6 +827,297 @@ int __pkvm_teardown_shadow(unsigned int shadow_handle)
 	return err;
 }
 
+/*
+ * This function sets the registers on the vcpu to their architecturally
+ * defined reset values.
+ *
+ * Note: Can only be called by a vcpu on itself, after it has been turned on.
+ */
+void pkvm_reset_vcpu(struct kvm_shadow_vcpu_state *shadow_state)
+{
+	struct kvm_vcpu *vcpu = &shadow_state->shadow_vcpu;
+	struct vcpu_reset_state *reset_state = &vcpu->arch.reset_state;
+
+	WARN_ON(!reset_state->reset);
+
+	init_ptrauth(vcpu);
+
+	kvm_reset_vcpu_core(vcpu);
+	kvm_reset_pvm_sys_regs(vcpu);
+
+	/* Must be done after resetting sys registers. */
+	kvm_reset_vcpu_psci(vcpu, reset_state);
+
+	reset_state->reset = false;
+
+	shadow_state->exit_code = 0;
+
+	WARN_ON(shadow_state->power_state != PSCI_0_2_AFFINITY_LEVEL_ON_PENDING);
+	WRITE_ONCE(vcpu->arch.power_off, false);
+	WRITE_ONCE(shadow_state->power_state, PSCI_0_2_AFFINITY_LEVEL_ON);
+}
+
+struct kvm_shadow_vcpu_state *pkvm_mpidr_to_vcpu_state(struct kvm_shadow_vm *vm, unsigned long mpidr)
+{
+	struct kvm_vcpu *vcpu;
+	int i;
+
+	mpidr &= MPIDR_HWID_BITMASK;
+
+	for (i = 0; i < vm->kvm.created_vcpus; i++) {
+		vcpu = &vm->shadow_vcpu_states[i].shadow_vcpu;
+
+		if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
+			return &vm->shadow_vcpu_states[i];
+	}
+
+	return NULL;
+}
+
+/*
+ * Returns true if the hypervisor has handled the PSCI call, and control should
+ * go back to the guest, or false if the host needs to do some additional work
+ * (i.e., wake up the vcpu).
+ */
+static bool pvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
+{
+	struct kvm_shadow_vcpu_state *target_vcpu_state;
+	struct kvm_shadow_vm *vm;
+	struct vcpu_reset_state *reset_state;
+	unsigned long cpu_id;
+	unsigned long hvc_ret_val;
+	int power_state;
+
+	cpu_id = smccc_get_arg1(source_vcpu);
+	if (!kvm_psci_valid_affinity(source_vcpu, cpu_id)) {
+		hvc_ret_val = PSCI_RET_INVALID_PARAMS;
+		goto error;
+	}
+
+	vm = get_shadow_vm(source_vcpu);
+	target_vcpu_state = pkvm_mpidr_to_vcpu_state(vm, cpu_id);
+
+	/* Make sure the caller requested a valid vcpu. */
+	if (!target_vcpu_state) {
+		hvc_ret_val = PSCI_RET_INVALID_PARAMS;
+		goto error;
+	}
+
+	/*
+	 * Make sure the requested vcpu is not on to begin with. The cmpxchg is
+	 * atomic to avoid a race between vcpus trying to power on the same vcpu.
+	 */
+	power_state = cmpxchg(&target_vcpu_state->power_state,
+		PSCI_0_2_AFFINITY_LEVEL_OFF,
+		PSCI_0_2_AFFINITY_LEVEL_ON_PENDING);
+	switch (power_state) {
+	case PSCI_0_2_AFFINITY_LEVEL_ON_PENDING:
+		hvc_ret_val = PSCI_RET_ON_PENDING;
+		goto error;
+	case PSCI_0_2_AFFINITY_LEVEL_ON:
+		hvc_ret_val = PSCI_RET_ALREADY_ON;
+		goto error;
+	case PSCI_0_2_AFFINITY_LEVEL_OFF:
+		break;
+	default:
+		hvc_ret_val = PSCI_RET_INTERNAL_FAILURE;
+		goto error;
+	}
+
+	reset_state = &target_vcpu_state->shadow_vcpu.arch.reset_state;
+
+	reset_state->pc = smccc_get_arg2(source_vcpu);
+	reset_state->r0 = smccc_get_arg3(source_vcpu);
+
+	/* Propagate caller endianness */
+	reset_state->be = kvm_vcpu_is_be(source_vcpu);
+
+	reset_state->reset = true;
+
+	/*
+	 * Return to the host, which is expected to make the KVM_REQ_VCPU_RESET
+	 * request and call kvm_vcpu_wake_up() to schedule the vcpu.
+	 */
+	return false;
+
+error:
+	/* If there's an error go back straight to the guest. */
+	smccc_set_retval(source_vcpu, hvc_ret_val, 0, 0, 0);
+	return true;
+}
+
+static bool pvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
+{
+	int i, matching_cpus = 0;
+	unsigned long mpidr;
+	unsigned long target_affinity;
+	unsigned long target_affinity_mask;
+	unsigned long lowest_affinity_level;
+	struct kvm_shadow_vm *vm;
+	unsigned long hvc_ret_val;
+
+	target_affinity = smccc_get_arg1(vcpu);
+	lowest_affinity_level = smccc_get_arg2(vcpu);
+
+	if (!kvm_psci_valid_affinity(vcpu, target_affinity)) {
+		hvc_ret_val = PSCI_RET_INVALID_PARAMS;
+		goto done;
+	}
+
+	/* Determine target affinity mask */
+	target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
+	if (!target_affinity_mask) {
+		hvc_ret_val = PSCI_RET_INVALID_PARAMS;
+		goto done;
+	}
+
+	vm = get_shadow_vm(vcpu);
+
+	/* Ignore other bits of target affinity */
+	target_affinity &= target_affinity_mask;
+
+	hvc_ret_val = PSCI_0_2_AFFINITY_LEVEL_OFF;
+
+	/*
+	 * If at least one vcpu matching the target affinity is ON then return
+	 * ON. Otherwise, if at least one is ON_PENDING then return ON_PENDING.
+	 * Otherwise, return OFF.
+	 */
+	for (i = 0; i < vm->kvm.created_vcpus; i++) {
+		struct kvm_shadow_vcpu_state *tmp = &vm->shadow_vcpu_states[i];
+
+		mpidr = kvm_vcpu_get_mpidr_aff(&tmp->shadow_vcpu);
+
+		if ((mpidr & target_affinity_mask) == target_affinity) {
+			int power_state;
+
+			matching_cpus++;
+			power_state = READ_ONCE(tmp->power_state);
+			switch (power_state) {
+			case PSCI_0_2_AFFINITY_LEVEL_ON_PENDING:
+				hvc_ret_val = PSCI_0_2_AFFINITY_LEVEL_ON_PENDING;
+				break;
+			case PSCI_0_2_AFFINITY_LEVEL_ON:
+				hvc_ret_val = PSCI_0_2_AFFINITY_LEVEL_ON;
+				goto done;
+			case PSCI_0_2_AFFINITY_LEVEL_OFF:
+				break;
+			default:
+				hvc_ret_val = PSCI_RET_INTERNAL_FAILURE;
+				goto done;
+			}
+		}
+	}
+
+	if (!matching_cpus)
+		hvc_ret_val = PSCI_RET_INVALID_PARAMS;
+
+done:
+	/* Nothing to be handled by the host. Go back to the guest. */
+	smccc_set_retval(vcpu, hvc_ret_val, 0, 0, 0);
+	return true;
+}
+
+/*
+ * Returns true if the hypervisor has handled the PSCI call, and control should
+ * go back to the guest, or false if the host needs to do some additional work
+ * (e.g., turn off and update vcpu scheduling status).
+ */
+static bool pvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
+{
+	struct kvm_shadow_vcpu_state *vcpu_state = get_shadow_state(vcpu);
+
+	WARN_ON(vcpu->arch.power_off);
+	WARN_ON(vcpu_state->power_state != PSCI_0_2_AFFINITY_LEVEL_ON);
+
+	WRITE_ONCE(vcpu->arch.power_off, true);
+	WRITE_ONCE(vcpu_state->power_state, PSCI_0_2_AFFINITY_LEVEL_OFF);
+
+	/* Return to the host so that it can finish powering off the vcpu. */
+	return false;
+}
+
+static bool pvm_psci_version(struct kvm_vcpu *vcpu)
+{
+	/* Nothing to be handled by the host. Go back to the guest. */
+	smccc_set_retval(vcpu, KVM_ARM_PSCI_1_1, 0, 0, 0);
+	return true;
+}
+
+static bool pvm_psci_not_supported(struct kvm_vcpu *vcpu)
+{
+	/* Nothing to be handled by the host. Go back to the guest. */
+	smccc_set_retval(vcpu, PSCI_RET_NOT_SUPPORTED, 0, 0, 0);
+	return true;
+}
+
+static bool pvm_psci_features(struct kvm_vcpu *vcpu)
+{
+	u32 feature = smccc_get_arg1(vcpu);
+	unsigned long val;
+
+	switch (feature) {
+	case PSCI_0_2_FN_PSCI_VERSION:
+	case PSCI_0_2_FN_CPU_SUSPEND:
+	case PSCI_0_2_FN64_CPU_SUSPEND:
+	case PSCI_0_2_FN_CPU_OFF:
+	case PSCI_0_2_FN_CPU_ON:
+	case PSCI_0_2_FN64_CPU_ON:
+	case PSCI_0_2_FN_AFFINITY_INFO:
+	case PSCI_0_2_FN64_AFFINITY_INFO:
+	case PSCI_0_2_FN_SYSTEM_OFF:
+	case PSCI_0_2_FN_SYSTEM_RESET:
+	case PSCI_1_0_FN_PSCI_FEATURES:
+	case PSCI_1_1_FN_SYSTEM_RESET2:
+	case PSCI_1_1_FN64_SYSTEM_RESET2:
+	case ARM_SMCCC_VERSION_FUNC_ID:
+		val = PSCI_RET_SUCCESS;
+		break;
+	default:
+		val = PSCI_RET_NOT_SUPPORTED;
+		break;
+	}
+
+	/* Nothing to be handled by the host. Go back to the guest. */
+	smccc_set_retval(vcpu, val, 0, 0, 0);
+	return true;
+}
+
+static bool pkvm_handle_psci(struct kvm_vcpu *vcpu)
+{
+	u32 psci_fn = smccc_get_function(vcpu);
+
+	switch (psci_fn) {
+	case PSCI_0_2_FN_CPU_ON:
+		kvm_psci_narrow_to_32bit(vcpu);
+		fallthrough;
+	case PSCI_0_2_FN64_CPU_ON:
+		return pvm_psci_vcpu_on(vcpu);
+	case PSCI_0_2_FN_CPU_OFF:
+		return pvm_psci_vcpu_off(vcpu);
+	case PSCI_0_2_FN_AFFINITY_INFO:
+		kvm_psci_narrow_to_32bit(vcpu);
+		fallthrough;
+	case PSCI_0_2_FN64_AFFINITY_INFO:
+		return pvm_psci_vcpu_affinity_info(vcpu);
+	case PSCI_0_2_FN_PSCI_VERSION:
+		return pvm_psci_version(vcpu);
+	case PSCI_1_0_FN_PSCI_FEATURES:
+		return pvm_psci_features(vcpu);
+	case PSCI_0_2_FN_SYSTEM_RESET:
+	case PSCI_0_2_FN_CPU_SUSPEND:
+	case PSCI_0_2_FN64_CPU_SUSPEND:
+	case PSCI_0_2_FN_SYSTEM_OFF:
+	case PSCI_1_1_FN_SYSTEM_RESET2:
+	case PSCI_1_1_FN64_SYSTEM_RESET2:
+		return false; /* Handled by the host. */
+	default:
+		break;
+	}
+
+	return pvm_psci_not_supported(vcpu);
+}
+
 /*
  * Handler for protected VM HVC calls.
  *
@@ -816,6 +1134,6 @@ bool kvm_handle_pvm_hvc64(struct kvm_vcpu *vcpu, u64 *exit_code)
 		smccc_set_retval(vcpu, ARM_SMCCC_VERSION_1_1, 0, 0, 0);
 		return true;
 	default:
-		return false;
+		return pkvm_handle_psci(vcpu);
 	}
 }
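
One final reviewer note on the hyp-main.c hunk: handle_pvm_exit_hvc64()
now copies only the first n registers to the host. As a hypothetical
example (register values invented for illustration), a protected guest
issuing CPU_ON(target_mpidr = 1, entry = 0x80000, ctx_id = 42) gives:

	guest:      x0 = PSCI_0_2_FN64_CPU_ON, x1 = 1, x2 = 0x80000, x3 = 42
	host sees:  regs[0] = PSCI_0_2_FN64_CPU_ON, regs[1] = 1  (n == 2)

The entry point and context id are consumed at EL2 by pvm_psci_vcpu_on(),
which stashes them in the target's reset_state, so they are never exposed
to the host.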
-- 
2.36.1.124.g0e6072fb45-goog

  parent reply	other threads:[~2022-05-19 13:47 UTC|newest]

Thread overview: 321+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-05-19 13:40 [PATCH 00/89] KVM: arm64: Base support for the pKVM hypervisor at EL2 Will Deacon
2022-05-19 13:40 ` Will Deacon
2022-05-19 13:40 ` Will Deacon
2022-05-19 13:40 ` [PATCH 01/89] KVM: arm64: Handle all ID registers trapped for a protected VM Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:40 ` [PATCH 02/89] KVM: arm64: Remove redundant hyp_assert_lock_held() assertions Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:40 ` [PATCH 03/89] KVM: arm64: Return error from kvm_arch_init_vm() on allocation failure Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-20 15:55   ` Alexandru Elisei
2022-05-20 15:55     ` Alexandru Elisei
2022-05-20 15:55     ` Alexandru Elisei
2022-05-31 16:15     ` Will Deacon
2022-05-31 16:15       ` Will Deacon
2022-05-31 16:15       ` Will Deacon
2022-05-19 13:40 ` [PATCH 04/89] KVM: arm64: Ignore 'kvm-arm.mode=protected' when using VHE Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:40 ` [PATCH 05/89] KVM: arm64: Extend comment in has_vhe() Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:40 ` [PATCH 06/89] KVM: arm64: Drop stale comment Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:40 ` [PATCH 07/89] KVM: arm64: Move hyp refcount manipulation helpers Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:40 ` [PATCH 08/89] KVM: arm64: Back hyp_vmemmap for all of memory Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:40 ` [PATCH 09/89] KVM: arm64: Unify identifiers used to distinguish host and hypervisor Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:40 ` [PATCH 10/89] KVM: arm64: Implement do_donate() helper for donating memory Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:40 ` [PATCH 11/89] KVM: arm64: Prevent the donation of no-map pages Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:40 ` [PATCH 12/89] KVM: arm64: Add helpers to pin memory shared with hyp Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:40 ` [PATCH 13/89] KVM: arm64: Include asm/kvm_mmu.h in nvhe/mem_protect.h Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:40 ` [PATCH 14/89] KVM: arm64: Add hyp_spinlock_t static initializer Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:40 ` [PATCH 15/89] KVM: arm64: Introduce shadow VM state at EL2 Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:40 ` [PATCH 16/89] KVM: arm64: Instantiate VM shadow data from EL1 Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:40 ` [PATCH 17/89] KVM: arm64: Make hyp stage-1 refcnt correct on the whole range Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:40 ` [PATCH 18/89] KVM: arm64: Factor out private range VA allocation Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:40 ` [PATCH 19/89] KVM: arm64: Add pcpu fixmap infrastructure at EL2 Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:40 ` [PATCH 20/89] KVM: arm64: Provide I-cache invalidation by VA " Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:40 ` [PATCH 21/89] KVM: arm64: Allow non-coallescable pages in a hyp_pool Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:40 ` [PATCH 22/89] KVM: arm64: Add generic hyp_memcache helpers Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:40 ` [PATCH 23/89] KVM: arm64: Instantiate guest stage-2 page-tables at EL2 Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:40 ` [PATCH 24/89] KVM: arm64: Return guest memory from EL2 via dedicated teardown memcache Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:40   ` Will Deacon
2022-05-19 13:41 ` [PATCH 25/89] KVM: arm64: Add flags to struct hyp_page Will Deacon
2022-05-19 13:41   ` Will Deacon
2022-05-19 13:41   ` Will Deacon
2022-05-19 13:41 ` [PATCH 26/89] KVM: arm64: Provide a hypercall for the host to reclaim guest memory Will Deacon
2022-05-19 13:41   ` Will Deacon
2022-05-19 13:41   ` Will Deacon
2022-05-19 13:41 ` [PATCH 27/89] KVM: arm64: Extend memory sharing to allow host-to-guest transitions Will Deacon
2022-05-19 13:41   ` Will Deacon
2022-05-19 13:41   ` Will Deacon
2022-05-19 13:41 ` [PATCH 28/89] KVM: arm64: Consolidate stage-2 init in one function Will Deacon
2022-05-19 13:41   ` Will Deacon
2022-05-19 13:41   ` Will Deacon
2022-05-19 13:41 ` [PATCH 29/89] KVM: arm64: Check for PTE validity when checking for executable/cacheable Will Deacon
2022-05-19 13:41   ` Will Deacon
2022-05-19 13:41   ` Will Deacon
2022-05-19 13:41 ` [PATCH 30/89] KVM: arm64: Do not allow memslot changes after first VM run under pKVM Will Deacon
2022-05-19 13:41   ` Will Deacon
2022-05-19 13:41   ` Will Deacon
2022-05-19 13:41 ` [PATCH 31/89] KVM: arm64: Disallow dirty logging and RO memslots with pKVM Will Deacon
2022-05-19 13:41   ` Will Deacon
2022-05-19 13:41   ` Will Deacon
2022-05-19 13:41 ` [PATCH 32/89] KVM: arm64: Use the shadow vCPU structure in handle___kvm_vcpu_run() Will Deacon
2022-05-19 13:41   ` Will Deacon
2022-05-19 13:41   ` Will Deacon
2022-05-19 13:41 ` [PATCH 33/89] KVM: arm64: Handle guest stage-2 page-tables entirely at EL2 Will Deacon
2022-05-19 13:41   ` Will Deacon
2022-05-19 13:41   ` Will Deacon
2022-05-20 16:03   ` Alexandru Elisei
2022-05-20 16:03     ` Alexandru Elisei
2022-05-20 16:03     ` Alexandru Elisei
2022-05-31 16:45     ` Will Deacon
2022-05-31 16:45       ` Will Deacon
2022-05-31 16:45       ` Will Deacon
2022-06-08  1:16       ` Huang, Shaoqin
2022-06-08  1:16         ` Huang, Shaoqin
2022-06-08  1:16         ` Huang, Shaoqin
2022-07-27  9:59         ` Alexandru Elisei
2022-07-27  9:59           ` Alexandru Elisei
2022-07-27  9:59           ` Alexandru Elisei
2022-07-28  6:50           ` Huang, Shaoqin
2022-07-28  6:50             ` Huang, Shaoqin
2022-07-28  6:50             ` Huang, Shaoqin
2022-05-19 13:41 ` [PATCH 34/89] KVM: arm64: Don't access kvm_arm_hyp_percpu_base at EL1 Will Deacon
2022-05-19 13:41   ` Will Deacon
2022-05-19 13:41   ` Will Deacon
2022-05-19 13:41 ` [PATCH 35/89] KVM: arm64: Unmap kvm_arm_hyp_percpu_base from the host Will Deacon
2022-05-19 13:41   ` Will Deacon
2022-05-19 13:41   ` Will Deacon
2022-05-19 13:41 ` [PATCH 36/89] KVM: arm64: Maintain a copy of 'kvm_arm_vmid_bits' at EL2 Will Deacon
2022-05-19 13:41   ` Will Deacon
2022-05-19 13:41   ` Will Deacon
2022-05-19 13:41 ` [PATCH 37/89] KVM: arm64: Explicitly map kvm_vgic_global_state " Will Deacon
2022-05-19 13:41   ` Will Deacon
2022-05-19 13:41   ` Will Deacon
2022-05-19 13:41 ` [PATCH 38/89] KVM: arm64: Don't map host sections in pkvm Will Deacon
2022-05-19 13:41   ` Will Deacon
2022-05-19 13:41   ` Will Deacon
2022-05-19 13:41 ` [PATCH 39/89] KVM: arm64: Extend memory donation to allow host-to-guest transitions Will Deacon
2022-05-19 13:41   ` Will Deacon
2022-05-19 13:41   ` Will Deacon
2022-05-19 13:41 ` [PATCH 40/89] KVM: arm64: Split up nvhe/fixed_config.h Will Deacon
2022-05-19 13:41   ` Will Deacon
2022-05-19 13:41   ` Will Deacon
2022-05-19 13:41 ` [PATCH 41/89] KVM: arm64: Make vcpu_{read, write}_sys_reg available to HYP code Will Deacon
2022-05-19 13:41   ` Will Deacon
2022-05-19 13:41   ` [PATCH 41/89] KVM: arm64: Make vcpu_{read,write}_sys_reg " Will Deacon
2022-05-19 13:41 ` [PATCH 42/89] KVM: arm64: Simplify vgic-v3 hypercalls Will Deacon
2022-05-19 13:41   ` Will Deacon
2022-05-19 13:41   ` Will Deacon
2022-05-19 13:41 ` [PATCH 43/89] KVM: arm64: Add the {flush, sync}_vgic_state() primitives Will Deacon
2022-05-19 13:41   ` Will Deacon
2022-05-19 13:41   ` [PATCH 43/89] KVM: arm64: Add the {flush,sync}_vgic_state() primitives Will Deacon
2022-05-19 13:41 ` [PATCH 44/89] KVM: arm64: Introduce predicates to check for protected state Will Deacon
2022-05-19 13:41   ` Will Deacon
2022-05-19 13:41   ` Will Deacon
2022-05-19 13:41 ` [PATCH 45/89] KVM: arm64: Add the {flush,sync}_timer_state() primitives Will Deacon
2022-05-19 13:41   ` [PATCH 45/89] KVM: arm64: Add the {flush, sync}_timer_state() primitives Will Deacon
2022-05-19 13:41   ` Will Deacon
2022-05-19 13:41 ` [PATCH 46/89] KVM: arm64: Introduce the pkvm_vcpu_{load,put} hypercalls Will Deacon
2022-05-19 13:41 ` [PATCH 47/89] KVM: arm64: Add current vcpu and shadow_state lookup primitive Will Deacon
2022-05-19 13:41 ` [PATCH 48/89] KVM: arm64: Skip __kvm_adjust_pc() for protected vcpus Will Deacon
2022-05-19 13:41 ` [PATCH 49/89] KVM: arm64: Add hyp per_cpu variable to track current physical cpu number Will Deacon
2022-05-19 13:41 ` [PATCH 50/89] KVM: arm64: Ensure that TLBs and I-cache are private to each vcpu Will Deacon
2022-05-19 13:41 ` [PATCH 51/89] KVM: arm64: Introduce per-EC entry/exit handlers Will Deacon
2022-05-19 13:41 ` [PATCH 52/89] KVM: arm64: Introduce lazy-ish state sync for non-protected VMs Will Deacon
2022-05-19 13:41 ` [PATCH 53/89] KVM: arm64: Lazy host FP save/restore Will Deacon
2022-05-19 13:41 ` [PATCH 54/89] KVM: arm64: Reduce host/shadow vcpu state copying Will Deacon
2022-05-19 13:41 ` [PATCH 55/89] KVM: arm64: Do not pass the vcpu to __pkvm_host_map_guest() Will Deacon
2022-05-19 13:41 ` [PATCH 56/89] KVM: arm64: Check directly whether the vcpu is protected Will Deacon
2022-05-19 13:41 ` [PATCH 57/89] KVM: arm64: Trap debug break and watch from guest Will Deacon
2022-05-19 13:41 ` [PATCH 58/89] KVM: arm64: Restrict protected VM capabilities Will Deacon
2022-05-19 13:41 ` [PATCH 59/89] KVM: arm64: Do not support MTE for protected VMs Will Deacon
2022-05-26 20:08   ` Peter Collingbourne
2022-05-27  7:55     ` Fuad Tabba
2022-06-03  3:00       ` Peter Collingbourne
2022-06-04  8:26         ` Marc Zyngier
2022-06-07  0:20           ` Peter Collingbourne
2022-06-08 18:41             ` Catalin Marinas
2022-06-07  0:42   ` Peter Collingbourne
2022-06-08  7:40     ` Fuad Tabba
2022-06-08 17:39       ` Peter Collingbourne
2022-05-19 13:41 ` [PATCH 60/89] KVM: arm64: Refactor reset_mpidr to extract its computation Will Deacon
2022-05-19 13:41 ` [PATCH 61/89] KVM: arm64: Reset sysregs for protected VMs Will Deacon
2022-05-19 13:41 ` [PATCH 62/89] KVM: arm64: Move pkvm_vcpu_init_traps to shadow vcpu init Will Deacon
2022-05-19 13:41 ` [PATCH 63/89] KVM: arm64: Fix initializing traps in protected mode Will Deacon
2022-05-19 13:41 ` [PATCH 64/89] KVM: arm64: Advertise GICv3 sysreg interface to protected guests Will Deacon
2022-05-19 13:41 ` [PATCH 65/89] KVM: arm64: Force injection of a data abort on NISV MMIO exit Will Deacon
2022-05-19 13:41 ` [PATCH 66/89] KVM: arm64: Donate memory to protected guests Will Deacon
2022-05-19 13:41 ` [PATCH 67/89] KVM: arm64: Add EL2 entry/exit handlers for pKVM guests Will Deacon
2022-05-19 13:41 ` [PATCH 68/89] KVM: arm64: Move vgic state between host and shadow vcpu structures Will Deacon
2022-05-19 13:41 ` [PATCH 69/89] KVM: arm64: Do not update virtual timer state for protected VMs Will Deacon
2022-05-19 13:41 ` [PATCH 70/89] KVM: arm64: Refactor kvm_vcpu_enable_ptrauth() for hyp use Will Deacon
2022-05-19 13:41 ` [PATCH 71/89] KVM: arm64: Initialize shadow vm state at hyp Will Deacon
2022-05-19 13:41 ` [PATCH 72/89] KVM: arm64: Track the SVE state in the shadow vcpu Will Deacon
2022-05-19 13:41 ` [PATCH 73/89] KVM: arm64: Add HVC handling for protected guests at EL2 Will Deacon
2022-05-19 13:41 ` [PATCH 74/89] KVM: arm64: Move pstate reset values to kvm_arm.h Will Deacon
2022-05-19 13:41 ` [PATCH 75/89] KVM: arm64: Move some kvm_psci functions to a shared header Will Deacon
2022-05-19 13:41 ` [PATCH 76/89] KVM: arm64: Factor out vcpu_reset code for core registers and PSCI Will Deacon
2022-05-19 13:41 ` [PATCH 77/89] KVM: arm64: Handle PSCI for protected VMs in EL2 Will Deacon [this message]
2022-05-19 13:41 ` [PATCH 78/89] KVM: arm64: Don't expose TLBI hypercalls after de-privilege Will Deacon
2022-05-19 13:41 ` [PATCH 79/89] KVM: arm64: Add is_pkvm_initialized() helper Will Deacon
2022-05-19 13:41 ` [PATCH 80/89] KVM: arm64: Refactor enter_exception64() Will Deacon
2022-05-19 13:41 ` [PATCH 81/89] KVM: arm64: Inject SIGSEGV on illegal accesses Will Deacon
2022-05-19 13:41 ` [PATCH 82/89] KVM: arm64: Support TLB invalidation in guest context Will Deacon
2022-05-19 13:41 ` [PATCH 83/89] KVM: arm64: Avoid BBM when changing only s/w bits in Stage-2 PTE Will Deacon
2022-05-19 13:41 ` [PATCH 84/89] KVM: arm64: Extend memory sharing to allow guest-to-host transitions Will Deacon
2022-05-19 13:42 ` [PATCH 85/89] KVM: arm64: Document the KVM/arm64-specific calls in hypercalls.rst Will Deacon
2022-05-19 13:42 ` [PATCH 86/89] KVM: arm64: Reformat/beautify PTP hypercall documentation Will Deacon
2022-05-19 13:42 ` [PATCH 87/89] KVM: arm64: Expose memory sharing hypercalls to protected guests Will Deacon
2022-05-19 13:42 ` [PATCH 88/89] KVM: arm64: Introduce KVM_VM_TYPE_ARM_PROTECTED machine type for PVMs Will Deacon
2022-05-19 13:42 ` [PATCH 89/89] Documentation: KVM: Add some documentation for Protected KVM on arm64 Will Deacon
2022-06-07 22:39   ` Peter Collingbourne

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the mbox file for this thread, import it into your mail client,
  and reply-to-all from there; one way to fetch it is sketched below.

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
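  A minimal fetch-and-reply sketch (assuming the usual lore.kernel.org
  URL layout for thread mboxes and the mutt mail client; adapt to your
  own setup):

    # Download the gzipped mbox for the whole thread (URL layout assumed)
    curl -o thread.mbox.gz \
      https://lore.kernel.org/all/20220519134204.5379-78-will@kernel.org/t.mbox.gz
    gunzip thread.mbox.gz

    # Open the mbox in mutt; 'g' (group-reply) replies to all recipients
    mutt -f thread.mbox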

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20220519134204.5379-78-will@kernel.org \
    --to=will@kernel.org \
    --cc=catalin.marinas@arm.com \
    --cc=chao.p.peng@linux.intel.com \
    --cc=kernel-team@android.com \
    --cc=kvm@vger.kernel.org \
    --cc=kvmarm@lists.cs.columbia.edu \
    --cc=linux-arm-kernel@lists.infradead.org \
    --cc=luto@amacapital.net \
    --cc=maz@kernel.org \
    --cc=michael.roth@amd.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link on the message page.
Be sure your reply has a Subject: header at the top and a blank line before the message body.
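For illustration only (a hypothetical reply skeleton; the quoted line is
a placeholder, not text from the original message):

  Subject: Re: [PATCH 77/89] KVM: arm64: Handle PSCI for protected VMs in EL2

  On 2022-05-19, Will Deacon wrote:
  > (the hunk or sentence you are responding to)

  Your comment, placed directly below the quoted text it addresses.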