From: Marc Zyngier <maz@kernel.org>
To: linux-arm-kernel@lists.infradead.org,
	kvmarm@lists.cs.columbia.edu, kvm@vger.kernel.org
Cc: Andre Przywara <andre.przywara@arm.com>,
	Christoffer Dall <christoffer.dall@arm.com>,
	Jintack Lim <jintack@cs.columbia.edu>,
	Haibo Xu <haibo.xu@linaro.org>,
	Ganapatrao Kulkarni <gankulkarni@os.amperecomputing.com>,
	Chase Conklin <chase.conklin@arm.com>,
	"Russell King (Oracle)" <linux@armlinux.org.uk>,
	James Morse <james.morse@arm.com>,
	Suzuki K Poulose <suzuki.poulose@arm.com>,
	Alexandru Elisei <alexandru.elisei@arm.com>,
	karl.heubaum@oracle.com, mihai.carabas@oracle.com,
	miguel.luis@oracle.com, kernel-team@android.com
Subject: [PATCH v6 34/64] KVM: arm64: nv: Support multiple nested Stage-2 mmu structures
Date: Fri, 28 Jan 2022 12:18:42 +0000
Message-ID: <20220128121912.509006-35-maz@kernel.org>
In-Reply-To: <20220128121912.509006-1-maz@kernel.org>

Add Stage-2 mmu data structures for virtual EL2 and for nested guests.
We don't yet populate shadow Stage-2 page tables, but we now have a
framework for getting to a shadow Stage-2 pgd.

We allocate twice as many Stage-2 mmu structures as there are vcpus,
which is sufficient for each vcpu to run two translation regimes
without having to flush the Stage-2 page tables.
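
To make the sizing argument concrete, here is a minimal sketch (not
code from this patch; the helper name is made up for illustration).
Each vcpu pins at most one nested MMU at a time through
vcpu->arch.hw_mmu, so with N vcpus at most N contexts can have a
non-zero refcount, and allocating 2 * N guarantees that the
round-robin recycler in get_s2_mmu_nested() always finds a free slot:

	/*
	 * Illustrative only: mirrors the num_mmus computation in
	 * kvm_vcpu_init_nested().
	 */
	static int nested_mmus_needed(int nr_vcpus)
	{
		return nr_vcpus * 2;
	}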

Co-developed-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/include/asm/kvm_host.h   |  29 +++++
 arch/arm64/include/asm/kvm_mmu.h    |   9 ++
 arch/arm64/include/asm/kvm_nested.h |   7 +
 arch/arm64/kvm/arm.c                |  16 ++-
 arch/arm64/kvm/mmu.c                |  29 +++--
 arch/arm64/kvm/nested.c             | 195 ++++++++++++++++++++++++++++
 6 files changed, 275 insertions(+), 10 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index fa253f08e0fd..a15183d0e1bf 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -101,14 +101,43 @@ struct kvm_s2_mmu {
 	int __percpu *last_vcpu_ran;
 
 	struct kvm_arch *arch;
+
+	/*
+	 * For a shadow stage-2 MMU, the virtual vttbr programmed by the guest
+	 * hypervisor.  Unused for kvm_arch->mmu. Set to 1 when the structure
+	 * contains no valid information.
+	 */
+	u64	vttbr;
+
+	/* true when this represents a nested context where virtual HCR_EL2.VM == 1 */
+	bool	nested_stage2_enabled;
+
+	/*
+	 *  0: Nobody is currently using this, check vttbr for validity
+	 * >0: Somebody is actively using this.
+	 */
+	atomic_t refcnt;
 };
 
+static inline bool kvm_s2_mmu_valid(struct kvm_s2_mmu *mmu)
+{
+	return !(mmu->vttbr & 1);
+}
+
 struct kvm_arch_memory_slot {
 };
 
 struct kvm_arch {
 	struct kvm_s2_mmu mmu;
 
+	/*
+	 * Stage 2 paging state for VMs with nested virtualization,
+	 * using a virtual VMID.
+	 */
+	struct kvm_s2_mmu *nested_mmus;
+	size_t nested_mmus_size;
+	int nested_mmus_next;
+
 	/* VTCR_EL2 value for this VM */
 	u64    vtcr;
 
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 1b314b2a69bc..0750d022bbf8 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -116,6 +116,7 @@ alternative_cb_end
 #include <asm/cacheflush.h>
 #include <asm/mmu_context.h>
 #include <asm/kvm_emulate.h>
+#include <asm/kvm_nested.h>
 
 void kvm_update_va_mask(struct alt_instr *alt,
 			__le32 *origptr, __le32 *updptr, int nr_inst);
@@ -161,6 +162,7 @@ int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
 			     void **haddr);
 void free_hyp_pgds(void);
 
+void kvm_unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size);
 void stage2_unmap_vm(struct kvm *kvm);
 int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu);
 void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
@@ -296,5 +298,12 @@ static inline struct kvm *kvm_s2_mmu_to_kvm(struct kvm_s2_mmu *mmu)
 {
 	return container_of(mmu->arch, struct kvm, arch);
 }
+
+static inline u64 get_vmid(u64 vttbr)
+{
+	return (vttbr & VTTBR_VMID_MASK(kvm_get_vmid_bits())) >>
+		VTTBR_VMID_SHIFT;
+}
+
 #endif /* __ASSEMBLY__ */
 #endif /* __ARM64_KVM_MMU_H__ */
diff --git a/arch/arm64/include/asm/kvm_nested.h b/arch/arm64/include/asm/kvm_nested.h
index 7d398510fd9d..8bb7159f2b6b 100644
--- a/arch/arm64/include/asm/kvm_nested.h
+++ b/arch/arm64/include/asm/kvm_nested.h
@@ -65,6 +65,13 @@ static inline u64 translate_cnthctl_el2_to_cntkctl_el1(u64 cnthctl)
 		(cnthctl & (CNTHCTL_EVNTI | CNTHCTL_EVNTDIR | CNTHCTL_EVNTEN)));
 }
 
+extern void kvm_init_nested(struct kvm *kvm);
+extern int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu);
+extern void kvm_init_nested_s2_mmu(struct kvm_s2_mmu *mmu);
+extern struct kvm_s2_mmu *lookup_s2_mmu(struct kvm *kvm, u64 vttbr, u64 hcr);
+extern void kvm_vcpu_load_hw_mmu(struct kvm_vcpu *vcpu);
+extern void kvm_vcpu_put_hw_mmu(struct kvm_vcpu *vcpu);
+
 int handle_wfx_nested(struct kvm_vcpu *vcpu, bool is_wfe);
 extern bool __forward_traps(struct kvm_vcpu *vcpu, unsigned int reg,
 			    u64 control_bit);
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 06ca11e90482..14f85f1e15b2 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -37,6 +37,7 @@
 #include <asm/kvm_arm.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmu.h>
+#include <asm/kvm_nested.h>
 #include <asm/kvm_emulate.h>
 #include <asm/sections.h>
 
@@ -146,6 +147,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	if (ret)
 		return ret;
 
+	kvm_init_nested(kvm);
+
 	ret = kvm_share_hyp(kvm, kvm + 1);
 	if (ret)
 		goto out_free_stage2_pgd;
@@ -375,6 +378,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	struct kvm_s2_mmu *mmu;
 	int *last_ran;
 
+	if (vcpu_has_nv(vcpu))
+		kvm_vcpu_load_hw_mmu(vcpu);
+
 	mmu = vcpu->arch.hw_mmu;
 	last_ran = this_cpu_ptr(mmu->last_vcpu_ran);
 
@@ -423,6 +429,9 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 	kvm_vgic_put(vcpu);
 	kvm_vcpu_pmu_restore_host(vcpu);
 
+	if (vcpu_has_nv(vcpu))
+		kvm_vcpu_put_hw_mmu(vcpu);
+
 	vcpu->cpu = -1;
 }
 
@@ -1122,8 +1131,13 @@ static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
 
 	vcpu->arch.target = phys_target;
 
+	/* Prepare for nested virtualization if required */
+	ret = kvm_vcpu_init_nested(vcpu);
+
 	/* Now we know what it is, we can reset it. */
-	ret = kvm_reset_vcpu(vcpu);
+	if (!ret)
+		ret = kvm_reset_vcpu(vcpu);
+
 	if (ret) {
 		vcpu->arch.target = -1;
 		bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index bc2aba953299..55525fd5743d 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -162,7 +162,7 @@ static void invalidate_icache_guest_page(void *va, size_t size)
  * does.
  */
 /**
- * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
+ * __unmap_stage2_range -- Clear stage2 page table entries to unmap a range
  * @mmu:   The KVM stage-2 MMU pointer
  * @start: The intermediate physical base address of the range to unmap
  * @size:  The size of the area to unmap
@@ -185,7 +185,7 @@ static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64
 				   may_block));
 }
 
-static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
+void kvm_unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
 {
 	__unmap_stage2_range(mmu, start, size, true);
 }
@@ -628,7 +628,20 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
 	int cpu, err;
 	struct kvm_pgtable *pgt;
 
+	/*
+	 * If we already have our page tables in place, and the MMU
+	 * context is the canonical one, we have a bug somewhere, as
+	 * this is only supposed to ever happen once per VM.
+	 *
+	 * Otherwise, we're building nested page tables, and that's
+	 * probably because userspace called KVM_ARM_VCPU_INIT more
+	 * than once on the same vcpu. Since that's actually legal,
+	 * don't kick up a fuss and exit gracefully.
+	 */
 	if (mmu->pgt != NULL) {
+		if (&kvm->arch.mmu != mmu)
+			return 0;
+
 		kvm_err("kvm_arch already initialized?\n");
 		return -EINVAL;
 	}
@@ -654,6 +667,9 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
 	mmu->pgt = pgt;
 	mmu->pgd_phys = __pa(pgt->pgd);
 	WRITE_ONCE(mmu->vmid.vmid_gen, 0);
+
+	kvm_init_nested_s2_mmu(mmu);
+
 	return 0;
 
 out_destroy_pgtable:
@@ -699,7 +715,7 @@ static void stage2_unmap_memslot(struct kvm *kvm,
 
 		if (!(vma->vm_flags & VM_PFNMAP)) {
 			gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
-			unmap_stage2_range(&kvm->arch.mmu, gpa, vm_end - vm_start);
+			kvm_unmap_stage2_range(&kvm->arch.mmu, gpa, vm_end - vm_start);
 		}
 		hva = vm_end;
 	} while (hva < reg_end);
@@ -1681,11 +1697,6 @@ void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
 {
 }
 
-void kvm_arch_flush_shadow_all(struct kvm *kvm)
-{
-	kvm_free_stage2_pgd(&kvm->arch.mmu);
-}
-
 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 				   struct kvm_memory_slot *slot)
 {
@@ -1693,7 +1704,7 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 	phys_addr_t size = slot->npages << PAGE_SHIFT;
 
 	spin_lock(&kvm->mmu_lock);
-	unmap_stage2_range(&kvm->arch.mmu, gpa, size);
+	kvm_unmap_stage2_range(&kvm->arch.mmu, gpa, size);
 	spin_unlock(&kvm->mmu_lock);
 }
 
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index 254152cd791e..bfa2b9229173 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -7,12 +7,189 @@
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
 
+#include <asm/kvm_arm.h>
 #include <asm/kvm_emulate.h>
+#include <asm/kvm_mmu.h>
 #include <asm/kvm_nested.h>
 #include <asm/sysreg.h>
 
 #include "sys_regs.h"
 
+void kvm_init_nested(struct kvm *kvm)
+{
+	kvm->arch.nested_mmus = NULL;
+	kvm->arch.nested_mmus_size = 0;
+}
+
+int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu)
+{
+	struct kvm *kvm = vcpu->kvm;
+	struct kvm_s2_mmu *tmp;
+	int num_mmus;
+	int ret = -ENOMEM;
+
+	if (!test_bit(KVM_ARM_VCPU_HAS_EL2, vcpu->arch.features))
+		return 0;
+
+	if (!cpus_have_final_cap(ARM64_HAS_NESTED_VIRT))
+		return -EINVAL;
+
+	mutex_lock(&kvm->lock);
+
+	/*
+	 * Let's treat memory allocation failures as benign: If we fail to
+	 * allocate anything, return an error and keep the allocated array
+	 * alive. Userspace may try to recover by initializing the vcpu
+	 * again, and there is no reason to affect the whole VM for this.
+	 */
+	num_mmus = atomic_read(&kvm->online_vcpus) * 2;
+	tmp = krealloc(kvm->arch.nested_mmus,
+		       num_mmus * sizeof(*kvm->arch.nested_mmus),
+		       GFP_KERNEL_ACCOUNT | __GFP_ZERO);
+	if (tmp) {
+		/*
+		 * If we went through a reallocation, adjust the MMU
+		 * back-pointers in the previously initialised
+		 * kvm_pgtable structures.
+		 */
+		if (kvm->arch.nested_mmus != tmp) {
+			int i;
+
+			for (i = 0; i < num_mmus - 2; i++)
+				tmp[i].pgt->mmu = &tmp[i];
+		}
+
+		if (kvm_init_stage2_mmu(kvm, &tmp[num_mmus - 1]) ||
+		    kvm_init_stage2_mmu(kvm, &tmp[num_mmus - 2])) {
+			kvm_free_stage2_pgd(&tmp[num_mmus - 1]);
+			kvm_free_stage2_pgd(&tmp[num_mmus - 2]);
+		} else {
+			kvm->arch.nested_mmus_size = num_mmus;
+			ret = 0;
+		}
+
+		kvm->arch.nested_mmus = tmp;
+	}
+
+	mutex_unlock(&kvm->lock);
+	return ret;
+}
+
+/* Must be called with kvm->lock held */
+struct kvm_s2_mmu *lookup_s2_mmu(struct kvm *kvm, u64 vttbr, u64 hcr)
+{
+	bool nested_stage2_enabled = hcr & HCR_VM;
+	int i;
+
+	/* Don't consider the CnP bit for the vttbr match */
+	vttbr = vttbr & ~VTTBR_CNP_BIT;
+
+	/*
+	 * Two possibilities when looking up an S2 MMU context:
+	 *
+	 * - either S2 is enabled in the guest, and we need a context that
+	 *   is S2-enabled and matches the full VTTBR (VMID+BADDR), which
+	 *   makes it safe from a TLB conflict perspective (a broken guest
+	 *   won't be able to generate them),
+	 *
+	 * - or S2 is disabled, and we need a context that is S2-disabled
+	 *   and matches the VMID only, as all TLBs are tagged by VMID even
+	 *   if S2 translation is disabled.
+	 */
+	for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
+		struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
+
+		if (!kvm_s2_mmu_valid(mmu))
+			continue;
+
+		if (nested_stage2_enabled &&
+		    mmu->nested_stage2_enabled &&
+		    vttbr == mmu->vttbr)
+			return mmu;
+
+		if (!nested_stage2_enabled &&
+		    !mmu->nested_stage2_enabled &&
+		    get_vmid(vttbr) == get_vmid(mmu->vttbr))
+			return mmu;
+	}
+	return NULL;
+}
+
+static struct kvm_s2_mmu *get_s2_mmu_nested(struct kvm_vcpu *vcpu)
+{
+	struct kvm *kvm = vcpu->kvm;
+	u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
+	u64 hcr = vcpu_read_sys_reg(vcpu, HCR_EL2);
+	struct kvm_s2_mmu *s2_mmu;
+	int i;
+
+	s2_mmu = lookup_s2_mmu(kvm, vttbr, hcr);
+	if (s2_mmu)
+		goto out;
+
+	/*
+	 * Make sure we don't always search from the same point, or we
+	 * will always reuse a potentially active context, leaving
+	 * free contexts unused.
+	 */
+	for (i = kvm->arch.nested_mmus_next;
+	     i < (kvm->arch.nested_mmus_size + kvm->arch.nested_mmus_next);
+	     i++) {
+		s2_mmu = &kvm->arch.nested_mmus[i % kvm->arch.nested_mmus_size];
+
+		if (atomic_read(&s2_mmu->refcnt) == 0)
+			break;
+	}
+	BUG_ON(atomic_read(&s2_mmu->refcnt)); /* We have struct MMUs to spare */
+
+	/* Set the scene for the next search */
+	kvm->arch.nested_mmus_next = (i + 1) % kvm->arch.nested_mmus_size;
+
+	if (kvm_s2_mmu_valid(s2_mmu)) {
+		/* Clear the old state */
+		kvm_unmap_stage2_range(s2_mmu, 0, kvm_phys_size(kvm));
+		if (s2_mmu->vmid.vmid_gen)
+			kvm_call_hyp(__kvm_tlb_flush_vmid, s2_mmu);
+	}
+
+	/*
+	 * The virtual VMID (modulo CnP) will be used as a key when matching
+	 * an existing kvm_s2_mmu.
+	 */
+	s2_mmu->vttbr = vttbr & ~VTTBR_CNP_BIT;
+	s2_mmu->nested_stage2_enabled = hcr & HCR_VM;
+
+out:
+	atomic_inc(&s2_mmu->refcnt);
+	return s2_mmu;
+}
+
+void kvm_init_nested_s2_mmu(struct kvm_s2_mmu *mmu)
+{
+	mmu->vttbr = 1;
+	mmu->nested_stage2_enabled = false;
+	atomic_set(&mmu->refcnt, 0);
+}
+
+void kvm_vcpu_load_hw_mmu(struct kvm_vcpu *vcpu)
+{
+	if (is_hyp_ctxt(vcpu)) {
+		vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;
+	} else {
+		spin_lock(&vcpu->kvm->mmu_lock);
+		vcpu->arch.hw_mmu = get_s2_mmu_nested(vcpu);
+		spin_unlock(&vcpu->kvm->mmu_lock);
+	}
+}
+
+void kvm_vcpu_put_hw_mmu(struct kvm_vcpu *vcpu)
+{
+	if (vcpu->arch.hw_mmu != &vcpu->kvm->arch.mmu) {
+		atomic_dec(&vcpu->arch.hw_mmu->refcnt);
+		vcpu->arch.hw_mmu = NULL;
+	}
+}
+
 /*
  * Inject wfx to the virtual EL2 if this is not from the virtual EL2 and
  * the virtual HCR_EL2.TWX is set. Otherwise, let the host hypervisor
@@ -31,6 +208,24 @@ int handle_wfx_nested(struct kvm_vcpu *vcpu, bool is_wfe)
 	return -EINVAL;
 }
 
+void kvm_arch_flush_shadow_all(struct kvm *kvm)
+{
+	int i;
+
+	for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
+		struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
+
+		WARN_ON(atomic_read(&mmu->refcnt));
+
+		if (!atomic_read(&mmu->refcnt))
+			kvm_free_stage2_pgd(mmu);
+	}
+	kfree(kvm->arch.nested_mmus);
+	kvm->arch.nested_mmus = NULL;
+	kvm->arch.nested_mmus_size = 0;
+	kvm_free_stage2_pgd(&kvm->arch.mmu);
+}
+
 /*
  * Our emulated CPU doesn't support all the possible features. For the
  * sake of simplicity (and probably mental sanity), wipe out a number
-- 
2.30.2


  parent reply	other threads:[~2022-01-28 12:50 UTC|newest]

Thread overview: 378+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-01-28 12:18 [PATCH v6 00/64] KVM: arm64: ARMv8.3/8.4 Nested Virtualization support Marc Zyngier
2022-01-28 12:18 ` Marc Zyngier
2022-01-28 12:18 ` Marc Zyngier
2022-01-28 12:18 ` [PATCH v6 01/64] arm64: Add ARM64_HAS_NESTED_VIRT cpufeature Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-02-01 14:22   ` Russell King (Oracle)
2022-02-01 14:22     ` Russell King (Oracle)
2022-02-01 14:22     ` Russell King (Oracle)
2022-01-28 12:18 ` [PATCH v6 02/64] KVM: arm64: nv: Introduce nested virtualization VCPU feature Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-01-28 12:18 ` [PATCH v6 03/64] KVM: arm64: nv: Reset VCPU to EL2 registers if VCPU nested virt is set Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-02-02 11:40   ` Alexandru Elisei
2022-02-02 11:40     ` Alexandru Elisei
2022-02-02 11:40     ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 04/64] KVM: arm64: nv: Allow userspace to set PSR_MODE_EL2x Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-02-02 11:53   ` Alexandru Elisei
2022-02-02 11:53     ` Alexandru Elisei
2022-02-02 11:53     ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 05/64] KVM: arm64: nv: Add EL2 system registers to vcpu context Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-02-11 16:35   ` Miguel Luis
2022-02-11 16:35     ` Miguel Luis
2022-02-11 16:35     ` Miguel Luis
2022-01-28 12:18 ` [PATCH v6 06/64] KVM: arm64: nv: Add nested virt VCPU primitives for vEL2 VCPU state Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-02-02 12:10   ` Alexandru Elisei
2022-02-02 12:10     ` Alexandru Elisei
2022-02-02 12:10     ` Alexandru Elisei
2022-02-14 12:39   ` Miguel Luis
2022-02-14 12:39     ` Miguel Luis
2022-02-14 12:39     ` Miguel Luis
2022-02-14 14:20     ` Marc Zyngier
2022-02-14 14:20       ` Marc Zyngier
2022-02-14 14:20       ` Marc Zyngier
2022-01-28 12:18 ` [PATCH v6 07/64] KVM: arm64: nv: Handle HCR_EL2.NV system register traps Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-02-01 14:32   ` Russell King (Oracle)
2022-02-01 14:32     ` Russell King (Oracle)
2022-02-01 14:32     ` Russell King (Oracle)
2022-01-28 12:18 ` [PATCH v6 08/64] KVM: arm64: nv: Reset VMPIDR_EL2 and VPIDR_EL2 to sane values Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-01-28 12:18 ` [PATCH v6 09/64] KVM: arm64: nv: Support virtual EL2 exceptions Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-02-02 15:23   ` Alexandru Elisei
2022-02-02 15:23     ` Alexandru Elisei
2022-02-02 15:23     ` Alexandru Elisei
2022-02-03 17:43     ` Marc Zyngier
2022-02-03 17:43       ` Marc Zyngier
2022-02-03 17:43       ` Marc Zyngier
2022-02-04 11:47       ` Alexandru Elisei
2022-02-04 11:47         ` Alexandru Elisei
2022-02-04 11:47         ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 10/64] KVM: arm64: nv: Inject HVC exceptions to the virtual EL2 Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-01-28 12:18 ` [PATCH v6 11/64] KVM: arm64: nv: Handle trapped ERET from " Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-01-28 12:18 ` [PATCH v6 12/64] KVM: arm64: nv: Add non-VHE-EL2->EL1 translation helpers Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-02-01 16:37   ` Russell King (Oracle)
2022-02-01 16:37     ` Russell King (Oracle)
2022-02-01 16:37     ` Russell King (Oracle)
2022-02-02 17:08   ` Alexandru Elisei
2022-02-02 17:08     ` Alexandru Elisei
2022-02-02 17:08     ` Alexandru Elisei
2022-02-03 18:29     ` Marc Zyngier
2022-02-03 18:29       ` Marc Zyngier
2022-02-03 18:29       ` Marc Zyngier
2022-02-04 12:05       ` Alexandru Elisei
2022-02-04 12:05         ` Alexandru Elisei
2022-02-04 12:05         ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 13/64] KVM: arm64: nv: Handle virtual EL2 registers in vcpu_read/write_sys_reg() Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-02-01 16:40   ` Russell King (Oracle)
2022-02-01 16:40     ` Russell King (Oracle)
2022-02-01 16:40     ` Russell King (Oracle)
2022-01-28 12:18 ` [PATCH v6 14/64] KVM: arm64: nv: Handle SPSR_EL2 specially Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-02-01 16:43   ` Russell King (Oracle)
2022-02-01 16:43     ` Russell King (Oracle)
2022-02-01 16:43     ` Russell King (Oracle)
2022-01-28 12:18 ` [PATCH v6 15/64] KVM: arm64: nv: Handle HCR_EL2.E2H specially Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-02-01 16:51   ` Russell King (Oracle)
2022-02-01 16:51     ` Russell King (Oracle)
2022-02-01 16:51     ` Russell King (Oracle)
2022-02-01 18:17     ` Marc Zyngier
2022-02-01 18:17       ` Marc Zyngier
2022-02-01 18:17       ` Marc Zyngier
2022-01-28 12:18 ` [PATCH v6 16/64] KVM: arm64: nv: Save/Restore vEL2 sysregs Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-02-03 15:14   ` Alexandru Elisei
2022-02-03 15:14     ` Alexandru Elisei
2022-02-03 15:14     ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 17/64] KVM: arm64: nv: Emulate PSTATE.M for a guest hypervisor Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-02-01 18:06   ` Russell King (Oracle)
2022-02-01 18:06     ` Russell King (Oracle)
2022-02-01 18:06     ` Russell King (Oracle)
2022-02-03 15:53   ` Alexandru Elisei
2022-02-03 15:53     ` Alexandru Elisei
2022-02-03 15:53     ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 18/64] KVM: arm64: nv: Trap EL1 VM register accesses in virtual EL2 Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-02-01 18:08   ` Russell King (Oracle)
2022-02-01 18:08     ` Russell King (Oracle)
2022-02-01 18:08     ` Russell King (Oracle)
2022-02-03 17:11   ` Alexandru Elisei
2022-02-03 17:11     ` Alexandru Elisei
2022-02-03 17:11     ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 19/64] KVM: arm64: nv: Trap SPSR_EL1, ELR_EL1 and VBAR_EL1 from " Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-02-01 18:13   ` Russell King (Oracle)
2022-02-01 18:13     ` Russell King (Oracle)
2022-02-01 18:13     ` Russell King (Oracle)
2022-02-03 17:27   ` Alexandru Elisei
2022-02-03 17:27     ` Alexandru Elisei
2022-02-03 17:27     ` Alexandru Elisei
2022-02-04 10:58   ` Alexandru Elisei
2022-02-04 10:58     ` Alexandru Elisei
2022-02-04 10:58     ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 20/64] KVM: arm64: nv: Trap CPACR_EL1 access in " Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-02-04 11:10   ` Alexandru Elisei
2022-02-04 11:10     ` Alexandru Elisei
2022-02-04 11:10     ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 21/64] KVM: arm64: nv: Handle PSCI call via smc from the guest Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-02-04 14:02   ` Alexandru Elisei
2022-02-04 14:02     ` Alexandru Elisei
2022-02-04 14:02     ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 22/64] KVM: arm64: nv: Respect virtual HCR_EL2.TWX setting Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-02-04 15:40   ` Alexandru Elisei
2022-02-04 15:40     ` Alexandru Elisei
2022-02-04 15:40     ` Alexandru Elisei
2022-02-04 16:01     ` Alexandru Elisei
2022-02-04 16:01       ` Alexandru Elisei
2022-02-04 16:01       ` Alexandru Elisei
2022-02-07 15:38     ` Alexandru Elisei
2022-02-07 15:38       ` Alexandru Elisei
2022-02-07 15:38       ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 23/64] KVM: arm64: nv: Respect virtual CPTR_EL2.{TFP,FPEN} settings Marc Zyngier
2022-01-28 12:18   ` [PATCH v6 23/64] KVM: arm64: nv: Respect virtual CPTR_EL2.{TFP, FPEN} settings Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-01-28 12:18 ` [PATCH v6 24/64] KVM: arm64: nv: Respect the virtual HCR_EL2.NV bit setting Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-02-07 15:33   ` Alexandru Elisei
2022-02-07 15:33     ` Alexandru Elisei
2022-02-07 15:33     ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 25/64] KVM: arm64: nv: Respect virtual HCR_EL2.TVM and TRVM settings Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-02-07 16:18   ` Alexandru Elisei
2022-02-07 16:18     ` Alexandru Elisei
2022-02-07 16:18     ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 26/64] KVM: arm64: nv: Respect the virtual HCR_EL2.NV1 bit setting Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-02-07 16:36   ` Alexandru Elisei
2022-02-07 16:36     ` Alexandru Elisei
2022-02-07 16:36     ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 27/64] KVM: arm64: nv: Allow a sysreg to be hidden from userspace only Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-02-08 14:36   ` Alexandru Elisei
2022-02-08 14:36     ` Alexandru Elisei
2022-02-08 14:36     ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 28/64] KVM: arm64: nv: Emulate EL12 register accesses from the virtual EL2 Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-02-08 15:35   ` Alexandru Elisei
2022-02-08 15:35     ` Alexandru Elisei
2022-02-08 15:35     ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 29/64] KVM: arm64: nv: Forward debug traps to the nested guest Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-02-09 11:04   ` Alexandru Elisei
2022-02-09 11:04     ` Alexandru Elisei
2022-02-09 11:04     ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 30/64] KVM: arm64: nv: Configure HCR_EL2 for nested virtualization Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-02-09 16:41   ` Alexandru Elisei
2022-02-09 16:41     ` Alexandru Elisei
2022-02-09 16:41     ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 31/64] KVM: arm64: nv: Only toggle cache for virtual EL2 when SCTLR_EL2 changes Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-02-09 16:56   ` Alexandru Elisei
2022-02-09 16:56     ` Alexandru Elisei
2022-02-09 16:56     ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 32/64] KVM: arm64: nv: Filter out unsupported features from ID regs Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-01-28 12:18   ` Marc Zyngier
2022-02-09 17:33   ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 33/64] KVM: arm64: nv: Hide RAS from nested guests Marc Zyngier
2022-01-28 12:18 ` [PATCH v6 34/64] KVM: arm64: nv: Support multiple nested Stage-2 mmu structures Marc Zyngier [this message]
2022-02-16 16:12   ` Alexandru Elisei
2022-02-24 14:25   ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 35/64] KVM: arm64: nv: Implement nested Stage-2 page table walk logic Marc Zyngier
2022-01-28 12:18 ` [PATCH v6 36/64] KVM: arm64: nv: Handle shadow stage 2 page faults Marc Zyngier
2022-02-17 15:23   ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 37/64] KVM: arm64: nv: Restrict S2 RD/WR permissions to match the guest's Marc Zyngier
2022-02-17 16:29   ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 38/64] KVM: arm64: nv: Unmap/flush shadow stage 2 page tables Marc Zyngier
2022-02-22 16:13   ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 39/64] KVM: arm64: nv: Set a handler for the system instruction traps Marc Zyngier
2022-02-24 11:59   ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 40/64] KVM: arm64: nv: Trap and emulate AT instructions from virtual EL2 Marc Zyngier
2022-02-24 15:39   ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 41/64] KVM: arm64: nv: Trap and emulate TLBI " Marc Zyngier
2022-02-24 15:56   ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 42/64] KVM: arm64: nv: Fold guest's HCR_EL2 configuration into the host's Marc Zyngier
2022-02-25 13:45   ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 43/64] KVM: arm64: nv: arch_timer: Support hyp timer emulation Marc Zyngier
2022-03-07 14:52   ` Alexandru Elisei
2022-03-07 15:48     ` Marc Zyngier
2022-03-07 16:28       ` Alexandru Elisei
2022-03-07 16:52         ` Marc Zyngier
2022-03-07 17:13           ` Alexandru Elisei
2022-03-07 15:23   ` Alexandru Elisei
2022-03-07 15:44     ` Marc Zyngier
2022-03-07 16:24       ` Alexandru Elisei
2022-03-07 16:40         ` Marc Zyngier
2022-01-28 12:18 ` [PATCH v6 44/64] KVM: arm64: nv: Add handling of EL2-specific timer registers Marc Zyngier
2022-03-07 16:01   ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 45/64] KVM: arm64: nv: Load timer before the GIC Marc Zyngier
2022-01-28 12:18 ` [PATCH v6 46/64] KVM: arm64: nv: Nested GICv3 Support Marc Zyngier
2022-01-28 12:18 ` [PATCH v6 47/64] KVM: arm64: nv: Don't load the GICv4 context on entering a nested guest Marc Zyngier
2022-01-28 12:18 ` [PATCH v6 48/64] KVM: arm64: nv: vgic: Emulate the HW bit in software Marc Zyngier
2022-01-28 12:18 ` [PATCH v6 49/64] KVM: arm64: nv: vgic: Allow userland to set VGIC maintenance IRQ Marc Zyngier
2022-01-28 12:18 ` [PATCH v6 50/64] KVM: arm64: nv: Implement maintenance interrupt forwarding Marc Zyngier
2022-01-28 12:18 ` [PATCH v6 51/64] KVM: arm64: nv: Add nested GICv3 tracepoints Marc Zyngier
2022-01-28 12:19 ` [PATCH v6 52/64] KVM: arm64: nv: Allow userspace to request KVM_ARM_VCPU_NESTED_VIRT Marc Zyngier
2022-01-28 12:19 ` [PATCH v6 53/64] KVM: arm64: nv: Add handling of ARMv8.4-TTL TLB invalidation Marc Zyngier
2022-01-28 12:19 ` [PATCH v6 54/64] KVM: arm64: nv: Invalidate TLBs based on shadow S2 TTL-like information Marc Zyngier
2022-01-28 12:19 ` [PATCH v6 55/64] KVM: arm64: nv: Tag shadow S2 entries with nested level Marc Zyngier
2022-01-28 12:19 ` [PATCH v6 56/64] KVM: arm64: nv: Add include containing the VNCR_EL2 offsets Marc Zyngier
2022-01-28 12:19 ` [PATCH v6 57/64] KVM: arm64: nv: Map VNCR-capable registers to a separate page Marc Zyngier
2022-01-28 12:19 ` [PATCH v6 58/64] KVM: arm64: nv: Move nested vgic state into the sysreg file Marc Zyngier
2022-01-28 12:19 ` [PATCH v6 59/64] KVM: arm64: Add ARMv8.4 Enhanced Nested Virt cpufeature Marc Zyngier
2022-01-28 12:19 ` [PATCH v6 60/64] KVM: arm64: nv: Sync nested timer state with ARMv8.4 Marc Zyngier
2022-04-01 17:51   ` Chase Conklin
2022-01-28 12:19 ` [PATCH v6 61/64] KVM: arm64: nv: Allocate VNCR page when required Marc Zyngier
2022-01-28 12:19 ` [PATCH v6 62/64] KVM: arm64: nv: Enable ARMv8.4-NV support Marc Zyngier
2022-01-28 12:19 ` [PATCH v6 63/64] KVM: arm64: nv: Fast-track 'InHost' exception returns Marc Zyngier
2022-01-28 12:19 ` [PATCH v6 64/64] KVM: arm64: nv: Fast-track EL1 TLBIs for VHE guests Marc Zyngier

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox (a short mutt sketch follows below)

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20220128121912.509006-35-maz@kernel.org \
    --to=maz@kernel.org \
    --cc=alexandru.elisei@arm.com \
    --cc=andre.przywara@arm.com \
    --cc=chase.conklin@arm.com \
    --cc=christoffer.dall@arm.com \
    --cc=gankulkarni@os.amperecomputing.com \
    --cc=haibo.xu@linaro.org \
    --cc=james.morse@arm.com \
    --cc=jintack@cs.columbia.edu \
    --cc=karl.heubaum@oracle.com \
    --cc=kernel-team@android.com \
    --cc=kvm@vger.kernel.org \
    --cc=kvmarm@lists.cs.columbia.edu \
    --cc=linux-arm-kernel@lists.infradead.org \
    --cc=linux@armlinux.org.uk \
    --cc=miguel.luis@oracle.com \
    --cc=mihai.carabas@oracle.com \
    --cc=suzuki.poulose@arm.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link (a sample mailto: URI
  follows below)

Be sure your reply has a Subject: header at the top and a blank line
before the message body.
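
As a concrete sketch of the mbox and mailto: methods above (the file
name t.mbox and the mutt client are illustrative assumptions, not
requirements; any mbox-capable client works, and mailto: header
support varies between clients):

  # open the saved thread, then press 'g' to group-reply
  mutt -f t.mbox

  mailto:maz@kernel.org?In-Reply-To=%3C20220128121912.509006-35-maz%40kernel.org%3E
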
This is an external index of several public inboxes; see the
mirroring instructions for how to clone and mirror all data and code
used by this external index.