From: Marc Zyngier <maz@kernel.org>
To: linux-arm-kernel@lists.infradead.org,
kvmarm@lists.cs.columbia.edu, kvm@vger.kernel.org
Cc: Andre Przywara <andre.przywara@arm.com>,
Christoffer Dall <christoffer.dall@arm.com>,
Jintack Lim <jintack@cs.columbia.edu>,
Haibo Xu <haibo.xu@linaro.org>, James Morse <james.morse@arm.com>,
Suzuki K Poulose <suzuki.poulose@arm.com>,
Alexandru Elisei <alexandru.elisei@arm.com>,
kernel-team@android.com
Subject: [PATCH v4 33/66] KVM: arm64: nv: Support multiple nested Stage-2 mmu structures
Date: Mon, 10 May 2021 17:58:47 +0100
Message-ID: <20210510165920.1913477-34-maz@kernel.org>
In-Reply-To: <20210510165920.1913477-1-maz@kernel.org>
Add Stage-2 mmu data structures for virtual EL2 and for nested guests.
We don't yet populate shadow Stage-2 page tables, but we now have a
framework for getting to a shadow Stage-2 pgd.
We allocate twice as many Stage-2 mmu structures as there are vcpus,
because that is sufficient for each vcpu to run two translation regimes
without having to flush the Stage-2 page tables.
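With 4 vcpus, for example, we end up with 8 nested mmu structures, so
every vcpu can keep two translation regimes live at the same time.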
Co-developed-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
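As an aside, here is a self-contained userspace model of the round-robin
slot selection performed by get_s2_mmu_nested() below. The names, array
size and pre-pinned slots are made up for the sketch; it only shows why
the scan resumes at nested_mmus_next instead of always starting from
slot 0, which would keep recycling the same potentially active context:

        #include <stdio.h>

        #define NR_MMUS 8

        static int refcnt[NR_MMUS];     /* 0: free, >0: pinned by a running vcpu */
        static int next;                /* models kvm->arch.nested_mmus_next */

        static int pick_slot(void)
        {
                int i, slot = -1;

                /* Scan the whole array once, starting where the last search ended */
                for (i = next; i < next + NR_MMUS; i++) {
                        if (refcnt[i % NR_MMUS] == 0) {
                                slot = i % NR_MMUS;
                                break;
                        }
                }
                if (slot < 0)                   /* the kernel BUG()s out instead */
                        return -1;

                next = (slot + 1) % NR_MMUS;    /* set the scene for the next search */
                refcnt[slot]++;                 /* pin it, as get_s2_mmu_nested() does */
                return slot;
        }

        int main(void)
        {
                int k;

                refcnt[0] = refcnt[1] = 1;      /* two contexts already in use */
                for (k = 0; k < 4; k++)
                        printf("picked slot %d\n", pick_slot());  /* 2, 3, 4, 5 */
                return 0;
        }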
arch/arm64/include/asm/kvm_host.h | 29 +++++
arch/arm64/include/asm/kvm_mmu.h | 9 ++
arch/arm64/include/asm/kvm_nested.h | 7 ++
arch/arm64/kvm/arm.c | 16 ++-
arch/arm64/kvm/mmu.c | 31 +++--
arch/arm64/kvm/nested.c | 183 ++++++++++++++++++++++++++++
6 files changed, 264 insertions(+), 11 deletions(-)
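Likewise, the VTTBR matching rule implemented by lookup_s2_mmu() can be
modelled in userspace. The 16-bit VMID field layout below is an
assumption of the sketch, not something the patch relies on; the point
is the two match modes (full VMID+BADDR when the virtual S2 is enabled,
VMID-only when it is disabled):

        #include <stdbool.h>
        #include <stdint.h>
        #include <stdio.h>

        #define CNP_BIT         1ULL
        #define VMID_SHIFT      48
        #define VMID_MASK       (0xffffULL << VMID_SHIFT)

        struct model_mmu {
                uint64_t vttbr;         /* bit 0 set: no valid information */
                bool     s2_enabled;    /* virtual HCR_EL2.VM */
        };

        static uint64_t vmid(uint64_t vttbr)
        {
                return (vttbr & VMID_MASK) >> VMID_SHIFT;
        }

        static bool match(const struct model_mmu *mmu, uint64_t vttbr, bool s2)
        {
                vttbr &= ~CNP_BIT;      /* CnP never takes part in the match */

                if (mmu->vttbr & 1)     /* invalid context, skip it */
                        return false;

                if (s2)                 /* S2 enabled: full VMID+BADDR match */
                        return mmu->s2_enabled && vttbr == mmu->vttbr;

                /* S2 disabled: TLBs are still tagged by VMID, so match VMID only */
                return !mmu->s2_enabled && vmid(vttbr) == vmid(mmu->vttbr);
        }

        int main(void)
        {
                struct model_mmu m = {
                        .vttbr      = (42ULL << VMID_SHIFT) | 0x1000,
                        .s2_enabled = true,
                };

                printf("%d\n", match(&m, (42ULL << VMID_SHIFT) | 0x1000, true)); /* 1 */
                printf("%d\n", match(&m, (42ULL << VMID_SHIFT) | 0x2000, true)); /* 0 */
                return 0;
        }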
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 1d82ad3c63b8..b7d6a829091c 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -95,14 +95,43 @@ struct kvm_s2_mmu {
int __percpu *last_vcpu_ran;
struct kvm_arch *arch;
+
+ /*
+ * For a shadow stage-2 MMU, the virtual vttbr programmed by the guest
+ * hypervisor. Unused for kvm_arch->mmu. Set to 1 when the structure
+ * contains no valid information.
+ */
+ u64 vttbr;
+
+ /* true when this represents a nested context where virtual HCR_EL2.VM == 1 */
+ bool nested_stage2_enabled;
+
+ /*
+ * 0: Nobody is currently using this, check vttbr for validity
+ * >0: Somebody is actively using this.
+ */
+ atomic_t refcnt;
};
+static inline bool kvm_s2_mmu_valid(struct kvm_s2_mmu *mmu)
+{
+ return !(mmu->vttbr & 1);
+}
+
struct kvm_arch_memory_slot {
};
struct kvm_arch {
struct kvm_s2_mmu mmu;
+ /*
+ * Stage 2 paging state for VMs with nested virtualization, using a virtual
+ * VMID.
+ */
+ struct kvm_s2_mmu *nested_mmus;
+ size_t nested_mmus_size;
+ int nested_mmus_next;
+
/* VTCR_EL2 value for this VM */
u64 vtcr;
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 0be00ec66e0f..579980a8b05f 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -116,6 +116,7 @@ alternative_cb_end
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/kvm_emulate.h>
+#include <asm/kvm_nested.h>
void kvm_update_va_mask(struct alt_instr *alt,
__le32 *origptr, __le32 *updptr, int nr_inst);
@@ -159,6 +160,7 @@ int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
void **haddr);
void free_hyp_pgds(void);
+void kvm_unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size);
void stage2_unmap_vm(struct kvm *kvm);
int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu);
void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
@@ -298,5 +300,12 @@ static inline struct kvm *kvm_s2_mmu_to_kvm(struct kvm_s2_mmu *mmu)
{
return container_of(mmu->arch, struct kvm, arch);
}
+
+static inline u64 get_vmid(u64 vttbr)
+{
+ return (vttbr & VTTBR_VMID_MASK(kvm_get_vmid_bits())) >>
+ VTTBR_VMID_SHIFT;
+}
+
#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */
diff --git a/arch/arm64/include/asm/kvm_nested.h b/arch/arm64/include/asm/kvm_nested.h
index 026ddaad972c..473ecd1d60d0 100644
--- a/arch/arm64/include/asm/kvm_nested.h
+++ b/arch/arm64/include/asm/kvm_nested.h
@@ -61,6 +61,13 @@ static inline u64 translate_cnthctl_el2_to_cntkctl_el1(u64 cnthctl)
(cnthctl & (CNTHCTL_EVNTI | CNTHCTL_EVNTDIR | CNTHCTL_EVNTEN)));
}
+extern void kvm_init_nested(struct kvm *kvm);
+extern int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu);
+extern void kvm_init_nested_s2_mmu(struct kvm_s2_mmu *mmu);
+extern struct kvm_s2_mmu *lookup_s2_mmu(struct kvm *kvm, u64 vttbr, u64 hcr);
+extern void kvm_vcpu_load_hw_mmu(struct kvm_vcpu *vcpu);
+extern void kvm_vcpu_put_hw_mmu(struct kvm_vcpu *vcpu);
+
int handle_wfx_nested(struct kvm_vcpu *vcpu, bool is_wfe);
extern bool __forward_traps(struct kvm_vcpu *vcpu, unsigned int reg,
u64 control_bit);
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 1cb39c0803a4..8cadfaa2a310 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -35,6 +35,7 @@
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
+#include <asm/kvm_nested.h>
#include <asm/kvm_emulate.h>
#include <asm/sections.h>
@@ -138,6 +139,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
if (ret)
return ret;
+ kvm_init_nested(kvm);
+
ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP);
if (ret)
goto out_free_stage2_pgd;
@@ -384,6 +387,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
struct kvm_s2_mmu *mmu;
int *last_ran;
+ if (nested_virt_in_use(vcpu))
+ kvm_vcpu_load_hw_mmu(vcpu);
+
mmu = vcpu->arch.hw_mmu;
last_ran = this_cpu_ptr(mmu->last_vcpu_ran);
@@ -432,6 +438,9 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
kvm_vgic_put(vcpu);
kvm_vcpu_pmu_restore_host(vcpu);
+ if (nested_virt_in_use(vcpu))
+ kvm_vcpu_put_hw_mmu(vcpu);
+
vcpu->cpu = -1;
}
@@ -1032,8 +1041,13 @@ static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
vcpu->arch.target = phys_target;
+ /* Prepare for nested virtualization if required */
+ ret = kvm_vcpu_init_nested(vcpu);
+
/* Now we know what it is, we can reset it. */
- ret = kvm_reset_vcpu(vcpu);
+ if (!ret)
+ ret = kvm_reset_vcpu(vcpu);
+
if (ret) {
vcpu->arch.target = -1;
bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index c5d1f3c87dbd..5c1a9966ff31 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -151,7 +151,7 @@ static void *kvm_host_va(phys_addr_t phys)
* does.
*/
/**
- * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
+ * kvm_unmap_stage2_range -- Clear stage2 page table entries to unmap a range
* @mmu: The KVM stage-2 MMU pointer
* @start: The intermediate physical base address of the range to unmap
* @size: The size of the area to unmap
@@ -174,7 +174,7 @@ static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64
may_block));
}
-static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
+void kvm_unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
{
__unmap_stage2_range(mmu, start, size, true);
}
@@ -448,7 +448,20 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
int cpu, err;
struct kvm_pgtable *pgt;
+ /*
+ * If we already have our page tables in place, and the
+ * MMU context is the canonical one, we have a bug somewhere,
+ * as this is only supposed to ever happen once per VM.
+ *
+ * Otherwise, we're building nested page tables, and that's
+ * probably because userspace called KVM_ARM_VCPU_INIT more
+ * than once on the same vcpu. Since that's actually legal,
+ * don't kick up a fuss and leave gracefully.
+ */
if (mmu->pgt != NULL) {
+ if (&kvm->arch.mmu != mmu)
+ return 0;
+
kvm_err("kvm_arch already initialized?\n");
return -EINVAL;
}
@@ -474,6 +487,9 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
mmu->pgt = pgt;
mmu->pgd_phys = __pa(pgt->pgd);
mmu->vmid.vmid_gen = 0;
+
+ kvm_init_nested_s2_mmu(mmu);
+
return 0;
out_destroy_pgtable:
@@ -519,7 +535,7 @@ static void stage2_unmap_memslot(struct kvm *kvm,
if (!(vma->vm_flags & VM_PFNMAP)) {
gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
- unmap_stage2_range(&kvm->arch.mmu, gpa, vm_end - vm_start);
+ kvm_unmap_stage2_range(&kvm->arch.mmu, gpa, vm_end - vm_start);
}
hva = vm_end;
} while (hva < reg_end);
@@ -1415,7 +1431,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
spin_lock(&kvm->mmu_lock);
if (ret)
- unmap_stage2_range(&kvm->arch.mmu, mem->guest_phys_addr, mem->memory_size);
+ kvm_unmap_stage2_range(&kvm->arch.mmu, mem->guest_phys_addr, mem->memory_size);
else if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
stage2_flush_memslot(kvm, memslot);
spin_unlock(&kvm->mmu_lock);
@@ -1432,11 +1448,6 @@ void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
{
}
-void kvm_arch_flush_shadow_all(struct kvm *kvm)
-{
- kvm_free_stage2_pgd(&kvm->arch.mmu);
-}
-
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot)
{
@@ -1444,7 +1455,7 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
phys_addr_t size = slot->npages << PAGE_SHIFT;
spin_lock(&kvm->mmu_lock);
- unmap_stage2_range(&kvm->arch.mmu, gpa, size);
+ kvm_unmap_stage2_range(&kvm->arch.mmu, gpa, size);
spin_unlock(&kvm->mmu_lock);
}
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index 99e1b97ae3ca..c33cc29756fa 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -19,12 +19,177 @@
#include <linux/kvm.h>
#include <linux/kvm_host.h>
+#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>
+#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>
#include <asm/sysreg.h>
#include "sys_regs.h"
+void kvm_init_nested(struct kvm *kvm)
+{
+ kvm->arch.nested_mmus = NULL;
+ kvm->arch.nested_mmus_size = 0;
+}
+
+int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_s2_mmu *tmp;
+ int num_mmus;
+ int ret = -ENOMEM;
+
+ if (!test_bit(KVM_ARM_VCPU_HAS_EL2, vcpu->arch.features))
+ return 0;
+
+ if (!cpus_have_final_cap(ARM64_HAS_NESTED_VIRT))
+ return -EINVAL;
+
+ mutex_lock(&kvm->lock);
+
+ /*
+ * Let's treat memory allocation failures as benign: If we fail to
+ * allocate anything, return an error and keep the allocated array
+ * alive. Userspace may try to recover by initializing the vcpu
+ * again, and there is no reason to affect the whole VM for this.
+ */
+ num_mmus = atomic_read(&kvm->online_vcpus) * 2;
+ tmp = krealloc(kvm->arch.nested_mmus,
+ num_mmus * sizeof(*kvm->arch.nested_mmus),
+ GFP_KERNEL | __GFP_ZERO);
+ if (tmp) {
+ if (kvm_init_stage2_mmu(kvm, &tmp[num_mmus - 1]) ||
+ kvm_init_stage2_mmu(kvm, &tmp[num_mmus - 2])) {
+ kvm_free_stage2_pgd(&tmp[num_mmus - 1]);
+ kvm_free_stage2_pgd(&tmp[num_mmus - 2]);
+ } else {
+ kvm->arch.nested_mmus_size = num_mmus;
+ ret = 0;
+ }
+
+ kvm->arch.nested_mmus = tmp;
+ }
+
+ mutex_unlock(&kvm->lock);
+ return ret;
+}
+
+/* Must be called with kvm->lock held */
+struct kvm_s2_mmu *lookup_s2_mmu(struct kvm *kvm, u64 vttbr, u64 hcr)
+{
+ bool nested_stage2_enabled = hcr & HCR_VM;
+ int i;
+
+ /* Don't consider the CnP bit for the vttbr match */
+ vttbr = vttbr & ~VTTBR_CNP_BIT;
+
+ /*
+ * Two possibilities when looking up a S2 MMU context:
+ *
+ * - either S2 is enabled in the guest, and we need a context that
+ * is S2-enabled and matches the full VTTBR (VMID+BADDR), which
+ * makes it safe from a TLB conflict perspective (a broken guest
+ * won't be able to generate them),
+ *
+ * - or S2 is disabled, and we need a context that is S2-disabled
+ * and matches the VMID only, as all TLBs are tagged by VMID even
+ * if S2 translation is enabled.
+ */
+ for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
+ struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
+
+ if (!kvm_s2_mmu_valid(mmu))
+ continue;
+
+ if (nested_stage2_enabled &&
+ mmu->nested_stage2_enabled &&
+ vttbr == mmu->vttbr)
+ return mmu;
+
+ if (!nested_stage2_enabled &&
+ !mmu->nested_stage2_enabled &&
+ get_vmid(vttbr) == get_vmid(mmu->vttbr))
+ return mmu;
+ }
+ return NULL;
+}
+
+static struct kvm_s2_mmu *get_s2_mmu_nested(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = vcpu->kvm;
+ u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
+ u64 hcr = vcpu_read_sys_reg(vcpu, HCR_EL2);
+ struct kvm_s2_mmu *s2_mmu;
+ int i;
+
+ s2_mmu = lookup_s2_mmu(kvm, vttbr, hcr);
+ if (s2_mmu)
+ goto out;
+
+ /*
+ * Make sure we don't always search from the same point, or we
+ * will always reuse a potentially active context, leaving
+ * free contexts unused.
+ */
+ for (i = kvm->arch.nested_mmus_next;
+ i < (kvm->arch.nested_mmus_size + kvm->arch.nested_mmus_next);
+ i++) {
+ s2_mmu = &kvm->arch.nested_mmus[i % kvm->arch.nested_mmus_size];
+
+ if (atomic_read(&s2_mmu->refcnt) == 0)
+ break;
+ }
+ BUG_ON(atomic_read(&s2_mmu->refcnt)); /* We have struct MMUs to spare */
+
+ /* Set the scene for the next search */
+ kvm->arch.nested_mmus_next = (i + 1) % kvm->arch.nested_mmus_size;
+
+ if (kvm_s2_mmu_valid(s2_mmu)) {
+ /* Clear the old state */
+ kvm_unmap_stage2_range(s2_mmu, 0, kvm_phys_size(kvm));
+ if (s2_mmu->vmid.vmid_gen)
+ kvm_call_hyp(__kvm_tlb_flush_vmid, s2_mmu);
+ }
+
+ /*
+ * The virtual VMID (modulo CnP) will be used as a key when matching
+ * an existing kvm_s2_mmu.
+ */
+ s2_mmu->vttbr = vttbr & ~VTTBR_CNP_BIT;
+ s2_mmu->nested_stage2_enabled = hcr & HCR_VM;
+
+out:
+ atomic_inc(&s2_mmu->refcnt);
+ return s2_mmu;
+}
+
+void kvm_init_nested_s2_mmu(struct kvm_s2_mmu *mmu)
+{
+ mmu->vttbr = 1;
+ mmu->nested_stage2_enabled = false;
+ atomic_set(&mmu->refcnt, 0);
+}
+
+void kvm_vcpu_load_hw_mmu(struct kvm_vcpu *vcpu)
+{
+ if (is_hyp_ctxt(vcpu)) {
+ vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;
+ } else {
+ spin_lock(&vcpu->kvm->mmu_lock);
+ vcpu->arch.hw_mmu = get_s2_mmu_nested(vcpu);
+ spin_unlock(&vcpu->kvm->mmu_lock);
+ }
+}
+
+void kvm_vcpu_put_hw_mmu(struct kvm_vcpu *vcpu)
+{
+ if (vcpu->arch.hw_mmu != &vcpu->kvm->arch.mmu) {
+ atomic_dec(&vcpu->arch.hw_mmu->refcnt);
+ vcpu->arch.hw_mmu = NULL;
+ }
+}
+
/*
* Inject wfx to the virtual EL2 if this is not from the virtual EL2 and
* the virtual HCR_EL2.TWX is set. Otherwise, let the host hypervisor
@@ -43,6 +208,24 @@ int handle_wfx_nested(struct kvm_vcpu *vcpu, bool is_wfe)
return -EINVAL;
}
+void kvm_arch_flush_shadow_all(struct kvm *kvm)
+{
+ int i;
+
+ for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
+ struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
+
+ WARN_ON(atomic_read(&mmu->refcnt));
+
+ if (!atomic_read(&mmu->refcnt))
+ kvm_free_stage2_pgd(mmu);
+ }
+ kfree(kvm->arch.nested_mmus);
+ kvm->arch.nested_mmus = NULL;
+ kvm->arch.nested_mmus_size = 0;
+ kvm_free_stage2_pgd(&kvm->arch.mmu);
+}
+
/*
* Our emulated CPU doesn't support all the possible features. For the
* sake of simplicity (and probably mental sanity), wipe out a number
--
2.29.2