From: Ganapatrao Kulkarni <gankulkarni@os.amperecomputing.com>
To: Marc Zyngier <maz@kernel.org>,
	linux-arm-kernel@lists.infradead.org,
	kvmarm@lists.cs.columbia.edu, kvm@vger.kernel.org
Cc: Andre Przywara <andre.przywara@arm.com>,
	Christoffer Dall <christoffer.dall@arm.com>,
	Jintack Lim <jintack@cs.columbia.edu>,
	Haibo Xu <haibo.xu@linaro.org>, James Morse <james.morse@arm.com>,
	Suzuki K Poulose <suzuki.poulose@arm.com>,
	Alexandru Elisei <alexandru.elisei@arm.com>,
	kernel-team@android.com
Subject: Re: [PATCH v5 38/69] KVM: arm64: nv: Support multiple nested Stage-2 mmu structures
Date: Tue, 18 Jan 2022 16:54:04 +0530
Message-ID: <21b0fca8-6b31-dc63-7637-2f80c4b3a272@os.amperecomputing.com>
In-Reply-To: <20211129200150.351436-39-maz@kernel.org>



On 30-11-2021 01:31 am, Marc Zyngier wrote:
> Add Stage-2 mmu data structures for virtual EL2 and for nested guests.
> We don't yet populate shadow Stage-2 page tables, but we now have a
> framework for getting to a shadow Stage-2 pgd.
> 
> We allocate twice the number of vcpus as Stage-2 mmu structures because
> that's sufficient for each vcpu running two translation regimes without
> having to flush the Stage-2 page tables.
> 
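
To make the sizing rationale concrete: a 4-vcpu VM gets 8 shadow S2
contexts, which (if I read the rationale right) is enough for every
vcpu to alternate between two translation regimes (say, virtual
HCR_EL2.VM == 1 and VM == 0) without ever evicting a context that is
still in use.
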
> Co-developed-by: Christoffer Dall <christoffer.dall@arm.com>
> Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
> Signed-off-by: Marc Zyngier <maz@kernel.org>
> ---
>   arch/arm64/include/asm/kvm_host.h   |  29 +++++
>   arch/arm64/include/asm/kvm_mmu.h    |   9 ++
>   arch/arm64/include/asm/kvm_nested.h |   7 +
>   arch/arm64/kvm/arm.c                |  16 ++-
>   arch/arm64/kvm/mmu.c                |  29 +++--
>   arch/arm64/kvm/nested.c             | 195 ++++++++++++++++++++++++++++
>   6 files changed, 275 insertions(+), 10 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
> index 6a7b13edc5cb..00c3366129b8 100644
> --- a/arch/arm64/include/asm/kvm_host.h
> +++ b/arch/arm64/include/asm/kvm_host.h
> @@ -102,14 +102,43 @@ struct kvm_s2_mmu {
>   	int __percpu *last_vcpu_ran;
>   
>   	struct kvm_arch *arch;
> +
> +	/*
> +	 * For a shadow stage-2 MMU, the virtual vttbr programmed by the guest
> +	 * hypervisor.  Unused for kvm_arch->mmu. Set to 1 when the structure
> +	 * contains no valid information.
> +	 */
> +	u64	vttbr;
> +
> +	/* true when this represents a nested context where virtual HCR_EL2.VM == 1 */
> +	bool	nested_stage2_enabled;
> +
> +	/*
> +	 *  0: Nobody is currently using this, check vttbr for validity
> +	 * >0: Somebody is actively using this.
> +	 */
> +	atomic_t refcnt;
>   };
>   
> +static inline bool kvm_s2_mmu_valid(struct kvm_s2_mmu *mmu)
> +{
> +	return !(mmu->vttbr & 1);
> +}
> +
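
For readers wondering about the magic value: bit 0 of VTTBR_EL2 is the
CnP bit, and the shadow vttbr is always stored with CnP cleared (see
lookup_s2_mmu() below), so setting vttbr to 1 can never alias a valid
entry, which is what makes the kvm_s2_mmu_valid() check above safe.
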
>   struct kvm_arch_memory_slot {
>   };
>   
>   struct kvm_arch {
>   	struct kvm_s2_mmu mmu;
>   
> +	/*
> +	 * Stage 2 paging state for VMs using nested virtualization with a
> +	 * virtual VMID.
> +	 */
> +	struct kvm_s2_mmu *nested_mmus;
> +	size_t nested_mmus_size;
> +	int nested_mmus_next;
> +
>   	/* VTCR_EL2 value for this VM */
>   	u64    vtcr;
>   
> diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
> index c018c7b40761..7250594e3d68 100644
> --- a/arch/arm64/include/asm/kvm_mmu.h
> +++ b/arch/arm64/include/asm/kvm_mmu.h
> @@ -116,6 +116,7 @@ alternative_cb_end
>   #include <asm/cacheflush.h>
>   #include <asm/mmu_context.h>
>   #include <asm/kvm_emulate.h>
> +#include <asm/kvm_nested.h>
>   
>   void kvm_update_va_mask(struct alt_instr *alt,
>   			__le32 *origptr, __le32 *updptr, int nr_inst);
> @@ -159,6 +160,7 @@ int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
>   			     void **haddr);
>   void free_hyp_pgds(void);
>   
> +void kvm_unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size);
>   void stage2_unmap_vm(struct kvm *kvm);
>   int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu);
>   void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
> @@ -294,5 +296,12 @@ static inline struct kvm *kvm_s2_mmu_to_kvm(struct kvm_s2_mmu *mmu)
>   {
>   	return container_of(mmu->arch, struct kvm, arch);
>   }
> +
> +static inline u64 get_vmid(u64 vttbr)
> +{
> +	return (vttbr & VTTBR_VMID_MASK(kvm_get_vmid_bits())) >>
> +		VTTBR_VMID_SHIFT;
> +}
> +
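
As a concrete example, with 16-bit VMIDs (VTTBR_VMID_SHIFT == 48)
get_vmid() returns VTTBR_EL2[63:48]; with 8-bit VMIDs the mask keeps
only bits [55:48].
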
>   #endif /* __ASSEMBLY__ */
>   #endif /* __ARM64_KVM_MMU_H__ */
> diff --git a/arch/arm64/include/asm/kvm_nested.h b/arch/arm64/include/asm/kvm_nested.h
> index 026ddaad972c..473ecd1d60d0 100644
> --- a/arch/arm64/include/asm/kvm_nested.h
> +++ b/arch/arm64/include/asm/kvm_nested.h
> @@ -61,6 +61,13 @@ static inline u64 translate_cnthctl_el2_to_cntkctl_el1(u64 cnthctl)
>   		(cnthctl & (CNTHCTL_EVNTI | CNTHCTL_EVNTDIR | CNTHCTL_EVNTEN)));
>   }
>   
> +extern void kvm_init_nested(struct kvm *kvm);
> +extern int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu);
> +extern void kvm_init_nested_s2_mmu(struct kvm_s2_mmu *mmu);
> +extern struct kvm_s2_mmu *lookup_s2_mmu(struct kvm *kvm, u64 vttbr, u64 hcr);
> +extern void kvm_vcpu_load_hw_mmu(struct kvm_vcpu *vcpu);
> +extern void kvm_vcpu_put_hw_mmu(struct kvm_vcpu *vcpu);
> +
>   int handle_wfx_nested(struct kvm_vcpu *vcpu, bool is_wfe);
>   extern bool __forward_traps(struct kvm_vcpu *vcpu, unsigned int reg,
>   			    u64 control_bit);
> diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> index 68da54d58cd0..1dbf63319b99 100644
> --- a/arch/arm64/kvm/arm.c
> +++ b/arch/arm64/kvm/arm.c
> @@ -37,6 +37,7 @@
>   #include <asm/kvm_arm.h>
>   #include <asm/kvm_asm.h>
>   #include <asm/kvm_mmu.h>
> +#include <asm/kvm_nested.h>
>   #include <asm/kvm_emulate.h>
>   #include <asm/sections.h>
>   
> @@ -146,6 +147,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
>   	if (ret)
>   		return ret;
>   
> +	kvm_init_nested(kvm);
> +
>   	ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP);
>   	if (ret)
>   		goto out_free_stage2_pgd;
> @@ -389,6 +392,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>   	struct kvm_s2_mmu *mmu;
>   	int *last_ran;
>   
> +	if (nested_virt_in_use(vcpu))
> +		kvm_vcpu_load_hw_mmu(vcpu);
> +
>   	mmu = vcpu->arch.hw_mmu;
>   	last_ran = this_cpu_ptr(mmu->last_vcpu_ran);
>   
> @@ -437,6 +443,9 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
>   	kvm_vgic_put(vcpu);
>   	kvm_vcpu_pmu_restore_host(vcpu);
>   
> +	if (nested_virt_in_use(vcpu))
> +		kvm_vcpu_put_hw_mmu(vcpu);
> +
>   	vcpu->cpu = -1;
>   }
>   
> @@ -1088,8 +1097,13 @@ static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
>   
>   	vcpu->arch.target = phys_target;
>   
> +	/* Prepare for nested if required */
> +	ret = kvm_vcpu_init_nested(vcpu);
> +
>   	/* Now we know what it is, we can reset it. */
> -	ret = kvm_reset_vcpu(vcpu);
> +	if (!ret)
> +		ret = kvm_reset_vcpu(vcpu);
> +
>   	if (ret) {
>   		vcpu->arch.target = -1;
>   		bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
> diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
> index 9eec548fccd1..ab1653b8e601 100644
> --- a/arch/arm64/kvm/mmu.c
> +++ b/arch/arm64/kvm/mmu.c
> @@ -162,7 +162,7 @@ static void invalidate_icache_guest_page(void *va, size_t size)
>    * does.
>    */
>   /**
> - * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
> + * __unmap_stage2_range -- Clear stage2 page table entries to unmap a range
>    * @mmu:   The KVM stage-2 MMU pointer
>    * @start: The intermediate physical base address of the range to unmap
>    * @size:  The size of the area to unmap
> @@ -185,7 +185,7 @@ static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64
>   				   may_block));
>   }
>   
> -static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
> +void kvm_unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
>   {
>   	__unmap_stage2_range(mmu, start, size, true);
>   }
> @@ -507,7 +507,20 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
>   	int cpu, err;
>   	struct kvm_pgtable *pgt;
>   
> +	/*
> +	 * If we already have our page tables in place, and the
> +	 * MMU context is the canonical one, we have a bug somewhere,
> +	 * as this is only supposed to ever happen once per VM.
> +	 *
> +	 * Otherwise, we're building nested page tables, and that's
> +	 * probably because userspace called KVM_ARM_VCPU_INIT more
> +	 * than once on the same vcpu. Since that's actually legal,
> +	 * don't kick up a fuss and leave gracefully.
> +	 */
>   	if (mmu->pgt != NULL) {
> +		if (&kvm->arch.mmu != mmu)
> +			return 0;
> +
>   		kvm_err("kvm_arch already initialized?\n");
>   		return -EINVAL;
>   	}
> @@ -533,6 +546,9 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
>   	mmu->pgt = pgt;
>   	mmu->pgd_phys = __pa(pgt->pgd);
>   	WRITE_ONCE(mmu->vmid.vmid_gen, 0);
> +
> +	kvm_init_nested_s2_mmu(mmu);
> +
>   	return 0;
>   
>   out_destroy_pgtable:
> @@ -578,7 +594,7 @@ static void stage2_unmap_memslot(struct kvm *kvm,
>   
>   		if (!(vma->vm_flags & VM_PFNMAP)) {
>   			gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
> -			unmap_stage2_range(&kvm->arch.mmu, gpa, vm_end - vm_start);
> +			kvm_unmap_stage2_range(&kvm->arch.mmu, gpa, vm_end - vm_start);
>   		}
>   		hva = vm_end;
>   	} while (hva < reg_end);
> @@ -1556,11 +1572,6 @@ void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
>   {
>   }
>   
> -void kvm_arch_flush_shadow_all(struct kvm *kvm)
> -{
> -	kvm_free_stage2_pgd(&kvm->arch.mmu);
> -}
> -
>   void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
>   				   struct kvm_memory_slot *slot)
>   {
> @@ -1568,7 +1579,7 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
>   	phys_addr_t size = slot->npages << PAGE_SHIFT;
>   
>   	spin_lock(&kvm->mmu_lock);
> -	unmap_stage2_range(&kvm->arch.mmu, gpa, size);
> +	kvm_unmap_stage2_range(&kvm->arch.mmu, gpa, size);
>   	spin_unlock(&kvm->mmu_lock);
>   }
>   
> diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
> index 19b674983e13..b034a2343374 100644
> --- a/arch/arm64/kvm/nested.c
> +++ b/arch/arm64/kvm/nested.c
> @@ -19,12 +19,189 @@
>   #include <linux/kvm.h>
>   #include <linux/kvm_host.h>
>   
> +#include <asm/kvm_arm.h>
>   #include <asm/kvm_emulate.h>
> +#include <asm/kvm_mmu.h>
>   #include <asm/kvm_nested.h>
>   #include <asm/sysreg.h>
>   
>   #include "sys_regs.h"
>   
> +void kvm_init_nested(struct kvm *kvm)
> +{
> +	kvm->arch.nested_mmus = NULL;
> +	kvm->arch.nested_mmus_size = 0;
> +}
> +
> +int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu)
> +{
> +	struct kvm *kvm = vcpu->kvm;
> +	struct kvm_s2_mmu *tmp;
> +	int num_mmus;
> +	int ret = -ENOMEM;
> +
> +	if (!test_bit(KVM_ARM_VCPU_HAS_EL2, vcpu->arch.features))
> +		return 0;
> +
> +	if (!cpus_have_final_cap(ARM64_HAS_NESTED_VIRT))
> +		return -EINVAL;
> +
> +	mutex_lock(&kvm->lock);
> +
> +	/*
> +	 * Let's treat memory allocation failures as benign: If we fail to
> +	 * allocate anything, return an error and keep the allocated array
> +	 * alive. Userspace may try to recover by initializing the vcpu
> +	 * again, and there is no reason to affect the whole VM for this.
> +	 */
> +	num_mmus = atomic_read(&kvm->online_vcpus) * 2;
> +	tmp = krealloc(kvm->arch.nested_mmus,
> +		       num_mmus * sizeof(*kvm->arch.nested_mmus),
> +		       GFP_KERNEL_ACCOUNT | __GFP_ZERO);
> +	if (tmp) {
> +		/*
> +		 * If we went through a reallocation, adjust the MMU
> +		 * back-pointers in the previously initialised
> +		 * kvm_pgtable structures.
> +		 */
> +		if (kvm->arch.nested_mmus != tmp) {
> +			int i;
> +
> +			for (i = 0; i < num_mmus - 2; i++)
> +				tmp[i].pgt->mmu = &tmp[i];
> +		}
> +
> +		if (kvm_init_stage2_mmu(kvm, &tmp[num_mmus - 1]) ||
> +		    kvm_init_stage2_mmu(kvm, &tmp[num_mmus - 2])) {
> +			kvm_free_stage2_pgd(&tmp[num_mmus - 1]);
> +			kvm_free_stage2_pgd(&tmp[num_mmus - 2]);
> +		} else {
> +			kvm->arch.nested_mmus_size = num_mmus;
> +			ret = 0;
> +		}
> +
> +		kvm->arch.nested_mmus = tmp;
> +	}
> +
> +	mutex_unlock(&kvm->lock);
> +	return ret;
> +}
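
The back-pointer fixup after a moving krealloc() is subtle enough to be
worth spelling out. A standalone userspace sketch of the same hazard
(illustrative names, plain realloc() rather than the kernel API, error
handling mostly elided):

#include <stdlib.h>

struct s2_pgt;
struct s2_mmu { struct s2_pgt *pgt; };
struct s2_pgt { struct s2_mmu *mmu; };

static struct s2_mmu *grow(struct s2_mmu *old, size_t n_old, size_t n_new)
{
	struct s2_mmu *new = realloc(old, n_new * sizeof(*new));

	/*
	 * If realloc() moved the block, the old storage is gone and
	 * every pgt->mmu still points at &old[i]; re-aim each one at
	 * the new array before anything dereferences it.
	 */
	if (new && new != old) {
		for (size_t i = 0; i < n_old; i++)
			new[i].pgt->mmu = &new[i];
	}
	return new;
}

int main(void)
{
	struct s2_mmu *arr = calloc(2, sizeof(*arr));

	if (!arr)
		return 1;
	for (size_t i = 0; i < 2; i++) {
		arr[i].pgt = calloc(1, sizeof(*arr[i].pgt));
		if (!arr[i].pgt)
			return 1;
		arr[i].pgt->mmu = &arr[i];
	}
	arr = grow(arr, 2, 4);	/* back-pointers stay coherent */
	return arr ? 0 : 1;
}
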
> +
> +/* Must be called with kvm->lock held */
> +struct kvm_s2_mmu *lookup_s2_mmu(struct kvm *kvm, u64 vttbr, u64 hcr)
> +{
> +	bool nested_stage2_enabled = hcr & HCR_VM;
> +	int i;
> +
> +	/* Don't consider the CnP bit for the vttbr match */
> +	vttbr = vttbr & ~VTTBR_CNP_BIT;
> +
> +	/*
> +	 * Two possibilities when looking up a S2 MMU context:
> +	 *
> +	 * - either S2 is enabled in the guest, and we need a context that
> +	 *   is S2-enabled and matches the full VTTBR (VMID+BADDR), which
> +	 *   makes it safe from a TLB conflict perspective (a broken guest
> +	 *   won't be able to generate them),
> +	 *
> +	 * - or S2 is disabled, and we need a context that is S2-disabled
> +	 *   and matches the VMID only, as all TLBs are tagged by VMID even
> +	 *   if S2 translation is enabled.

I think you intended to say "if S2 translation is disabled".
> +	 */
> +	for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
> +		struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
> +
> +		if (!kvm_s2_mmu_valid(mmu))
> +			continue;
> +
> +		if (nested_stage2_enabled &&
> +		    mmu->nested_stage2_enabled &&
> +		    vttbr == mmu->vttbr)
> +			return mmu;
> +
> +		if (!nested_stage2_enabled &&
> +		    !mmu->nested_stage2_enabled &&
> +		    get_vmid(vttbr) == get_vmid(mmu->vttbr))
> +			return mmu;
> +	}
> +	return NULL;
> +}
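
To make the two matching rules concrete: two guest contexts with the
same VMID but different BADDRs get distinct shadow MMUs when vS2 is
enabled (full VTTBR match), but must share a single context when vS2 is
disabled, because the TLB only sees the VMID in that case and separate
contexts could otherwise tag conflicting entries with it.
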
> +
> +static struct kvm_s2_mmu *get_s2_mmu_nested(struct kvm_vcpu *vcpu)
> +{
> +	struct kvm *kvm = vcpu->kvm;
> +	u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
> +	u64 hcr = vcpu_read_sys_reg(vcpu, HCR_EL2);
> +	struct kvm_s2_mmu *s2_mmu;
> +	int i;
> +
> +	s2_mmu = lookup_s2_mmu(kvm, vttbr, hcr);
> +	if (s2_mmu)
> +		goto out;
> +
> +	/*
> +	 * Make sure we don't always search from the same point, or we
> +	 * will always reuse a potentially active context, leaving
> +	 * free contexts unused.
> +	 */
> +	for (i = kvm->arch.nested_mmus_next;
> +	     i < (kvm->arch.nested_mmus_size + kvm->arch.nested_mmus_next);
> +	     i++) {
> +		s2_mmu = &kvm->arch.nested_mmus[i % kvm->arch.nested_mmus_size];
> +
> +		if (atomic_read(&s2_mmu->refcnt) == 0)
> +			break;
> +	}
> +	BUG_ON(atomic_read(&s2_mmu->refcnt)); /* We have struct MMUs to spare */
> +
> +	/* Set the scene for the next search */
> +	kvm->arch.nested_mmus_next = (i + 1) % kvm->arch.nested_mmus_size;
> +
> +	if (kvm_s2_mmu_valid(s2_mmu)) {
> +		/* Clear the old state */
> +		kvm_unmap_stage2_range(s2_mmu, 0, kvm_phys_size(kvm));
> +		if (s2_mmu->vmid.vmid_gen)
> +			kvm_call_hyp(__kvm_tlb_flush_vmid, s2_mmu);
> +	}
> +
> +	/*
> +	 * The virtual VMID (modulo CnP) will be used as a key when matching
> +	 * an existing kvm_s2_mmu.
> +	 */
> +	s2_mmu->vttbr = vttbr & ~VTTBR_CNP_BIT;
> +	s2_mmu->nested_stage2_enabled = hcr & HCR_VM;
> +
> +out:
> +	atomic_inc(&s2_mmu->refcnt);
> +	return s2_mmu;
> +}
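
The rotating scan is easy to check with a small example: with
nested_mmus_size == 4 and nested_mmus_next == 2, the loop probes
indices 2, 3, 0, 1, and nested_mmus_next then becomes (i + 1) % 4, so
the slot that was just recycled is the last one considered on the next
miss.
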
> +
> +void kvm_init_nested_s2_mmu(struct kvm_s2_mmu *mmu)
> +{
> +	mmu->vttbr = 1;
> +	mmu->nested_stage2_enabled = false;
> +	atomic_set(&mmu->refcnt, 0);
> +}
> +
> +void kvm_vcpu_load_hw_mmu(struct kvm_vcpu *vcpu)
> +{
> +	if (is_hyp_ctxt(vcpu)) {
> +		vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;
> +	} else {
> +		spin_lock(&vcpu->kvm->mmu_lock);
> +		vcpu->arch.hw_mmu = get_s2_mmu_nested(vcpu);
> +		spin_unlock(&vcpu->kvm->mmu_lock);
> +	}
> +}
> +
> +void kvm_vcpu_put_hw_mmu(struct kvm_vcpu *vcpu)
> +{
> +	if (vcpu->arch.hw_mmu != &vcpu->kvm->arch.mmu) {
> +		atomic_dec(&vcpu->arch.hw_mmu->refcnt);
> +		vcpu->arch.hw_mmu = NULL;
> +	}
> +}
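
In other words, the reference taken in get_s2_mmu_nested() at vcpu_load
time is dropped here at vcpu_put time, so a nested context can only be
recycled by the scan above while no vcpu has it installed as hw_mmu.
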
> +
>   /*
>    * Inject wfx to the virtual EL2 if this is not from the virtual EL2 and
>    * the virtual HCR_EL2.TWX is set. Otherwise, let the host hypervisor
> @@ -43,6 +220,24 @@ int handle_wfx_nested(struct kvm_vcpu *vcpu, bool is_wfe)
>   	return -EINVAL;
>   }
>   
> +void kvm_arch_flush_shadow_all(struct kvm *kvm)
> +{
> +	int i;
> +
> +	for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
> +		struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
> +
> +		WARN_ON(atomic_read(&mmu->refcnt));
> +
> +		if (!atomic_read(&mmu->refcnt))
> +			kvm_free_stage2_pgd(mmu);
> +	}
> +	kfree(kvm->arch.nested_mmus);
> +	kvm->arch.nested_mmus = NULL;
> +	kvm->arch.nested_mmus_size = 0;
> +	kvm_free_stage2_pgd(&kvm->arch.mmu);
> +}
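
One note on kvm_arch_flush_shadow_all(): the refcnt is read twice, once
for the WARN_ON() and once for the conditional free. A non-zero count
here already indicates a bug; skipping the free in that case just
avoids tearing a pgd out from under a vcpu that still has the MMU
installed.
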
> +
>   /*
>    * Our emulated CPU doesn't support all the possible features. For the
>    * sake of simplicity (and probably mental sanity), wipe out a number

This looks good to me; please feel free to add:
Reviewed-by: Ganapatrao Kulkarni <gankulkarni@os.amperecomputing.com>

Thanks,
Ganapat

  reply	other threads:[~2022-01-18 11:24 UTC|newest]

Thread overview: 418+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2021-11-29 20:00 [PATCH v5 00/69] KVM: arm64: ARMv8.3/8.4 Nested Virtualization support Marc Zyngier
2021-11-29 20:00 ` Marc Zyngier
2021-11-29 20:00 ` Marc Zyngier
2021-11-29 20:00 ` [PATCH v5 01/69] KVM: arm64: Save PSTATE early on exit Marc Zyngier
2021-11-29 20:00   ` Marc Zyngier
2021-11-29 20:00   ` Marc Zyngier
2022-01-17 15:36   ` Russell King (Oracle)
2022-01-17 15:36     ` Russell King (Oracle)
2022-01-17 15:36     ` Russell King (Oracle)
2021-11-29 20:00 ` [PATCH v5 02/69] KVM: arm64: Move pkvm's special 32bit handling into a generic infrastructure Marc Zyngier
2021-11-29 20:00   ` Marc Zyngier
2021-11-29 20:00   ` Marc Zyngier
2022-01-17 15:34   ` Russell King (Oracle)
2022-01-17 15:34     ` Russell King (Oracle)
2022-01-17 15:34     ` Russell King (Oracle)
2021-11-29 20:00 ` [PATCH v5 03/69] KVM: arm64: Add minimal handling for the ARMv8.7 PMU Marc Zyngier
2021-11-29 20:00   ` Marc Zyngier
2021-11-29 20:00   ` Marc Zyngier
2022-01-17 15:40   ` Russell King (Oracle)
2022-01-17 15:40     ` Russell King (Oracle)
2022-01-17 15:40     ` Russell King (Oracle)
2021-11-29 20:00 ` [PATCH v5 04/69] KVM: arm64: Rework kvm_pgtable initialisation Marc Zyngier
2021-11-29 20:00   ` Marc Zyngier
2021-11-29 20:00   ` Marc Zyngier
2021-12-20 14:00   ` [irqchip: irq/irqchip-next] " irqchip-bot for Marc Zyngier
2022-01-17 15:43   ` [PATCH v5 04/69] " Russell King (Oracle)
2022-01-17 15:43     ` Russell King (Oracle)
2022-01-17 15:43     ` Russell King (Oracle)
2021-11-29 20:00 ` [PATCH v5 05/69] KVM: arm64: Allow preservation of the S2 SW bits Marc Zyngier
2021-11-29 20:00   ` Marc Zyngier
2021-11-29 20:00   ` Marc Zyngier
2022-01-13 12:12   ` Alexandru Elisei
2022-01-13 12:12     ` Alexandru Elisei
2022-01-13 12:12     ` Alexandru Elisei
2022-01-13 13:14     ` Marc Zyngier
2022-01-13 13:14       ` Marc Zyngier
2022-01-13 13:14       ` Marc Zyngier
2022-01-17 15:51   ` Russell King (Oracle)
2022-01-17 15:51     ` Russell King (Oracle)
2022-01-17 15:51     ` Russell King (Oracle)
2021-11-29 20:00 ` [PATCH v5 06/69] arm64: Add ARM64_HAS_NESTED_VIRT cpufeature Marc Zyngier
2021-11-29 20:00   ` Marc Zyngier
2021-11-29 20:00   ` Marc Zyngier
2021-11-29 20:00 ` [PATCH v5 07/69] KVM: arm64: nv: Introduce nested virtualization VCPU feature Marc Zyngier
2021-11-29 20:00   ` Marc Zyngier
2021-11-29 20:00   ` Marc Zyngier
2021-12-20  6:45   ` Ganapatrao Kulkarni
2021-12-20  6:45     ` Ganapatrao Kulkarni
2021-12-20  6:45     ` Ganapatrao Kulkarni
2022-01-13 14:10   ` Alexandru Elisei
2022-01-13 14:10     ` Alexandru Elisei
2022-01-13 14:10     ` Alexandru Elisei
2022-01-13 14:24     ` Marc Zyngier
2022-01-13 14:24       ` Marc Zyngier
2022-01-13 14:24       ` Marc Zyngier
2022-01-17 16:57   ` Russell King (Oracle)
2022-01-17 16:57     ` Russell King (Oracle)
2022-01-17 16:57     ` Russell King (Oracle)
2021-11-29 20:00 ` [PATCH v5 08/69] KVM: arm64: nv: Reset VCPU to EL2 registers if VCPU nested virt is set Marc Zyngier
2021-11-29 20:00   ` Marc Zyngier
2021-11-29 20:00   ` Marc Zyngier
2022-01-07 21:54   ` Chase Conklin
2022-01-07 21:54     ` Chase Conklin
2022-01-07 21:54     ` Chase Conklin
2022-01-27 12:42     ` Marc Zyngier
2022-01-27 12:42       ` Marc Zyngier
2022-01-27 12:42       ` Marc Zyngier
2022-01-17 17:06   ` Russell King (Oracle)
2022-01-17 17:06     ` Russell King (Oracle)
2022-01-17 17:06     ` Russell King (Oracle)
2022-01-27 12:43     ` Marc Zyngier
2022-01-27 12:43       ` Marc Zyngier
2022-01-27 12:43       ` Marc Zyngier
2021-11-29 20:00 ` [PATCH v5 09/69] KVM: arm64: nv: Allow userspace to set PSR_MODE_EL2x Marc Zyngier
2021-11-29 20:00   ` Marc Zyngier
2021-11-29 20:00   ` Marc Zyngier
2022-01-17 17:07   ` Russell King (Oracle)
2022-01-17 17:07     ` Russell King (Oracle)
2022-01-17 17:07     ` Russell King (Oracle)
2021-11-29 20:00 ` [PATCH v5 10/69] KVM: arm64: nv: Add EL2 system registers to vcpu context Marc Zyngier
2021-11-29 20:00   ` Marc Zyngier
2021-11-29 20:00   ` Marc Zyngier
2022-01-17 17:14   ` Russell King (Oracle)
2022-01-17 17:14     ` Russell King (Oracle)
2022-01-17 17:14     ` Russell King (Oracle)
2021-11-29 20:00 ` [PATCH v5 11/69] KVM: arm64: nv: Add nested virt VCPU primitives for vEL2 VCPU state Marc Zyngier
2021-11-29 20:00   ` Marc Zyngier
2021-11-29 20:00   ` Marc Zyngier
2022-01-14 17:42   ` Alexandru Elisei
2022-01-14 17:42     ` Alexandru Elisei
2022-01-14 17:42     ` Alexandru Elisei
2022-01-15 12:19     ` Marc Zyngier
2022-01-15 12:19       ` Marc Zyngier
2022-01-15 12:19       ` Marc Zyngier
2022-01-17 10:19       ` Alexandru Elisei
2022-01-17 10:19         ` Alexandru Elisei
2022-01-17 10:19         ` Alexandru Elisei
2022-01-18 15:45   ` Russell King (Oracle)
2022-01-18 15:45     ` Russell King (Oracle)
2022-01-18 15:45     ` Russell King (Oracle)
2021-11-29 20:00 ` [PATCH v5 12/69] KVM: arm64: nv: Handle HCR_EL2.NV system register traps Marc Zyngier
2021-11-29 20:00   ` Marc Zyngier
2021-11-29 20:00   ` Marc Zyngier
2022-01-17 11:31   ` Alexandru Elisei
2022-01-17 11:31     ` Alexandru Elisei
2022-01-17 11:31     ` Alexandru Elisei
2022-01-26 16:08     ` Marc Zyngier
2022-01-26 16:08       ` Marc Zyngier
2022-01-26 16:08       ` Marc Zyngier
2022-01-18 15:51   ` Russell King (Oracle)
2022-01-18 15:51     ` Russell King (Oracle)
2022-01-18 15:51     ` Russell King (Oracle)
2022-01-26 16:01     ` Marc Zyngier
2022-01-26 16:01       ` Marc Zyngier
2022-01-26 16:01       ` Marc Zyngier
2021-11-29 20:00 ` [PATCH v5 13/69] KVM: arm64: nv: Reset VMPIDR_EL2 and VPIDR_EL2 to sane values Marc Zyngier
2021-11-29 20:00   ` Marc Zyngier
2021-11-29 20:00   ` Marc Zyngier
2022-01-18 15:52   ` Russell King (Oracle)
2022-01-18 15:52     ` Russell King (Oracle)
2022-01-18 15:52     ` Russell King (Oracle)
2021-11-29 20:00 ` [PATCH v5 14/69] KVM: arm64: nv: Support virtual EL2 exceptions Marc Zyngier
2021-11-29 20:00   ` Marc Zyngier
2021-11-29 20:00   ` Marc Zyngier
2021-12-20  6:57   ` Ganapatrao Kulkarni
2021-12-20  6:57     ` Ganapatrao Kulkarni
2021-12-20  6:57     ` Ganapatrao Kulkarni
2022-01-18 14:11   ` Alexandru Elisei
2022-01-18 14:11     ` Alexandru Elisei
2022-01-18 14:11     ` Alexandru Elisei
2022-01-26 20:11     ` Marc Zyngier
2022-01-26 20:11       ` Marc Zyngier
2022-01-26 20:11       ` Marc Zyngier
2022-01-18 16:02   ` Russell King (Oracle)
2022-01-18 16:02     ` Russell King (Oracle)
2022-01-18 16:02     ` Russell King (Oracle)
2022-01-26 20:32     ` Marc Zyngier
2022-01-26 20:32       ` Marc Zyngier
2022-01-26 20:32       ` Marc Zyngier
2022-01-20 13:58   ` Alexandru Elisei
2022-01-20 13:58     ` Alexandru Elisei
2022-01-20 13:58     ` Alexandru Elisei
2022-01-27 11:08     ` Marc Zyngier
2022-01-27 11:08       ` Marc Zyngier
2022-01-27 11:08       ` Marc Zyngier
2021-11-29 20:00 ` [PATCH v5 15/69] KVM: arm64: nv: Inject HVC exceptions to the virtual EL2 Marc Zyngier
2021-11-29 20:00   ` Marc Zyngier
2021-11-29 20:00   ` Marc Zyngier
2022-01-18 16:04   ` Russell King (Oracle)
2022-01-18 16:04     ` Russell King (Oracle)
2022-01-18 16:04     ` Russell King (Oracle)
2022-01-18 16:35   ` Alexandru Elisei
2021-11-29 20:00 ` [PATCH v5 16/69] KVM: arm64: nv: Handle trapped ERET from " Marc Zyngier
2022-01-18 16:05   ` Russell King (Oracle)
2022-01-18 16:36   ` Alexandru Elisei
2022-01-27 11:50     ` Marc Zyngier
2021-11-29 20:00 ` [PATCH v5 17/69] KVM: arm64: nv: Add non-VHE-EL2->EL1 translation helpers Marc Zyngier
2022-01-20 11:52   ` Alexandru Elisei
2022-01-27 17:22     ` Marc Zyngier
2021-11-29 20:00 ` [PATCH v5 18/69] KVM: arm64: nv: Handle virtual EL2 registers in vcpu_read/write_sys_reg() Marc Zyngier
2021-12-20  7:04   ` Ganapatrao Kulkarni
2021-12-20  9:10     ` Marc Zyngier
2021-12-21  7:12       ` Ganapatrao Kulkarni
2021-12-21  8:39         ` Marc Zyngier
2021-12-21 10:12           ` Ganapatrao Kulkarni
2022-01-20 15:12   ` Alexandru Elisei
2021-11-29 20:01 ` [PATCH v5 19/69] KVM: arm64: nv: Handle SPSR_EL2 specially Marc Zyngier
2022-01-20 16:28   ` Alexandru Elisei
2021-11-29 20:01 ` [PATCH v5 20/69] KVM: arm64: nv: Handle HCR_EL2.E2H specially Marc Zyngier
2021-11-29 20:01 ` [PATCH v5 21/69] KVM: arm64: nv: Save/Restore vEL2 sysregs Marc Zyngier
2021-11-29 20:01 ` [PATCH v5 22/69] KVM: arm64: nv: Emulate PSTATE.M for a guest hypervisor Marc Zyngier
2021-11-29 20:01 ` [PATCH v5 23/69] KVM: arm64: nv: Trap EL1 VM register accesses in virtual EL2 Marc Zyngier
2021-11-29 20:01 ` [PATCH v5 24/69] KVM: arm64: nv: Trap SPSR_EL1, ELR_EL1 and VBAR_EL1 from " Marc Zyngier
2021-11-29 20:01 ` [PATCH v5 25/69] KVM: arm64: nv: Trap CPACR_EL1 access in " Marc Zyngier
2021-11-29 20:01 ` [PATCH v5 26/69] KVM: arm64: nv: Handle PSCI call via smc from the guest Marc Zyngier
2021-11-29 20:01 ` [PATCH v5 27/69] KVM: arm64: nv: Respect virtual HCR_EL2.TWX setting Marc Zyngier
2021-11-29 20:01 ` [PATCH v5 28/69] KVM: arm64: nv: Respect virtual CPTR_EL2.{TFP, FPEN} settings Marc Zyngier
2021-11-29 20:01 ` [PATCH v5 29/69] KVM: arm64: nv: Respect the virtual HCR_EL2.NV bit setting Marc Zyngier
2021-12-20  7:11   ` Ganapatrao Kulkarni
2021-12-20  9:18     ` Marc Zyngier
2021-11-29 20:01 ` [PATCH v5 30/69] KVM: arm64: nv: Respect virtual HCR_EL2.TVM and TRVM settings Marc Zyngier
2021-11-29 20:01 ` [PATCH v5 31/69] KVM: arm64: nv: Respect the virtual HCR_EL2.NV1 bit setting Marc Zyngier
2021-12-20  7:18   ` Ganapatrao Kulkarni
2021-12-20  9:39     ` Marc Zyngier
2021-11-29 20:01 ` [PATCH v5 32/69] KVM: arm64: nv: Emulate EL12 register accesses from the virtual EL2 Marc Zyngier
2021-11-29 20:01 ` [PATCH v5 33/69] KVM: arm64: nv: Forward debug traps to the nested guest Marc Zyngier
2021-11-29 20:01 ` [PATCH v5 34/69] KVM: arm64: nv: Configure HCR_EL2 for nested virtualization Marc Zyngier
2022-01-04  8:53   ` Ganapatrao Kulkarni
2022-01-04  9:39     ` Marc Zyngier
2022-01-04  9:53       ` Ganapatrao Kulkarni
2021-11-29 20:01 ` [PATCH v5 35/69] KVM: arm64: nv: Only toggle cache for virtual EL2 when SCTLR_EL2 changes Marc Zyngier
2021-11-29 20:01 ` [PATCH v5 36/69] KVM: arm64: nv: Filter out unsupported features from ID regs Marc Zyngier
2021-12-20  7:26   ` Ganapatrao Kulkarni
2021-12-20  9:56     ` Marc Zyngier
2021-12-21  6:03       ` Ganapatrao Kulkarni
2021-12-21  9:10         ` Marc Zyngier
2021-12-21 10:07           ` Ganapatrao Kulkarni
2022-01-21 11:33           ` Ganapatrao Kulkarni
2022-01-27 13:04             ` Marc Zyngier
2022-01-04 10:24   ` Ganapatrao Kulkarni
2021-11-29 20:01 ` [PATCH v5 37/69] KVM: arm64: nv: Hide RAS from nested guests Marc Zyngier
2021-11-29 20:01 ` [PATCH v5 38/69] KVM: arm64: nv: Support multiple nested Stage-2 mmu structures Marc Zyngier
2022-01-18 11:24   ` Ganapatrao Kulkarni [this message]
2022-01-27 11:50     ` Marc Zyngier
2021-11-29 20:01 ` [PATCH v5 39/69] KVM: arm64: nv: Implement nested Stage-2 page table walk logic Marc Zyngier
2021-11-29 20:01 ` [PATCH v5 40/69] KVM: arm64: nv: Handle shadow stage 2 page faults Marc Zyngier
2021-11-29 20:01 ` [PATCH v5 41/69] KVM: arm64: nv: Restrict S2 RD/WR permissions to match the guest's Marc Zyngier
2021-11-29 20:01 ` [PATCH v5 42/69] KVM: arm64: nv: Unmap/flush shadow stage 2 page tables Marc Zyngier
2021-11-29 20:01 ` [PATCH v5 43/69] KVM: arm64: nv: Introduce sys_reg_desc.forward_trap Marc Zyngier
2021-11-29 20:01 ` [PATCH v5 44/69] KVM: arm64: nv: Set a handler for the system instruction traps Marc Zyngier
2022-01-18 11:29   ` Ganapatrao Kulkarni
2021-11-29 20:01 ` [PATCH v5 45/69] KVM: arm64: nv: Trap and emulate AT instructions from virtual EL2 Marc Zyngier
2021-11-29 20:01 ` [PATCH v5 46/69] KVM: arm64: nv: Trap and emulate TLBI " Marc Zyngier
2021-11-29 20:01 ` [PATCH v5 47/69] KVM: arm64: nv: Fold guest's HCR_EL2 configuration into the host's Marc Zyngier
2021-11-29 20:01 ` [PATCH v5 48/69] KVM: arm64: nv: arch_timer: Support hyp timer emulation Marc Zyngier
2021-11-29 20:01 ` [PATCH v5 49/69] KVM: arm64: nv: Add handling of EL2-specific timer registers Marc Zyngier
2021-11-29 20:01 ` [PATCH v5 50/69] KVM: arm64: nv: Load timer before the GIC Marc Zyngier
2021-11-29 20:01 ` [PATCH v5 51/69] KVM: arm64: nv: Nested GICv3 Support Marc Zyngier
2021-11-29 20:01 ` [PATCH v5 52/69] KVM: arm64: nv: Don't load the GICv4 context on entering a nested guest Marc Zyngier
2021-11-29 20:01 ` [PATCH v5 53/69] KVM: arm64: nv: vgic: Emulate the HW bit in software Marc Zyngier
2021-11-29 20:01 ` [PATCH v5 54/69] KVM: arm64: nv: vgic: Allow userland to set VGIC maintenance IRQ Marc Zyngier
2021-11-29 20:01 ` [PATCH v5 55/69] KVM: arm64: nv: Implement maintenance interrupt forwarding Marc Zyngier
2021-11-29 20:01 ` [PATCH v5 56/69] KVM: arm64: nv: Add nested GICv3 tracepoints Marc Zyngier
2021-11-29 20:01 ` [PATCH v5 57/69] KVM: arm64: nv: Allow userspace to request KVM_ARM_VCPU_NESTED_VIRT Marc Zyngier
2021-11-29 20:01 ` [PATCH v5 58/69] KVM: arm64: nv: Add handling of ARMv8.4-TTL TLB invalidation Marc Zyngier
2022-01-18 11:35   ` Ganapatrao Kulkarni
2021-11-29 20:01 ` [PATCH v5 59/69] KVM: arm64: nv: Invalidate TLBs based on shadow S2 TTL-like information Marc Zyngier
2021-11-29 20:01 ` [PATCH v5 60/69] KVM: arm64: nv: Tag shadow S2 entries with nested level Marc Zyngier
2021-11-29 20:01 ` [PATCH v5 61/69] KVM: arm64: nv: Add include containing the VNCR_EL2 offsets Marc Zyngier
2021-11-29 20:01 ` [PATCH v5 62/69] KVM: arm64: nv: Map VNCR-capable registers to a separate page Marc Zyngier
2021-11-29 20:01 ` [PATCH v5 63/69] KVM: arm64: nv: Move nested vgic state into the sysreg file Marc Zyngier
2021-11-29 20:01 ` [PATCH v5 64/69] KVM: arm64: Add ARMv8.4 Enhanced Nested Virt cpufeature Marc Zyngier
2021-11-29 20:01 ` [PATCH v5 65/69] KVM: arm64: nv: Sync nested timer state with ARMv8.4 Marc Zyngier
2021-11-29 20:01 ` [PATCH v5 66/69] KVM: arm64: nv: Allocate VNCR page when required Marc Zyngier
2021-11-29 20:01 ` [PATCH v5 67/69] KVM: arm64: nv: Enable ARMv8.4-NV support Marc Zyngier
2022-01-18 11:50   ` Ganapatrao Kulkarni
2022-01-27 11:48     ` Marc Zyngier
2021-11-29 20:01 ` [PATCH v5 68/69] KVM: arm64: nv: Fast-track 'InHost' exception returns Marc Zyngier
2021-11-29 20:01 ` [PATCH v5 69/69] KVM: arm64: nv: Fast-track EL1 TLBIs for VHE guests Marc Zyngier
2021-12-16 17:19 ` (subset) [PATCH v5 00/69] KVM: arm64: ARMv8.3/8.4 Nested Virtualization support Marc Zyngier
