From: Alexandru Elisei <alexandru.elisei@arm.com>
To: Marc Zyngier <marc.zyngier@arm.com>,
	linux-arm-kernel@lists.infradead.org,
	kvmarm@lists.cs.columbia.edu, kvm@vger.kernel.org
Cc: Andre Przywara <andre.przywara@arm.com>,
	Dave Martin <Dave.Martin@arm.com>
Subject: Re: [PATCH 37/59] KVM: arm64: nv: Handle shadow stage 2 page faults
Date: Fri, 5 Jul 2019 15:28:27 +0100	[thread overview]
Message-ID: <de40863e-1688-c028-329c-0cf94149f0b3@arm.com> (raw)
In-Reply-To: <20190621093843.220980-38-marc.zyngier@arm.com>

On 6/21/19 10:38 AM, Marc Zyngier wrote:
> From: Christoffer Dall <christoffer.dall@linaro.org>
>
> If we are faulting on a shadow stage 2 translation, we first walk the
> guest hypervisor's stage 2 page table to see if it has a mapping. If
> not, we inject a stage 2 page fault to the virtual EL2. Otherwise, we
> create a mapping in the shadow stage 2 page table.
>
> Note that we have to deal with two IPAs when we got a showdow stage 2

I think it should be "shadow", not "showdow".

> page fault. One is the address we faulted on, and is in the L2 guest
> phys space. The other is from the guest stage-2 page table walk, and is
> in the L1 guest phys space.  To differentiate them, we rename variable
> names so that fault_ipa is used for the former and ipa is used for the

How about "To differentiate them, we rename variables so that [..]"?

> latter.
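
A quick illustration of the two address spaces, for readers following
along -- a hypothetical flow using only the names from this patch, not
code lifted from it:

	/*
	 * fault_ipa: the L2 IPA we faulted on
	 * ipa:       the L1 IPA it maps to in the guest hypervisor's
	 *            stage 2 tables
	 *
	 * fault_ipa --(kvm_walk_nested_s2)--> ipa --(memslots)--> host PA
	 */
	ret = kvm_walk_nested_s2(vcpu, fault_ipa, &nested_trans);
	if (!ret && !kvm_s2_trans_esr(&nested_trans))
		ipa = kvm_s2_trans_output(&nested_trans);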
>
> Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
> Signed-off-by: Jintack Lim <jintack.lim@linaro.org>
> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
> ---
>  arch/arm/include/asm/kvm_mmu.h       | 52 +++++++++++++++
>  arch/arm64/include/asm/kvm_emulate.h |  6 ++
>  arch/arm64/include/asm/kvm_nested.h  | 20 +++++-
>  arch/arm64/kvm/nested.c              | 41 ++++++++++++
>  virt/kvm/arm/mmio.c                  | 12 ++--
>  virt/kvm/arm/mmu.c                   | 99 ++++++++++++++++++++++------
>  6 files changed, 203 insertions(+), 27 deletions(-)
>
> diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
> index e6984b6da2ce..afabf1fd1d17 100644
> --- a/arch/arm/include/asm/kvm_mmu.h
> +++ b/arch/arm/include/asm/kvm_mmu.h
> @@ -423,6 +423,58 @@ static inline void kvm_set_ipa_limit(void) {}
>  static inline void kvm_init_s2_mmu(struct kvm_s2_mmu *mmu) {}
>  static inline void kvm_init_nested(struct kvm *kvm) {}
>  
> +struct kvm_s2_trans {};
> +static inline phys_addr_t kvm_s2_trans_output(struct kvm_s2_trans *trans)
> +{
> +	BUG();
> +}
> +
> +static inline unsigned long kvm_s2_trans_size(struct kvm_s2_trans *trans)
> +{
> +	BUG();
> +}
> +
> +static inline u32 kvm_s2_trans_esr(struct kvm_s2_trans *trans)
> +{
> +	BUG();
> +}
> +
> +static inline int kvm_walk_nested_s2(struct kvm_vcpu *vcpu, phys_addr_t ipa,
> +				     struct kvm_s2_trans *trans)
> +{
> +	BUG();
> +}
> +
> +static inline int kvm_s2_handle_perm_fault(struct kvm_vcpu *vcpu,
> +					   struct kvm_s2_trans *trans)
> +{
> +	BUG();
> +}
> +
> +static inline void kvm_inject_s2_fault(struct kvm_vcpu *vcpu, u32 esr)
> +{
> +	BUG();
> +}
> +
> +static inline bool kvm_s2_trans_readable(struct kvm_s2_trans *trans)
> +{
> +	BUG();
> +}
> +
> +static inline bool kvm_s2_trans_writable(struct kvm_s2_trans *trans)
> +{
> +	BUG();
> +}
> +
> +static inline void kvm_nested_s2_flush(struct kvm *kvm) {}
> +static inline void kvm_nested_s2_wp(struct kvm *kvm) {}
> +static inline void kvm_nested_s2_clear(struct kvm *kvm) {}
> +
> +static inline bool kvm_is_shadow_s2_fault(struct kvm_vcpu *vcpu)
> +{
> +	return false;
> +}
> +
>  static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
>  {
>  	struct kvm_vmid *vmid = &mmu->vmid;
> diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
> index 73d8c54a52c6..b49a47f3daa8 100644
> --- a/arch/arm64/include/asm/kvm_emulate.h
> +++ b/arch/arm64/include/asm/kvm_emulate.h
> @@ -606,4 +606,10 @@ static inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu)
>  	write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
>  }
>  
> +static inline bool kvm_is_shadow_s2_fault(struct kvm_vcpu *vcpu)
> +{
> +	return (vcpu->arch.hw_mmu != &vcpu->kvm->arch.mmu &&
> +		vcpu->arch.hw_mmu->nested_stage2_enabled);
> +}
> +
>  #endif /* __ARM64_KVM_EMULATE_H__ */
> diff --git a/arch/arm64/include/asm/kvm_nested.h b/arch/arm64/include/asm/kvm_nested.h
> index 686ba53379ab..052d46d96201 100644
> --- a/arch/arm64/include/asm/kvm_nested.h
> +++ b/arch/arm64/include/asm/kvm_nested.h
> @@ -19,7 +19,7 @@ extern void kvm_vcpu_put_hw_mmu(struct kvm_vcpu *vcpu);
>  
>  struct kvm_s2_trans {
>  	phys_addr_t output;
> -	phys_addr_t block_size;
> +	unsigned long block_size;

Shouldn't this be part of the previous patch, where we introduce the struct?

>  	bool writable;
>  	bool readable;
>  	int level;
> @@ -27,9 +27,27 @@ struct kvm_s2_trans {
>  	u64 upper_attr;
>  };
>  
> +static inline phys_addr_t kvm_s2_trans_output(struct kvm_s2_trans *trans)
> +{
> +	return trans->output;
> +}
> +
> +static inline unsigned long kvm_s2_trans_size(struct kvm_s2_trans *trans)
> +{
> +	return trans->block_size;
> +}
> +
> +static inline u32 kvm_s2_trans_esr(struct kvm_s2_trans *trans)
> +{
> +	return trans->esr;
> +}
> +
>  extern int kvm_walk_nested_s2(struct kvm_vcpu *vcpu, phys_addr_t gipa,
>  			      struct kvm_s2_trans *result);
>  
> +extern int kvm_s2_handle_perm_fault(struct kvm_vcpu *vcpu,
> +				    struct kvm_s2_trans *trans);
> +extern int kvm_inject_s2_fault(struct kvm_vcpu *vcpu, u64 esr_el2);
>  int handle_wfx_nested(struct kvm_vcpu *vcpu, bool is_wfe);
>  extern bool forward_traps(struct kvm_vcpu *vcpu, u64 control_bit);
>  extern bool forward_nv_traps(struct kvm_vcpu *vcpu);
> diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
> index 6a9bd68b769b..023027fa2db5 100644
> --- a/arch/arm64/kvm/nested.c
> +++ b/arch/arm64/kvm/nested.c
> @@ -300,6 +300,8 @@ int kvm_walk_nested_s2(struct kvm_vcpu *vcpu, phys_addr_t gipa,
>  	u64 vtcr = vcpu_read_sys_reg(vcpu, VTCR_EL2);
>  	struct s2_walk_info wi;
>  
> +	result->esr = 0;

I think this should be part of the previous patch, looks like a fix to me.
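
A minimal sketch of what it guards against, based on the caller added
later in this patch: nested_trans lives on the stack in
kvm_handle_guest_abort() and the esr is read unconditionally after the
walk, so any walk path that doesn't explicitly set trans->esr would
leave the check below acting on uninitialized stack memory:

	struct kvm_s2_trans nested_trans;	/* stack, not zeroed */

	ret = kvm_walk_nested_s2(vcpu, fault_ipa, &nested_trans);
	esr = kvm_s2_trans_esr(&nested_trans);	/* relies on the init */
	if (esr)
		kvm_inject_s2_fault(vcpu, esr);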

Thanks,
Alex
> +
>  	if (!nested_virt_in_use(vcpu))
>  		return 0;
>  
> @@ -415,6 +417,45 @@ void kvm_vcpu_put_hw_mmu(struct kvm_vcpu *vcpu)
>  	}
>  }
>  
> +/*
> + * Returns non-zero if permission fault is handled by injecting it to the next
> + * level hypervisor.
> + */
> +int kvm_s2_handle_perm_fault(struct kvm_vcpu *vcpu, struct kvm_s2_trans *trans)
> +{
> +	unsigned long fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
> +	bool forward_fault = false;
> +
> +	trans->esr = 0;
> +
> +	if (fault_status != FSC_PERM)
> +		return 0;
> +
> +	if (kvm_vcpu_trap_is_iabt(vcpu)) {
> +		forward_fault = (trans->upper_attr & PTE_S2_XN);
> +	} else {
> +		bool write_fault = kvm_is_write_fault(vcpu);
> +
> +		forward_fault = ((write_fault && !trans->writable) ||
> +				 (!write_fault && !trans->readable));
> +	}
> +
> +	if (forward_fault) {
> +		trans->esr = esr_s2_fault(vcpu, trans->level, ESR_ELx_FSC_PERM);
> +		return 1;
> +	}
> +
> +	return 0;
> +}
> +
> +int kvm_inject_s2_fault(struct kvm_vcpu *vcpu, u64 esr_el2)
> +{
> +	vcpu_write_sys_reg(vcpu, vcpu->arch.fault.far_el2, FAR_EL2);
> +	vcpu_write_sys_reg(vcpu, vcpu->arch.fault.hpfar_el2, HPFAR_EL2);
> +
> +	return kvm_inject_nested_sync(vcpu, esr_el2);
> +}
> +
>  /*
>   * Inject wfx to the virtual EL2 if this is not from the virtual EL2 and
>   * the virtual HCR_EL2.TWX is set. Otherwise, let the host hypervisor
> diff --git a/virt/kvm/arm/mmio.c b/virt/kvm/arm/mmio.c
> index a8a6a0c883f1..2b5de8388bf4 100644
> --- a/virt/kvm/arm/mmio.c
> +++ b/virt/kvm/arm/mmio.c
> @@ -142,7 +142,7 @@ static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len)
>  }
>  
>  int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
> -		 phys_addr_t fault_ipa)
> +		 phys_addr_t ipa)
>  {
>  	unsigned long data;
>  	unsigned long rt;
> @@ -171,22 +171,22 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
>  		data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),
>  					       len);
>  
> -		trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, &data);
> +		trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, ipa, &data);
>  		kvm_mmio_write_buf(data_buf, len, data);
>  
> -		ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len,
> +		ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, ipa, len,
>  				       data_buf);
>  	} else {
>  		trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, len,
> -			       fault_ipa, NULL);
> +			       ipa, NULL);
>  
> -		ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len,
> +		ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, ipa, len,
>  				      data_buf);
>  	}
>  
>  	/* Now prepare kvm_run for the potential return to userland. */
>  	run->mmio.is_write	= is_write;
> -	run->mmio.phys_addr	= fault_ipa;
> +	run->mmio.phys_addr	= ipa;
>  	run->mmio.len		= len;
>  
>  	if (!ret) {
> diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
> index faa61a81c8cc..3c7845832db8 100644
> --- a/virt/kvm/arm/mmu.c
> +++ b/virt/kvm/arm/mmu.c
> @@ -1384,7 +1384,8 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
>  	return ret;
>  }
>  
> -static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
> +static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap,
> +					phys_addr_t *fault_ipap)
>  {
>  	kvm_pfn_t pfn = *pfnp;
>  	gfn_t gfn = *ipap >> PAGE_SHIFT;
> @@ -1418,6 +1419,7 @@ static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
>  		mask = PTRS_PER_PMD - 1;
>  		VM_BUG_ON((gfn & mask) != (pfn & mask));
>  		if (pfn & mask) {
> +			*fault_ipap &= PMD_MASK;
>  			*ipap &= PMD_MASK;
>  			kvm_release_pfn_clean(pfn);
>  			pfn &= ~mask;
> @@ -1681,14 +1683,16 @@ static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
>  }
>  
>  static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
> -			  struct kvm_memory_slot *memslot, unsigned long hva,
> -			  unsigned long fault_status)
> +			  struct kvm_s2_trans *nested,
> +			  struct kvm_memory_slot *memslot,
> +			  unsigned long hva, unsigned long fault_status)
>  {
>  	int ret;
> -	bool write_fault, writable, force_pte = false;
> +	bool write_fault, writable;
>  	bool exec_fault, needs_exec;
>  	unsigned long mmu_seq;
> -	gfn_t gfn = fault_ipa >> PAGE_SHIFT;
> +	phys_addr_t ipa = fault_ipa;
> +	gfn_t gfn;
>  	struct kvm *kvm = vcpu->kvm;
>  	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
>  	struct vm_area_struct *vma;
> @@ -1697,6 +1701,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
>  	bool logging_active = memslot_is_logging(memslot);
>  	unsigned long vma_pagesize, flags = 0;
>  	struct kvm_s2_mmu *mmu = vcpu->arch.hw_mmu;
> +	unsigned long max_map_size = PUD_SIZE;
>  
>  	write_fault = kvm_is_write_fault(vcpu);
>  	exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
> @@ -1717,11 +1722,26 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
>  	}
>  
>  	vma_pagesize = vma_kernel_pagesize(vma);
> -	if (logging_active ||
> -	    !fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) {
> -		force_pte = true;
> -		vma_pagesize = PAGE_SIZE;
> +
> +	if (!fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize))
> +		max_map_size = PAGE_SIZE;
> +
> +	if (logging_active)
> +		max_map_size = PAGE_SIZE;
> +
> +	if (kvm_is_shadow_s2_fault(vcpu)) {
> +               ipa = kvm_s2_trans_output(nested);
> +
> +		/*
> +		 * If we're about to create a shadow stage 2 entry, then we
> +		 * can only create a block mapping if the guest stage 2 page
> +		 * table uses at least as big a mapping.
> +		 */
> +		max_map_size = min(kvm_s2_trans_size(nested), max_map_size);
>  	}
> +	gfn = ipa >> PAGE_SHIFT;
> +
> +	vma_pagesize = min(vma_pagesize, max_map_size);
>  
>  	/*
>  	 * The stage2 has a minimum of 2 level table (For arm64 see
> @@ -1731,8 +1751,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
>  	 * 3 levels, i.e, PMD is not folded.
>  	 */
>  	if (vma_pagesize == PMD_SIZE ||
> -	    (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm)))
> -		gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
> +	    (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm))) {
> +		gfn = (ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
> +	}
>  	up_read(&current->mm->mmap_sem);
>  
>  	/* We need minimum second+third level pages */
> @@ -1784,7 +1805,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
>  	if (mmu_notifier_retry(kvm, mmu_seq))
>  		goto out_unlock;
>  
> -	if (vma_pagesize == PAGE_SIZE && !force_pte) {
> +	if (vma_pagesize == PAGE_SIZE && max_map_size >= PMD_SIZE) {
>  		/*
>  		 * Only PMD_SIZE transparent hugepages(THP) are
>  		 * currently supported. This code will need to be
> @@ -1794,7 +1815,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
>  		 * aligned and that the block is contained within the memslot.
>  		 */
>  		if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE) &&
> -		    transparent_hugepage_adjust(&pfn, &fault_ipa))
> +		    transparent_hugepage_adjust(&pfn, &ipa, &fault_ipa))
>  			vma_pagesize = PMD_SIZE;
>  	}
>  
> @@ -1919,8 +1940,10 @@ static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
>  int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
>  {
>  	unsigned long fault_status;
> -	phys_addr_t fault_ipa;
> +	phys_addr_t fault_ipa; /* The address we faulted on */
> +	phys_addr_t ipa; /* Always the IPA in the L1 guest phys space */
>  	struct kvm_memory_slot *memslot;
> +	struct kvm_s2_trans nested_trans;
>  	unsigned long hva;
>  	bool is_iabt, write_fault, writable;
>  	gfn_t gfn;
> @@ -1928,7 +1951,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
>  
>  	fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
>  
> -	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
> +	ipa = fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
>  	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
>  
>  	/* Synchronous External Abort? */
> @@ -1952,6 +1975,12 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
>  	/* Check the stage-2 fault is trans. fault or write fault */
>  	if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
>  	    fault_status != FSC_ACCESS) {
> +		/*
> +		 * We must never see an address size fault on shadow stage 2
> +		 * page table walk, because we would have injected an addr
> +		 * size fault when we walked the nested s2 page and not
> +		 * create the shadow entry.
> +		 */
>  		kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
>  			kvm_vcpu_trap_get_class(vcpu),
>  			(unsigned long)kvm_vcpu_trap_get_fault(vcpu),
> @@ -1961,7 +1990,36 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
>  
>  	idx = srcu_read_lock(&vcpu->kvm->srcu);
>  
> -	gfn = fault_ipa >> PAGE_SHIFT;
> +	/*
> +	 * We may have faulted on a shadow stage 2 page table if we are
> +	 * running a nested guest.  In this case, we have to resolve the L2
> +	 * IPA to the L1 IPA first, before knowing what kind of memory should
> +	 * back the L1 IPA.
> +	 *
> +	 * If the shadow stage 2 page table walk faults, then we simply inject
> +	 * this to the guest and carry on.
> +	 */
> +	if (kvm_is_shadow_s2_fault(vcpu)) {
> +		u32 esr;
> +
> +		ret = kvm_walk_nested_s2(vcpu, fault_ipa, &nested_trans);
> +		esr = kvm_s2_trans_esr(&nested_trans);
> +		if (esr)
> +			kvm_inject_s2_fault(vcpu, esr);
> +		if (ret)
> +			goto out_unlock;
> +
> +		ret = kvm_s2_handle_perm_fault(vcpu, &nested_trans);
> +		esr = kvm_s2_trans_esr(&nested_trans);
> +		if (esr)
> +			kvm_inject_s2_fault(vcpu, esr);
> +		if (ret)
> +			goto out_unlock;
> +
> +		ipa = kvm_s2_trans_output(&nested_trans);
> +	}
> +
> +	gfn = ipa >> PAGE_SHIFT;
>  	memslot = gfn_to_memslot(vcpu->kvm, gfn);
>  	hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
>  	write_fault = kvm_is_write_fault(vcpu);
> @@ -1995,13 +2053,13 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
>  		 * faulting VA. This is always 12 bits, irrespective
>  		 * of the page size.
>  		 */
> -		fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
> -		ret = io_mem_abort(vcpu, run, fault_ipa);
> +		ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
> +		ret = io_mem_abort(vcpu, run, ipa);
>  		goto out_unlock;
>  	}
>  
>  	/* Userspace should not be able to register out-of-bounds IPAs */
> -	VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm));
> +	VM_BUG_ON(ipa >= kvm_phys_size(vcpu->kvm));
>  
>  	if (fault_status == FSC_ACCESS) {
>  		handle_access_fault(vcpu, fault_ipa);
> @@ -2009,7 +2067,8 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
>  		goto out_unlock;
>  	}
>  
> -	ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
> +	ret = user_mem_abort(vcpu, fault_ipa, &nested_trans,
> +			     memslot, hva, fault_status);
>  	if (ret == 0)
>  		ret = 1;
>  out_unlock:

  reply	other threads:[~2019-07-05 14:28 UTC|newest]

Thread overview: 531+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2019-06-21  9:37 [PATCH 00/59] KVM: arm64: ARMv8.3 Nested Virtualization support Marc Zyngier
2019-06-21  9:37 ` Marc Zyngier
2019-06-21  9:37 ` Marc Zyngier
2019-06-21  9:37 ` [PATCH 01/59] KVM: arm64: Migrate _elx sysreg accessors to msr_s/mrs_s Marc Zyngier
2019-06-21  9:37   ` Marc Zyngier
2019-06-21  9:37   ` Marc Zyngier
2019-06-24 11:16   ` Dave Martin
2019-06-24 11:16     ` Dave Martin
2019-06-24 11:16     ` Dave Martin
2019-06-24 12:59   ` Alexandru Elisei
2019-06-24 12:59     ` Alexandru Elisei
2019-06-24 12:59     ` Alexandru Elisei
2019-07-03 12:32     ` Marc Zyngier
2019-07-03 12:32       ` Marc Zyngier
2019-07-03 12:32       ` Marc Zyngier
2019-06-21  9:37 ` [PATCH 02/59] KVM: arm64: Move __load_guest_stage2 to kvm_mmu.h Marc Zyngier
2019-06-21  9:37   ` Marc Zyngier
2019-06-21  9:37   ` Marc Zyngier
2019-06-24 11:19   ` Dave Martin
2019-06-24 11:19     ` Dave Martin
2019-06-24 11:19     ` Dave Martin
2019-07-03  9:30     ` Marc Zyngier
2019-07-03  9:30       ` Marc Zyngier
2019-07-03  9:30       ` Marc Zyngier
2019-07-03 16:13       ` Dave Martin
2019-07-03 16:13         ` Dave Martin
2019-07-03 16:13         ` Dave Martin
2019-06-21  9:37 ` [PATCH 03/59] arm64: Add ARM64_HAS_NESTED_VIRT cpufeature Marc Zyngier
2019-06-21  9:37   ` Marc Zyngier
2019-06-21  9:37   ` Marc Zyngier
2019-06-21 13:08   ` Julien Thierry
2019-06-21 13:08     ` Julien Thierry
2019-06-21 13:08     ` Julien Thierry
2019-06-21 13:22     ` Marc Zyngier
2019-06-21 13:22       ` Marc Zyngier
2019-06-21 13:22       ` Marc Zyngier
2019-06-21 13:44   ` Suzuki K Poulose
2019-06-21 13:44     ` Suzuki K Poulose
2019-06-21 13:44     ` Suzuki K Poulose
2019-06-24 11:24   ` Dave Martin
2019-06-24 11:24     ` Dave Martin
2019-06-24 11:24     ` Dave Martin
2019-06-21  9:37 ` [PATCH 04/59] KVM: arm64: nv: Introduce nested virtualization VCPU feature Marc Zyngier
2019-06-21  9:37   ` Marc Zyngier
2019-06-21  9:37   ` Marc Zyngier
2019-06-21 13:08   ` Julien Thierry
2019-06-21 13:08     ` Julien Thierry
2019-06-21 13:08     ` Julien Thierry
2019-06-24 11:28   ` Dave Martin
2019-06-24 11:28     ` Dave Martin
2019-06-24 11:28     ` Dave Martin
2019-07-03 11:53     ` Marc Zyngier
2019-07-03 11:53       ` Marc Zyngier
2019-07-03 11:53       ` Marc Zyngier
2019-07-03 16:27       ` Dave Martin
2019-07-03 16:27         ` Dave Martin
2019-07-03 16:27         ` Dave Martin
2019-06-24 11:43   ` Dave Martin
2019-06-24 11:43     ` Dave Martin
2019-06-24 11:43     ` Dave Martin
2019-07-03 11:56     ` Marc Zyngier
2019-07-03 11:56       ` Marc Zyngier
2019-07-03 11:56       ` Marc Zyngier
2019-07-03 16:24       ` Dave Martin
2019-07-03 16:24         ` Dave Martin
2019-07-03 16:24         ` Dave Martin
2019-06-21  9:37 ` [PATCH 05/59] KVM: arm64: nv: Reset VCPU to EL2 registers if VCPU nested virt is set Marc Zyngier
2019-06-21  9:37   ` Marc Zyngier
2019-06-21  9:37   ` Marc Zyngier
2019-06-24 10:19   ` Suzuki K Poulose
2019-06-24 10:19     ` Suzuki K Poulose
2019-06-24 10:19     ` Suzuki K Poulose
2019-06-24 11:38   ` Dave Martin
2019-06-24 11:38     ` Dave Martin
2019-06-24 11:38     ` Dave Martin
2019-06-21  9:37 ` [PATCH 06/59] KVM: arm64: nv: Allow userspace to set PSR_MODE_EL2x Marc Zyngier
2019-06-21  9:37   ` Marc Zyngier
2019-06-21  9:37   ` Marc Zyngier
2019-06-21 13:24   ` Julien Thierry
2019-06-21 13:24     ` Julien Thierry
2019-06-21 13:24     ` Julien Thierry
2019-06-21 13:50     ` Marc Zyngier
2019-06-21 13:50       ` Marc Zyngier
2019-06-21 13:50       ` Marc Zyngier
2019-06-24 12:48       ` Dave Martin
2019-06-24 12:48         ` Dave Martin
2019-06-24 12:48         ` Dave Martin
2019-07-03  9:21         ` Marc Zyngier
2019-07-03  9:21           ` Marc Zyngier
2019-07-03  9:21           ` Marc Zyngier
2019-07-04 10:00           ` Dave Martin
2019-07-04 10:00             ` Dave Martin
2019-07-04 10:00             ` Dave Martin
2019-06-21  9:37 ` [PATCH 07/59] KVM: arm64: nv: Add EL2 system registers to vcpu context Marc Zyngier
2019-06-24 12:54   ` Dave Martin
2019-07-03 12:20     ` Marc Zyngier
2019-07-03 16:31       ` Dave Martin
2019-06-24 15:47   ` Alexandru Elisei
2019-07-03 13:20     ` Marc Zyngier
2019-07-03 16:01       ` Marc Zyngier
2019-07-01 16:36   ` Suzuki K Poulose
2019-06-21  9:37 ` [PATCH 08/59] KVM: arm64: nv: Reset VMPIDR_EL2 and VPIDR_EL2 to sane values Marc Zyngier
2019-06-24 12:59   ` Dave Martin
2019-06-21  9:37 ` [PATCH 09/59] KVM: arm64: nv: Add nested virt VCPU primitives for vEL2 VCPU state Marc Zyngier
2019-06-24 13:08   ` Dave Martin
2019-06-21  9:37 ` [PATCH 10/59] KVM: arm64: nv: Support virtual EL2 exceptions Marc Zyngier
2019-07-08 13:56   ` Steven Price
2019-06-21  9:37 ` [PATCH 11/59] KVM: arm64: nv: Inject HVC exceptions to the virtual EL2 Marc Zyngier
2019-06-25 13:13   ` Alexandru Elisei
2019-07-03 14:16     ` Marc Zyngier
2019-07-30 14:08     ` Alexandru Elisei
2019-06-21  9:37 ` [PATCH 12/59] KVM: arm64: nv: Handle trapped ERET from " Marc Zyngier
2019-07-02 12:00   ` Alexandru Elisei
2019-06-21  9:37 ` [PATCH 13/59] KVM: arm64: nv: Handle virtual EL2 registers in vcpu_read/write_sys_reg() Marc Zyngier
2019-06-24 12:42   ` Julien Thierry
2019-06-25 14:02     ` Alexandru Elisei
2019-07-03 12:15     ` Marc Zyngier
2019-07-03 15:21       ` Julien Thierry
2019-06-25 15:18   ` Alexandru Elisei
2019-07-01  9:58     ` Alexandru Elisei
2019-07-03 15:59     ` Marc Zyngier
2019-07-03 16:32       ` Alexandru Elisei
2019-07-04 14:39         ` Marc Zyngier
2019-06-26 15:04   ` Alexandru Elisei
2019-07-04 15:05     ` Marc Zyngier
2019-07-01 12:10   ` Alexandru Elisei
2019-06-21  9:37 ` [PATCH 14/59] KVM: arm64: nv: Handle SPSR_EL2 specially Marc Zyngier
2019-06-21  9:37 ` [PATCH 15/59] KVM: arm64: nv: Refactor vcpu_{read,write}_sys_reg Marc Zyngier
2019-06-24 15:07   ` Julien Thierry
2019-07-03 13:09     ` Marc Zyngier
2019-06-27  9:21   ` Alexandru Elisei
2019-07-04 15:15     ` Marc Zyngier
2019-06-21  9:38 ` [PATCH 16/59] KVM: arm64: nv: Save/Restore vEL2 sysregs Marc Zyngier
2019-06-25  8:48   ` Julien Thierry
2019-07-03 13:42     ` Marc Zyngier
2019-07-01 12:09   ` Alexandru Elisei
2019-08-21 11:57   ` Alexandru Elisei
2019-06-21  9:38 ` [PATCH 17/59] KVM: arm64: nv: Emulate PSTATE.M for a guest hypervisor Marc Zyngier
2019-06-21  9:38 ` [PATCH 18/59] KVM: arm64: nv: Trap EL1 VM register accesses in virtual EL2 Marc Zyngier
2019-07-01 16:12   ` Alexandru Elisei
2019-06-21  9:38 ` [PATCH 19/59] KVM: arm64: nv: Trap SPSR_EL1, ELR_EL1 and VBAR_EL1 from " Marc Zyngier
2019-06-21  9:38 ` [PATCH 20/59] KVM: arm64: nv: Trap CPACR_EL1 access in " Marc Zyngier
2019-07-01 16:40   ` Alexandru Elisei
2019-06-21  9:38 ` [PATCH 21/59] KVM: arm64: nv: Set a handler for the system instruction traps Marc Zyngier
2019-06-25 12:55   ` Julien Thierry
2019-07-03 14:15     ` Marc Zyngier
2019-06-21  9:38 ` [PATCH 22/59] KVM: arm64: nv: Handle PSCI call via smc from the guest Marc Zyngier
2019-06-21  9:38 ` [PATCH 23/59] KVM: arm64: nv: Respect virtual HCR_EL2.TWX setting Marc Zyngier
2019-06-25 14:19   ` Julien Thierry
2019-07-02 12:54     ` Alexandru Elisei
2019-07-03 14:18     ` Marc Zyngier
2019-06-21  9:38 ` [PATCH 24/59] KVM: arm64: nv: Respect virtual CPTR_EL2.TFP setting Marc Zyngier
2019-06-21  9:38 ` [PATCH 25/59] KVM: arm64: nv: Don't expose SVE to nested guests Marc Zyngier
2019-06-21  9:38 ` [PATCH 26/59] KVM: arm64: nv: Respect the virtual HCR_EL2.NV bit setting Marc Zyngier
2019-06-26  5:31   ` Julien Thierry
2019-07-03 16:31     ` Marc Zyngier
2019-06-21  9:38 ` [PATCH 27/59] KVM: arm64: nv: Respect virtual HCR_EL2.TVM and TRVM settings Marc Zyngier
2019-06-26  6:55   ` Julien Thierry
2019-07-04 14:57     ` Marc Zyngier
2019-06-21  9:38 ` [PATCH 28/59] KVM: arm64: nv: Respect the virtual HCR_EL2.NV1 bit setting Marc Zyngier
2019-06-26  7:23   ` Julien Thierry
2019-07-02 16:32   ` Alexandru Elisei
2019-07-03  9:10     ` Alexandru Elisei
2019-06-21  9:38 ` [PATCH 29/59] KVM: arm64: nv: Emulate EL12 register accesses from the virtual EL2 Marc Zyngier
2019-07-03  9:16   ` Alexandru Elisei
2019-06-21  9:38 ` [PATCH 30/59] KVM: arm64: nv: Configure HCR_EL2 for nested virtualization Marc Zyngier
2019-06-21  9:38 ` [PATCH 31/59] KVM: arm64: nv: Only toggle cache for virtual EL2 when SCTLR_EL2 changes Marc Zyngier
2019-06-21  9:38 ` [PATCH 32/59] KVM: arm64: nv: Hide RAS from nested guests Marc Zyngier
2019-07-03 13:59   ` Alexandru Elisei
2019-06-21  9:38 ` [PATCH 33/59] KVM: arm64: nv: Pretend we only support larger-than-host page sizes Marc Zyngier
2019-07-03 14:13   ` Alexandru Elisei
2019-06-21  9:38 ` [PATCH 34/59] KVM: arm/arm64: nv: Factor out stage 2 page table data from struct kvm Marc Zyngier
2019-07-03 15:52   ` Alexandru Elisei
2019-06-21  9:38 ` [PATCH 35/59] KVM: arm/arm64: nv: Support multiple nested stage 2 mmu structures Marc Zyngier
2019-06-25 12:19   ` Alexandru Elisei
2019-07-03 13:47     ` Marc Zyngier
2019-06-27 13:15   ` Julien Thierry
2019-07-04 15:51   ` Alexandru Elisei
2020-01-05 11:35     ` Marc Zyngier
2020-01-06 16:31       ` Alexandru Elisei
2019-06-21  9:38 ` [PATCH 36/59] KVM: arm64: nv: Implement nested Stage-2 page table walk logic Marc Zyngier
2019-06-21  9:38 ` [PATCH 37/59] KVM: arm64: nv: Handle shadow stage 2 page faults Marc Zyngier
2019-07-05 14:28   ` Alexandru Elisei [this message]
2019-06-21  9:38 ` [PATCH 38/59] KVM: arm64: nv: Unmap/flush shadow stage 2 page tables Marc Zyngier
2019-07-01  8:03   ` Julien Thierry
2019-06-21  9:38 ` [PATCH 39/59] KVM: arm64: nv: Move last_vcpu_ran to be per s2 mmu Marc Zyngier
2019-07-01  9:10   ` Julien Thierry
2019-07-05 15:28   ` Alexandru Elisei
2019-06-21  9:38 ` [PATCH 40/59] KVM: arm64: nv: Don't always start an S2 MMU search from the beginning Marc Zyngier
2019-07-09  9:59   ` Alexandru Elisei
2019-06-21  9:38 ` [PATCH 41/59] KVM: arm64: nv: Introduce sys_reg_desc.forward_trap Marc Zyngier
2019-06-21  9:38 ` [PATCH 42/59] KVM: arm64: nv: Rework the system instruction emulation framework Marc Zyngier
2019-06-21  9:38 ` [PATCH 43/59] KVM: arm64: nv: Trap and emulate AT instructions from virtual EL2 Marc Zyngier
2019-07-01 15:45   ` Julien Thierry
2019-07-09 13:20   ` Alexandru Elisei
2019-07-18 12:13     ` Tomasz Nowicki
2019-07-18 12:36       ` Alexandru Elisei
2019-07-18 12:56         ` Alexandru Elisei
2019-07-18 12:59         ` Tomasz Nowicki
2019-07-24 10:25   ` Tomasz Nowicki
2019-07-24 12:39     ` Marc Zyngier
2019-07-24 13:56       ` Tomasz Nowicki
2019-06-21  9:38 ` [PATCH 44/59] KVM: arm64: nv: Trap and emulate TLBI " Marc Zyngier
2019-07-02 12:37   ` Julien Thierry
2019-07-10 10:15   ` Alexandru Elisei
2019-06-21  9:38 ` [PATCH 45/59] KVM: arm64: nv: Handle traps for timer _EL02 and _EL2 sysregs accessors Marc Zyngier
2019-06-21  9:38 ` [PATCH 46/59] KVM: arm64: nv: arch_timer: Support hyp timer emulation Marc Zyngier
2019-07-10 16:23   ` Alexandru Elisei
2019-06-21  9:38 ` [PATCH 47/59] KVM: arm64: nv: Propagate CNTVOFF_EL2 to the virtual EL1 timer Marc Zyngier
2019-08-08  9:34   ` Alexandru Elisei
2019-06-21  9:38 ` [PATCH 48/59] KVM: arm64: nv: Load timer before the GIC Marc Zyngier
2019-07-11 13:17   ` Alexandru Elisei
2019-06-21  9:38 ` [PATCH 49/59] KVM: arm64: nv: vgic-v3: Take cpu_if pointer directly instead of vcpu Marc Zyngier
2019-06-21  9:38 ` [PATCH 50/59] KVM: arm64: nv: Nested GICv3 Support Marc Zyngier
2019-07-16 11:41   ` Alexandru Elisei
2019-06-21  9:38 ` [PATCH 51/59] KVM: arm64: nv: vgic: Emulate the HW bit in software Marc Zyngier
2019-06-21  9:38 ` [PATCH 52/59] KVM: arm64: nv: vgic: Allow userland to set VGIC maintenance IRQ Marc Zyngier
2019-07-04  7:38   ` Julien Thierry
2019-07-04  9:01     ` Andre Przywara
2019-07-04  9:04       ` Julien Thierry
2019-06-21  9:38 ` [PATCH 53/59] KVM: arm64: nv: Implement maintenance interrupt forwarding Marc Zyngier
2019-07-04  8:06   ` Julien Thierry
2019-07-16 16:35   ` Alexandru Elisei
2019-06-21  9:38 ` [PATCH 54/59] KVM: arm64: nv: Add nested GICv3 tracepoints Marc Zyngier
2019-06-21  9:38 ` [PATCH 55/59] arm64: KVM: nv: Add handling of EL2-specific timer registers Marc Zyngier
2019-07-11 12:35   ` Alexandru Elisei
2019-07-17 10:19   ` Alexandru Elisei
2019-06-21  9:38 ` [PATCH 56/59] arm64: KVM: nv: Honor SCTLR_EL2.SPAN on entering vEL2 Marc Zyngier
2019-06-21  9:38 ` [PATCH 57/59] arm64: KVM: nv: Handle SCTLR_EL2 RES0/RES1 bits Marc Zyngier
2019-06-21  9:38 ` [PATCH 58/59] arm64: KVM: nv: Restrict S2 RD/WR permissions to match the guest's Marc Zyngier
2019-06-21  9:38 ` [PATCH 59/59] arm64: KVM: nv: Allow userspace to request KVM_ARM_VCPU_NESTED_VIRT Marc Zyngier
2019-06-21  9:57 ` [PATCH 00/59] KVM: arm64: ARMv8.3 Nested Virtualization support Itaru Kitayama
2019-06-21 11:21   ` Marc Zyngier
2019-08-02 10:11 ` Alexandru Elisei
2019-08-02 10:30   ` Andrew Jones
2019-08-09 10:01   ` Alexandru Elisei
2019-08-09 11:44     ` Andrew Jones
2019-08-09 12:00       ` Alexandru Elisei
2019-08-09 13:00         ` Andrew Jones
2019-08-22 11:57     ` Alexandru Elisei
2019-08-22 15:32       ` Alexandru Elisei

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox
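
  For example (a minimal sketch, assuming the mbox was saved locally as
  "thread.mbox", a placeholder file name), a client such as mutt can
  open the saved file directly so you can reply-to-all from inside it:

    # open the saved thread; use mutt's group-reply to answer everyone
    mutt -f thread.mbox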

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=de40863e-1688-c028-329c-0cf94149f0b3@arm.com \
    --to=alexandru.elisei@arm.com \
    --cc=Dave.Martin@arm.com \
    --cc=andre.przywara@arm.com \
    --cc=kvm@vger.kernel.org \
    --cc=kvmarm@lists.cs.columbia.edu \
    --cc=linux-arm-kernel@lists.infradead.org \
    --cc=marc.zyngier@arm.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link

Be sure your reply has a Subject: header at the top and a blank line
before the message body.