From: Marc Zyngier <maz@kernel.org>
To: linux-arm-kernel@lists.infradead.org,
	kvmarm@lists.cs.columbia.edu, kvm@vger.kernel.org
Cc: Andre Przywara <andre.przywara@arm.com>,
	Christoffer Dall <christoffer.dall@arm.com>,
	Jintack Lim <jintack@cs.columbia.edu>,
	Haibo Xu <haibo.xu@linaro.org>,
	Ganapatrao Kulkarni <gankulkarni@os.amperecomputing.com>,
	Chase Conklin <chase.conklin@arm.com>,
	"Russell King (Oracle)" <linux@armlinux.org.uk>,
	James Morse <james.morse@arm.com>,
	Suzuki K Poulose <suzuki.poulose@arm.com>,
	Alexandru Elisei <alexandru.elisei@arm.com>,
	karl.heubaum@oracle.com, mihai.carabas@oracle.com,
	miguel.luis@oracle.com, kernel-team@android.com
Subject: [PATCH v6 36/64] KVM: arm64: nv: Handle shadow stage 2 page faults
Date: Fri, 28 Jan 2022 12:18:44 +0000	[thread overview]
Message-ID: <20220128121912.509006-37-maz@kernel.org> (raw)
In-Reply-To: <20220128121912.509006-1-maz@kernel.org>

If we are faulting on a shadow stage 2 translation, we first walk the
guest hypervisor's stage 2 page table to see if it has a mapping. If
not, we inject a stage 2 page fault to the virtual EL2. Otherwise, we
create a mapping in the shadow stage 2 page table.

Note that we have to deal with two IPAs when we get a shadow stage 2
page fault: the address we faulted on, which lives in the L2 guest
physical address space, and the address obtained from the guest
hypervisor's stage-2 page table walk, which lives in the L1 guest
physical address space. To differentiate them, we rename variables so
that fault_ipa is used for the former and ipa for the latter.
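
In rough pseudo-C, the resulting flow in kvm_handle_guest_abort() is
as follows (a simplified sketch of the hunk below; the permission
fault check and SRCU locking are elided):

	if (kvm_is_shadow_s2_fault(vcpu)) {
		/* fault_ipa is an L2 IPA: walk the guest hypervisor's S2 */
		ret = kvm_walk_nested_s2(vcpu, fault_ipa, &nested_trans);
		esr = kvm_s2_trans_esr(&nested_trans);
		if (esr)
			kvm_inject_s2_fault(vcpu, esr); /* reflect to vEL2 */
		if (ret)
			goto out_unlock;

		/* the walk succeeded: ipa is the L1 IPA backing the access */
		ipa = kvm_s2_trans_output(&nested_trans);
	}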

Co-developed-by: Christoffer Dall <christoffer.dall@linaro.org>
Co-developed-by: Jintack Lim <jintack.lim@linaro.org>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Jintack Lim <jintack.lim@linaro.org>
[maz: rewrote this multiple times...]
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/include/asm/kvm_emulate.h |   6 ++
 arch/arm64/include/asm/kvm_nested.h  |  18 +++++
 arch/arm64/kvm/mmu.c                 | 102 +++++++++++++++++++++++----
 arch/arm64/kvm/nested.c              |  48 +++++++++++++
 4 files changed, 159 insertions(+), 15 deletions(-)
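
A worked example of the new block-size clamping in user_mem_abort()
(hypothetical values, assuming a 4KiB host page size): if the host VMA
is backed by a 2MiB THP but the guest hypervisor mapped the L1 IPA
with 4KiB pages, then

	max_map_size = min(kvm_s2_trans_size(nested), PUD_SIZE)
		     = min(PAGE_SIZE, PUD_SIZE) = PAGE_SIZE;
	vma_pagesize = min(vma_pagesize, max_map_size) = PAGE_SIZE;

so the shadow stage 2 never maps at a larger granularity than the
guest's own stage 2, which would otherwise give the L2 guest access to
memory its hypervisor never mapped for it.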

diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index ff8980a39ee8..dc6eeb0cc8a9 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -602,4 +602,10 @@ static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
 	return test_bit(feature, vcpu->arch.features);
 }
 
+static inline bool kvm_is_shadow_s2_fault(struct kvm_vcpu *vcpu)
+{
+	return (vcpu->arch.hw_mmu != &vcpu->kvm->arch.mmu &&
+		vcpu->arch.hw_mmu->nested_stage2_enabled);
+}
+
 #endif /* __ARM64_KVM_EMULATE_H__ */
diff --git a/arch/arm64/include/asm/kvm_nested.h b/arch/arm64/include/asm/kvm_nested.h
index 48cf288ea238..4fad4d3848ce 100644
--- a/arch/arm64/include/asm/kvm_nested.h
+++ b/arch/arm64/include/asm/kvm_nested.h
@@ -82,9 +82,27 @@ struct kvm_s2_trans {
 	u64 upper_attr;
 };
 
+static inline phys_addr_t kvm_s2_trans_output(struct kvm_s2_trans *trans)
+{
+	return trans->output;
+}
+
+static inline unsigned long kvm_s2_trans_size(struct kvm_s2_trans *trans)
+{
+	return trans->block_size;
+}
+
+static inline u32 kvm_s2_trans_esr(struct kvm_s2_trans *trans)
+{
+	return trans->esr;
+}
+
 extern int kvm_walk_nested_s2(struct kvm_vcpu *vcpu, phys_addr_t gipa,
 			      struct kvm_s2_trans *result);
 
+extern int kvm_s2_handle_perm_fault(struct kvm_vcpu *vcpu,
+				    struct kvm_s2_trans *trans);
+extern int kvm_inject_s2_fault(struct kvm_vcpu *vcpu, u64 esr_el2);
 int handle_wfx_nested(struct kvm_vcpu *vcpu, bool is_wfe);
 extern bool __forward_traps(struct kvm_vcpu *vcpu, unsigned int reg,
 			    u64 control_bit);
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 55525fd5743d..36f7ecb4f81b 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -969,7 +969,7 @@ static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
 static unsigned long
 transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot,
 			    unsigned long hva, kvm_pfn_t *pfnp,
-			    phys_addr_t *ipap)
+			    phys_addr_t *ipap, phys_addr_t *fault_ipap)
 {
 	kvm_pfn_t pfn = *pfnp;
 
@@ -998,6 +998,7 @@ transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot,
 		 * to PG_head and switch the pfn from a tail page to the head
 		 * page accordingly.
 		 */
+		*fault_ipap &= PMD_MASK;
 		*ipap &= PMD_MASK;
 		kvm_release_pfn_clean(pfn);
 		pfn &= ~(PTRS_PER_PMD - 1);
@@ -1080,15 +1081,17 @@ static int sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,
 }
 
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
-			  struct kvm_memory_slot *memslot, unsigned long hva,
-			  unsigned long fault_status)
+			  struct kvm_s2_trans *nested,
+			  struct kvm_memory_slot *memslot,
+			  unsigned long hva, unsigned long fault_status)
 {
 	int ret = 0;
-	bool write_fault, writable, force_pte = false;
+	bool write_fault, writable;
 	bool exec_fault;
 	bool device = false;
 	bool shared;
 	unsigned long mmu_seq;
+	phys_addr_t ipa = fault_ipa;
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
 	struct vm_area_struct *vma;
@@ -1100,6 +1103,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	unsigned long vma_pagesize, fault_granule;
 	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
 	struct kvm_pgtable *pgt;
+	unsigned long max_map_size = PUD_SIZE;
 
 	fault_granule = 1UL << ARM64_HW_PGTABLE_LEVEL_SHIFT(fault_level);
 	write_fault = kvm_is_write_fault(vcpu);
@@ -1128,7 +1132,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	 * memslots.
 	 */
 	if (logging_active) {
-		force_pte = true;
+		max_map_size = vma_pagesize = PAGE_SIZE;
 		vma_shift = PAGE_SHIFT;
 	} else {
 		vma_shift = get_vma_page_shift(vma, hva);
@@ -1152,7 +1156,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		fallthrough;
 	case CONT_PTE_SHIFT:
 		vma_shift = PAGE_SHIFT;
-		force_pte = true;
+		max_map_size = PAGE_SIZE;
 		fallthrough;
 	case PAGE_SHIFT:
 		break;
@@ -1161,10 +1165,25 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	}
 
 	vma_pagesize = 1UL << vma_shift;
+
+	if (kvm_is_shadow_s2_fault(vcpu)) {
+		ipa = kvm_s2_trans_output(nested);
+
+		/*
+		 * If we're about to create a shadow stage 2 entry, then we
+		 * can only create a block mapping if the guest stage 2 page
+		 * table uses at least as big a mapping.
+		 */
+		max_map_size = min(kvm_s2_trans_size(nested), max_map_size);
+	}
+
+	vma_pagesize = min(vma_pagesize, max_map_size);
+
 	if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE)
 		fault_ipa &= ~(vma_pagesize - 1);
 
-	gfn = fault_ipa >> PAGE_SHIFT;
+	gfn = ipa >> PAGE_SHIFT;
+
 	mmap_read_unlock(current->mm);
 
 	/*
@@ -1237,12 +1256,14 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	 * If we are not forced to use page mapping, check if we are
 	 * backed by a THP and thus use block mapping if possible.
 	 */
-	if (vma_pagesize == PAGE_SIZE && !(force_pte || device)) {
+	if (vma_pagesize == PAGE_SIZE &&
+	    !(max_map_size == PAGE_SIZE || device)) {
 		if (fault_status == FSC_PERM && fault_granule > PAGE_SIZE)
 			vma_pagesize = fault_granule;
 		else
 			vma_pagesize = transparent_hugepage_adjust(kvm, memslot,
 								   hva, &pfn,
+								   &ipa,
 								   &fault_ipa);
 	}
 
@@ -1326,8 +1347,10 @@ static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
 {
 	unsigned long fault_status;
-	phys_addr_t fault_ipa;
+	phys_addr_t fault_ipa; /* The address we faulted on */
+	phys_addr_t ipa; /* Always the IPA in the L1 guest phys space */
 	struct kvm_memory_slot *memslot;
+	struct kvm_s2_trans nested_trans;
 	unsigned long hva;
 	bool is_iabt, write_fault, writable;
 	gfn_t gfn;
@@ -1335,7 +1358,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
 
 	fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
 
-	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
+	ipa = fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
 	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
 
 	/* Synchronous External Abort? */
@@ -1356,6 +1379,12 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
 	/* Check the stage-2 fault is trans. fault or write fault */
 	if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
 	    fault_status != FSC_ACCESS) {
+		/*
+		 * We must never see an address size fault on a shadow
+		 * stage 2 page table walk, because we would have injected
+		 * an address size fault when we walked the nested s2 page
+		 * table and never created the shadow entry.
+		 */
 		kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
 			kvm_vcpu_trap_get_class(vcpu),
 			(unsigned long)kvm_vcpu_trap_get_fault(vcpu),
@@ -1365,7 +1394,49 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
 
 	idx = srcu_read_lock(&vcpu->kvm->srcu);
 
-	gfn = fault_ipa >> PAGE_SHIFT;
+	/*
+	 * We may have faulted on a shadow stage 2 page table if we are
+	 * running a nested guest.  In this case, we have to resolve the L2
+	 * IPA to the L1 IPA first, before knowing what kind of memory should
+	 * back the L1 IPA.
+	 *
+	 * If the shadow stage 2 page table walk faults, then we simply inject
+	 * this to the guest and carry on.
+	 */
+	if (kvm_is_shadow_s2_fault(vcpu)) {
+		u32 esr;
+
+		ret = kvm_walk_nested_s2(vcpu, fault_ipa, &nested_trans);
+		esr = kvm_s2_trans_esr(&nested_trans);
+		if (esr)
+			kvm_inject_s2_fault(vcpu, esr);
+		if (ret)
+			goto out_unlock;
+
+		ret = kvm_s2_handle_perm_fault(vcpu, &nested_trans);
+		esr = kvm_s2_trans_esr(&nested_trans);
+		if (esr)
+			kvm_inject_s2_fault(vcpu, esr);
+		if (ret)
+			goto out_unlock;
+
+		ipa = kvm_s2_trans_output(&nested_trans);
+	} else {
+		nested_trans = (struct kvm_s2_trans) {
+			/*
+			 * Default to RWX so that we don't filter
+			 * anything while evaluating the permissions.
+			 */
+			.writable	= true,
+			.readable	= true,
+			.upper_attr	= 0,
+			.output		= ipa,
+			.level		= kvm_vcpu_trap_get_fault_level(vcpu),
+			.esr		= kvm_vcpu_get_esr(vcpu),
+		};
+	}
+
+	gfn = ipa >> PAGE_SHIFT;
 	memslot = gfn_to_memslot(vcpu->kvm, gfn);
 	hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
 	write_fault = kvm_is_write_fault(vcpu);
@@ -1409,13 +1480,13 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
 		 * faulting VA. This is always 12 bits, irrespective
 		 * of the page size.
 		 */
-		fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
-		ret = io_mem_abort(vcpu, fault_ipa);
+		ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
+		ret = io_mem_abort(vcpu, ipa);
 		goto out_unlock;
 	}
 
 	/* Userspace should not be able to register out-of-bounds IPAs */
-	VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm));
+	VM_BUG_ON(ipa >= kvm_phys_size(vcpu->kvm));
 
 	if (fault_status == FSC_ACCESS) {
 		handle_access_fault(vcpu, fault_ipa);
@@ -1423,7 +1494,8 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
 		goto out_unlock;
 	}
 
-	ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
+	ret = user_mem_abort(vcpu, fault_ipa, &nested_trans,
+			     memslot, hva, fault_status);
 	if (ret == 0)
 		ret = 1;
 out:
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index c2a99b672368..0a9708f776fc 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -108,6 +108,15 @@ static u32 compute_fsc(int level, u32 fsc)
 	return fsc | (level & 0x3);
 }
 
+static int esr_s2_fault(struct kvm_vcpu *vcpu, int level, u32 fsc)
+{
+	u32 esr;
+
+	esr = kvm_vcpu_get_esr(vcpu) & ~ESR_ELx_FSC;
+	esr |= compute_fsc(level, fsc);
+	return esr;
+}
+
 static int check_base_s2_limits(struct s2_walk_info *wi,
 				int level, int input_size, int stride)
 {
@@ -457,6 +466,45 @@ void kvm_vcpu_put_hw_mmu(struct kvm_vcpu *vcpu)
 	}
 }
 
+/*
+ * Returns non-zero if the permission fault is handled by injecting
+ * it into the next-level hypervisor.
+ */
+int kvm_s2_handle_perm_fault(struct kvm_vcpu *vcpu, struct kvm_s2_trans *trans)
+{
+	unsigned long fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
+	bool forward_fault = false;
+
+	trans->esr = 0;
+
+	if (fault_status != FSC_PERM)
+		return 0;
+
+	if (kvm_vcpu_trap_is_iabt(vcpu)) {
+		forward_fault = (trans->upper_attr & BIT(54));
+	} else {
+		bool write_fault = kvm_is_write_fault(vcpu);
+
+		forward_fault = ((write_fault && !trans->writable) ||
+				 (!write_fault && !trans->readable));
+	}
+
+	if (forward_fault) {
+		trans->esr = esr_s2_fault(vcpu, trans->level, ESR_ELx_FSC_PERM);
+		return 1;
+	}
+
+	return 0;
+}
+
+int kvm_inject_s2_fault(struct kvm_vcpu *vcpu, u64 esr_el2)
+{
+	vcpu_write_sys_reg(vcpu, vcpu->arch.fault.far_el2, FAR_EL2);
+	vcpu_write_sys_reg(vcpu, vcpu->arch.fault.hpfar_el2, HPFAR_EL2);
+
+	return kvm_inject_nested_sync(vcpu, esr_el2);
+}
+
 /*
  * Inject wfx to the virtual EL2 if this is not from the virtual EL2 and
  * the virtual HCR_EL2.TWX is set. Otherwise, let the host hypervisor
-- 
2.30.2


Thread overview: 378+ messages

2022-01-28 12:18 [PATCH v6 00/64] KVM: arm64: ARMv8.3/8.4 Nested Virtualization support Marc Zyngier
2022-01-28 12:18 ` [PATCH v6 01/64] arm64: Add ARM64_HAS_NESTED_VIRT cpufeature Marc Zyngier
2022-02-01 14:22   ` Russell King (Oracle)
2022-01-28 12:18 ` [PATCH v6 02/64] KVM: arm64: nv: Introduce nested virtualization VCPU feature Marc Zyngier
2022-01-28 12:18 ` [PATCH v6 03/64] KVM: arm64: nv: Reset VCPU to EL2 registers if VCPU nested virt is set Marc Zyngier
2022-02-02 11:40   ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 04/64] KVM: arm64: nv: Allow userspace to set PSR_MODE_EL2x Marc Zyngier
2022-02-02 11:53   ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 05/64] KVM: arm64: nv: Add EL2 system registers to vcpu context Marc Zyngier
2022-02-11 16:35   ` Miguel Luis
2022-01-28 12:18 ` [PATCH v6 06/64] KVM: arm64: nv: Add nested virt VCPU primitives for vEL2 VCPU state Marc Zyngier
2022-02-02 12:10   ` Alexandru Elisei
2022-02-14 12:39   ` Miguel Luis
2022-02-14 14:20     ` Marc Zyngier
2022-01-28 12:18 ` [PATCH v6 07/64] KVM: arm64: nv: Handle HCR_EL2.NV system register traps Marc Zyngier
2022-02-01 14:32   ` Russell King (Oracle)
2022-01-28 12:18 ` [PATCH v6 08/64] KVM: arm64: nv: Reset VMPIDR_EL2 and VPIDR_EL2 to sane values Marc Zyngier
2022-01-28 12:18 ` [PATCH v6 09/64] KVM: arm64: nv: Support virtual EL2 exceptions Marc Zyngier
2022-02-02 15:23   ` Alexandru Elisei
2022-02-03 17:43     ` Marc Zyngier
2022-02-04 11:47       ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 10/64] KVM: arm64: nv: Inject HVC exceptions to the virtual EL2 Marc Zyngier
2022-01-28 12:18 ` [PATCH v6 11/64] KVM: arm64: nv: Handle trapped ERET from " Marc Zyngier
2022-01-28 12:18 ` [PATCH v6 12/64] KVM: arm64: nv: Add non-VHE-EL2->EL1 translation helpers Marc Zyngier
2022-02-01 16:37   ` Russell King (Oracle)
2022-02-02 17:08   ` Alexandru Elisei
2022-02-03 18:29     ` Marc Zyngier
2022-02-04 12:05       ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 13/64] KVM: arm64: nv: Handle virtual EL2 registers in vcpu_read/write_sys_reg() Marc Zyngier
2022-02-01 16:40   ` Russell King (Oracle)
2022-01-28 12:18 ` [PATCH v6 14/64] KVM: arm64: nv: Handle SPSR_EL2 specially Marc Zyngier
2022-02-01 16:43   ` Russell King (Oracle)
2022-01-28 12:18 ` [PATCH v6 15/64] KVM: arm64: nv: Handle HCR_EL2.E2H specially Marc Zyngier
2022-02-01 16:51   ` Russell King (Oracle)
2022-02-01 18:17     ` Marc Zyngier
2022-01-28 12:18 ` [PATCH v6 16/64] KVM: arm64: nv: Save/Restore vEL2 sysregs Marc Zyngier
2022-02-03 15:14   ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 17/64] KVM: arm64: nv: Emulate PSTATE.M for a guest hypervisor Marc Zyngier
2022-02-01 18:06   ` Russell King (Oracle)
2022-02-03 15:53   ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 18/64] KVM: arm64: nv: Trap EL1 VM register accesses in virtual EL2 Marc Zyngier
2022-02-01 18:08   ` Russell King (Oracle)
2022-02-03 17:11   ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 19/64] KVM: arm64: nv: Trap SPSR_EL1, ELR_EL1 and VBAR_EL1 from " Marc Zyngier
2022-02-01 18:13   ` Russell King (Oracle)
2022-02-03 17:27   ` Alexandru Elisei
2022-02-04 10:58   ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 20/64] KVM: arm64: nv: Trap CPACR_EL1 access in " Marc Zyngier
2022-02-04 11:10   ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 21/64] KVM: arm64: nv: Handle PSCI call via smc from the guest Marc Zyngier
2022-02-04 14:02   ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 22/64] KVM: arm64: nv: Respect virtual HCR_EL2.TWX setting Marc Zyngier
2022-02-04 15:40   ` Alexandru Elisei
2022-02-04 16:01     ` Alexandru Elisei
2022-02-07 15:38     ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 23/64] KVM: arm64: nv: Respect virtual CPTR_EL2.{TFP,FPEN} settings Marc Zyngier
2022-01-28 12:18 ` [PATCH v6 24/64] KVM: arm64: nv: Respect the virtual HCR_EL2.NV bit setting Marc Zyngier
2022-02-07 15:33   ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 25/64] KVM: arm64: nv: Respect virtual HCR_EL2.TVM and TRVM settings Marc Zyngier
2022-02-07 16:18   ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 26/64] KVM: arm64: nv: Respect the virtual HCR_EL2.NV1 bit setting Marc Zyngier
2022-02-07 16:36   ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 27/64] KVM: arm64: nv: Allow a sysreg to be hidden from userspace only Marc Zyngier
2022-02-08 14:36   ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 28/64] KVM: arm64: nv: Emulate EL12 register accesses from the virtual EL2 Marc Zyngier
2022-02-08 15:35   ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 29/64] KVM: arm64: nv: Forward debug traps to the nested guest Marc Zyngier
2022-02-09 11:04   ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 30/64] KVM: arm64: nv: Configure HCR_EL2 for nested virtualization Marc Zyngier
2022-02-09 16:41   ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 31/64] KVM: arm64: nv: Only toggle cache for virtual EL2 when SCTLR_EL2 changes Marc Zyngier
2022-02-09 16:56   ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 32/64] KVM: arm64: nv: Filter out unsupported features from ID regs Marc Zyngier
2022-02-09 17:33   ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 33/64] KVM: arm64: nv: Hide RAS from nested guests Marc Zyngier
2022-01-28 12:18 ` [PATCH v6 34/64] KVM: arm64: nv: Support multiple nested Stage-2 mmu structures Marc Zyngier
2022-02-16 16:12   ` Alexandru Elisei
2022-02-24 14:25   ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 35/64] KVM: arm64: nv: Implement nested Stage-2 page table walk logic Marc Zyngier
2022-01-28 12:18 ` [PATCH v6 36/64] KVM: arm64: nv: Handle shadow stage 2 page faults Marc Zyngier [this message]
2022-02-17 15:23   ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 37/64] KVM: arm64: nv: Restrict S2 RD/WR permissions to match the guest's Marc Zyngier
2022-02-17 16:29   ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 38/64] KVM: arm64: nv: Unmap/flush shadow stage 2 page tables Marc Zyngier
2022-02-22 16:13   ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 39/64] KVM: arm64: nv: Set a handler for the system instruction traps Marc Zyngier
2022-02-24 11:59   ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 40/64] KVM: arm64: nv: Trap and emulate AT instructions from virtual EL2 Marc Zyngier
2022-02-24 15:39   ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 41/64] KVM: arm64: nv: Trap and emulate TLBI " Marc Zyngier
2022-02-24 15:56   ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 42/64] KVM: arm64: nv: Fold guest's HCR_EL2 configuration into the host's Marc Zyngier
2022-02-25 13:45   ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 43/64] KVM: arm64: nv: arch_timer: Support hyp timer emulation Marc Zyngier
2022-03-07 14:52   ` Alexandru Elisei
2022-03-07 15:48     ` Marc Zyngier
2022-03-07 16:28       ` Alexandru Elisei
2022-03-07 16:52         ` Marc Zyngier
2022-03-07 17:13           ` Alexandru Elisei
2022-03-07 15:23   ` Alexandru Elisei
2022-03-07 15:44     ` Marc Zyngier
2022-03-07 16:24       ` Alexandru Elisei
2022-03-07 16:40         ` Marc Zyngier
2022-01-28 12:18 ` [PATCH v6 44/64] KVM: arm64: nv: Add handling of EL2-specific timer registers Marc Zyngier
2022-03-07 16:01   ` Alexandru Elisei
2022-01-28 12:18 ` [PATCH v6 45/64] KVM: arm64: nv: Load timer before the GIC Marc Zyngier
2022-01-28 12:18 ` [PATCH v6 46/64] KVM: arm64: nv: Nested GICv3 Support Marc Zyngier
2022-01-28 12:18 ` [PATCH v6 47/64] KVM: arm64: nv: Don't load the GICv4 context on entering a nested guest Marc Zyngier
2022-01-28 12:18 ` [PATCH v6 48/64] KVM: arm64: nv: vgic: Emulate the HW bit in software Marc Zyngier
2022-01-28 12:18 ` [PATCH v6 49/64] KVM: arm64: nv: vgic: Allow userland to set VGIC maintenance IRQ Marc Zyngier
2022-01-28 12:18 ` [PATCH v6 50/64] KVM: arm64: nv: Implement maintenance interrupt forwarding Marc Zyngier
2022-01-28 12:18 ` [PATCH v6 51/64] KVM: arm64: nv: Add nested GICv3 tracepoints Marc Zyngier
2022-01-28 12:19 ` [PATCH v6 52/64] KVM: arm64: nv: Allow userspace to request KVM_ARM_VCPU_NESTED_VIRT Marc Zyngier
2022-01-28 12:19 ` [PATCH v6 53/64] KVM: arm64: nv: Add handling of ARMv8.4-TTL TLB invalidation Marc Zyngier
2022-01-28 12:19 ` [PATCH v6 54/64] KVM: arm64: nv: Invalidate TLBs based on shadow S2 TTL-like information Marc Zyngier
2022-01-28 12:19 ` [PATCH v6 55/64] KVM: arm64: nv: Tag shadow S2 entries with nested level Marc Zyngier
2022-01-28 12:19 ` [PATCH v6 56/64] KVM: arm64: nv: Add include containing the VNCR_EL2 offsets Marc Zyngier
2022-01-28 12:19 ` [PATCH v6 57/64] KVM: arm64: nv: Map VNCR-capable registers to a separate page Marc Zyngier
2022-01-28 12:19 ` [PATCH v6 58/64] KVM: arm64: nv: Move nested vgic state into the sysreg file Marc Zyngier
2022-01-28 12:19 ` [PATCH v6 59/64] KVM: arm64: Add ARMv8.4 Enhanced Nested Virt cpufeature Marc Zyngier
2022-01-28 12:19 ` [PATCH v6 60/64] KVM: arm64: nv: Sync nested timer state with ARMv8.4 Marc Zyngier
2022-04-01 17:51   ` Chase Conklin
2022-01-28 12:19 ` [PATCH v6 61/64] KVM: arm64: nv: Allocate VNCR page when required Marc Zyngier
2022-01-28 12:19 ` [PATCH v6 62/64] KVM: arm64: nv: Enable ARMv8.4-NV support Marc Zyngier
2022-01-28 12:19 ` [PATCH v6 63/64] KVM: arm64: nv: Fast-track 'InHost' exception returns Marc Zyngier
2022-01-28 12:19 ` [PATCH v6 64/64] KVM: arm64: nv: Fast-track EL1 TLBIs for VHE guests Marc Zyngier
