From: Marc Zyngier <maz@kernel.org>
To: linux-arm-kernel@lists.infradead.org,
	kvmarm@lists.cs.columbia.edu, kvm@vger.kernel.org
Cc: Andre Przywara <andre.przywara@arm.com>,
	Christoffer Dall <christoffer.dall@arm.com>,
	Jintack Lim <jintack@cs.columbia.edu>,
	Alexandru Elisei <alexandru.elisei@arm.com>,
	James Morse <james.morse@arm.com>,
	Julien Thierry <julien.thierry.kdev@gmail.com>,
	Suzuki K Poulose <suzuki.poulose@arm.com>,
	kernel-team@android.com,
	Christoffer Dall <christoffer.dall@linaro.org>,
	Jintack Lim <jintack.lim@linaro.org>
Subject: [PATCH v3 37/66] KVM: arm64: nv: Unmap/flush shadow stage 2 page tables
Date: Thu, 10 Dec 2020 15:59:33 +0000	[thread overview]
Message-ID: <20201210160002.1407373-38-maz@kernel.org> (raw)
In-Reply-To: <20201210160002.1407373-1-maz@kernel.org>

From: Christoffer Dall <christoffer.dall@linaro.org>

Unmap/flush shadow stage 2 page tables for the nested VMs as well as the
stage 2 page table for the guest hypervisor.

Note: the MMU notifier paths in mmu.c currently handle shadow stage-2
tables in an extremely blunt way, for example by clearing out an entire
shadow stage-2 table on any notification. A later version of the patch
series will handle this more efficiently using the reverse mapping
feature.
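
The pattern is the same at every call site: whatever is done to the
canonical stage 2 is repeated, under the same mmu_lock, for each valid
shadow stage-2 MMU, over the whole IPA range. Condensed from the
kvm_arch_flush_shadow_memslot() hunk below:

	spin_lock(&kvm->mmu_lock);
	kvm_unmap_stage2_range(&kvm->arch.mmu, gpa, size);	/* canonical S2 */
	kvm_nested_s2_clear(kvm);				/* all shadow S2s */
	spin_unlock(&kvm->mmu_lock);

The nested helpers operate on the full 0..kvm_phys_size(kvm) range
because, without reverse mapping, there is no cheap way to tell which
shadow entries a given canonical range maps to.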

Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Jintack Lim <jintack.lim@linaro.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/include/asm/kvm_mmu.h    |  3 +++
 arch/arm64/include/asm/kvm_nested.h |  3 +++
 arch/arm64/kvm/mmu.c                | 34 ++++++++++++++++++++++---
 arch/arm64/kvm/nested.c             | 39 +++++++++++++++++++++++++++++
 4 files changed, 75 insertions(+), 4 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index ec39015bb2a6..e2c58ad46bd1 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -183,6 +183,8 @@ int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
 			   void __iomem **haddr);
 int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
 			     void **haddr);
+void kvm_stage2_flush_range(struct kvm_s2_mmu *mmu,
+			    phys_addr_t addr, phys_addr_t end);
 void free_hyp_pgds(void);
 
 void kvm_unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size);
@@ -191,6 +193,7 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu);
 void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
 			  phys_addr_t pa, unsigned long size, bool writable);
+void kvm_stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end);
 
 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu);
 
diff --git a/arch/arm64/include/asm/kvm_nested.h b/arch/arm64/include/asm/kvm_nested.h
index 3f3d8e10bd99..2987806850f0 100644
--- a/arch/arm64/include/asm/kvm_nested.h
+++ b/arch/arm64/include/asm/kvm_nested.h
@@ -114,6 +114,9 @@ extern int kvm_walk_nested_s2(struct kvm_vcpu *vcpu, phys_addr_t gipa,
 extern int kvm_s2_handle_perm_fault(struct kvm_vcpu *vcpu,
 				    struct kvm_s2_trans *trans);
 extern int kvm_inject_s2_fault(struct kvm_vcpu *vcpu, u64 esr_el2);
+extern void kvm_nested_s2_wp(struct kvm *kvm);
+extern void kvm_nested_s2_clear(struct kvm *kvm);
+extern void kvm_nested_s2_flush(struct kvm *kvm);
 int handle_wfx_nested(struct kvm_vcpu *vcpu, bool is_wfe);
 extern bool __forward_traps(struct kvm_vcpu *vcpu, unsigned int reg,
 			    u64 control_bit);
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 6f973efb2cc3..36cb9fa22153 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -141,13 +141,20 @@ void kvm_unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
 	__unmap_stage2_range(mmu, start, size, true);
 }
 
+void kvm_stage2_flush_range(struct kvm_s2_mmu *mmu,
+			    phys_addr_t addr, phys_addr_t end)
+{
+	stage2_apply_range_resched(mmu->kvm, addr, end, kvm_pgtable_stage2_flush);
+}
+
 static void stage2_flush_memslot(struct kvm *kvm,
 				 struct kvm_memory_slot *memslot)
 {
 	phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
 	phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
+	struct kvm_s2_mmu *mmu = &kvm->arch.mmu;
 
-	stage2_apply_range_resched(kvm, addr, end, kvm_pgtable_stage2_flush);
+	kvm_stage2_flush_range(mmu, addr, end);
 }
 
 /**
@@ -170,6 +177,8 @@ static void stage2_flush_vm(struct kvm *kvm)
 	kvm_for_each_memslot(memslot, slots)
 		stage2_flush_memslot(kvm, memslot);
 
+	kvm_nested_s2_flush(kvm);
+
 	spin_unlock(&kvm->mmu_lock);
 	srcu_read_unlock(&kvm->srcu, idx);
 }
@@ -465,6 +474,8 @@ void stage2_unmap_vm(struct kvm *kvm)
 	kvm_for_each_memslot(memslot, slots)
 		stage2_unmap_memslot(kvm, memslot);
 
+	kvm_nested_s2_clear(kvm);
+
 	spin_unlock(&kvm->mmu_lock);
 	mmap_read_unlock(current->mm);
 	srcu_read_unlock(&kvm->srcu, idx);
@@ -539,7 +550,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
  * @addr:	Start address of range
  * @end:	End address of range
  */
-static void stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end)
+void kvm_stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end)
 {
 	struct kvm *kvm = mmu->kvm;
 	stage2_apply_range_resched(kvm, addr, end, kvm_pgtable_stage2_wrprotect);
@@ -571,7 +582,8 @@ void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
 	end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
 
 	spin_lock(&kvm->mmu_lock);
-	stage2_wp_range(&kvm->arch.mmu, start, end);
+	kvm_stage2_wp_range(&kvm->arch.mmu, start, end);
+	kvm_nested_s2_wp(kvm);
 	spin_unlock(&kvm->mmu_lock);
 	kvm_flush_remote_tlbs(kvm);
 }
@@ -595,7 +607,7 @@ static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
 	phys_addr_t start = (base_gfn +  __ffs(mask)) << PAGE_SHIFT;
 	phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
 
-	stage2_wp_range(&kvm->arch.mmu, start, end);
+	kvm_stage2_wp_range(&kvm->arch.mmu, start, end);
 }
 
 /*
@@ -610,6 +622,7 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 		gfn_t gfn_offset, unsigned long mask)
 {
 	kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
+	kvm_nested_s2_wp(kvm);
 }
 
 static void clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
@@ -1164,6 +1177,7 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *dat
 	bool may_block = flags & MMU_NOTIFIER_RANGE_BLOCKABLE;
 
 	__unmap_stage2_range(&kvm->arch.mmu, gpa, size, may_block);
+	kvm_nested_s2_clear(kvm);
 	return 0;
 }
 
@@ -1192,6 +1206,7 @@ static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data
 	 */
 	kvm_pgtable_stage2_map(kvm->arch.mmu.pgt, gpa, PAGE_SIZE,
 			       __pfn_to_phys(*pfn), KVM_PGTABLE_PROT_R, NULL);
+	kvm_nested_s2_clear(kvm);
 	return 0;
 }
 
@@ -1223,12 +1238,22 @@ static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
 	kpte = kvm_pgtable_stage2_mkold(kvm->arch.mmu.pgt, gpa);
 	pte = __pte(kpte);
+
+	/*
+	 * TODO: Handle nested_mmu structures here using the reverse mapping in
+	 * a later version of the patch series.
+	 */
 	return pte_valid(pte) && pte_young(pte);
 }
 
 static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
 {
 	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
+
+	/*
+	 * TODO: Handle nested_mmu structures here using the reverse mapping in
+	 * a later version of the patch series.
+	 */
 	return kvm_pgtable_stage2_is_young(kvm->arch.mmu.pgt, gpa);
 }
 
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
@@ -1457,6 +1482,7 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 
 	spin_lock(&kvm->mmu_lock);
 	kvm_unmap_stage2_range(&kvm->arch.mmu, gpa, size);
+	kvm_nested_s2_clear(kvm);
 	spin_unlock(&kvm->mmu_lock);
 }
 
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index 551aee363cc3..e78c6c093afc 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -505,6 +505,45 @@ int kvm_inject_s2_fault(struct kvm_vcpu *vcpu, u64 esr_el2)
 	return kvm_inject_nested_sync(vcpu, esr_el2);
 }
 
+/* expects kvm->mmu_lock to be held */
+void kvm_nested_s2_wp(struct kvm *kvm)
+{
+	int i;
+
+	for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
+		struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
+
+		if (kvm_s2_mmu_valid(mmu))
+			kvm_stage2_wp_range(mmu, 0, kvm_phys_size(kvm));
+	}
+}
+
+/* expects kvm->mmu_lock to be held */
+void kvm_nested_s2_clear(struct kvm *kvm)
+{
+	int i;
+
+	for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
+		struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
+
+		if (kvm_s2_mmu_valid(mmu))
+			kvm_unmap_stage2_range(mmu, 0, kvm_phys_size(kvm));
+	}
+}
+
+/* expects kvm->mmu_lock to be held */
+void kvm_nested_s2_flush(struct kvm *kvm)
+{
+	int i;
+
+	for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
+		struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
+
+		if (kvm_s2_mmu_valid(mmu))
+			kvm_stage2_flush_range(mmu, 0, kvm_phys_size(kvm));
+	}
+}
+
 /*
  * Inject wfx to the virtual EL2 if this is not from the virtual EL2 and
  * the virtual HCR_EL2.TWX is set. Otherwise, let the host hypervisor
-- 
2.29.2
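
The "expects kvm->mmu_lock to be held" comments on the three new
helpers are convention only. A hypothetical hardening, not part of the
posted patch, would be to open each helper with

	lockdep_assert_held(&kvm->mmu_lock);	/* hypothetical: enforce the comment */

so that lockdep-enabled builds flag any caller that forgets to take the
lock.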


