linux-kernel.vger.kernel.org archive mirror
* [PATCH 0/7] kvm: x86/mmu: Share the same code to invalidate each vTLB entry
From: Lai Jiangshan @ 2023-01-05  9:58 UTC
  To: linux-kernel; +Cc: Paolo Bonzini, Sean Christopherson, Lai Jiangshan

From: Lai Jiangshan <jiangshan.ljs@antgroup.com>

FNAME(invlpg) and FNAME(sync_page) both invalidate vTLB entries, but
via slightly different methods.

Make them use the same method and share the same code.
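
With the series applied, invalidating a guest virtual address in the
current and all cached previous roots reduces to a single call; a
sketch of the resulting usage (taken from the kvm_mmu_invlpg() change
in patch 1):

	/* Sync the vTLB entry for @gva in every valid cached root. */
	kvm_mmu_invalidate_gva(vcpu, vcpu->arch.walk_mmu, gva, KVM_MMU_ROOTS_ALL);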

Lai Jiangshan (7):
  kvm: x86/mmu: Use KVM_MMU_ROOT_XXX for kvm_mmu_invalidate_gva()
  kvm: x86/mmu: Use kvm_mmu_invalidate_gva() in kvm_mmu_invpcid_gva()
  kvm: x86/mmu: Use kvm_mmu_invalidate_gva() in
    nested_ept_invalidate_addr()
  kvm: x86/mmu: Reduce the update to the spte in FNAME(sync_page)
  kvm: x86/mmu: Move the code out of FNAME(sync_page)'s loop body into
    mmu.c
  kvm: x86/mmu: Remove FNAME(invlpg)
  kvm: x86/mmu: Remove @no_dirty_log from FNAME(prefetch_gpte)

 arch/x86/include/asm/kvm_host.h |   7 +-
 arch/x86/kvm/mmu/mmu.c          | 177 +++++++++++++++++----------
 arch/x86/kvm/mmu/paging_tmpl.h  | 207 ++++++++------------------------
 arch/x86/kvm/vmx/nested.c       |   5 +-
 arch/x86/kvm/x86.c              |   2 +-
 5 files changed, 176 insertions(+), 222 deletions(-)

-- 
2.19.1.6.gb485710b


* [PATCH 1/7] kvm: x86/mmu: Use KVM_MMU_ROOT_XXX for kvm_mmu_invalidate_gva()
From: Lai Jiangshan @ 2023-01-05  9:58 UTC
  To: linux-kernel
  Cc: Paolo Bonzini, Sean Christopherson, Lai Jiangshan,
	Thomas Gleixner, Ingo Molnar, Borislav Petkov, Dave Hansen, x86,
	H. Peter Anvin, kvm

From: Lai Jiangshan <jiangshan.ljs@antgroup.com>

kvm_mmu_invalidate_gva() is always called with @root_hpa set to either
@mmu->root.hpa or INVALID_PAGE.

Replace them with KVM_MMU_ROOT_XXX.
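
For reference, the KVM_MMU_ROOT_* values are bit flags, so any
combination of roots can be requested in a single call; at the time of
this series they are defined in arch/x86/include/asm/kvm_host.h
roughly as:

	#define KVM_MMU_ROOT_CURRENT		BIT(0)
	#define KVM_MMU_ROOT_PREVIOUS(i)	BIT(1+i)
	#define KVM_MMU_ROOTS_ALL		(~0UL)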

No functional change intended.

Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
---
 arch/x86/include/asm/kvm_host.h |  2 +-
 arch/x86/kvm/mmu/mmu.c          | 39 ++++++++++++++++-----------------
 arch/x86/kvm/x86.c              |  2 +-
 3 files changed, 21 insertions(+), 22 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 2f5bf581d00a..dbea616bccce 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -2026,7 +2026,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
 		       void *insn, int insn_len);
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
 void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-			    gva_t gva, hpa_t root_hpa);
+			    gva_t gva, ulong roots_to_invalidate);
 void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
 void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd);
 
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 5407649de547..90339b71bd56 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -5693,8 +5693,9 @@ int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 err
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
 
+/* roots_to_invalidte must be some combination of the KVM_MMU_ROOT_* flags */
 void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-			    gva_t gva, hpa_t root_hpa)
+			    gva_t gva, ulong roots_to_invalidate)
 {
 	int i;
 
@@ -5710,31 +5711,29 @@ void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 	if (!mmu->invlpg)
 		return;
 
-	if (root_hpa == INVALID_PAGE) {
+	if ((roots_to_invalidate & KVM_MMU_ROOT_CURRENT) && VALID_PAGE(mmu->root.hpa))
 		mmu->invlpg(vcpu, gva, mmu->root.hpa);
 
-		/*
-		 * INVLPG is required to invalidate any global mappings for the VA,
-		 * irrespective of PCID. Since it would take us roughly similar amount
-		 * of work to determine whether any of the prev_root mappings of the VA
-		 * is marked global, or to just sync it blindly, so we might as well
-		 * just always sync it.
-		 *
-		 * Mappings not reachable via the current cr3 or the prev_roots will be
-		 * synced when switching to that cr3, so nothing needs to be done here
-		 * for them.
-		 */
-		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
-			if (VALID_PAGE(mmu->prev_roots[i].hpa))
-				mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
-	} else {
-		mmu->invlpg(vcpu, gva, root_hpa);
-	}
+	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
+		if ((roots_to_invalidate & KVM_MMU_ROOT_PREVIOUS(i)) &&
+		    VALID_PAGE(mmu->prev_roots[i].hpa))
+			mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
 }
 
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
 {
-	kvm_mmu_invalidate_gva(vcpu, vcpu->arch.walk_mmu, gva, INVALID_PAGE);
+	/*
+	 * INVLPG is required to invalidate any global mappings for the VA,
+	 * irrespective of PCID. Since it would take us roughly similar amount
+	 * of work to determine whether any of the prev_root mappings of the VA
+	 * is marked global, or to just sync it blindly, so we might as well
+	 * just always sync it.
+	 *
+	 * Mappings not reachable via the current cr3 or the prev_roots will be
+	 * synced when switching to that cr3, so nothing needs to be done here
+	 * for them.
+	 */
+	kvm_mmu_invalidate_gva(vcpu, vcpu->arch.walk_mmu, gva, KVM_MMU_ROOTS_ALL);
 	++vcpu->stat.invlpg;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c936f8d28a53..4696cbb40545 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -799,7 +799,7 @@ void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
 	if ((fault->error_code & PFERR_PRESENT_MASK) &&
 	    !(fault->error_code & PFERR_RSVD_MASK))
 		kvm_mmu_invalidate_gva(vcpu, fault_mmu, fault->address,
-				       fault_mmu->root.hpa);
+				       KVM_MMU_ROOT_CURRENT);
 
 	fault_mmu->inject_page_fault(vcpu, fault);
 }
-- 
2.19.1.6.gb485710b


* [PATCH 2/7] kvm: x86/mmu: Use kvm_mmu_invalidate_gva() in kvm_mmu_invpcid_gva()
From: Lai Jiangshan @ 2023-01-05  9:58 UTC
  To: linux-kernel
  Cc: Paolo Bonzini, Sean Christopherson, Lai Jiangshan,
	Thomas Gleixner, Ingo Molnar, Borislav Petkov, Dave Hansen, x86,
	H. Peter Anvin, kvm

From: Lai Jiangshan <jiangshan.ljs@antgroup.com>

Use kvm_mmu_invalidate_gva() instead of open-coded calls to mmu->invlpg().

No functional change intended.

Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
---
 arch/x86/kvm/mmu/mmu.c | 21 +++++++--------------
 1 file changed, 7 insertions(+), 14 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 90339b71bd56..b0e7ac6d4e88 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -5742,27 +5742,20 @@ EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
 void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
 {
 	struct kvm_mmu *mmu = vcpu->arch.mmu;
-	bool tlb_flush = false;
+	ulong roots_to_invalidate = 0;
 	uint i;
 
-	if (pcid == kvm_get_active_pcid(vcpu)) {
-		if (mmu->invlpg)
-			mmu->invlpg(vcpu, gva, mmu->root.hpa);
-		tlb_flush = true;
-	}
+	if (pcid == kvm_get_active_pcid(vcpu))
+		roots_to_invalidate |= KVM_MMU_ROOT_CURRENT;
 
 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
 		if (VALID_PAGE(mmu->prev_roots[i].hpa) &&
-		    pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd)) {
-			if (mmu->invlpg)
-				mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
-			tlb_flush = true;
-		}
+		    pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd))
+			roots_to_invalidate |= KVM_MMU_ROOT_PREVIOUS(i);
 	}
 
-	if (tlb_flush)
-		static_call(kvm_x86_flush_tlb_gva)(vcpu, gva);
-
+	if (roots_to_invalidate)
+		kvm_mmu_invalidate_gva(vcpu, mmu, gva, roots_to_invalidate);
 	++vcpu->stat.invlpg;
 
 	/*
-- 
2.19.1.6.gb485710b


* [PATCH 3/7] kvm: x86/mmu: Use kvm_mmu_invalidate_gva() in nested_ept_invalidate_addr()
From: Lai Jiangshan @ 2023-01-05  9:58 UTC
  To: linux-kernel
  Cc: Paolo Bonzini, Sean Christopherson, Lai Jiangshan,
	Thomas Gleixner, Ingo Molnar, Borislav Petkov, Dave Hansen, x86,
	H. Peter Anvin, kvm

From: Lai Jiangshan <jiangshan.ljs@antgroup.com>

Use kvm_mmu_invalidate_gva() instead of open-coded calls to mmu->invlpg().

No functional change intended.

Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
---
 arch/x86/kvm/mmu/mmu.c    | 1 +
 arch/x86/kvm/vmx/nested.c | 5 ++++-
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index b0e7ac6d4e88..ffef9fe0c853 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -5719,6 +5719,7 @@ void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 		    VALID_PAGE(mmu->prev_roots[i].hpa))
 			mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
 }
+EXPORT_SYMBOL_GPL(kvm_mmu_invalidate_gva);
 
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
 {
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 557b9c468734..daf3138bafd1 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -358,6 +358,7 @@ static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp)
 static void nested_ept_invalidate_addr(struct kvm_vcpu *vcpu, gpa_t eptp,
 				       gpa_t addr)
 {
+	ulong roots_to_invalidate = 0;
 	uint i;
 	struct kvm_mmu_root_info *cached_root;
 
@@ -368,8 +369,10 @@ static void nested_ept_invalidate_addr(struct kvm_vcpu *vcpu, gpa_t eptp,
 
 		if (nested_ept_root_matches(cached_root->hpa, cached_root->pgd,
 					    eptp))
-			vcpu->arch.mmu->invlpg(vcpu, addr, cached_root->hpa);
+			roots_to_invalidate |= KVM_MMU_ROOT_PREVIOUS(i);
 	}
+	if (roots_to_invalidate)
+		kvm_mmu_invalidate_gva(vcpu, vcpu->arch.mmu, addr, roots_to_invalidate);
 }
 
 static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
-- 
2.19.1.6.gb485710b


* [PATCH 4/7] kvm: x86/mmu: Reduce the update to the spte in FNAME(sync_page)
From: Lai Jiangshan @ 2023-01-05  9:58 UTC
  To: linux-kernel
  Cc: Paolo Bonzini, Sean Christopherson, Lai Jiangshan,
	Thomas Gleixner, Ingo Molnar, Borislav Petkov, Dave Hansen, x86,
	H. Peter Anvin, kvm

From: Lai Jiangshan <jiangshan.ljs@antgroup.com>

Sometimes when the guest updates its pagetable, it only adds new gptes
without changing any existing ones, so there is no point in updating
the sptes for those existing gptes.

Also, when the sptes for these unchanged gptes are updated, the AD
bits are cleared because make_spte() is called with prefetch=true,
which might result in unneeded TLB flushing.

Do nothing if the permissions are unchanged.

Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
---
 arch/x86/kvm/mmu/paging_tmpl.h | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 11f17efbec97..ab0b031d4825 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -985,7 +985,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 	for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
 		u64 *sptep, spte;
 		struct kvm_memory_slot *slot;
-		unsigned pte_access;
+		unsigned old_pte_access, pte_access;
 		pt_element_t gpte;
 		gpa_t pte_gpa;
 		gfn_t gfn;
@@ -1025,6 +1025,12 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 			flush = true;
 			continue;
 		}
+		/*
+		 * Do nothing if the permissions are unchanged.
+		 */
+		old_pte_access = kvm_mmu_page_get_access(sp, i);
+		if (old_pte_access == pte_access)
+			continue;
 
 		/* Update the shadowed access bits in case they changed. */
 		kvm_mmu_page_set_access(sp, i, pte_access);
-- 
2.19.1.6.gb485710b


* [PATCH 5/7] kvm: x86/mmu: Move the code out of FNAME(sync_page)'s loop body into mmu.c
From: Lai Jiangshan @ 2023-01-05  9:58 UTC
  To: linux-kernel
  Cc: Paolo Bonzini, Sean Christopherson, Lai Jiangshan,
	Thomas Gleixner, Ingo Molnar, Borislav Petkov, Dave Hansen, x86,
	H. Peter Anvin, kvm

From: Lai Jiangshan <jiangshan.ljs@antgroup.com>

Rename mmu->sync_page to mmu->sync_spte and move the code out
of FNAME(sync_page)'s loop body into mmu.c.

Also initialize mmu->sync_spte as NULL for direct paging.

No functional change intended.

Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
---
 arch/x86/include/asm/kvm_host.h |   4 +-
 arch/x86/kvm/mmu/mmu.c          |  70 ++++++++++++---
 arch/x86/kvm/mmu/paging_tmpl.h  | 147 +++++++++++---------------------
 3 files changed, 110 insertions(+), 111 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index dbea616bccce..69b7967cd743 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -441,8 +441,8 @@ struct kvm_mmu {
 	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 			    gpa_t gva_or_gpa, u64 access,
 			    struct x86_exception *exception);
-	int (*sync_page)(struct kvm_vcpu *vcpu,
-			 struct kvm_mmu_page *sp);
+	int (*sync_spte)(struct kvm_vcpu *vcpu,
+			 struct kvm_mmu_page *sp, int i);
 	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa);
 	struct kvm_mmu_root_info root;
 	union kvm_cpu_role cpu_role;
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index ffef9fe0c853..f39bee1542d8 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1779,12 +1779,6 @@ static void mark_unsync(u64 *spte)
 	kvm_mmu_mark_parents_unsync(sp);
 }
 
-static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
-			       struct kvm_mmu_page *sp)
-{
-	return -1;
-}
-
 #define KVM_PAGE_ARRAY_NR 16
 
 struct kvm_mmu_pages {
@@ -1904,10 +1898,62 @@ static bool sp_has_gptes(struct kvm_mmu_page *sp)
 	  &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)])	\
 		if ((_sp)->gfn != (_gfn) || !sp_has_gptes(_sp)) {} else
 
+static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
+{
+	union kvm_mmu_page_role root_role = vcpu->arch.mmu->root_role;
+	bool flush = false;
+	int i;
+
+	/*
+	 * Ignore various flags when verifying that it's safe to sync a shadow
+	 * page using the current MMU context.
+	 *
+	 *  - level: not part of the overall MMU role and will never match as the MMU's
+	 *           level tracks the root level
+	 *  - access: updated based on the new guest PTE
+	 *  - quadrant: not part of the overall MMU role (similar to level)
+	 */
+	const union kvm_mmu_page_role sync_role_ign = {
+		.level = 0xf,
+		.access = 0x7,
+		.quadrant = 0x3,
+		.passthrough = 0x1,
+	};
+
+	/*
+	 * Direct pages can never be unsync, and KVM should never attempt to
+	 * sync a shadow page for a different MMU context, e.g. if the role
+	 * differs then the memslot lookup (SMM vs. non-SMM) will be bogus, the
+	 * reserved bits checks will be wrong, etc...
+	 */
+	if (WARN_ON_ONCE(sp->role.direct ||
+			 (sp->role.word ^ root_role.word) & ~sync_role_ign.word))
+		return -1;
+
+	for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
+		int ret = vcpu->arch.mmu->sync_spte(vcpu, sp, i);
+
+		if (ret < 0)
+			return -1;
+		flush |= ret;
+	}
+
+	/*
+	 * Note, any flush is purely for KVM's correctness, e.g. when dropping
+	 * an existing SPTE or clearing W/A/D bits to ensure an mmu_notifier
+	 * unmap or dirty logging event doesn't fail to flush.  The guest is
+	 * responsible for flushing the TLB to ensure any changes in protection
+	 * bits are recognized, i.e. until the guest flushes or page faults on
+	 * a relevant address, KVM is architecturally allowed to let vCPUs use
+	 * cached translations with the old protection bits.
+	 */
+	return flush;
+}
+
 static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 			 struct list_head *invalid_list)
 {
-	int ret = vcpu->arch.mmu->sync_page(vcpu, sp);
+	int ret = __kvm_sync_page(vcpu, sp);
 
 	if (ret < 0)
 		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
@@ -4458,7 +4504,7 @@ static void nonpaging_init_context(struct kvm_mmu *context)
 {
 	context->page_fault = nonpaging_page_fault;
 	context->gva_to_gpa = nonpaging_gva_to_gpa;
-	context->sync_page = nonpaging_sync_page;
+	context->sync_spte = NULL;
 	context->invlpg = NULL;
 }
 
@@ -5047,7 +5093,7 @@ static void paging64_init_context(struct kvm_mmu *context)
 {
 	context->page_fault = paging64_page_fault;
 	context->gva_to_gpa = paging64_gva_to_gpa;
-	context->sync_page = paging64_sync_page;
+	context->sync_spte = paging64_sync_spte;
 	context->invlpg = paging64_invlpg;
 }
 
@@ -5055,7 +5101,7 @@ static void paging32_init_context(struct kvm_mmu *context)
 {
 	context->page_fault = paging32_page_fault;
 	context->gva_to_gpa = paging32_gva_to_gpa;
-	context->sync_page = paging32_sync_page;
+	context->sync_spte = paging32_sync_spte;
 	context->invlpg = paging32_invlpg;
 }
 
@@ -5144,7 +5190,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,
 	context->cpu_role.as_u64 = cpu_role.as_u64;
 	context->root_role.word = root_role.word;
 	context->page_fault = kvm_tdp_page_fault;
-	context->sync_page = nonpaging_sync_page;
+	context->sync_spte = NULL;
 	context->invlpg = NULL;
 	context->get_guest_pgd = get_cr3;
 	context->get_pdptr = kvm_pdptr_read;
@@ -5276,7 +5322,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 
 		context->page_fault = ept_page_fault;
 		context->gva_to_gpa = ept_gva_to_gpa;
-		context->sync_page = ept_sync_page;
+		context->sync_spte = ept_sync_spte;
 		context->invlpg = ept_invlpg;
 
 		update_permission_bitmask(context, true);
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index ab0b031d4825..3bc13b9b61d1 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -942,120 +942,73 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
  *   can't change unless all sptes pointing to it are nuked first.
  *
  * Returns
- * < 0: the sp should be zapped
+ * < 0: failed to sync
  *   0: the sp is synced and no tlb flushing is required
  * > 0: the sp is synced and tlb flushing is required
  */
-static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
+static int FNAME(sync_spte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int i)
 {
-	union kvm_mmu_page_role root_role = vcpu->arch.mmu->root_role;
-	int i;
 	bool host_writable;
 	gpa_t first_pte_gpa;
-	bool flush = false;
-
-	/*
-	 * Ignore various flags when verifying that it's safe to sync a shadow
-	 * page using the current MMU context.
-	 *
-	 *  - level: not part of the overall MMU role and will never match as the MMU's
-	 *           level tracks the root level
-	 *  - access: updated based on the new guest PTE
-	 *  - quadrant: not part of the overall MMU role (similar to level)
-	 */
-	const union kvm_mmu_page_role sync_role_ign = {
-		.level = 0xf,
-		.access = 0x7,
-		.quadrant = 0x3,
-		.passthrough = 0x1,
-	};
+	u64 *sptep, spte;
+	struct kvm_memory_slot *slot;
+	unsigned old_pte_access, pte_access;
+	pt_element_t gpte;
+	gpa_t pte_gpa;
+	gfn_t gfn;
 
-	/*
-	 * Direct pages can never be unsync, and KVM should never attempt to
-	 * sync a shadow page for a different MMU context, e.g. if the role
-	 * differs then the memslot lookup (SMM vs. non-SMM) will be bogus, the
-	 * reserved bits checks will be wrong, etc...
-	 */
-	if (WARN_ON_ONCE(sp->role.direct ||
-			 (sp->role.word ^ root_role.word) & ~sync_role_ign.word))
-		return -1;
+	if (!sp->spt[i])
+		return 0;
 
 	first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);
+	pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);
 
-	for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
-		u64 *sptep, spte;
-		struct kvm_memory_slot *slot;
-		unsigned old_pte_access, pte_access;
-		pt_element_t gpte;
-		gpa_t pte_gpa;
-		gfn_t gfn;
-
-		if (!sp->spt[i])
-			continue;
-
-		pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);
-
-		if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
-					       sizeof(pt_element_t)))
-			return -1;
-
-		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
-			flush = true;
-			continue;
-		}
-
-		gfn = gpte_to_gfn(gpte);
-		pte_access = sp->role.access;
-		pte_access &= FNAME(gpte_access)(gpte);
-		FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);
-
-		if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access))
-			continue;
+	if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
+				       sizeof(pt_element_t)))
+		return -1;
 
-		/*
-		 * Drop the SPTE if the new protections would result in a RWX=0
-		 * SPTE or if the gfn is changing.  The RWX=0 case only affects
-		 * EPT with execute-only support, i.e. EPT without an effective
-		 * "present" bit, as all other paging modes will create a
-		 * read-only SPTE if pte_access is zero.
-		 */
-		if ((!pte_access && !shadow_present_mask) ||
-		    gfn != kvm_mmu_page_get_gfn(sp, i)) {
-			drop_spte(vcpu->kvm, &sp->spt[i]);
-			flush = true;
-			continue;
-		}
-		/*
-		 * Do nothing if the permissions are unchanged.
-		 */
-		old_pte_access = kvm_mmu_page_get_access(sp, i);
-		if (old_pte_access == pte_access)
-			continue;
+	if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte))
+		return 1;
 
-		/* Update the shadowed access bits in case they changed. */
-		kvm_mmu_page_set_access(sp, i, pte_access);
+	gfn = gpte_to_gfn(gpte);
+	pte_access = sp->role.access;
+	pte_access &= FNAME(gpte_access)(gpte);
+	FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);
 
-		sptep = &sp->spt[i];
-		spte = *sptep;
-		host_writable = spte & shadow_host_writable_mask;
-		slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
-		make_spte(vcpu, sp, slot, pte_access, gfn,
-			  spte_to_pfn(spte), spte, true, false,
-			  host_writable, &spte);
+	if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access))
+		return 0;
 
-		flush |= mmu_spte_update(sptep, spte);
+	/*
+	 * Drop the SPTE if the new protections would result in a RWX=0
+	 * SPTE or if the gfn is changing.  The RWX=0 case only affects
+	 * EPT with execute-only support, i.e. EPT without an effective
+	 * "present" bit, as all other paging modes will create a
+	 * read-only SPTE if pte_access is zero.
+	 */
+	if ((!pte_access && !shadow_present_mask) ||
+	    gfn != kvm_mmu_page_get_gfn(sp, i)) {
+		drop_spte(vcpu->kvm, &sp->spt[i]);
+		return 1;
 	}
-
 	/*
-	 * Note, any flush is purely for KVM's correctness, e.g. when dropping
-	 * an existing SPTE or clearing W/A/D bits to ensure an mmu_notifier
-	 * unmap or dirty logging event doesn't fail to flush.  The guest is
-	 * responsible for flushing the TLB to ensure any changes in protection
-	 * bits are recognized, i.e. until the guest flushes or page faults on
-	 * a relevant address, KVM is architecturally allowed to let vCPUs use
-	 * cached translations with the old protection bits.
+	 * Do nothing if the permissions are unchanged.
 	 */
-	return flush;
+	old_pte_access = kvm_mmu_page_get_access(sp, i);
+	if (old_pte_access == pte_access)
+		return 0;
+
+	/* Update the shadowed access bits in case they changed. */
+	kvm_mmu_page_set_access(sp, i, pte_access);
+
+	sptep = &sp->spt[i];
+	spte = *sptep;
+	host_writable = spte & shadow_host_writable_mask;
+	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+	make_spte(vcpu, sp, slot, pte_access, gfn,
+		  spte_to_pfn(spte), spte, true, false,
+		  host_writable, &spte);
+
+	return mmu_spte_update(sptep, spte);
 }
 
 #undef pt_element_t
-- 
2.19.1.6.gb485710b


* [PATCH 6/7] kvm: x86/mmu: Remove FNAME(invlpg)
From: Lai Jiangshan @ 2023-01-05  9:58 UTC
  To: linux-kernel
  Cc: Paolo Bonzini, Sean Christopherson, Lai Jiangshan,
	Thomas Gleixner, Ingo Molnar, Borislav Petkov, Dave Hansen, x86,
	H. Peter Anvin, kvm

From: Lai Jiangshan <jiangshan.ljs@antgroup.com>

Replace it with FNAME(sync_spte).

FNAME(sync_spte) combined with the shadow pagetable walk meets the
semantics of the instruction INVLPG.

Using FNAME(sync_spte) allows the code to be shared with the vTLB
flushing path (kvm_sync_page()) when invalidating each vTLB entry.

Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
---
 arch/x86/include/asm/kvm_host.h |  1 -
 arch/x86/kvm/mmu/mmu.c          | 48 +++++++++++++++++----------
 arch/x86/kvm/mmu/paging_tmpl.h  | 59 ---------------------------------
 3 files changed, 31 insertions(+), 77 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 69b7967cd743..b80de8f53130 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -443,7 +443,6 @@ struct kvm_mmu {
 			    struct x86_exception *exception);
 	int (*sync_spte)(struct kvm_vcpu *vcpu,
 			 struct kvm_mmu_page *sp, int i);
-	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa);
 	struct kvm_mmu_root_info root;
 	union kvm_cpu_role cpu_role;
 	union kvm_mmu_page_role root_role;
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index f39bee1542d8..1e5f2e79863f 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1061,14 +1061,6 @@ static struct kvm_rmap_head *gfn_to_rmap(gfn_t gfn, int level,
 	return &slot->arch.rmap[level - PG_LEVEL_4K][idx];
 }
 
-static bool rmap_can_add(struct kvm_vcpu *vcpu)
-{
-	struct kvm_mmu_memory_cache *mc;
-
-	mc = &vcpu->arch.mmu_pte_list_desc_cache;
-	return kvm_mmu_memory_cache_nr_free_objects(mc);
-}
-
 static void rmap_remove(struct kvm *kvm, u64 *spte)
 {
 	struct kvm_memslots *slots;
@@ -4505,7 +4497,6 @@ static void nonpaging_init_context(struct kvm_mmu *context)
 	context->page_fault = nonpaging_page_fault;
 	context->gva_to_gpa = nonpaging_gva_to_gpa;
 	context->sync_spte = NULL;
-	context->invlpg = NULL;
 }
 
 static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
@@ -5094,7 +5085,6 @@ static void paging64_init_context(struct kvm_mmu *context)
 	context->page_fault = paging64_page_fault;
 	context->gva_to_gpa = paging64_gva_to_gpa;
 	context->sync_spte = paging64_sync_spte;
-	context->invlpg = paging64_invlpg;
 }
 
 static void paging32_init_context(struct kvm_mmu *context)
@@ -5102,7 +5092,6 @@ static void paging32_init_context(struct kvm_mmu *context)
 	context->page_fault = paging32_page_fault;
 	context->gva_to_gpa = paging32_gva_to_gpa;
 	context->sync_spte = paging32_sync_spte;
-	context->invlpg = paging32_invlpg;
 }
 
 static union kvm_cpu_role
@@ -5191,7 +5180,6 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,
 	context->root_role.word = root_role.word;
 	context->page_fault = kvm_tdp_page_fault;
 	context->sync_spte = NULL;
-	context->invlpg = NULL;
 	context->get_guest_pgd = get_cr3;
 	context->get_pdptr = kvm_pdptr_read;
 	context->inject_page_fault = kvm_inject_page_fault;
@@ -5323,7 +5311,6 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 		context->page_fault = ept_page_fault;
 		context->gva_to_gpa = ept_gva_to_gpa;
 		context->sync_spte = ept_sync_spte;
-		context->invlpg = ept_invlpg;
 
 		update_permission_bitmask(context, true);
 		context->pkru_mask = 0;
@@ -5364,7 +5351,7 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu,
 	 * L2 page tables are never shadowed, so there is no need to sync
 	 * SPTEs.
 	 */
-	g_context->invlpg            = NULL;
+	g_context->sync_spte         = NULL;
 
 	/*
 	 * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using
@@ -5739,6 +5726,33 @@ int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 err
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
 
+static void __kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+				     gva_t gva, hpa_t root_hpa)
+{
+	struct kvm_shadow_walk_iterator iterator;
+
+	vcpu_clear_mmio_info(vcpu, gva);
+
+	write_lock(&vcpu->kvm->mmu_lock);
+	for_each_shadow_entry_using_root(vcpu, root_hpa, gva, iterator) {
+		struct kvm_mmu_page *sp = sptep_to_sp(iterator.sptep);
+
+		if (sp->unsync && *iterator.sptep) {
+			gfn_t gfn = kvm_mmu_page_get_gfn(sp, iterator.index);
+			int ret = mmu->sync_spte(vcpu, sp, iterator.index);
+
+			if (ret < 0)
+				mmu_page_zap_pte(vcpu->kvm, sp, iterator.sptep, NULL);
+			if (ret)
+				kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1);
+		}
+
+		if (!sp->unsync_children)
+			break;
+	}
+	write_unlock(&vcpu->kvm->mmu_lock);
+}
+
 /* roots_to_invalidte must be some combination of the KVM_MMU_ROOT_* flags */
 void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 			    gva_t gva, ulong roots_to_invalidate)
@@ -5754,16 +5768,16 @@ void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 		static_call(kvm_x86_flush_tlb_gva)(vcpu, gva);
 	}
 
-	if (!mmu->invlpg)
+	if (!mmu->sync_spte)
 		return;
 
 	if ((roots_to_invalidate & KVM_MMU_ROOT_CURRENT) && VALID_PAGE(mmu->root.hpa))
-		mmu->invlpg(vcpu, gva, mmu->root.hpa);
+		__kvm_mmu_invalidate_gva(vcpu, mmu, gva, mmu->root.hpa);
 
 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
 		if ((roots_to_invalidate & KVM_MMU_ROOT_PREVIOUS(i)) &&
 		    VALID_PAGE(mmu->prev_roots[i].hpa))
-			mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
+			__kvm_mmu_invalidate_gva(vcpu, mmu, gva, mmu->prev_roots[i].hpa);
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_invalidate_gva);
 
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 3bc13b9b61d1..62aac5d7d38c 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -851,65 +851,6 @@ static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
 	return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
 }
 
-static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
-{
-	struct kvm_shadow_walk_iterator iterator;
-	struct kvm_mmu_page *sp;
-	u64 old_spte;
-	int level;
-	u64 *sptep;
-
-	vcpu_clear_mmio_info(vcpu, gva);
-
-	/*
-	 * No need to check return value here, rmap_can_add() can
-	 * help us to skip pte prefetch later.
-	 */
-	mmu_topup_memory_caches(vcpu, true);
-
-	if (!VALID_PAGE(root_hpa)) {
-		WARN_ON(1);
-		return;
-	}
-
-	write_lock(&vcpu->kvm->mmu_lock);
-	for_each_shadow_entry_using_root(vcpu, root_hpa, gva, iterator) {
-		level = iterator.level;
-		sptep = iterator.sptep;
-
-		sp = sptep_to_sp(sptep);
-		old_spte = *sptep;
-		if (is_last_spte(old_spte, level)) {
-			pt_element_t gpte;
-			gpa_t pte_gpa;
-
-			if (!sp->unsync)
-				break;
-
-			pte_gpa = FNAME(get_level1_sp_gpa)(sp);
-			pte_gpa += spte_index(sptep) * sizeof(pt_element_t);
-
-			mmu_page_zap_pte(vcpu->kvm, sp, sptep, NULL);
-			if (is_shadow_present_pte(old_spte))
-				kvm_flush_remote_tlbs_with_address(vcpu->kvm,
-					sp->gfn, KVM_PAGES_PER_HPAGE(sp->role.level));
-
-			if (!rmap_can_add(vcpu))
-				break;
-
-			if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
-						       sizeof(pt_element_t)))
-				break;
-
-			FNAME(prefetch_gpte)(vcpu, sp, sptep, gpte, false);
-		}
-
-		if (!sp->unsync_children)
-			break;
-	}
-	write_unlock(&vcpu->kvm->mmu_lock);
-}
-
 /* Note, @addr is a GPA when gva_to_gpa() translates an L2 GPA to an L1 GPA. */
 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 			       gpa_t addr, u64 access,
-- 
2.19.1.6.gb485710b


* [PATCH 7/7] kvm: x86/mmu: Remove @no_dirty_log from FNAME(prefetch_gpte)
From: Lai Jiangshan @ 2023-01-05  9:58 UTC
  To: linux-kernel
  Cc: Paolo Bonzini, Sean Christopherson, Lai Jiangshan,
	Thomas Gleixner, Ingo Molnar, Borislav Petkov, Dave Hansen, x86,
	H. Peter Anvin, kvm

From: Lai Jiangshan <jiangshan.ljs@antgroup.com>

FNAME(prefetch_gpte) is now always called with @no_dirty_log=true,
since FNAME(invlpg), the only caller that passed false, has been
removed.  Drop the argument.

Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
---
 arch/x86/kvm/mmu/paging_tmpl.h | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 62aac5d7d38c..2db844d5d33c 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -519,7 +519,7 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
 
 static bool
 FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
-		     u64 *spte, pt_element_t gpte, bool no_dirty_log)
+		     u64 *spte, pt_element_t gpte)
 {
 	struct kvm_memory_slot *slot;
 	unsigned pte_access;
@@ -535,8 +535,7 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	pte_access = sp->role.access & FNAME(gpte_access)(gpte);
 	FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);
 
-	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn,
-			no_dirty_log && (pte_access & ACC_WRITE_MASK));
+	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, pte_access & ACC_WRITE_MASK);
 	if (!slot)
 		return false;
 
@@ -605,7 +604,7 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
 		if (is_shadow_present_pte(*spte))
 			continue;
 
-		if (!FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i], true))
+		if (!FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i]))
 			break;
 	}
 }
-- 
2.19.1.6.gb485710b


* Re: [PATCH 0/7] kvm: x86/mmu: Share the same code to invalidate each vTLB entry
From: Lai Jiangshan @ 2023-01-19  0:53 UTC
  To: linux-kernel; +Cc: Paolo Bonzini, Sean Christopherson, Lai Jiangshan

On Thu, Jan 5, 2023 at 5:57 PM Lai Jiangshan <jiangshanlai@gmail.com> wrote:
>
> From: Lai Jiangshan <jiangshan.ljs@antgroup.com>
>
> FNAME(invlpg) and FNAME(sync_page) both invalidate vTLB entries, but
> via slightly different methods.
>
> Make them use the same method and share the same code.
>
> Lai Jiangshan (7):
>   kvm: x86/mmu: Use KVM_MMU_ROOT_XXX for kvm_mmu_invalidate_gva()
>   kvm: x86/mmu: Use kvm_mmu_invalidate_gva() in kvm_mmu_invpcid_gva()
>   kvm: x86/mmu: Use kvm_mmu_invalidate_gva() in
>     nested_ept_invalidate_addr()
>   kvm: x86/mmu: Reduce the update to the spte in FNAME(sync_page)
>   kvm: x86/mmu: Move the code out of FNAME(sync_page)'s loop body into
>     mmu.c
>   kvm: x86/mmu: Remove FNAME(invlpg)
>   kvm: x86/mmu: Remove @no_dirty_log from FNAME(prefetch_gpte)
>
>  arch/x86/include/asm/kvm_host.h |   7 +-
>  arch/x86/kvm/mmu/mmu.c          | 177 +++++++++++++++++----------
>  arch/x86/kvm/mmu/paging_tmpl.h  | 207 ++++++++------------------------
>  arch/x86/kvm/vmx/nested.c       |   5 +-
>  arch/x86/kvm/x86.c              |   2 +-
>  5 files changed, 176 insertions(+), 222 deletions(-)
>
> --
> 2.19.1.6.gb485710b
>

Hello

Ping.

Cheers,
Lai

* Re: [PATCH 1/7] kvm: x86/mmu: Use KVM_MMU_ROOT_XXX for kvm_mmu_invalidate_gva()
From: Sean Christopherson @ 2023-02-02  1:21 UTC
  To: Lai Jiangshan
  Cc: linux-kernel, Paolo Bonzini, Lai Jiangshan, Thomas Gleixner,
	Ingo Molnar, Borislav Petkov, Dave Hansen, x86, H. Peter Anvin,
	kvm

On Thu, Jan 05, 2023, Lai Jiangshan wrote:
> From: Lai Jiangshan <jiangshan.ljs@antgroup.com>
> 
> kvm_mmu_invalidate_gva() is always called with @root_hpa set to either
> @mmu->root.hpa or INVALID_PAGE.
> 
> Replace them with KVM_MMU_ROOT_XXX.

Please explain _why_.  I can (and did) figure it out on my own, but doing that
takes time and slows down reviews.

> No functional change intended.
> 
> Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
> ---
>  arch/x86/include/asm/kvm_host.h |  2 +-
>  arch/x86/kvm/mmu/mmu.c          | 39 ++++++++++++++++-----------------
>  arch/x86/kvm/x86.c              |  2 +-
>  3 files changed, 21 insertions(+), 22 deletions(-)
> 
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 2f5bf581d00a..dbea616bccce 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -2026,7 +2026,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
>  		       void *insn, int insn_len);
>  void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
>  void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
> -			    gva_t gva, hpa_t root_hpa);
> +			    gva_t gva, ulong roots_to_invalidate);
>  void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
>  void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd);
>  
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 5407649de547..90339b71bd56 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -5693,8 +5693,9 @@ int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 err
>  }
>  EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
>  
> +/* roots_to_invalidte must be some combination of the KVM_MMU_ROOT_* flags */

Typo, though I would just drop this comment.  If we want some form of sanity check,
it should be totally doable to add a WARN_ON_ONCE() that verifies the parameter
is a subset of all possible root flags.
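
E.g., one possible (untested) form of such a sanity check, assuming the
KVM_MMU_ROOT_* flag definitions above:

	unsigned long valid_roots = KVM_MMU_ROOT_CURRENT;

	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
		valid_roots |= KVM_MMU_ROOT_PREVIOUS(i);

	WARN_ON_ONCE(roots_to_invalidate & ~valid_roots);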

>  void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
> -			    gva_t gva, hpa_t root_hpa)
> +			    gva_t gva, ulong roots_to_invalidate)

s/ulong/unsigned long

And I got confused by "roots_to_invalidate"; I thought it meant "invalidate these
entire trees" as opposed to "invalidate the gva in these trees".  Best I can come
up with is simply "roots".

>  {
>  	int i;
>  
> @@ -5710,31 +5711,29 @@ void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
>  	if (!mmu->invlpg)
>  		return;
>  
> -	if (root_hpa == INVALID_PAGE) {
> +	if ((roots_to_invalidate & KVM_MMU_ROOT_CURRENT) && VALID_PAGE(mmu->root.hpa))
>  		mmu->invlpg(vcpu, gva, mmu->root.hpa);
>  
> -		/*
> -		 * INVLPG is required to invalidate any global mappings for the VA,
> -		 * irrespective of PCID. Since it would take us roughly similar amount
> -		 * of work to determine whether any of the prev_root mappings of the VA
> -		 * is marked global, or to just sync it blindly, so we might as well
> -		 * just always sync it.
> -		 *
> -		 * Mappings not reachable via the current cr3 or the prev_roots will be
> -		 * synced when switching to that cr3, so nothing needs to be done here
> -		 * for them.
> -		 */
> -		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
> -			if (VALID_PAGE(mmu->prev_roots[i].hpa))
> -				mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
> -	} else {
> -		mmu->invlpg(vcpu, gva, root_hpa);
> -	}
> +	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)

for-loop needs curly braces.

> +		if ((roots_to_invalidate & KVM_MMU_ROOT_PREVIOUS(i)) &&
> +		    VALID_PAGE(mmu->prev_roots[i].hpa))
> +			mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);

I think it has to go at the end of this series, but please add a patch somewhere
to move the VALID_PAGE() check into __kvm_mmu_invalidate_gva(), e.g. end up with

	if (roots & KVM_MMU_ROOT_CURRENT)
		__kvm_mmu_invalidate_gva(vcpu, mmu, gva, mmu->root.hpa);

	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
		if (roots & KVM_MMU_ROOT_PREVIOUS(i))
			__kvm_mmu_invalidate_gva(vcpu, mmu, gva,
						 mmu->prev_roots[i].hpa);
	}

> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index c936f8d28a53..4696cbb40545 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -799,7 +799,7 @@ void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
>  	if ((fault->error_code & PFERR_PRESENT_MASK) &&
>  	    !(fault->error_code & PFERR_RSVD_MASK))
>  		kvm_mmu_invalidate_gva(vcpu, fault_mmu, fault->address,
> -				       fault_mmu->root.hpa);
> +				       KVM_MMU_ROOT_CURRENT);

This is logically correct, but there's potential (weird) functional change here.
If this is called with an invalid root, then KVM will invalidate the GVA in all
roots prior to this patch, but in no roots after this patch.

I _think_ it should be impossible to get here with an invalid root.  Can you try
adding a prep patch to assert that the root is valid so that this patch can
reasonably assert that there's no functional change?


diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 508074e47bc0..fffd9b610196 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -792,6 +792,8 @@ void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
        fault_mmu = fault->nested_page_fault ? vcpu->arch.mmu :
                                               vcpu->arch.walk_mmu;
 
+       WARN_ON_ONCE(!VALID_PAGE(fault_mmu->root.hpa));
+
        /*
         * Invalidate the TLB entry for the faulting address, if it exists,
         * else the access will fault indefinitely (and to emulate hardware).


* Re: [PATCH 2/7] kvm: x86/mmu: Use kvm_mmu_invalidate_gva() in kvm_mmu_invpcid_gva()
From: Sean Christopherson @ 2023-02-02  1:21 UTC
  To: Lai Jiangshan
  Cc: linux-kernel, Paolo Bonzini, Lai Jiangshan, Thomas Gleixner,
	Ingo Molnar, Borislav Petkov, Dave Hansen, x86, H. Peter Anvin,
	kvm

On Thu, Jan 05, 2023, Lai Jiangshan wrote:
> From: Lai Jiangshan <jiangshan.ljs@antgroup.com>
> 
> Use kvm_mmu_invalidate_gva() instead of open-coded calls to mmu->invlpg().
> 
> No functional change intended.
> 
> Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
> ---
>  arch/x86/kvm/mmu/mmu.c | 21 +++++++--------------
>  1 file changed, 7 insertions(+), 14 deletions(-)
> 
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 90339b71bd56..b0e7ac6d4e88 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -5742,27 +5742,20 @@ EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
>  void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
>  {
>  	struct kvm_mmu *mmu = vcpu->arch.mmu;
> -	bool tlb_flush = false;
> +	ulong roots_to_invalidate = 0;

"unsigned long roots" again, unless someone has a better idea.

* Re: [PATCH 3/7] kvm: x86/mmu: Use kvm_mmu_invalidate_gva() in nested_ept_invalidate_addr()
From: Sean Christopherson @ 2023-02-02  1:22 UTC
  To: Lai Jiangshan
  Cc: linux-kernel, Paolo Bonzini, Lai Jiangshan, Thomas Gleixner,
	Ingo Molnar, Borislav Petkov, Dave Hansen, x86, H. Peter Anvin,
	kvm

On Thu, Jan 05, 2023, Lai Jiangshan wrote:
> From: Lai Jiangshan <jiangshan.ljs@antgroup.com>
> 
> Use kvm_mmu_invalidate_gva() instead of open-coded calls to mmu->invlpg().
> 
> No functional change intended.
> 
> Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
> ---
>  arch/x86/kvm/mmu/mmu.c    | 1 +
>  arch/x86/kvm/vmx/nested.c | 5 ++++-
>  2 files changed, 5 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index b0e7ac6d4e88..ffef9fe0c853 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -5719,6 +5719,7 @@ void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
>  		    VALID_PAGE(mmu->prev_roots[i].hpa))
>  			mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
>  }
> +EXPORT_SYMBOL_GPL(kvm_mmu_invalidate_gva);
>  
>  void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
>  {
> diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
> index 557b9c468734..daf3138bafd1 100644
> --- a/arch/x86/kvm/vmx/nested.c
> +++ b/arch/x86/kvm/vmx/nested.c
> @@ -358,6 +358,7 @@ static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp)
>  static void nested_ept_invalidate_addr(struct kvm_vcpu *vcpu, gpa_t eptp,
>  				       gpa_t addr)
>  {
> +	ulong roots_to_invalidate = 0;

Same thing here.

* Re: [PATCH 5/7] kvm: x86/mmu: Move the code out of FNAME(sync_page)'s loop body into mmu.c
From: Sean Christopherson @ 2023-02-02  1:29 UTC
  To: Lai Jiangshan
  Cc: linux-kernel, Paolo Bonzini, Lai Jiangshan, Thomas Gleixner,
	Ingo Molnar, Borislav Petkov, Dave Hansen, x86, H. Peter Anvin,
	kvm

On Thu, Jan 05, 2023, Lai Jiangshan wrote:
> From: Lai Jiangshan <jiangshan.ljs@antgroup.com>
> 
> Rename mmu->sync_page to mmu->sync_spte and move the code out
> of FNAME(sync_page)'s loop body into mmu.c.
> 
> Also initialize mmu->sync_spte as NULL for direct paging.

Please split this up; we got burned before by making one of the mmu hooks NULL,
and I don't want a repeat of that.  I.e. nullify the direct hook only when it's
very clear it can't be dereferenced.

* Re: [PATCH 6/7] kvm: x86/mmu: Remove FNAME(invlpg)
From: Sean Christopherson @ 2023-02-02  1:30 UTC
  To: Lai Jiangshan
  Cc: linux-kernel, Paolo Bonzini, Lai Jiangshan, Thomas Gleixner,
	Ingo Molnar, Borislav Petkov, Dave Hansen, x86, H. Peter Anvin,
	kvm

On Thu, Jan 05, 2023, Lai Jiangshan wrote:
> From: Lai Jiangshan <jiangshan.ljs@antgroup.com>
> 
> Replace it with FNAME(sync_spte).
> 
> FNAME(sync_spte) combined with the shadow pagetable walk meets the
> semantics of the instruction INVLPG.

Please call out the differences (I assume the two aren't perfectly identical),
and explain why those differences are benign.

* Re: [PATCH 1/7] kvm: x86/mmu: Use KVM_MMU_ROOT_XXX for kvm_mmu_invalidate_gva()
From: Lai Jiangshan @ 2023-02-03 14:51 UTC
  To: Sean Christopherson
  Cc: linux-kernel, Paolo Bonzini, Lai Jiangshan, Thomas Gleixner,
	Ingo Molnar, Borislav Petkov, Dave Hansen, x86, H. Peter Anvin,
	kvm

On Thu, Feb 2, 2023 at 9:21 AM Sean Christopherson <seanjc@google.com> wrote:

>
> This is logically correct, but there's potential (weird) functional change here.
> If this is called with an invalid root, then KVM will invalidate the GVA in all
> roots prior to this patch, but in no roots after this patch.
>
> I _think_ it should be impossible get here with an invalid root.  Can you try
> adding a prep patch to assert that the root is valid so that this patch can
> reasonably assert that there's no functional change?
>
>
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 508074e47bc0..fffd9b610196 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -792,6 +792,8 @@ void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
>         fault_mmu = fault->nested_page_fault ? vcpu->arch.mmu :
>                                                vcpu->arch.walk_mmu;
>
> +       WARN_ON_ONCE(!VALID_PAGE(fault_mmu->root.hpa));
> +

I've been updating the patches as per your suggestions.

And I suddenly realized that when fault->nested_page_fault=false
with nested EPT, fault_mmu->root.hpa is always unset.

fault_mmu->root.hpa is just meaningless when fault_mmu is not
vcpu->arch.mmu.

I will add it as one of the reasons for replacing the argument
with KVM_MMU_ROOT_XXX.

>         /*
>          * Invalidate the TLB entry for the faulting address, if it exists,
>          * else the access will fault indefinitely (and to emulate hardware).
>

* Re: [PATCH 1/7] kvm: x86/mmu: Use KVM_MMU_ROOT_XXX for kvm_mmu_invalidate_gva()
From: Sean Christopherson @ 2023-02-03 20:54 UTC
  To: Lai Jiangshan
  Cc: linux-kernel, Paolo Bonzini, Lai Jiangshan, Thomas Gleixner,
	Ingo Molnar, Borislav Petkov, Dave Hansen, x86, H. Peter Anvin,
	kvm

On Fri, Feb 03, 2023, Lai Jiangshan wrote:
> On Thu, Feb 2, 2023 at 9:21 AM Sean Christopherson <seanjc@google.com> wrote:
> 
> >
> > This is logically correct, but there's potential (weird) functional change here.
> > If this is called with an invalid root, then KVM will invalidate the GVA in all
> > roots prior to this patch, but in no roots after this patch.
> >
> > I _think_ it should be impossible get here with an invalid root.  Can you try
> > adding a prep patch to assert that the root is valid so that this patch can
> > reasonably assert that there's no functional change?
> >
> >
> > diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> > index 508074e47bc0..fffd9b610196 100644
> > --- a/arch/x86/kvm/x86.c
> > +++ b/arch/x86/kvm/x86.c
> > @@ -792,6 +792,8 @@ void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
> >         fault_mmu = fault->nested_page_fault ? vcpu->arch.mmu :
> >                                                vcpu->arch.walk_mmu;
> >
> > +       WARN_ON_ONCE(!VALID_PAGE(fault_mmu->root.hpa));
> > +
> 
> I've been updating the patches as per your suggestions.
> 
> And I suddenly realized that when fault->nested_page_fault=false
> with nested EPT, fault_mmu->root.hpa is always unset.
> 
> fault_mmu->root.hpa is just meaningless when fault_mmu is not
> vcpu->arch.mmu.

Right, because there's no KVM-managed MMU. 

> I will add it as one of the reasons for replacing the argument
> with KVM_MMU_ROOT_XXX.

And maybe call out that when using walk_mmu, the ->invlpg() implementation is
NULL, i.e. using CURRENT root is a nop.

Thanks!
