From: Sean Christopherson <sean.j.christopherson@intel.com>
To: Paolo Bonzini <pbonzini@redhat.com>
Cc: Sean Christopherson <sean.j.christopherson@intel.com>,
	Vitaly Kuznetsov <vkuznets@redhat.com>,
	Wanpeng Li <wanpengli@tencent.com>,
	Jim Mattson <jmattson@google.com>, Joerg Roedel <joro@8bytes.org>,
	kvm@vger.kernel.org, linux-kernel@vger.kernel.org,
	Ben Gardon <bgardon@google.com>,
	Junaid Shahid <junaids@google.com>,
	Liran Alon <liran.alon@oracle.com>,
	Boris Ostrovsky <boris.ostrovsky@oracle.com>,
	John Haxby <john.haxby@oracle.com>,
	Miaohe Lin <linmiaohe@huawei.com>,
	Tom Lendacky <thomas.lendacky@amd.com>
Subject: [PATCH v3 36/37] KVM: x86: Replace "cr3" with "pgd" in "new cr3/pgd" related code
Date: Fri, 20 Mar 2020 14:28:32 -0700	[thread overview]
Message-ID: <20200320212833.3507-37-sean.j.christopherson@intel.com> (raw)
In-Reply-To: <20200320212833.3507-1-sean.j.christopherson@intel.com>

Rename functions and variables in kvm_mmu_new_cr3() and related code to
replace "cr3" with "pgd", i.e. continue the work started by commit
727a7e27cf88a ("KVM: x86: rename set_cr3 callback and related flags to
load_mmu_pgd").  kvm_mmu_new_cr3() and company are not always loading a
new CR3, e.g. when nested EPT is enabled "cr3" is actually an EPTP.
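
For illustration, a minimal user-space sketch of the two encodings that
can land in the same gpa_t root: a legacy CR3 carries a PCID in bits
11:0, while an EPTP carries the EPT memory type in bits 2:0 and the
page-walk level in bits 5:3 (per the SDM).  The helper names and
simplified layouts below are made up for this sketch and are not KVM
code:

/*
 * Illustrative sketch only: hypothetical helpers with simplified bit
 * layouts, not KVM's actual definitions.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gpa_t;

/* Legacy CR3: PFN in bits 51:12, PCID in bits 11:0 (CR4.PCIDE=1). */
static gpa_t example_cr3(gpa_t pfn, unsigned int pcid)
{
	return (pfn << 12) | (pcid & 0xfff);
}

/*
 * EPTP: PFN in bits 51:12, page-walk level minus 1 in bits 5:3,
 * memory type in bits 2:0 (6 == write-back).
 */
static gpa_t example_eptp(gpa_t pfn, unsigned int levels)
{
	return (pfn << 12) | ((gpa_t)(levels - 1) << 3) | 6;
}

int main(void)
{
	/* Both values flow into the same gpa_t "pgd" parameter. */
	printf("cr3  = 0x%llx\n", (unsigned long long)example_cr3(0x1234, 1));
	printf("eptp = 0x%llx\n", (unsigned long long)example_eptp(0x1234, 4));
	return 0;
}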

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/include/asm/kvm_host.h |  8 ++---
 arch/x86/kvm/mmu/mmu.c          | 58 ++++++++++++++++-----------------
 arch/x86/kvm/vmx/nested.c       |  8 ++---
 arch/x86/kvm/vmx/vmx.c          |  2 +-
 arch/x86/kvm/x86.c              |  2 +-
 5 files changed, 39 insertions(+), 39 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 6fca2e45886c..167729624149 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -373,12 +373,12 @@ struct rsvd_bits_validate {
 };
 
 struct kvm_mmu_root_info {
-	gpa_t cr3;
+	gpa_t pgd;
 	hpa_t hpa;
 };
 
 #define KVM_MMU_ROOT_INFO_INVALID \
-	((struct kvm_mmu_root_info) { .cr3 = INVALID_PAGE, .hpa = INVALID_PAGE })
+	((struct kvm_mmu_root_info) { .pgd = INVALID_PAGE, .hpa = INVALID_PAGE })
 
 #define KVM_MMU_NUM_PREV_ROOTS 3
 
@@ -404,7 +404,7 @@ struct kvm_mmu {
 	void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 			   u64 *spte, const void *pte);
 	hpa_t root_hpa;
-	gpa_t root_cr3;
+	gpa_t root_pgd;
 	union kvm_mmu_role mmu_role;
 	u8 root_level;
 	u8 shadow_root_level;
@@ -1517,7 +1517,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
 		       void *insn, int insn_len);
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
 void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
-void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush,
+void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd, bool skip_tlb_flush,
 		     bool skip_mmu_sync);
 
 void kvm_configure_mmu(bool enable_tdp, int tdp_page_level);
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 7b0fb7f2c24d..be03f353dd3d 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3669,7 +3669,7 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 							   &invalid_list);
 			mmu->root_hpa = INVALID_PAGE;
 		}
-		mmu->root_cr3 = 0;
+		mmu->root_pgd = 0;
 	}
 
 	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
@@ -3726,8 +3726,8 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
 	} else
 		BUG();
 
-	/* root_cr3 is ignored for direct MMUs. */
-	vcpu->arch.mmu->root_cr3 = 0;
+	/* root_pgd is ignored for direct MMUs. */
+	vcpu->arch.mmu->root_pgd = 0;
 
 	return 0;
 }
@@ -3736,11 +3736,11 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mmu_page *sp;
 	u64 pdptr, pm_mask;
-	gfn_t root_gfn, root_cr3;
+	gfn_t root_gfn, root_pgd;
 	int i;
 
-	root_cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu);
-	root_gfn = root_cr3 >> PAGE_SHIFT;
+	root_pgd = vcpu->arch.mmu->get_guest_pgd(vcpu);
+	root_gfn = root_pgd >> PAGE_SHIFT;
 
 	if (mmu_check_root(vcpu, root_gfn))
 		return 1;
@@ -3765,7 +3765,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 		++sp->root_count;
 		spin_unlock(&vcpu->kvm->mmu_lock);
 		vcpu->arch.mmu->root_hpa = root;
-		goto set_root_cr3;
+		goto set_root_pgd;
 	}
 
 	/*
@@ -3831,8 +3831,8 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 		vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root);
 	}
 
-set_root_cr3:
-	vcpu->arch.mmu->root_cr3 = root_cr3;
+set_root_pgd:
+	vcpu->arch.mmu->root_pgd = root_pgd;
 
 	return 0;
 }
@@ -4248,49 +4248,49 @@ static void nonpaging_init_context(struct kvm_vcpu *vcpu,
 	context->nx = false;
 }
 
-static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t cr3,
+static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
 				  union kvm_mmu_page_role role)
 {
-	return (role.direct || cr3 == root->cr3) &&
+	return (role.direct || pgd == root->pgd) &&
 	       VALID_PAGE(root->hpa) && page_header(root->hpa) &&
 	       role.word == page_header(root->hpa)->role.word;
 }
 
 /*
- * Find out if a previously cached root matching the new CR3/role is available.
+ * Find out if a previously cached root matching the new pgd/role is available.
  * The current root is also inserted into the cache.
  * If a matching root was found, it is assigned to kvm_mmu->root_hpa and true is
  * returned.
  * Otherwise, the LRU root from the cache is assigned to kvm_mmu->root_hpa and
  * false is returned. This root should now be freed by the caller.
  */
-static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3,
+static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_pgd,
 				  union kvm_mmu_page_role new_role)
 {
 	uint i;
 	struct kvm_mmu_root_info root;
 	struct kvm_mmu *mmu = vcpu->arch.mmu;
 
-	root.cr3 = mmu->root_cr3;
+	root.pgd = mmu->root_pgd;
 	root.hpa = mmu->root_hpa;
 
-	if (is_root_usable(&root, new_cr3, new_role))
+	if (is_root_usable(&root, new_pgd, new_role))
 		return true;
 
 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
 		swap(root, mmu->prev_roots[i]);
 
-		if (is_root_usable(&root, new_cr3, new_role))
+		if (is_root_usable(&root, new_pgd, new_role))
 			break;
 	}
 
 	mmu->root_hpa = root.hpa;
-	mmu->root_cr3 = root.cr3;
+	mmu->root_pgd = root.pgd;
 
 	return i < KVM_MMU_NUM_PREV_ROOTS;
 }
 
-static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
+static bool fast_pgd_switch(struct kvm_vcpu *vcpu, gpa_t new_pgd,
 			    union kvm_mmu_page_role new_role)
 {
 	struct kvm_mmu *mmu = vcpu->arch.mmu;
@@ -4302,17 +4302,17 @@ static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
 	 */
 	if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
 	    mmu->root_level >= PT64_ROOT_4LEVEL)
-		return !mmu_check_root(vcpu, new_cr3 >> PAGE_SHIFT) &&
-		       cached_root_available(vcpu, new_cr3, new_role);
+		return !mmu_check_root(vcpu, new_pgd >> PAGE_SHIFT) &&
+		       cached_root_available(vcpu, new_pgd, new_role);
 
 	return false;
 }
 
-static void __kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3,
+static void __kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd,
 			      union kvm_mmu_page_role new_role,
 			      bool skip_tlb_flush, bool skip_mmu_sync)
 {
-	if (!fast_cr3_switch(vcpu, new_cr3, new_role)) {
+	if (!fast_pgd_switch(vcpu, new_pgd, new_role)) {
 		kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, KVM_MMU_ROOT_CURRENT);
 		return;
 	}
@@ -4341,13 +4341,13 @@ static void __kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3,
 	__clear_sp_write_flooding_count(page_header(vcpu->arch.mmu->root_hpa));
 }
 
-void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush,
+void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd, bool skip_tlb_flush,
 		     bool skip_mmu_sync)
 {
-	__kvm_mmu_new_cr3(vcpu, new_cr3, kvm_mmu_calc_root_page_role(vcpu),
+	__kvm_mmu_new_pgd(vcpu, new_pgd, kvm_mmu_calc_root_page_role(vcpu),
 			  skip_tlb_flush, skip_mmu_sync);
 }
-EXPORT_SYMBOL_GPL(kvm_mmu_new_cr3);
+EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);
 
 static unsigned long get_cr3(struct kvm_vcpu *vcpu)
 {
@@ -5038,7 +5038,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 		kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
 						   execonly, level);
 
-	__kvm_mmu_new_cr3(vcpu, new_eptp, new_role.base, true, true);
+	__kvm_mmu_new_pgd(vcpu, new_eptp, new_role.base, true, true);
 
 	if (new_role.as_u64 == context->mmu_role.as_u64)
 		return;
@@ -5532,7 +5532,7 @@ void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
 
 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
 		if (VALID_PAGE(mmu->prev_roots[i].hpa) &&
-		    pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].cr3)) {
+		    pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd)) {
 			mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
 			tlb_flush = true;
 		}
@@ -5686,13 +5686,13 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
 	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
 
 	vcpu->arch.root_mmu.root_hpa = INVALID_PAGE;
-	vcpu->arch.root_mmu.root_cr3 = 0;
+	vcpu->arch.root_mmu.root_pgd = 0;
 	vcpu->arch.root_mmu.translate_gpa = translate_gpa;
 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
 		vcpu->arch.root_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
 
 	vcpu->arch.guest_mmu.root_hpa = INVALID_PAGE;
-	vcpu->arch.guest_mmu.root_cr3 = 0;
+	vcpu->arch.guest_mmu.root_pgd = 0;
 	vcpu->arch.guest_mmu.translate_gpa = translate_gpa;
 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
 		vcpu->arch.guest_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 72e69d841531..88fe87f8e140 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -356,7 +356,7 @@ static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
 		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
 			prev = &vcpu->arch.mmu->prev_roots[i];
 
-			if (nested_ept_root_matches(prev->hpa, prev->cr3,
+			if (nested_ept_root_matches(prev->hpa, prev->pgd,
 						    vmcs12->ept_pointer))
 				vcpu->arch.mmu->invlpg(vcpu, gpa, prev->hpa);
 		}
@@ -1166,7 +1166,7 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool ne
 	 * nested_vmx_transition_mmu_sync for details on skipping the MMU sync.
 	 */
 	if (!nested_ept)
-		kvm_mmu_new_cr3(vcpu, cr3, true,
+		kvm_mmu_new_pgd(vcpu, cr3, true,
 				!nested_vmx_transition_mmu_sync(vcpu));
 
 	vcpu->arch.cr3 = cr3;
@@ -5233,13 +5233,13 @@ static int handle_invept(struct kvm_vcpu *vcpu)
 				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
 
 		roots_to_free = 0;
-		if (nested_ept_root_matches(mmu->root_hpa, mmu->root_cr3,
+		if (nested_ept_root_matches(mmu->root_hpa, mmu->root_pgd,
 					    operand.eptp))
 			roots_to_free |= KVM_MMU_ROOT_CURRENT;
 
 		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
 			if (nested_ept_root_matches(mmu->prev_roots[i].hpa,
-						    mmu->prev_roots[i].cr3,
+						    mmu->prev_roots[i].pgd,
 						    operand.eptp))
 				roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
 		}
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index d49d2a1ddf03..53fea2d38590 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -5487,7 +5487,7 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
 		}
 
 		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
-			if (kvm_get_pcid(vcpu, vcpu->arch.mmu->prev_roots[i].cr3)
+			if (kvm_get_pcid(vcpu, vcpu->arch.mmu->prev_roots[i].pgd)
 			    == operand.pcid)
 				roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0d1572a0791c..210af343eebf 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1045,7 +1045,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 		 !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
 		return 1;
 
-	kvm_mmu_new_cr3(vcpu, cr3, skip_tlb_flush, skip_tlb_flush);
+	kvm_mmu_new_pgd(vcpu, cr3, skip_tlb_flush, skip_tlb_flush);
 	vcpu->arch.cr3 = cr3;
 	kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
 
-- 
2.24.1
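
As a side note on the renamed cached_root_available(): the comment in
the hunk above describes an MRU rotation of the current root through
the prev_roots[] cache.  A standalone sketch of just that rotation,
with simplified types and no role matching (all names here are
illustrative, not KVM's actual code):

#include <stdbool.h>
#include <stdint.h>

#define NUM_PREV_ROOTS	3
#define INVALID_ROOT	(~(uint64_t)0)

struct root_info {
	uint64_t pgd;	/* guest root: CR3 or EPTP */
	uint64_t hpa;	/* shadow/TDP root page */
};

struct mmu {
	struct root_info cur;
	struct root_info prev[NUM_PREV_ROOTS];
};

static void swap_roots(struct root_info *a, struct root_info *b)
{
	struct root_info tmp = *a;

	*a = *b;
	*b = tmp;
}

/*
 * Demote the current root into the cache while searching it; on a hit
 * the matching entry becomes the current root, on a miss the LRU entry
 * does (and a real caller would then free/replace it).
 */
static bool cached_root_available(struct mmu *mmu, uint64_t new_pgd)
{
	struct root_info root = mmu->cur;
	int i;

	if (root.pgd == new_pgd && root.hpa != INVALID_ROOT)
		return true;

	for (i = 0; i < NUM_PREV_ROOTS; i++) {
		swap_roots(&root, &mmu->prev[i]);
		if (root.pgd == new_pgd && root.hpa != INVALID_ROOT)
			break;
	}

	mmu->cur = root;
	return i < NUM_PREV_ROOTS;
}

int main(void)
{
	struct mmu mmu = {
		.cur = { .pgd = 0x1000, .hpa = 0xaaaa },
	};
	int i;

	for (i = 0; i < NUM_PREV_ROOTS; i++)
		mmu.prev[i].pgd = mmu.prev[i].hpa = INVALID_ROOT;

	/* Miss: 0x2000 is uncached; 0x1000 gets demoted into prev[0]. */
	cached_root_available(&mmu, 0x2000);

	/* Hit: 0x1000 is found in the cache and becomes current again. */
	return cached_root_available(&mmu, 0x1000) ? 0 : 1;
}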


Thread overview: 83+ messages
2020-03-20 21:27 [PATCH v3 00/37] KVM: x86: TLB flushing fixes and enhancements Sean Christopherson
2020-03-20 21:27 ` [PATCH v3 01/37] KVM: VMX: Flush all EPTP/VPID contexts on remote TLB flush Sean Christopherson
2021-08-03  1:45   ` Lai Jiangshan
2021-08-03 15:39     ` Sean Christopherson
2021-08-04  3:11       ` Lai Jiangshan
2021-08-04 15:33         ` Sean Christopherson
2020-03-20 21:27 ` [PATCH v3 02/37] KVM: nVMX: Validate the EPTP when emulating INVEPT(EXTENT_CONTEXT) Sean Christopherson
2020-03-23 14:51   ` Vitaly Kuznetsov
2020-03-23 15:45     ` Sean Christopherson
2020-03-23 23:46       ` Paolo Bonzini
2020-03-20 21:27 ` [PATCH v3 03/37] KVM: nVMX: Invalidate all EPTP contexts when emulating INVEPT for L1 Sean Christopherson
2020-03-23 15:24   ` Vitaly Kuznetsov
2020-03-23 15:53     ` Sean Christopherson
2020-03-23 16:24   ` Jim Mattson
2020-03-23 16:28     ` Sean Christopherson
2020-03-23 16:36       ` Jim Mattson
2020-03-23 16:44         ` Sean Christopherson
2020-03-23 23:50           ` Paolo Bonzini
2020-03-24  0:12             ` Jim Mattson
2020-03-30 18:38               ` Sean Christopherson
2020-03-20 21:28 ` [PATCH v3 04/37] KVM: nVMX: Invalidate all roots when emulating INVVPID without EPT Sean Christopherson
2020-03-23 15:34   ` Vitaly Kuznetsov
2020-03-23 16:04     ` Sean Christopherson
2020-03-23 16:33       ` Vitaly Kuznetsov
2020-03-23 16:50         ` Sean Christopherson
2020-03-23 16:57           ` Vitaly Kuznetsov
2020-03-20 21:28 ` [PATCH v3 05/37] KVM: x86: Export kvm_propagate_fault() (as kvm_inject_emulated_page_fault) Sean Christopherson
2020-03-23 15:47   ` Vitaly Kuznetsov
2020-03-23 16:24     ` Sean Christopherson
2020-03-23 23:56       ` Paolo Bonzini
2020-03-20 21:28 ` [PATCH v3 06/37] KVM: x86: Consolidate logic for injecting page faults to L1 Sean Christopherson
2020-03-24  0:47   ` Paolo Bonzini
2020-03-20 21:28 ` [PATCH v3 07/37] KVM: x86: Sync SPTEs when injecting page/EPT fault into L1 Sean Christopherson
2020-03-20 21:28 ` [PATCH v3 08/37] KVM: VMX: Skip global INVVPID fallback if vpid==0 in vpid_sync_context() Sean Christopherson
2020-03-25  9:33   ` Vitaly Kuznetsov
2020-03-20 21:28 ` [PATCH v3 09/37] KVM: VMX: Use vpid_sync_context() directly when possible Sean Christopherson
2020-03-20 21:28 ` [PATCH v3 10/37] KVM: VMX: Move vpid_sync_vcpu_addr() down a few lines Sean Christopherson
2020-03-20 21:28 ` [PATCH v3 11/37] KVM: VMX: Handle INVVPID fallback logic in vpid_sync_vcpu_addr() Sean Christopherson
2020-03-20 21:28 ` [PATCH v3 12/37] KVM: VMX: Drop redundant capability checks in low level INVVPID helpers Sean Christopherson
2020-03-20 21:28 ` [PATCH v3 13/37] KVM: nVMX: Use vpid_sync_vcpu_addr() to emulate INVVPID with address Sean Christopherson
2020-03-20 21:28 ` [PATCH v3 14/37] KVM: x86: Move "flush guest's TLB" logic to separate kvm_x86_ops hook Sean Christopherson
2020-03-25 10:23   ` Vitaly Kuznetsov
2020-03-25 15:41     ` Paolo Bonzini
2020-03-25 16:08       ` Vitaly Kuznetsov
2020-03-25 15:48     ` Sean Christopherson
2020-03-25 16:11       ` Vitaly Kuznetsov
2020-03-20 21:28 ` [PATCH v3 15/37] KVM: VMX: Clean up vmx_flush_tlb_gva() Sean Christopherson
2020-03-20 21:28 ` [PATCH v3 16/37] KVM: x86: Drop @invalidate_gpa param from kvm_x86_ops' tlb_flush() Sean Christopherson
2020-03-25 11:23   ` Vitaly Kuznetsov
2020-03-20 21:28 ` [PATCH v3 17/37] KVM: SVM: Wire up ->tlb_flush_guest() directly to svm_flush_tlb() Sean Christopherson
2020-03-25 11:23   ` Vitaly Kuznetsov
2020-03-20 21:28 ` [PATCH v3 18/37] KVM: VMX: Move vmx_flush_tlb() to vmx.c Sean Christopherson
2020-03-25 11:25   ` Vitaly Kuznetsov
2020-03-20 21:28 ` [PATCH v3 19/37] KVM: nVMX: Move nested_get_vpid02() to vmx/nested.h Sean Christopherson
2020-03-25 11:25   ` Vitaly Kuznetsov
2020-03-20 21:28 ` [PATCH v3 20/37] KVM: VMX: Introduce vmx_flush_tlb_current() Sean Christopherson
2020-03-20 21:28 ` [PATCH v3 21/37] KVM: SVM: Document the ASID logic in svm_flush_tlb() Sean Christopherson
2020-03-20 21:28 ` [PATCH v3 22/37] KVM: x86: Rename ->tlb_flush() to ->tlb_flush_all() Sean Christopherson
2020-03-20 21:28 ` [PATCH v3 23/37] KVM: nVMX: Add helper to handle TLB flushes on nested VM-Enter/VM-Exit Sean Christopherson
2021-10-28 13:11   ` Lai Jiangshan
2021-10-28 15:22     ` Sean Christopherson
2021-10-29  0:44       ` Lai Jiangshan
2021-10-29 17:10         ` Sean Christopherson
2021-10-30  1:34           ` Lai Jiangshan
2021-11-04 17:47             ` Sean Christopherson
2020-03-20 21:28 ` [PATCH v3 24/37] KVM: x86: Introduce KVM_REQ_TLB_FLUSH_CURRENT to flush current ASID Sean Christopherson
2020-03-20 21:28 ` [PATCH v3 25/37] KVM: x86/mmu: Use KVM_REQ_TLB_FLUSH_CURRENT for MMU specific flushes Sean Christopherson
2020-03-20 21:28 ` [PATCH v3 26/37] KVM: nVMX: Selectively use TLB_FLUSH_CURRENT for nested VM-Enter/VM-Exit Sean Christopherson
2020-03-20 21:28 ` [PATCH v3 27/37] KVM: nVMX: Reload APIC access page on nested VM-Exit only if necessary Sean Christopherson
2020-03-20 21:28 ` [PATCH v3 28/37] KVM: VMX: Retrieve APIC access page HPA only when necessary Sean Christopherson
2020-03-20 21:28 ` [PATCH v3 29/37] KVM: VMX: Don't reload APIC access page if its control is disabled Sean Christopherson
2020-03-20 21:28 ` [PATCH v3 30/37] KVM: x86/mmu: Move fast_cr3_switch() side effects to __kvm_mmu_new_cr3() Sean Christopherson
2020-03-20 21:28 ` [PATCH v3 31/37] KVM: x86/mmu: Add separate override for MMU sync during fast CR3 switch Sean Christopherson
2020-03-24 11:07   ` Paolo Bonzini
2020-03-20 21:28 ` [PATCH v3 32/37] KVM: x86/mmu: Add module param to force TLB flush on root reuse Sean Christopherson
2020-03-20 21:28 ` [PATCH v3 33/37] KVM: nVMX: Skip MMU sync on nested VMX transition when possible Sean Christopherson
2020-03-24 11:19   ` Paolo Bonzini
2020-03-20 21:28 ` [PATCH v3 34/37] KVM: nVMX: Don't flush TLB on nested VMX transition Sean Christopherson
2020-03-24 11:20   ` Paolo Bonzini
2020-03-24 18:10     ` Sean Christopherson
2020-03-20 21:28 ` [PATCH v3 35/37] KVM: nVMX: Free only the affected contexts when emulating INVEPT Sean Christopherson
2020-03-20 21:28 ` Sean Christopherson [this message]
2020-03-20 21:28 ` [PATCH v3 37/37] KVM: VMX: Clean cr3/pgd handling in vmx_load_mmu_pgd() Sean Christopherson
