From: Sean Christopherson <sean.j.christopherson@intel.com>
To: Paolo Bonzini <pbonzini@redhat.com>
Cc: Sean Christopherson <sean.j.christopherson@intel.com>,
	Vitaly Kuznetsov <vkuznets@redhat.com>,
	Wanpeng Li <wanpengli@tencent.com>,
	Jim Mattson <jmattson@google.com>, Joerg Roedel <joro@8bytes.org>,
	kvm@vger.kernel.org, linux-kernel@vger.kernel.org
Subject: [PATCH 06/10] KVM: x86: Move "flush guest's TLB" logic to separate kvm_x86_ops hook
Date: Thu, 20 Feb 2020 12:43:52 -0800	[thread overview]
Message-ID: <20200220204356.8837-7-sean.j.christopherson@intel.com> (raw)
In-Reply-To: <20200220204356.8837-1-sean.j.christopherson@intel.com>

Add a dedicated hook to handle flushing TLB entries on behalf of the
guest, i.e. for a paravirtualized TLB flush, and use it directly instead
of bouncing through kvm_vcpu_flush_tlb().  Change the effective VMX
implementation to never do INVEPT, i.e. to always flush via INVVPID.
The INVEPT performed by __vmx_flush_tlb() when @invalidate_gpa=false and
enable_vpid=0 is unnecessary, as it will only flush GPA->HPA mappings;
GVA->GPA and GVA->HPA translations are flushed by VM-Enter when VPID is
disabled, and changes in the guest's page tables only affect GVA->*PA
mappings.
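
For reference, the pre-patch behavior being simplified is roughly the
following (a paraphrased sketch of __vmx_flush_tlb() at this series'
base, trimmed for brevity; exact details may differ):

  static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid,
				     bool invalidate_gpa)
  {
	if (enable_ept && (invalidate_gpa || !enable_vpid)) {
		/* INVEPT: flushes GPA->HPA (and combined) translations. */
		ept_sync_context(construct_eptp(vcpu,
						vcpu->arch.mmu->root_hpa));
	} else {
		/* INVVPID: flushes linear and combined translations. */
		vpid_sync_context(vpid);
	}
  }

Note the "|| !enable_vpid" leg: with @invalidate_gpa=false and
enable_vpid=0 it still falls through to INVEPT, which is the superfluous
flush described above.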

When EPT and VPID are enabled, doing INVVPID is not required (by Intel's
architecture) to invalidate GPA mappings, i.e. TLB entries that cache
GPA->HPA translations can live across INVVPID as GPA->HPA mappings are
associated with an EPTP, not a VPID.  The intent of @invalidate_gpa is
to inform vmx_flush_tlb() that it needs to "invalidate gpa mappings",
i.e. do INVEPT and not simply INVVPID.  Other than nested VPID handling,
which now calls vpid_sync_context() directly, the only scenario where
KVM can safely do INVVPID instead of INVEPT (when EPT is enabled) is if
KVM is flushing TLB entries from the guest's perspective, i.e. is
invalidating GVA->GPA mappings.

Adding a dedicated ->tlb_flush_guest() paves the way toward removing
@invalidate_gpa, which is a potentially dangerous control flag as its
meaning is not exactly crystal clear, even for those who are familiar
with the subtleties of what mappings Intel CPUs are/aren't allowed to
keep across various invalidation scenarios.
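
To illustrate the direction (illustrative only; the actual conversion is
done by later patches in this series), the common helper can eventually
drop the flag altogether, e.g. something along the lines of:

  static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
  {
	++vcpu->stat.tlb_flush;
	kvm_x86_ops->tlb_flush(vcpu);
  }

with the paravirt flush in record_steal_time() going through the new
->tlb_flush_guest() hook instead (see the x86.c hunk below).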

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/include/asm/kvm_host.h |  6 ++++++
 arch/x86/kvm/svm.c              |  6 ++++++
 arch/x86/kvm/vmx/vmx.c          | 13 +++++++++++++
 arch/x86/kvm/x86.c              |  2 +-
 4 files changed, 26 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4dffbc10d3f8..86aed64b9a88 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1108,6 +1108,12 @@ struct kvm_x86_ops {
 	 */
 	void (*tlb_flush_gva)(struct kvm_vcpu *vcpu, gva_t addr);
 
+	/*
+	 * Flush any TLB entries created by the guest.  Like tlb_flush_gva(),
+	 * does not need to flush GPA->HPA mappings.
+	 */
+	void (*tlb_flush_guest)(struct kvm_vcpu *vcpu);
+
 	void (*run)(struct kvm_vcpu *vcpu);
 	int (*handle_exit)(struct kvm_vcpu *vcpu,
 		enum exit_fastpath_completion exit_fastpath);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index a3e32d61d60c..e549811f51c6 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -5608,6 +5608,11 @@ static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
 	invlpga(gva, svm->vmcb->control.asid);
 }
 
+static void svm_flush_tlb_guest(struct kvm_vcpu *vcpu)
+{
+	svm_flush_tlb(vcpu, true);
+}
+
 static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
 {
 }
@@ -7429,6 +7434,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 
 	.tlb_flush = svm_flush_tlb,
 	.tlb_flush_gva = svm_flush_tlb_gva,
+	.tlb_flush_guest = svm_flush_tlb_guest,
 
 	.run = svm_vcpu_run,
 	.handle_exit = handle_exit,
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 349a6e054e0e..5372a93e1727 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -2835,6 +2835,18 @@ static void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr)
 	 */
 }
 
+static void vmx_flush_tlb_guest(struct kvm_vcpu *vcpu)
+{
+	/*
+	 * vpid_sync_context() is a nop if vmx->vpid==0, e.g. if enable_vpid==0
+	 * or a vpid couldn't be allocated for this vCPU.  VM-Enter and VM-Exit
+	 * are required to flush GVA->{G,H}PA mappings from the TLB if vpid is
+	 * disabled (VM-Enter with vpid enabled and vpid==0 is disallowed),
+	 * i.e. no explicit INVVPID is necessary.
+	 */
+	vpid_sync_context(to_vmx(vcpu)->vpid);
+}
+
 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
 {
 	ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
@@ -7779,6 +7791,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 
 	.tlb_flush = vmx_flush_tlb,
 	.tlb_flush_gva = vmx_flush_tlb_gva,
+	.tlb_flush_guest = vmx_flush_tlb_guest,
 
 	.run = vmx_vcpu_run,
 	.handle_exit = vmx_handle_exit,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index fbabb2f06273..72f7ca4baa6d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2675,7 +2675,7 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
 	trace_kvm_pv_tlb_flush(vcpu->vcpu_id,
 		st->preempted & KVM_VCPU_FLUSH_TLB);
 	if (xchg(&st->preempted, 0) & KVM_VCPU_FLUSH_TLB)
-		kvm_vcpu_flush_tlb(vcpu, false);
+		kvm_x86_ops->tlb_flush_guest(vcpu);
 
 	vcpu->arch.st.preempted = 0;
 
-- 
2.24.1

