From: Paolo Bonzini <pbonzini@redhat.com>
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: wei.huang2@amd.com, cavery@redhat.com, vkuznets@redhat.com,
	Sean Christopherson <sean.j.christopherson@intel.com>,
	Oliver Upton <oupton@google.com>,
	Jim Mattson <jmattson@google.com>
Subject: [PATCH v2 22/22] KVM: x86: Replace late check_nested_events() hack with more precise fix
Date: Sat, 25 Apr 2020 03:02:15 -0400
Message-ID: <20200425070215.251397-3-pbonzini@redhat.com>
In-Reply-To: <20200424172416.243870-1-pbonzini@redhat.com>

Add an argument to interrupt_allowed and nmi_allowed, to check whether
interrupt injection is blocked.  Use the hook to handle the case where
an interrupt arrives between check_nested_events() and the injection
logic.  Drop the retry of check_nested_events() that hack-a-fixed the
same condition.

Blocking injection is also a bit of a hack, e.g. KVM should do exiting
and non-exiting interrupt processing in a single pass, but it's a more
precise hack.  The old comment is also misleading, e.g. KVM_REQ_EVENT is
purely an optimization; setting it on every run loop (which KVM doesn't
do) would not affect functionality, only performance.
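
To illustrate the new contract, here is a minimal self-contained sketch
(toy types and helpers, hypothetical stand-ins rather than the real KVM
structures) of how for_injection separates "is the vCPU runnable" from
"may this event be injected right now":

/*
 * Toy model of the for_injection argument; everything here is an
 * illustrative stand-in, not the real KVM code.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_vcpu {
	bool in_guest_mode;	/* vCPU is running L2 */
	bool exits_on_intr;	/* L1 wants a VM-Exit on external IRQs */
	bool intr_blocked;	/* architectural blocking, e.g. IF clear */
};

static bool toy_interrupt_allowed(struct toy_vcpu *vcpu, bool for_injection)
{
	/* An IRQ must not be injected into L2 if it's supposed to VM-Exit. */
	if (for_injection && vcpu->in_guest_mode && vcpu->exits_on_intr)
		return false;

	return !vcpu->intr_blocked;
}

int main(void)
{
	struct toy_vcpu vcpu = {
		.in_guest_mode = true,
		.exits_on_intr = true,
		.intr_blocked  = false,
	};

	/* The pending IRQ makes the vCPU runnable (wakes it from halt)... */
	printf("runnable:   %d\n", toy_interrupt_allowed(&vcpu, false));
	/* ...but must not be injected directly; it has to be forwarded
	 * to L1 as a nested VM-Exit first. */
	printf("injectable: %d\n", toy_interrupt_allowed(&vcpu, true));
	return 0;
}

With for_injection=false the pending IRQ still wakes a halted vCPU,
while for_injection=true steers the IRQ down the nested VM-Exit path
instead of direct injection, which is exactly the window the old late
check_nested_events() retry papered over.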

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200423022550.15113-13-sean.j.christopherson@intel.com>
[Extend to SVM, add SMI and NMI.  Even though NMI and SMI cannot come
 asynchronously right now, making the fix generic is easy and removes a
 special case. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/include/asm/kvm_host.h |  6 +++---
 arch/x86/kvm/svm/svm.c          | 25 ++++++++++++++++++-----
 arch/x86/kvm/vmx/vmx.c          | 17 +++++++++++++---
 arch/x86/kvm/x86.c              | 36 +++++++++++----------------------
 4 files changed, 49 insertions(+), 35 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index efaddc68a694..7cd68d1d0627 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1139,8 +1139,8 @@ struct kvm_x86_ops {
 	void (*set_nmi)(struct kvm_vcpu *vcpu);
 	void (*queue_exception)(struct kvm_vcpu *vcpu);
 	void (*cancel_injection)(struct kvm_vcpu *vcpu);
-	bool (*interrupt_allowed)(struct kvm_vcpu *vcpu);
-	bool (*nmi_allowed)(struct kvm_vcpu *vcpu);
+	bool (*interrupt_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
+	bool (*nmi_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
 	bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
 	void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
 	void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
@@ -1238,7 +1238,7 @@ struct kvm_x86_ops {
 
 	void (*setup_mce)(struct kvm_vcpu *vcpu);
 
-	bool (*smi_allowed)(struct kvm_vcpu *vcpu);
+	bool (*smi_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
 	int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
 	int (*pre_leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
 	int (*enable_smi_window)(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 0bbb2cd24eb7..8f8fc65bfa3e 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3080,13 +3080,17 @@ bool svm_nmi_blocked(struct kvm_vcpu *vcpu)
 	return ret;
 }
 
-static bool svm_nmi_allowed(struct kvm_vcpu *vcpu)
+static bool svm_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	if (svm->nested.nested_run_pending)
 		return false;
 
-        return !svm_nmi_blocked(vcpu);
+	/* An NMI must not be injected into L2 if it's supposed to VM-Exit.  */
+	if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(svm))
+		return false;
+
+	return !svm_nmi_blocked(vcpu);
 }
 
 static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
@@ -3135,13 +3139,20 @@ bool svm_interrupt_blocked(struct kvm_vcpu *vcpu)
 	return (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK);
 }
 
-static bool svm_interrupt_allowed(struct kvm_vcpu *vcpu)
+static bool svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	if (svm->nested.nested_run_pending)
 		return false;
 
-        return !svm_interrupt_blocked(vcpu);
+	/*
+	 * An IRQ must not be injected into L2 if it's supposed to VM-Exit,
+	 * e.g. if the IRQ arrived asynchronously after checking nested events.
+	 */
+	if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(svm))
+		return false;
+
+	return !svm_interrupt_blocked(vcpu);
 }
 
 static void enable_irq_window(struct kvm_vcpu *vcpu)
@@ -3800,12 +3811,16 @@ bool svm_smi_blocked(struct kvm_vcpu *vcpu)
 	return is_smm(vcpu);
 }
 
-static bool svm_smi_allowed(struct kvm_vcpu *vcpu)
+static bool svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	if (svm->nested.nested_run_pending)
 		return false;
 
+	/* An SMI must not be injected into L2 if it's supposed to VM-Exit.  */
+	if (for_injection && is_guest_mode(vcpu) && nested_exit_on_smi(svm))
+		return false;
+
 	return !svm_smi_blocked(vcpu);
 }
 
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 9e2ba1f96cd1..3ab6ca6062ce 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -4524,11 +4524,15 @@ bool vmx_nmi_blocked(struct kvm_vcpu *vcpu)
 		 GUEST_INTR_STATE_NMI));
 }
 
-static bool vmx_nmi_allowed(struct kvm_vcpu *vcpu)
+static bool vmx_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
 {
 	if (to_vmx(vcpu)->nested.nested_run_pending)
 		return false;
 
+	/* An NMI must not be injected into L2 if it's supposed to VM-Exit.  */
+	if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(vcpu))
+		return false;
+
 	return !vmx_nmi_blocked(vcpu);
 }
 
@@ -4542,11 +4546,18 @@ bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu)
 		(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
 }
 
-static bool vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
+static bool vmx_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
 {
 	if (to_vmx(vcpu)->nested.nested_run_pending)
 		return false;
 
+	/*
+	 * An IRQ must not be injected into L2 if it's supposed to VM-Exit,
+	 * e.g. if the IRQ arrived asynchronously after checking nested events.
+	 */
+	if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
+		return false;
+
 	return !vmx_interrupt_blocked(vcpu);
 }
 
@@ -7688,7 +7699,7 @@ static void vmx_setup_mce(struct kvm_vcpu *vcpu)
 			~FEAT_CTL_LMCE_ENABLED;
 }
 
-static bool vmx_smi_allowed(struct kvm_vcpu *vcpu)
+static bool vmx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
 {
 	/* we need a nested vmexit to enter SMM, postpone if run is pending */
 	if (to_vmx(vcpu)->nested.nested_run_pending)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 44b9d834621c..856b6fc2c2ba 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7746,32 +7746,20 @@ static int inject_pending_event(struct kvm_vcpu *vcpu)
 	if (kvm_event_needs_reinjection(vcpu))
 		return 0;
 
-	if (vcpu->arch.smi_pending && kvm_x86_ops.smi_allowed(vcpu)) {
+	if (vcpu->arch.smi_pending &&
+	    kvm_x86_ops.smi_allowed(vcpu, true)) {
 		vcpu->arch.smi_pending = false;
 		++vcpu->arch.smi_count;
 		enter_smm(vcpu);
-	} else if (vcpu->arch.nmi_pending && kvm_x86_ops.nmi_allowed(vcpu)) {
+	} else if (vcpu->arch.nmi_pending &&
+		   kvm_x86_ops.nmi_allowed(vcpu, true)) {
 		--vcpu->arch.nmi_pending;
 		vcpu->arch.nmi_injected = true;
 		kvm_x86_ops.set_nmi(vcpu);
-	} else if (kvm_cpu_has_injectable_intr(vcpu)) {
-		/*
-		 * Because interrupts can be injected asynchronously, we are
-		 * calling check_nested_events again here to avoid a race condition.
-		 * See https://lkml.org/lkml/2014/7/2/60 for discussion about this
-		 * proposal and current concerns.  Perhaps we should be setting
-		 * KVM_REQ_EVENT only on certain events and not unconditionally?
-		 */
-		if (is_guest_mode(vcpu)) {
-			r = kvm_x86_ops.nested_ops->check_events(vcpu);
-			if (r != 0)
-				return r;
-		}
-		if (kvm_x86_ops.interrupt_allowed(vcpu)) {
-			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
-					    false);
-			kvm_x86_ops.set_irq(vcpu);
-		}
+	} else if (kvm_cpu_has_injectable_intr(vcpu) &&
+		   kvm_x86_ops.interrupt_allowed(vcpu, true)) {
+		kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu), false);
+		kvm_x86_ops.set_irq(vcpu);
 	}
 
 	return 0;
@@ -10171,12 +10159,12 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
 
 	if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
 	    (vcpu->arch.nmi_pending &&
-	     kvm_x86_ops.nmi_allowed(vcpu)))
+	     kvm_x86_ops.nmi_allowed(vcpu, false)))
 		return true;
 
 	if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
 	    (vcpu->arch.smi_pending &&
-	     kvm_x86_ops.smi_allowed(vcpu)))
+	     kvm_x86_ops.smi_allowed(vcpu, false)))
 		return true;
 
 	if (kvm_arch_interrupt_allowed(vcpu) &&
@@ -10228,7 +10216,7 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 
 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
 {
-	return kvm_x86_ops.interrupt_allowed(vcpu);
+	return kvm_x86_ops.interrupt_allowed(vcpu, false);
 }
 
 unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu)
@@ -10393,7 +10381,7 @@ bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
 	 * If interrupts are off we cannot even use an artificial
 	 * halt state.
 	 */
-	return kvm_x86_ops.interrupt_allowed(vcpu);
+	return kvm_arch_interrupt_allowed(vcpu);
 }
 
 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
-- 
2.18.2


Thread overview: 31+ messages
2020-04-24 17:23 [PATCH v2 00/22] KVM: Event fixes and cleanup Paolo Bonzini
2020-04-24 17:23 ` [PATCH v2 01/22] KVM: SVM: introduce nested_run_pending Paolo Bonzini
2020-04-24 17:23 ` [PATCH v2 02/22] KVM: SVM: leave halted state on vmexit Paolo Bonzini
2020-04-24 17:41   ` Oliver Upton
2020-04-24 17:23 ` [PATCH v2 03/22] KVM: SVM: immediately inject INTR vmexit Paolo Bonzini
2020-05-21 12:50   ` Vitaly Kuznetsov
2020-05-21 14:08     ` Paolo Bonzini
2020-05-21 21:04       ` Paolo Bonzini
2020-04-24 17:23 ` [PATCH v2 04/22] KVM: SVM: Implement check_nested_events for NMI Paolo Bonzini
2020-04-24 17:23 ` [PATCH v2 05/22] KVM: nVMX: Preserve exception priority irrespective of exiting behavior Paolo Bonzini
2020-04-24 17:24 ` [PATCH v2 06/22] KVM: nVMX: Open a window for pending nested VMX preemption timer Paolo Bonzini
2020-04-24 17:24 ` [PATCH v2 07/22] KVM: x86: Set KVM_REQ_EVENT if run is canceled with req_immediate_exit set Paolo Bonzini
2020-04-24 17:24 ` [PATCH v2 08/22] KVM: x86: Make return for {interrupt_nmi,smi}_allowed() a bool instead of int Paolo Bonzini
2020-04-24 17:24 ` [PATCH v2 09/22] KVM: x86: replace is_smm checks with kvm_x86_ops.smi_allowed Paolo Bonzini
2020-04-24 17:29 ` [PATCH v2 00/22] KVM: Event fixes and cleanup Sean Christopherson
2020-04-24 21:02 ` Oliver Upton
2020-04-24 21:05   ` Sean Christopherson
2020-04-25  7:21     ` Paolo Bonzini
2020-04-25  7:01 ` [PATCH v2 10/22] KVM: nVMX: Report NMIs as allowed when in L2 and Exit-on-NMI is set Paolo Bonzini
2020-04-25  7:01 ` [PATCH v2 11/22] KVM: nSVM: " Paolo Bonzini
2020-04-25  7:01 ` [PATCH v2 12/22] KVM: nSVM: Move SMI vmexit handling to svm_check_nested_events() Paolo Bonzini
2020-04-25  7:01 ` [PATCH v2 13/22] KVM: VMX: Split out architectural interrupt/NMI blocking checks Paolo Bonzini
2020-04-25  7:01 ` [PATCH v2 14/22] KVM: SVM: Split out architectural interrupt/NMI/SMI " Paolo Bonzini
2020-04-25  7:01 ` [PATCH v2 15/22] KVM: nVMX: Preserve IRQ/NMI priority irrespective of exiting behavior Paolo Bonzini
2020-04-25  7:01 ` [PATCH v2 16/22] KVM: nVMX: Prioritize SMI over nested IRQ/NMI Paolo Bonzini
2020-04-25  7:01 ` [PATCH v2 17/22] KVM: nSVM: Report interrupts as allowed when in L2 and exit-on-interrupt is set Paolo Bonzini
2020-04-25  7:01 ` [PATCH v2 18/22] KVM: nSVM: Preserve IRQ/NMI/SMI priority irrespective of exiting behavior Paolo Bonzini
2020-04-25  7:01 ` [PATCH v2 19/22] KVM: x86: WARN on injected+pending exception even in nested case Paolo Bonzini
2020-04-25  7:02 ` [PATCH v2 20/22] KVM: VMX: Use vmx_interrupt_blocked() directly from vmx_handle_exit() Paolo Bonzini
2020-04-25  7:02 ` [PATCH v2 21/22] KVM: VMX: Use vmx_get_rflags() to query RFLAGS in vmx_interrupt_blocked() Paolo Bonzini
2020-04-25  7:02 ` Paolo Bonzini [this message]
