From: Paolo Bonzini <pbonzini@redhat.com>
To: Liran Alon <liran.alon@oracle.com>,
	rkrcmar@redhat.com, kvm@vger.kernel.org
Cc: sean.j.christopherson@intel.com, jmattson@google.com,
	vkuznets@redhat.com, Joao Martins <joao.m.martins@oracle.com>,
	Nikita Leshenko <nikita.leshchenko@oracle.com>
Subject: Re: [PATCH 2/2] KVM: x86: Fix INIT signal handling in various CPU states
Date: Wed, 11 Sep 2019 18:23:35 +0200
Message-ID: <2a36ca45-56fa-5d4e-7b8c-157190f29f82@redhat.com>
In-Reply-To: <20190826102449.142687-3-liran.alon@oracle.com>

On 26/08/19 12:24, Liran Alon wrote:
>  	/*
> -	 * INITs are latched while in SMM.  Because an SMM CPU cannot
> -	 * be in KVM_MP_STATE_INIT_RECEIVED state, just eat SIPIs
> -	 * and delay processing of INIT until the next RSM.
> +	 * INITs are latched while CPU is in specific states.
> +	 * Because a CPU cannot be in these states immediately
> +	 * after it have processed an INIT signal (and thus in
> +	 * KVM_MP_STATE_INIT_RECEIVED state), just eat SIPIs
> +	 * and delay processing of INIT until CPU leaves
> +	 * the state which latch INIT signal.
>  	 */
> -	if (is_smm(vcpu)) {
> +	if (kvm_x86_ops->apic_init_signal_blocked(vcpu)) {

I'd prefer keeping is_smm(vcpu) here, since that is not vendor-specific.

Together with some edits to the comments, this is what I got.
Let me know if you prefer to have the latched_init changes
applied on top, or whether you'd like to post a v2 with everything.

Thanks,

Paolo

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index c4f271a9b306..b523949a8df8 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1209,6 +1209,8 @@ struct kvm_x86_ops {
 	uint16_t (*nested_get_evmcs_version)(struct kvm_vcpu *vcpu);
 
 	bool (*need_emulation_on_page_fault)(struct kvm_vcpu *vcpu);
+
+	bool (*apic_init_signal_blocked)(struct kvm_vcpu *vcpu);
 };
 
 struct kvm_arch_async_pf {
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 559e1c4c0832..dbbe4781fbb2 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -2706,11 +2706,14 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
 		return;
 
 	/*
-	 * INITs are latched while in SMM.  Because an SMM CPU cannot
-	 * be in KVM_MP_STATE_INIT_RECEIVED state, just eat SIPIs
-	 * and delay processing of INIT until the next RSM.
+	 * INITs are latched while the CPU is in specific states
+	 * (SMM, VMX non-root mode, SVM with GIF=0).
+	 * Because a CPU cannot be in these states immediately
+	 * after it has processed an INIT signal (and thus in
+	 * KVM_MP_STATE_INIT_RECEIVED state), just eat SIPIs
+	 * and leave the INIT pending.
 	 */
-	if (is_smm(vcpu)) {
+	if (is_smm(vcpu) || kvm_x86_ops->apic_init_signal_blocked(vcpu)) {
 		WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED);
 		if (test_bit(KVM_APIC_SIPI, &apic->pending_events))
 			clear_bit(KVM_APIC_SIPI, &apic->pending_events);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 2854aafc489e..d24050b647c7 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -7170,6 +7170,21 @@ static bool svm_need_emulation_on_page_fault(struct kvm_vcpu *vcpu)
 	return false;
 }
 
+static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	/*
+	 * TODO: The last condition latches INIT signals on the vCPU when
+	 * the vCPU is in guest-mode and vmcb12 defines an intercept on INIT.
+	 * To properly emulate the INIT intercept, SVM should implement
+	 * kvm_x86_ops->check_nested_events() and call nested_svm_vmexit()
+	 * there if an INIT signal is pending.
+	 */
+	return !gif_set(svm) ||
+		   (svm->vmcb->control.intercept & (1ULL << INTERCEPT_INIT));
+}
+
 static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.cpu_has_kvm_support = has_svm,
 	.disabled_by_bios = is_disabled,
@@ -7306,6 +7321,8 @@ static bool svm_need_emulation_on_page_fault(struct kvm_vcpu *vcpu)
 	.nested_get_evmcs_version = nested_get_evmcs_version,
 
 	.need_emulation_on_page_fault = svm_need_emulation_on_page_fault,
+
+	.apic_init_signal_blocked = svm_apic_init_signal_blocked,
 };
 
 static int __init svm_init(void)
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index ad2453317c4b..6ce83c602e7f 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -3409,6 +3409,15 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
 	unsigned long exit_qual;
 	bool block_nested_events =
 	    vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu);
+	struct kvm_lapic *apic = vcpu->arch.apic;
+
+	if (lapic_in_kernel(vcpu) &&
+		test_bit(KVM_APIC_INIT, &apic->pending_events)) {
+		if (block_nested_events)
+			return -EBUSY;
+		nested_vmx_vmexit(vcpu, EXIT_REASON_INIT_SIGNAL, 0, 0);
+		return 0;
+	}
 
 	if (vcpu->arch.exception.pending &&
 		nested_vmx_check_exception(vcpu, &exit_qual)) {
@@ -4470,7 +4479,12 @@ static int handle_vmoff(struct kvm_vcpu *vcpu)
 {
 	if (!nested_vmx_check_permission(vcpu))
 		return 1;
+
 	free_nested(vcpu);
+
+	/* Process an INIT signal latched while the CPU was in VMX operation */
+	kvm_make_request(KVM_REQ_EVENT, vcpu);
+
 	return nested_vmx_succeed(vcpu);
 }
 
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 99f52f8c969a..73bf9a2e6fb6 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7470,6 +7470,11 @@ static bool vmx_need_emulation_on_page_fault(struct kvm_vcpu *vcpu)
 	return false;
 }
 
+static bool vmx_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
+{
+	return to_vmx(vcpu)->nested.vmxon;
+}
+
 static __init int hardware_setup(void)
 {
 	unsigned long host_bndcfgs;
@@ -7794,6 +7799,7 @@ static __exit void hardware_unsetup(void)
 	.get_vmcs12_pages = NULL,
 	.nested_enable_evmcs = NULL,
 	.need_emulation_on_page_fault = vmx_need_emulation_on_page_fault,
+	.apic_init_signal_blocked = vmx_apic_init_signal_blocked,
 };
 
 static void vmx_cleanup_l1d_flush(void)
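
For reference, the check_nested_events() implementation mentioned in the
TODO in svm_apic_init_signal_blocked() could look roughly like the sketch
below.  This is only an untested illustration, not part of the patch: the
helper name, the kvm_event_needs_reinjection() blocking condition and the
vmcb12 intercept check are placeholders and would need the same care as
the VMX side, and the function would of course also have to be wired into
svm_x86_ops as .check_nested_events.

static int svm_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct kvm_lapic *apic = vcpu->arch.apic;

	/*
	 * If an INIT is pending while L2 runs and L1 intercepts INIT,
	 * reflect it to L1 as an SVM_EXIT_INIT vmexit instead of
	 * latching it.
	 */
	if (is_guest_mode(vcpu) && lapic_in_kernel(vcpu) &&
	    test_bit(KVM_APIC_INIT, &apic->pending_events) &&
	    (svm->nested.intercept & (1ULL << INTERCEPT_INIT))) {
		if (kvm_event_needs_reinjection(vcpu))
			return -EBUSY;
		svm->vmcb->control.exit_code   = SVM_EXIT_INIT;
		svm->vmcb->control.exit_info_1 = 0;
		svm->vmcb->control.exit_info_2 = 0;
		nested_svm_vmexit(svm);
	}

	return 0;
}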
