From: Sean Christopherson <sean.j.christopherson@intel.com>
To: Paolo Bonzini <pbonzini@redhat.com>
Cc: Sean Christopherson <sean.j.christopherson@intel.com>,
	Vitaly Kuznetsov <vkuznets@redhat.com>,
	Wanpeng Li <wanpengli@tencent.com>,
	Jim Mattson <jmattson@google.com>, Joerg Roedel <joro@8bytes.org>,
	kvm@vger.kernel.org, linux-kernel@vger.kernel.org,
	Oliver Upton <oupton@google.com>, Peter Shier <pshier@google.com>
Subject: [PATCH 04/13] KVM: x86: Make return for {interrupt,nmi}_allowed() a bool instead of int
Date: Wed, 22 Apr 2020 19:25:41 -0700
Message-ID: <20200423022550.15113-5-sean.j.christopherson@intel.com>
In-Reply-To: <20200423022550.15113-1-sean.j.christopherson@intel.com>

Return an actual bool from kvm_x86_ops' {interrupt,nmi}_allowed() hooks
to better reflect their return semantics, and to avoid creating an even
bigger mess when the related VMX code is refactored in upcoming patches.
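
For context, a minimal hypothetical sketch (not part of this patch; the
helper name and the kvm_x86_ops.interrupt_allowed() invocation style are
assumptions) of how such a hook is consumed.  Callers only ever use the
result as a yes/no predicate, which is why bool fits the semantics
better than int:

  /* Hypothetical caller, for illustration only. */
  static bool vcpu_irq_injection_allowed(struct kvm_vcpu *vcpu)
  {
          /*
           * The hook answers a yes/no question ("may an IRQ be injected
           * right now?") and never returns an error or status code, so
           * bool is the honest return type.
           */
          return kvm_x86_ops.interrupt_allowed(vcpu);
  }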

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/include/asm/kvm_host.h | 4 ++--
 arch/x86/kvm/svm/svm.c          | 9 +++++----
 arch/x86/kvm/vmx/vmx.c          | 8 ++++----
 3 files changed, 11 insertions(+), 10 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 65dc2c88d8b2..787636acd648 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1139,8 +1139,8 @@ struct kvm_x86_ops {
 	void (*set_nmi)(struct kvm_vcpu *vcpu);
 	void (*queue_exception)(struct kvm_vcpu *vcpu);
 	void (*cancel_injection)(struct kvm_vcpu *vcpu);
-	int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
-	int (*nmi_allowed)(struct kvm_vcpu *vcpu);
+	bool (*interrupt_allowed)(struct kvm_vcpu *vcpu);
+	bool (*nmi_allowed)(struct kvm_vcpu *vcpu);
 	bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
 	void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
 	void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index eb95283ab68d..f21f734861dd 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3062,11 +3062,12 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
 		set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
 }
 
-static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
+static bool svm_nmi_allowed(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	struct vmcb *vmcb = svm->vmcb;
-	int ret;
+	bool ret;
+
 	ret = !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
 	      !(svm->vcpu.arch.hflags & HF_NMI_MASK);
 	ret = ret && gif_set(svm) && nested_svm_nmi(svm);
@@ -3094,14 +3095,14 @@ static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
 	}
 }
 
-static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
+static bool svm_interrupt_allowed(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	struct vmcb *vmcb = svm->vmcb;
 
 	if (!gif_set(svm) ||
 	     (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK))
-		return 0;
+		return false;
 
 	if (is_guest_mode(vcpu) && (svm->vcpu.arch.hflags & HF_VINTR_MASK))
 		return !!(svm->vcpu.arch.hflags & HF_HIF_MASK);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 766303b31949..7dd42e7fef94 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -4511,21 +4511,21 @@ void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
 	}
 }
 
-static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
+static bool vmx_nmi_allowed(struct kvm_vcpu *vcpu)
 {
 	if (to_vmx(vcpu)->nested.nested_run_pending)
-		return 0;
+		return false;
 
 	if (!enable_vnmi &&
 	    to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked)
-		return 0;
+		return false;
 
 	return	!(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
 		  (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI
 		   | GUEST_INTR_STATE_NMI));
 }
 
-static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
+static bool vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
 {
 	if (to_vmx(vcpu)->nested.nested_run_pending)
 		return false;
-- 
2.26.0


Thread overview: 39+ messages
2020-04-23  2:25 [PATCH 00/13] KVM: x86: Event fixes and cleanup Sean Christopherson
2020-04-23  2:25 ` [PATCH 01/13] KVM: nVMX: Preserve exception priority irrespective of exiting behavior Sean Christopherson
2020-04-28 18:54   ` Jim Mattson
2020-04-28 20:07     ` Oliver Upton
2020-04-23  2:25 ` [PATCH 02/13] KVM: nVMX: Open a window for pending nested VMX preemption timer Sean Christopherson
2020-04-28 21:39   ` Jim Mattson
2020-04-23  2:25 ` [PATCH 03/13] KVM: x86: Set KVM_REQ_EVENT if run is canceled with req_immediate_exit set Sean Christopherson
2020-04-28 21:41   ` Jim Mattson
2020-04-23  2:25 ` Sean Christopherson [this message]
2020-04-28 21:42   ` [PATCH 04/13] KVM: x86: Make return for {interrupt,nmi}_allowed() a bool instead of int Jim Mattson
2020-04-23  2:25 ` [PATCH 05/13] KVM: nVMX: Move nested_exit_on_nmi() to nested.h Sean Christopherson
2020-04-28 21:44   ` Jim Mattson
2020-04-23  2:25 ` [PATCH 06/13] KVM: nVMX: Report NMIs as allowed when in L2 and Exit-on-NMI is set Sean Christopherson
2020-04-28 21:46   ` Jim Mattson
2020-04-23  2:25 ` [PATCH 07/13] KVM: VMX: Split out architectural interrupt/NMI blocking checks Sean Christopherson
2020-04-28 21:57   ` Jim Mattson
2020-04-23  2:25 ` [PATCH 08/13] KVM: nVMX: Preserve IRQ/NMI priority irrespective of exiting behavior Sean Christopherson
2020-04-28 21:58   ` Jim Mattson
2020-04-23  2:25 ` [PATCH 09/13] KVM: nVMX: Prioritize SMI over nested IRQ/NMI Sean Christopherson
2020-04-28 22:04   ` Jim Mattson
2020-04-28 22:59     ` Sean Christopherson
2020-04-28 23:16       ` Jim Mattson
2020-04-29 14:50         ` Sean Christopherson
2020-04-29 20:06           ` Sean Christopherson
2020-04-28 23:23       ` Jim Mattson
2020-04-23  2:25 ` [PATCH 10/13] KVM: x86: WARN on injected+pending exception even in nested case Sean Christopherson
2020-04-28 22:05   ` Jim Mattson
2020-04-23  2:25 ` [PATCH 11/13] KVM: VMX: Use vmx_interrupt_blocked() directly from vmx_handle_exit() Sean Christopherson
2020-04-28 22:07   ` Jim Mattson
2020-04-23  2:25 ` [PATCH 12/13] KVM: x86: Replace late check_nested_events() hack with more precise fix Sean Christopherson
2020-04-23 11:00   ` Paolo Bonzini
2020-04-28 22:12   ` Jim Mattson
2020-04-28 22:20     ` Sean Christopherson
2020-04-29  8:36       ` Paolo Bonzini
2020-04-29 16:45         ` Sean Christopherson
2020-04-29 16:58           ` Paolo Bonzini
2020-04-29 17:07             ` Sean Christopherson
2020-04-23  2:25 ` [PATCH 13/13] KVM: VMX: Use vmx_get_rflags() to query RFLAGS in vmx_interrupt_blocked() Sean Christopherson
2020-04-28 22:13   ` Jim Mattson
