From: Paolo Bonzini <pbonzini@redhat.com>
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: wei.huang2@amd.com, cavery@redhat.com, vkuznets@redhat.com,
	Sean Christopherson <sean.j.christopherson@intel.com>,
	Oliver Upton <oupton@google.com>,
	Jim Mattson <jmattson@google.com>
Subject: [PATCH v2 08/22] KVM: x86: Make return for {interrupt,nmi,smi}_allowed() a bool instead of int
Date: Fri, 24 Apr 2020 13:24:02 -0400
Message-ID: <20200424172416.243870-9-pbonzini@redhat.com>
In-Reply-To: <20200424172416.243870-1-pbonzini@redhat.com>

From: Sean Christopherson <sean.j.christopherson@intel.com>

Return an actual bool for the kvm_x86_ops {interrupt,nmi,smi}_allowed()
hooks to better reflect their return semantics, and to avoid creating an
even bigger mess when the related VMX code is refactored in upcoming
patches.
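For illustration only (not part of the diff below): a minimal,
self-contained sketch of the pattern being cleaned up. The names here
are invented for the example; the point is that callers only ever
branch on the hook's result, so a bool return type states that
contract in the signature, while an int suggests an error code or
count:

	#include <stdbool.h>
	#include <stdio.h>

	/* Toy stand-in for a kvm_x86_ops-style hook table. */
	struct demo_ops {
		bool (*interrupt_allowed)(int vcpu_id);
	};

	/* A bool implementation cannot smuggle extra values to callers. */
	static bool demo_interrupt_allowed(int vcpu_id)
	{
		return vcpu_id >= 0;	/* some yes/no condition */
	}

	int main(void)
	{
		struct demo_ops ops = {
			.interrupt_allowed = demo_interrupt_allowed,
		};

		/* Callers branch on the result, never use it as a number. */
		if (ops.interrupt_allowed(0))
			printf("interrupt injection allowed\n");
		return 0;
	}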

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200423022550.15113-5-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/include/asm/kvm_host.h |  6 +++---
 arch/x86/kvm/svm/svm.c          | 16 ++++++++--------
 arch/x86/kvm/vmx/vmx.c          | 14 +++++++-------
 3 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 372e6ea4af32..efaddc68a694 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1139,8 +1139,8 @@ struct kvm_x86_ops {
 	void (*set_nmi)(struct kvm_vcpu *vcpu);
 	void (*queue_exception)(struct kvm_vcpu *vcpu);
 	void (*cancel_injection)(struct kvm_vcpu *vcpu);
-	int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
-	int (*nmi_allowed)(struct kvm_vcpu *vcpu);
+	bool (*interrupt_allowed)(struct kvm_vcpu *vcpu);
+	bool (*nmi_allowed)(struct kvm_vcpu *vcpu);
 	bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
 	void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
 	void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
@@ -1238,7 +1238,7 @@ struct kvm_x86_ops {
 
 	void (*setup_mce)(struct kvm_vcpu *vcpu);
 
-	int (*smi_allowed)(struct kvm_vcpu *vcpu);
+	bool (*smi_allowed)(struct kvm_vcpu *vcpu);
 	int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
 	int (*pre_leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
 	int (*enable_smi_window)(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 8e732eb0b5c9..cdee634e961d 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3062,11 +3062,11 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
 		set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
 }
 
-static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
+static bool svm_nmi_allowed(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	struct vmcb *vmcb = svm->vmcb;
-	int ret;
+	bool ret;
 
 	ret = !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
 	      !(svm->vcpu.arch.hflags & HF_NMI_MASK);
@@ -3095,14 +3095,14 @@ static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
 	}
 }
 
-static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
+static bool svm_interrupt_allowed(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	struct vmcb *vmcb = svm->vmcb;
 
 	if (!gif_set(svm) ||
 	     (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK))
-		return 0;
+		return false;
 
 	if (is_guest_mode(vcpu) && (svm->vcpu.arch.hflags & HF_VINTR_MASK))
 		return !!(svm->vcpu.arch.hflags & HF_HIF_MASK);
@@ -3755,23 +3755,23 @@ static void svm_setup_mce(struct kvm_vcpu *vcpu)
 	vcpu->arch.mcg_cap &= 0x1ff;
 }
 
-static int svm_smi_allowed(struct kvm_vcpu *vcpu)
+static bool svm_smi_allowed(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	/* Per APM Vol.2 15.22.2 "Response to SMI" */
 	if (!gif_set(svm))
-		return 0;
+		return false;
 
 	if (is_guest_mode(&svm->vcpu) &&
 	    svm->nested.intercept & (1ULL << INTERCEPT_SMI)) {
 		/* TODO: Might need to set exit_info_1 and exit_info_2 here */
 		svm->vmcb->control.exit_code = SVM_EXIT_SMI;
 		svm->nested.exit_required = true;
-		return 0;
+		return false;
 	}
 
-	return 1;
+	return true;
 }
 
 static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 455cd2c8dbce..c98194f04b04 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -4511,21 +4511,21 @@ void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
 	}
 }
 
-static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
+static bool vmx_nmi_allowed(struct kvm_vcpu *vcpu)
 {
 	if (to_vmx(vcpu)->nested.nested_run_pending)
-		return 0;
+		return false;
 
 	if (!enable_vnmi &&
 	    to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked)
-		return 0;
+		return false;
 
 	return	!(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
 		  (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI
 		   | GUEST_INTR_STATE_NMI));
 }
 
-static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
+static bool vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
 {
 	if (to_vmx(vcpu)->nested.nested_run_pending)
 		return false;
@@ -7675,12 +7675,12 @@ static void vmx_setup_mce(struct kvm_vcpu *vcpu)
 			~FEAT_CTL_LMCE_ENABLED;
 }
 
-static int vmx_smi_allowed(struct kvm_vcpu *vcpu)
+static bool vmx_smi_allowed(struct kvm_vcpu *vcpu)
 {
 	/* we need a nested vmexit to enter SMM, postpone if run is pending */
 	if (to_vmx(vcpu)->nested.nested_run_pending)
-		return 0;
-	return 1;
+		return false;
+	return true;
 }
 
 static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
-- 
2.18.2



Thread overview: 31+ messages
2020-04-24 17:23 [PATCH v2 00/22] KVM: Event fixes and cleanup Paolo Bonzini
2020-04-24 17:23 ` [PATCH v2 01/22] KVM: SVM: introduce nested_run_pending Paolo Bonzini
2020-04-24 17:23 ` [PATCH v2 02/22] KVM: SVM: leave halted state on vmexit Paolo Bonzini
2020-04-24 17:41   ` Oliver Upton
2020-04-24 17:23 ` [PATCH v2 03/22] KVM: SVM: immediately inject INTR vmexit Paolo Bonzini
2020-05-21 12:50   ` Vitaly Kuznetsov
2020-05-21 14:08     ` Paolo Bonzini
2020-05-21 21:04       ` Paolo Bonzini
2020-04-24 17:23 ` [PATCH v2 04/22] KVM: SVM: Implement check_nested_events for NMI Paolo Bonzini
2020-04-24 17:23 ` [PATCH v2 05/22] KVM: nVMX: Preserve exception priority irrespective of exiting behavior Paolo Bonzini
2020-04-24 17:24 ` [PATCH v2 06/22] KVM: nVMX: Open a window for pending nested VMX preemption timer Paolo Bonzini
2020-04-24 17:24 ` [PATCH v2 07/22] KVM: x86: Set KVM_REQ_EVENT if run is canceled with req_immediate_exit set Paolo Bonzini
2020-04-24 17:24 ` Paolo Bonzini [this message]
2020-04-24 17:24 ` [PATCH v2 09/22] KVM: x86: replace is_smm checks with kvm_x86_ops.smi_allowed Paolo Bonzini
2020-04-24 17:29 ` [PATCH v2 00/22] KVM: Event fixes and cleanup Sean Christopherson
2020-04-24 21:02 ` Oliver Upton
2020-04-24 21:05   ` Sean Christopherson
2020-04-25  7:21     ` Paolo Bonzini
2020-04-25  7:01 ` [PATCH v2 10/22] KVM: nVMX: Report NMIs as allowed when in L2 and Exit-on-NMI is set Paolo Bonzini
2020-04-25  7:01 ` [PATCH v2 11/22] KVM: nSVM: " Paolo Bonzini
2020-04-25  7:01 ` [PATCH v2 12/22] KVM: nSVM: Move SMI vmexit handling to svm_check_nested_events() Paolo Bonzini
2020-04-25  7:01 ` [PATCH v2 13/22] KVM: VMX: Split out architectural interrupt/NMI blocking checks Paolo Bonzini
2020-04-25  7:01 ` [PATCH v2 14/22] KVM: SVM: Split out architectural interrupt/NMI/SMI " Paolo Bonzini
2020-04-25  7:01 ` [PATCH v2 15/22] KVM: nVMX: Preserve IRQ/NMI priority irrespective of exiting behavior Paolo Bonzini
2020-04-25  7:01 ` [PATCH v2 16/22] KVM: nVMX: Prioritize SMI over nested IRQ/NMI Paolo Bonzini
2020-04-25  7:01 ` [PATCH v2 17/22] KVM: nSVM: Report interrupts as allowed when in L2 and exit-on-interrupt is set Paolo Bonzini
2020-04-25  7:01 ` [PATCH v2 18/22] KVM: nSVM: Preserve IRQ/NMI/SMI priority irrespective of exiting behavior Paolo Bonzini
2020-04-25  7:01 ` [PATCH v2 19/22] KVM: x86: WARN on injected+pending exception even in nested case Paolo Bonzini
2020-04-25  7:02 ` [PATCH v2 20/22] KVM: VMX: Use vmx_interrupt_blocked() directly from vmx_handle_exit() Paolo Bonzini
2020-04-25  7:02 ` [PATCH v2 21/22] KVM: VMX: Use vmx_get_rflags() to query RFLAGS in vmx_interrupt_blocked() Paolo Bonzini
2020-04-25  7:02 ` [PATCH v2 22/22] KVM: x86: Replace late check_nested_events() hack with more precise fix Paolo Bonzini
