From: Maxim Levitsky <mlevitsk@redhat.com>
To: Paolo Bonzini <pbonzini@redhat.com>,
	linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: seanjc@google.com, maciej.szmigiero@oracle.com
Subject: Re: [PATCH v2 6/8] KVM: x86: compile out vendor-specific code if SMM is disabled
Date: Mon, 24 Oct 2022 15:32:56 +0300
Message-ID: <7a9fb4d7f3f9fbe977ab4bb38711520673aa03cc.camel@redhat.com>
In-Reply-To: <20220929172016.319443-7-pbonzini@redhat.com>

On Thu, 2022-09-29 at 13:20 -0400, Paolo Bonzini wrote:
> Vendor-specific code that deals with SMI injection and saving/restoring
> SMM state is not needed if CONFIG_KVM_SMM is disabled, so remove the
> four callbacks smi_allowed, enter_smm, leave_smm and enable_smi_window.
> The users in svm/nested.c and x86.c also have to be compiled out; the
> amount of #ifdef'ed code is small and it's not worth moving it to
> smm.c.
> 
> enter_smm is now used only within #ifdef CONFIG_KVM_SMM, and the stub
> can therefore be removed.
> 
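Nice use of the existing x-macro machinery here: since kvm-x86-ops.h is
re-included by each consumer with its own definition of KVM_X86_OP (the
static-call declarations in kvm_host.h, the definition and
static_call_update() sites in x86.c), a single #ifdef in the list header
compiles the four ops out of every generated site at once.  Roughly, as
a from-memory sketch of one consumer rather than the exact macros:

	/*
	 * x86.c redefines KVM_X86_OP before re-including the list,
	 * so the #ifdef CONFIG_KVM_SMM added above strips these
	 * static-call definitions as well:
	 */
	#define KVM_X86_OP(func) \
		DEFINE_STATIC_CALL_NULL(kvm_x86_##func, \
					*(((struct kvm_x86_ops *)0)->func));
	#include <asm/kvm-x86-ops.h>

The one thing the list header can't remove is the struct kvm_x86_ops
members themselves, which is why the matching #ifdef in kvm_host.h
below is still needed.
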
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
>  arch/x86/include/asm/kvm-x86-ops.h | 2 ++
>  arch/x86/include/asm/kvm_host.h    | 2 ++
>  arch/x86/kvm/smm.h                 | 1 -
>  arch/x86/kvm/svm/nested.c          | 2 ++
>  arch/x86/kvm/svm/svm.c             | 4 ++++
>  arch/x86/kvm/vmx/vmx.c             | 4 ++++
>  arch/x86/kvm/x86.c                 | 4 ++++
>  7 files changed, 18 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
> index 82ba4a564e58..ea58e67e9a67 100644
> --- a/arch/x86/include/asm/kvm-x86-ops.h
> +++ b/arch/x86/include/asm/kvm-x86-ops.h
> @@ -110,10 +110,12 @@ KVM_X86_OP_OPTIONAL_RET0(dy_apicv_has_pending_interrupt)
>  KVM_X86_OP_OPTIONAL(set_hv_timer)
>  KVM_X86_OP_OPTIONAL(cancel_hv_timer)
>  KVM_X86_OP(setup_mce)
> +#ifdef CONFIG_KVM_SMM
>  KVM_X86_OP(smi_allowed)
>  KVM_X86_OP(enter_smm)
>  KVM_X86_OP(leave_smm)
>  KVM_X86_OP(enable_smi_window)
> +#endif
>  KVM_X86_OP_OPTIONAL(mem_enc_ioctl)
>  KVM_X86_OP_OPTIONAL(mem_enc_register_region)
>  KVM_X86_OP_OPTIONAL(mem_enc_unregister_region)
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index b7d078cd768d..cb88da02d965 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -1606,10 +1606,12 @@ struct kvm_x86_ops {
>  
>  	void (*setup_mce)(struct kvm_vcpu *vcpu);
>  
> +#ifdef CONFIG_KVM_SMM
>  	int (*smi_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
>  	int (*enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
>  	int (*leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
>  	void (*enable_smi_window)(struct kvm_vcpu *vcpu);
> +#endif
>  
>  	int (*mem_enc_ioctl)(struct kvm *kvm, void __user *argp);
>  	int (*mem_enc_register_region)(struct kvm *kvm, struct kvm_enc_region *argp);
> diff --git a/arch/x86/kvm/smm.h b/arch/x86/kvm/smm.h
> index 4c699fee4492..7ccce6b655ca 100644
> --- a/arch/x86/kvm/smm.h
> +++ b/arch/x86/kvm/smm.h
> @@ -28,7 +28,6 @@ void process_smi(struct kvm_vcpu *vcpu);
>  static inline int kvm_inject_smi(struct kvm_vcpu *vcpu) { return -ENOTTY; }
>  static inline bool is_smm(struct kvm_vcpu *vcpu) { return false; }
>  static inline void kvm_smm_changed(struct kvm_vcpu *vcpu, bool in_smm) { WARN_ON_ONCE(1); }
> -static inline void enter_smm(struct kvm_vcpu *vcpu) { WARN_ON_ONCE(1); }
>  static inline void process_smi(struct kvm_vcpu *vcpu) { WARN_ON_ONCE(1); }
>  
>  /*
> diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
> index cc0fd75f7cba..b258d6988f5d 100644
> --- a/arch/x86/kvm/svm/nested.c
> +++ b/arch/x86/kvm/svm/nested.c
> @@ -1378,6 +1378,7 @@ static int svm_check_nested_events(struct kvm_vcpu *vcpu)
>  		return 0;
>  	}
>  
> +#ifdef CONFIG_KVM_SMM
>  	if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
>  		if (block_nested_events)
>  			return -EBUSY;
> @@ -1386,6 +1387,7 @@ static int svm_check_nested_events(struct kvm_vcpu *vcpu)
>  		nested_svm_simple_vmexit(svm, SVM_EXIT_SMI);
>  		return 0;
>  	}
> +#endif
>  
>  	if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) {
>  		if (block_nested_events)
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index 6f7ceb35d2ff..2200b8aa7273 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -4408,6 +4408,7 @@ static void svm_setup_mce(struct kvm_vcpu *vcpu)
>  	vcpu->arch.mcg_cap &= 0x1ff;
>  }
>  
> +#ifdef CONFIG_KVM_SMM
>  bool svm_smi_blocked(struct kvm_vcpu *vcpu)
>  {
>  	struct vcpu_svm *svm = to_svm(vcpu);
> @@ -4557,6 +4558,7 @@ static void svm_enable_smi_window(struct kvm_vcpu *vcpu)
>  		/* We must be in SMM; RSM will cause a vmexit anyway.  */
>  	}
>  }
> +#endif
>  
>  static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
>  					void *insn, int insn_len)
> @@ -4832,10 +4834,12 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
>  	.pi_update_irte = avic_pi_update_irte,
>  	.setup_mce = svm_setup_mce,
>  
> +#ifdef CONFIG_KVM_SMM
>  	.smi_allowed = svm_smi_allowed,
>  	.enter_smm = svm_enter_smm,
>  	.leave_smm = svm_leave_smm,
>  	.enable_smi_window = svm_enable_smi_window,
> +#endif
>  
>  	.mem_enc_ioctl = sev_mem_enc_ioctl,
>  	.mem_enc_register_region = sev_mem_enc_register_region,
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index b22330a15adb..107fc035c91b 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -7905,6 +7905,7 @@ static void vmx_setup_mce(struct kvm_vcpu *vcpu)
>  			~FEAT_CTL_LMCE_ENABLED;
>  }
>  
> +#ifdef CONFIG_KVM_SMM
>  static int vmx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
>  {
>  	/* we need a nested vmexit to enter SMM, postpone if run is pending */
> @@ -7959,6 +7960,7 @@ static void vmx_enable_smi_window(struct kvm_vcpu *vcpu)
>  {
>  	/* RSM will cause a vmexit anyway.  */
>  }
> +#endif
>  
>  static bool vmx_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
>  {
> @@ -8126,10 +8128,12 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
>  
>  	.setup_mce = vmx_setup_mce,
>  
> +#ifdef CONFIG_KVM_SMM
>  	.smi_allowed = vmx_smi_allowed,
>  	.enter_smm = vmx_enter_smm,
>  	.leave_smm = vmx_leave_smm,
>  	.enable_smi_window = vmx_enable_smi_window,
> +#endif
>  
>  	.can_emulate_instruction = vmx_can_emulate_instruction,
>  	.apic_init_signal_blocked = vmx_apic_init_signal_blocked,
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index a9e050aefea6..e22184bad92b 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -9863,6 +9863,7 @@ static int kvm_check_and_inject_events(struct kvm_vcpu *vcpu,
>  	 * in order to make progress and get back here for another iteration.
>  	 * The kvm_x86_ops hooks communicate this by returning -EBUSY.
>  	 */
> +#ifdef CONFIG_KVM_SMM
>  	if (vcpu->arch.smi_pending) {
>  		r = can_inject ? static_call(kvm_x86_smi_allowed)(vcpu, true) : -EBUSY;
>  		if (r < 0)
> @@ -9875,6 +9876,7 @@ static int kvm_check_and_inject_events(struct kvm_vcpu *vcpu,
>  		} else
>  			static_call(kvm_x86_enable_smi_window)(vcpu);
>  	}
> +#endif
>  
>  	if (vcpu->arch.nmi_pending) {
>  		r = can_inject ? static_call(kvm_x86_nmi_allowed)(vcpu, true) : -EBUSY;
> @@ -12491,10 +12493,12 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
>  	     static_call(kvm_x86_nmi_allowed)(vcpu, false)))
>  		return true;
>  
> +#ifdef CONFIG_KVM_SMM
>  	if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
>  	    (vcpu->arch.smi_pending &&
>  	     static_call(kvm_x86_smi_allowed)(vcpu, false)))
>  		return true;
> +#endif
>  
>  	if (kvm_arch_interrupt_allowed(vcpu) &&
>  	    (kvm_cpu_has_interrupt(vcpu) ||
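
For anyone wanting to sanity-check a CONFIG_KVM_SMM=n build from
userspace: with the stubs in smm.h, kvm_inject_smi() returns -ENOTTY,
so KVM_SMI should fail the same way (and KVM_CAP_X86_SMM should report
0).  Something like the below, as an illustrative sketch (error
handling elided, not part of the patch):

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <errno.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int main(void)
	{
		/* error handling elided for brevity */
		int kvm = open("/dev/kvm", O_RDWR);
		int vm = ioctl(kvm, KVM_CREATE_VM, 0);
		int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);

		printf("KVM_CAP_X86_SMM: %d\n",
		       ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_X86_SMM));

		if (ioctl(vcpu, KVM_SMI, 0) < 0)
			printf("KVM_SMI: %s\n", strerror(errno));
		else
			printf("KVM_SMI: ok\n");
		return 0;
	}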


Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>

Best regards,
	Maxim Levitsky


