From: Maxim Levitsky <mlevitsk@redhat.com>
To: kvm@vger.kernel.org
Cc: Thomas Gleixner <tglx@linutronix.de>,
	Yang Zhong <yang.zhong@intel.com>,
	x86@kernel.org, Jim Mattson <jmattson@google.com>,
	Vitaly Kuznetsov <vkuznets@redhat.com>,
	Paolo Bonzini <pbonzini@redhat.com>,
	Sean Christopherson <seanjc@google.com>,
	Wanpeng Li <wanpengli@tencent.com>, Shuah Khan <shuah@kernel.org>,
	Guang Zeng <guang.zeng@intel.com>, Joerg Roedel <joro@8bytes.org>,
	Maxim Levitsky <mlevitsk@redhat.com>,
	linux-kernel@vger.kernel.org,
	Dave Hansen <dave.hansen@linux.intel.com>,
	Ingo Molnar <mingo@redhat.com>,
	linux-kselftest@vger.kernel.org,
	Kees Cook <keescook@chromium.org>,
	"H. Peter Anvin" <hpa@zytor.com>, Wei Wang <wei.w.wang@intel.com>,
	Borislav Petkov <bp@alien8.de>
Subject: [PATCH RESEND v4 17/23] KVM: x86: smm: use smram structs in the common code
Date: Tue, 25 Oct 2022 15:47:35 +0300
Message-ID: <20221025124741.228045-18-mlevitsk@redhat.com>
In-Reply-To: <20221025124741.228045-1-mlevitsk@redhat.com>

Use the kvm_smram union instead of raw arrays in the common SMM code.

Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
---
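Note: kvm_smram is the union introduced by the previous patch in this
series ("KVM: x86: smm: add structs for KVM's smram layout"). As a
rough sketch of its shape (see that patch for the authoritative
definition):

	union kvm_smram {
		struct kvm_smram_state_64 smram64;
		struct kvm_smram_state_32 smram32;
		u8 bytes[512];
	};

The bytes[] view is what lets the common code below keep calling the
format-specific save/load helpers unchanged for now.
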
 arch/x86/include/asm/kvm_host.h |  5 +++--
 arch/x86/kvm/smm.c              | 27 ++++++++++++++-------------
 arch/x86/kvm/svm/svm.c          |  8 ++++++--
 arch/x86/kvm/vmx/vmx.c          |  4 ++--
 4 files changed, 25 insertions(+), 19 deletions(-)
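
The practical win is that buffer sizes are now derived from the type
instead of the magic number 512. A minimal standalone illustration of
the pattern (userspace sketch, not kernel code; all names here are
made up):

	#include <stdint.h>
	#include <string.h>
	#include <assert.h>

	/* stand-in for kvm_smram: a typed view over one 512-byte area */
	union smram_demo {
		uint8_t bytes[512];
	};

	/* compile-time check that the type matches the state-save area */
	static_assert(sizeof(union smram_demo) == 512, "SMRAM save area");

	int main(void)
	{
		union smram_demo s;

		memset(s.bytes, 0, sizeof(s.bytes));	/* size follows the type */
		return 0;
	}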

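The (char *) and (const char *) casts in the svm.c hunks below are
deliberate transitional scaffolding:

	char *smstate = (char *)smram;

This keeps the bodies of svm_enter_smm()/svm_leave_smm() byte-oriented
and untouched here; later patches in the series ("KVM: x86: SVM: use
smram structs" and the 32/64 bit load/restore conversions) drop the
casts in favor of the typed union members.
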
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 0b0a82c0bb5cbd..540ff3122bbf8e 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -206,6 +206,7 @@ typedef enum exit_fastpath_completion fastpath_t;
 
 struct x86_emulate_ctxt;
 struct x86_exception;
+union kvm_smram;
 enum x86_intercept;
 enum x86_intercept_stage;
 
@@ -1611,8 +1612,8 @@ struct kvm_x86_ops {
 
 #ifdef CONFIG_KVM_SMM
 	int (*smi_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
-	int (*enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
-	int (*leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
+	int (*enter_smm)(struct kvm_vcpu *vcpu, union kvm_smram *smram);
+	int (*leave_smm)(struct kvm_vcpu *vcpu, const union kvm_smram *smram);
 	void (*enable_smi_window)(struct kvm_vcpu *vcpu);
 #endif
 
diff --git a/arch/x86/kvm/smm.c b/arch/x86/kvm/smm.c
index 01dab9fc3ab4b7..e714d43b746cce 100644
--- a/arch/x86/kvm/smm.c
+++ b/arch/x86/kvm/smm.c
@@ -288,17 +288,18 @@ void enter_smm(struct kvm_vcpu *vcpu)
 	struct kvm_segment cs, ds;
 	struct desc_ptr dt;
 	unsigned long cr0;
-	char buf[512];
+	union kvm_smram smram;
 
 	check_smram_offsets();
 
-	memset(buf, 0, 512);
+	memset(smram.bytes, 0, sizeof(smram.bytes));
+
 #ifdef CONFIG_X86_64
 	if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
-		enter_smm_save_state_64(vcpu, buf);
+		enter_smm_save_state_64(vcpu, smram.bytes);
 	else
 #endif
-		enter_smm_save_state_32(vcpu, buf);
+		enter_smm_save_state_32(vcpu, smram.bytes);
 
 	/*
 	 * Give enter_smm() a chance to make ISA-specific changes to the vCPU
@@ -308,12 +309,12 @@ void enter_smm(struct kvm_vcpu *vcpu)
 	 * Kill the VM in the unlikely case of failure, because the VM
 	 * can be in undefined state in this case.
 	 */
-	if (static_call(kvm_x86_enter_smm)(vcpu, buf))
+	if (static_call(kvm_x86_enter_smm)(vcpu, &smram))
 		goto error;
 
 	kvm_smm_changed(vcpu, true);
 
-	if (kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf)))
+	if (kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, &smram, sizeof(smram)))
 		goto error;
 
 	if (static_call(kvm_x86_get_nmi_mask)(vcpu))
@@ -473,7 +474,7 @@ static int rsm_enter_protected_mode(struct kvm_vcpu *vcpu,
 }
 
 static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
-			     const char *smstate)
+			     u8 *smstate)
 {
 	struct kvm_vcpu *vcpu = ctxt->vcpu;
 	struct kvm_segment desc;
@@ -534,7 +535,7 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
 
 #ifdef CONFIG_X86_64
 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
-			     const char *smstate)
+			     u8 *smstate)
 {
 	struct kvm_vcpu *vcpu = ctxt->vcpu;
 	struct kvm_segment desc;
@@ -606,13 +607,13 @@ int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
 {
 	struct kvm_vcpu *vcpu = ctxt->vcpu;
 	unsigned long cr0, cr4, efer;
-	char buf[512];
+	union kvm_smram smram;
 	u64 smbase;
 	int ret;
 
 	smbase = vcpu->arch.smbase;
 
-	ret = kvm_vcpu_read_guest(vcpu, smbase + 0xfe00, buf, sizeof(buf));
+	ret = kvm_vcpu_read_guest(vcpu, smbase + 0xfe00, smram.bytes, sizeof(smram));
 	if (ret < 0)
 		return X86EMUL_UNHANDLEABLE;
 
@@ -666,13 +667,13 @@ int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
 	 * state (e.g. enter guest mode) before loading state from the SMM
 	 * state-save area.
 	 */
-	if (static_call(kvm_x86_leave_smm)(vcpu, buf))
+	if (static_call(kvm_x86_leave_smm)(vcpu, &smram))
 		return X86EMUL_UNHANDLEABLE;
 
 #ifdef CONFIG_X86_64
 	if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
-		return rsm_load_state_64(ctxt, buf);
+		return rsm_load_state_64(ctxt, smram.bytes);
 	else
 #endif
-		return rsm_load_state_32(ctxt, buf);
+		return rsm_load_state_32(ctxt, smram.bytes);
 }
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 2200b8aa727398..4cbb95796dcd63 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4436,12 +4436,14 @@ static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
 	return 1;
 }
 
-static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
+static int svm_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	struct kvm_host_map map_save;
 	int ret;
 
+	char *smstate = (char *)smram;
+
 	if (!is_guest_mode(vcpu))
 		return 0;
 
@@ -4483,7 +4485,7 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 	return 0;
 }
 
-static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
+static int svm_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	struct kvm_host_map map, map_save;
@@ -4491,6 +4493,8 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 	struct vmcb *vmcb12;
 	int ret;
 
+	const char *smstate = (const char *)smram;
+
 	if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
 		return 0;
 
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 107fc035c91b80..8c7890af11fb21 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7914,7 +7914,7 @@ static int vmx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
 	return !is_smm(vcpu);
 }
 
-static int vmx_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
+static int vmx_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
@@ -7935,7 +7935,7 @@ static int vmx_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 	return 0;
 }
 
-static int vmx_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
+static int vmx_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	int ret;
-- 
2.34.3

