From: Ladi Prosek <lprosek@redhat.com>
To: kvm@vger.kernel.org
Cc: rkrcmar@redhat.com, pbonzini@redhat.com
Subject: [PATCH v4 1/6] KVM: x86: introduce ISA specific SMM entry/exit callbacks
Date: Tue, 10 Oct 2017 14:17:12 +0200
Message-ID: <20171010121717.17792-2-lprosek@redhat.com>
In-Reply-To: <20171010121717.17792-1-lprosek@redhat.com>

Entering and exiting SMM may require ISA specific handling under certain
circumstances. This commit adds two new callbacks with empty implementations;
the actual functionality will be added in the following commits.

* prep_enter_smm() is to be called when injecting an SMI, before any
  SMM related vcpu state has been changed
* post_leave_smm() is to be called when emulating the RSM instruction,
  after all SMM related vcpu state has been restored; the callback may
  ask the caller to restore the vcpu state again via the reload_state
  parameter (see the sketch below)
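
For illustration, the call sites added below use the new callbacks roughly
as follows (a condensed sketch of the x86.c and emulate.c hunks in this
patch; error handling abbreviated):

  /* SMI injection (enter_smm): save state first, then run the callback */
  kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf));
  kvm_x86_ops->prep_enter_smm(vcpu, buf);

  /* RSM emulation (em_rsm): the callback runs after the state restore */
  bool reload_state = false;

  if (rsm_load_state(ctxt, smbase) != X86EMUL_CONTINUE)
          return X86EMUL_UNHANDLEABLE;

  if (ctxt->ops->post_leave_smm(ctxt, smbase, &reload_state))
          return X86EMUL_UNHANDLEABLE;

  if (reload_state)
          /* the callback changed the vCPU, e.g. entered guest mode */
          ret = rsm_load_state(ctxt, smbase);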

Signed-off-by: Ladi Prosek <lprosek@redhat.com>
---
 arch/x86/include/asm/kvm_emulate.h |  3 +++
 arch/x86/include/asm/kvm_host.h    |  4 ++++
 arch/x86/kvm/emulate.c             | 30 ++++++++++++++++++++++++++----
 arch/x86/kvm/svm.c                 | 16 ++++++++++++++++
 arch/x86/kvm/vmx.c                 | 16 ++++++++++++++++
 arch/x86/kvm/x86.c                 | 17 +++++++++++++++++
 6 files changed, 82 insertions(+), 4 deletions(-)

diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index fa2558e12024..0ba3837173fb 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -225,6 +225,9 @@ struct x86_emulate_ops {
 
 	unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt);
 	void (*set_hflags)(struct x86_emulate_ctxt *ctxt, unsigned hflags);
+	int (*post_leave_smm)(struct x86_emulate_ctxt *ctxt, u64 smbase,
+			      bool *reload_state);
+
 };
 
 typedef u32 __attribute__((vector_size(16))) sse128_t;
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index c73e493adf07..769de6d2e684 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1061,6 +1061,10 @@ struct kvm_x86_ops {
 	void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);
 
 	void (*setup_mce)(struct kvm_vcpu *vcpu);
+
+	int (*prep_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
+	int (*post_leave_smm)(struct kvm_vcpu *vcpu, u64 smbase,
+			      bool *reload_state);
 };
 
 struct kvm_arch_async_pf {
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index d90cdc77e077..1e6a8a824b8b 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2546,9 +2546,18 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
 	return X86EMUL_CONTINUE;
 }
 
+static int rsm_load_state(struct x86_emulate_ctxt *ctxt, u64 smbase)
+{
+	if (emulator_has_longmode(ctxt))
+		return rsm_load_state_64(ctxt, smbase + 0x8000);
+	else
+		return rsm_load_state_32(ctxt, smbase + 0x8000);
+}
+
 static int em_rsm(struct x86_emulate_ctxt *ctxt)
 {
 	unsigned long cr0, cr4, efer;
+	bool reload_state = false;
 	u64 smbase;
 	int ret;
 
@@ -2591,16 +2600,29 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
 	ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
 
 	smbase = ctxt->ops->get_smbase(ctxt);
-	if (emulator_has_longmode(ctxt))
-		ret = rsm_load_state_64(ctxt, smbase + 0x8000);
-	else
-		ret = rsm_load_state_32(ctxt, smbase + 0x8000);
 
+	ret = rsm_load_state(ctxt, smbase);
 	if (ret != X86EMUL_CONTINUE) {
 		/* FIXME: should triple fault */
 		return X86EMUL_UNHANDLEABLE;
 	}
 
+	if (ctxt->ops->post_leave_smm(ctxt, smbase, &reload_state))
+		return X86EMUL_UNHANDLEABLE;
+
+	if (reload_state) {
+		/*
+		 * post_leave_smm() made changes to the vCPU (e.g. entered
+		 * guest mode) and is asking us to load the SMM state-save
+		 * area again.
+		 */
+		ret = rsm_load_state(ctxt, smbase);
+		if (ret != X86EMUL_CONTINUE) {
+			/* FIXME: should triple fault */
+			return X86EMUL_UNHANDLEABLE;
+		}
+	}
+
 	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
 		ctxt->ops->set_nmi_mask(ctxt, false);
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 0e68f0b3cbf7..d9b3e1bea644 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -5393,6 +5393,19 @@ static void svm_setup_mce(struct kvm_vcpu *vcpu)
 	vcpu->arch.mcg_cap &= 0x1ff;
 }
 
+static int svm_prep_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
+{
+	/* TODO: Implement */
+	return 0;
+}
+
+static int svm_post_leave_smm(struct kvm_vcpu *vcpu, u64 smbase,
+			      bool *reload_state)
+{
+	/* TODO: Implement */
+	return 0;
+}
+
 static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.cpu_has_kvm_support = has_svm,
 	.disabled_by_bios = is_disabled,
@@ -5503,6 +5516,9 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.deliver_posted_interrupt = svm_deliver_avic_intr,
 	.update_pi_irte = svm_update_pi_irte,
 	.setup_mce = svm_setup_mce,
+
+	.prep_enter_smm = svm_prep_enter_smm,
+	.post_leave_smm = svm_post_leave_smm,
 };
 
 static int __init svm_init(void)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a2b804e10c95..15478f413392 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -11941,6 +11941,19 @@ static void vmx_setup_mce(struct kvm_vcpu *vcpu)
 			~FEATURE_CONTROL_LMCE;
 }
 
+static int vmx_prep_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
+{
+	/* TODO: Implement */
+	return 0;
+}
+
+static int vmx_post_leave_smm(struct kvm_vcpu *vcpu, u64 smbase,
+			      bool *reload_state)
+{
+	/* TODO: Implement */
+	return 0;
+}
+
 static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 	.cpu_has_kvm_support = cpu_has_kvm_support,
 	.disabled_by_bios = vmx_disabled_by_bios,
@@ -12066,6 +12079,9 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 #endif
 
 	.setup_mce = vmx_setup_mce,
+
+	.prep_enter_smm = vmx_prep_enter_smm,
+	.post_leave_smm = vmx_post_leave_smm,
 };
 
 static int __init vmx_init(void)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 03869eb7fcd6..e9aef1d858a8 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5275,6 +5275,12 @@ static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_fla
 	kvm_set_hflags(emul_to_vcpu(ctxt), emul_flags);
 }
 
+static int emulator_post_leave_smm(struct x86_emulate_ctxt *ctxt, u64 smbase,
+				   bool *reload_state)
+{
+	return kvm_x86_ops->post_leave_smm(emul_to_vcpu(ctxt), smbase, reload_state);
+}
+
 static const struct x86_emulate_ops emulate_ops = {
 	.read_gpr            = emulator_read_gpr,
 	.write_gpr           = emulator_write_gpr,
@@ -5316,6 +5322,7 @@ static const struct x86_emulate_ops emulate_ops = {
 	.set_nmi_mask        = emulator_set_nmi_mask,
 	.get_hflags          = emulator_get_hflags,
 	.set_hflags          = emulator_set_hflags,
+	.post_leave_smm      = emulator_post_leave_smm,
 };
 
 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
@@ -6643,6 +6650,7 @@ static void enter_smm(struct kvm_vcpu *vcpu)
 	trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
 	vcpu->arch.hflags |= HF_SMM_MASK;
 	memset(buf, 0, 512);
+
 	if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
 		enter_smm_save_state_64(vcpu, buf);
 	else
@@ -6650,6 +6658,15 @@ static void enter_smm(struct kvm_vcpu *vcpu)
 
 	kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf));
 
+	/*
+	 * Give prep_enter_smm() a chance to make ISA-specific changes to the
+	 * vCPU state (e.g. leave guest mode) after we've saved the state into
+	 * the SMM state-save area. Clear HF_SMM_MASK temporarily.
+	 */
+	vcpu->arch.hflags &= ~HF_SMM_MASK;
+	kvm_x86_ops->prep_enter_smm(vcpu, buf);
+	vcpu->arch.hflags |= HF_SMM_MASK;
+
 	if (kvm_x86_ops->get_nmi_mask(vcpu))
 		vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
 	else
-- 
2.13.5

Thread overview: 13+ messages
2017-10-10 12:17 [PATCH v4 0/6] KVM: nested virt SMM fixes Ladi Prosek
2017-10-10 12:17 ` [PATCH v4 1/6] KVM: x86: introduce ISA specific SMM entry/exit callbacks Ladi Prosek (this message)
2017-10-10 12:17 ` [PATCH v4 2/6] KVM: x86: introduce ISA specific smi_allowed callback Ladi Prosek
2017-10-10 12:17 ` [PATCH v4 3/6] KVM: nVMX: fix SMI injection in guest mode Ladi Prosek
2017-10-10 12:17 ` [PATCH v4 4/6] KVM: nVMX: treat CR4.VMXE as reserved in SMM Ladi Prosek
2017-10-10 14:31   ` Paolo Bonzini
2017-10-10 12:17 ` [PATCH v4 5/6] KVM: nSVM: refactor nested_svm_vmrun Ladi Prosek
2017-10-10 12:17 ` [PATCH v4 6/6] KVM: nSVM: fix SMI injection in guest mode Ladi Prosek
2017-10-10 14:56   ` Paolo Bonzini
2017-10-10 15:59     ` Ladi Prosek
2017-10-10 16:15       ` Paolo Bonzini
2017-10-11  7:50         ` Ladi Prosek
2017-10-11 11:21           ` Paolo Bonzini
