From: Paolo Bonzini <pbonzini@redhat.com>
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: rkrcmar@redhat.com, bsd@redhat.com,
	guangrong.xiao@linux.intel.com,
	Yang Zhang <yang.z.zhang@intel.com>,
	wanpeng.li@linux.intel.com
Subject: [PATCH 07/13] KVM: x86: API changes for SMM support
Date: Thu, 30 Apr 2015 13:36:06 +0200
Message-ID: <1430393772-27208-8-git-send-email-pbonzini@redhat.com>
In-Reply-To: <1430393772-27208-1-git-send-email-pbonzini@redhat.com>

This patch includes changes to the external API for SMM support.
All the changes are predicated on the availability of a new
capability, KVM_CAP_X86_SMM, which is added at the end of the
patch series.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 Documentation/virtual/kvm/api.txt | 34 ++++++++++++++++++++++++++++++----
 arch/x86/include/asm/kvm_host.h   |  1 +
 arch/x86/include/uapi/asm/kvm.h   |  7 +++++++
 arch/x86/kvm/kvm_cache_regs.h     |  5 +++++
 arch/x86/kvm/x86.c                | 30 +++++++++++++++++++++++++-----
 include/uapi/linux/kvm.h          |  5 ++++-
 6 files changed, 72 insertions(+), 10 deletions(-)
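
As a sketch of the intended userspace usage (not part of the patch itself):
a VMM would probe the new capability and then inject SMIs with the new vcpu
ioctl, roughly as follows.  The kvm_fd/vcpu_fd descriptors are assumed to
come from the usual KVM_CREATE_VM / KVM_CREATE_VCPU setup, and <linux/kvm.h>
is assumed to already carry the KVM_CAP_X86_SMM and KVM_SMI definitions
added below.

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int inject_smi(int kvm_fd, int vcpu_fd)
{
	/* KVM_CHECK_EXTENSION returns > 0 when the capability is present. */
	if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_X86_SMM) <= 0) {
		fprintf(stderr, "KVM_CAP_X86_SMM not available\n");
		return -1;
	}

	/* KVM_SMI is a vcpu ioctl with no payload; 0 means the SMI was queued. */
	return ioctl(vcpu_fd, KVM_SMI);
}

A VMM would typically call something like this when its emulated chipset
raises SMI#, for example in response to a write to the APM control port.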

diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 9fa2bf8c3f6f..c1afcb2e4b89 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -820,11 +820,19 @@ struct kvm_vcpu_events {
 	} nmi;
 	__u32 sipi_vector;
 	__u32 flags;
+	struct {
+		__u8 smm;
+		__u8 pad[3];
+	} smi;
 };
 
-KVM_VCPUEVENT_VALID_SHADOW may be set in the flags field to signal that
-interrupt.shadow contains a valid state. Otherwise, this field is undefined.
+Only two flags are currently defined in the flags field:
+
+- KVM_VCPUEVENT_VALID_SHADOW may be set in the flags field to signal that
+  interrupt.shadow contains a valid state.
 
+- KVM_VCPUEVENT_VALID_SMM may be set in the flags field to signal that
+  smi.smm contains a valid state.
 
 4.32 KVM_SET_VCPU_EVENTS
 
@@ -841,17 +849,20 @@ vcpu.
 See KVM_GET_VCPU_EVENTS for the data structure.
 
 Fields that may be modified asynchronously by running VCPUs can be excluded
-from the update. These fields are nmi.pending and sipi_vector. Keep the
+from the update. These fields are nmi.pending, sipi_vector, smi.smm. Keep the
 corresponding bits in the flags field cleared to suppress overwriting the
 current in-kernel state. The bits are:
 
 KVM_VCPUEVENT_VALID_NMI_PENDING - transfer nmi.pending to the kernel
 KVM_VCPUEVENT_VALID_SIPI_VECTOR - transfer sipi_vector
+KVM_VCPUEVENT_VALID_SMM         - transfer smi.smm
 
 If KVM_CAP_INTR_SHADOW is available, KVM_VCPUEVENT_VALID_SHADOW can be set in
 the flags field to signal that interrupt.shadow contains a valid state and
 shall be written into the VCPU.
 
+KVM_VCPUEVENT_VALID_SMM can only be set if KVM_CAP_X86_SMM is available.
+
 
 4.33 KVM_GET_DEBUGREGS
 
@@ -2978,6 +2989,16 @@ len must be a multiple of sizeof(struct kvm_s390_irq). It must be > 0
 and it must not exceed (max_vcpus + 32) * sizeof(struct kvm_s390_irq),
 which is the maximum number of possibly pending cpu-local interrupts.
 
+4.90 KVM_SMI
+
+Capability: KVM_CAP_X86_SMM
+Architectures: x86
+Type: vcpu ioctl
+Parameters: none
+Returns: 0 on success, -1 on error
+
+Queues an SMI on the thread's vcpu.
+
 5. The kvm_run structure
 ------------------------
 
@@ -3013,7 +3034,12 @@ an interrupt can be injected now with KVM_INTERRUPT.
 The value of the current interrupt flag.  Only valid if in-kernel
 local APIC is not used.
 
-	__u8 padding2[2];
+	__u16 flags;
+
+More architecture-specific flags, detailing state of the VCPU that may
+affect the device's behavior.  The only currently defined flag is
+KVM_RUN_X86_SMM, which is valid on x86 machines and is set if the
+VCPU is in system management mode.
 
 	/* in (pre_kvm_run), out (post_kvm_run) */
 	__u64 cr8;
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index b0a97b4dbde4..1171e346bb33 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1109,6 +1109,7 @@ enum {
 #define HF_NMI_MASK		(1 << 3)
 #define HF_IRET_MASK		(1 << 4)
 #define HF_GUEST_MASK		(1 << 5) /* VCPU is in guest-mode */
+#define HF_SMM_MASK		(1 << 6)
 
 /*
  * Hardware virtualization extension instructions may fault if a
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index d7dcef58aefa..0bb09e2e549e 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -106,6 +106,8 @@ struct kvm_ioapic_state {
 #define KVM_IRQCHIP_IOAPIC       2
 #define KVM_NR_IRQCHIPS          3
 
+#define KVM_RUN_X86_SMM		 (1 << 0)
+
 /* for KVM_GET_REGS and KVM_SET_REGS */
 struct kvm_regs {
 	/* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
@@ -281,6 +283,7 @@ struct kvm_reinject_control {
 #define KVM_VCPUEVENT_VALID_NMI_PENDING	0x00000001
 #define KVM_VCPUEVENT_VALID_SIPI_VECTOR	0x00000002
 #define KVM_VCPUEVENT_VALID_SHADOW	0x00000004
+#define KVM_VCPUEVENT_VALID_SMM		0x00000008
 
 /* Interrupt shadow states */
 #define KVM_X86_SHADOW_INT_MOV_SS	0x01
@@ -309,6 +312,10 @@ struct kvm_vcpu_events {
 	} nmi;
 	__u32 sipi_vector;
 	__u32 flags;
+	struct {
+		__u8 smm;
+		__u8 pad[3];
+	} smi;
 	__u32 reserved[10];
 };
 
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index 544076c4f44b..e1e89ee4af75 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -99,4 +99,9 @@ static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
 	return vcpu->arch.hflags & HF_GUEST_MASK;
 }
 
+static inline bool is_smm(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.hflags & HF_SMM_MASK;
+}
+
 #endif
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6009e6a0e406..c088b057aad5 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3032,6 +3032,11 @@ static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+static int kvm_vcpu_ioctl_smi(struct kvm_vcpu *vcpu)
+{
+	return 0;
+}
+
 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
 					   struct kvm_tpr_access_ctl *tac)
 {
@@ -3116,29 +3121,31 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 					       struct kvm_vcpu_events *events)
 {
 	process_nmi(vcpu);
+
+	memset(events, 0, sizeof(*events));
 	events->exception.injected =
 		vcpu->arch.exception.pending &&
 		!kvm_exception_is_soft(vcpu->arch.exception.nr);
 	events->exception.nr = vcpu->arch.exception.nr;
 	events->exception.has_error_code = vcpu->arch.exception.has_error_code;
-	events->exception.pad = 0;
 	events->exception.error_code = vcpu->arch.exception.error_code;
 
 	events->interrupt.injected =
 		vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft;
 	events->interrupt.nr = vcpu->arch.interrupt.nr;
-	events->interrupt.soft = 0;
 	events->interrupt.shadow = kvm_x86_ops->get_interrupt_shadow(vcpu);
 
 	events->nmi.injected = vcpu->arch.nmi_injected;
 	events->nmi.pending = vcpu->arch.nmi_pending != 0;
 	events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
-	events->nmi.pad = 0;
 
 	events->sipi_vector = 0; /* never valid when reporting to user space */
 
+	events->smi.smm = is_smm(vcpu);
+
 	events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
-			 | KVM_VCPUEVENT_VALID_SHADOW);
+			 | KVM_VCPUEVENT_VALID_SHADOW
+			 | KVM_VCPUEVENT_VALID_SMM);
 	memset(&events->reserved, 0, sizeof(events->reserved));
 }
 
@@ -3147,7 +3154,8 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 {
 	if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
 			      | KVM_VCPUEVENT_VALID_SIPI_VECTOR
-			      | KVM_VCPUEVENT_VALID_SHADOW))
+			      | KVM_VCPUEVENT_VALID_SHADOW
+			      | KVM_VCPUEVENT_VALID_SMM))
 		return -EINVAL;
 
 	process_nmi(vcpu);
@@ -3172,6 +3180,13 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 	    kvm_vcpu_has_lapic(vcpu))
 		vcpu->arch.apic->sipi_vector = events->sipi_vector;
 
+	if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
+		if (events->smi.smm)
+			vcpu->arch.hflags |= HF_SMM_MASK;
+		else
+			vcpu->arch.hflags &= ~HF_SMM_MASK;
+	}
+
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
 
 	return 0;
@@ -3431,6 +3446,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		r = kvm_vcpu_ioctl_nmi(vcpu);
 		break;
 	}
+	case KVM_SMI: {
+		r = kvm_vcpu_ioctl_smi(vcpu);
+		break;
+	}
 	case KVM_SET_CPUID: {
 		struct kvm_cpuid __user *cpuid_arg = argp;
 		struct kvm_cpuid cpuid;
@@ -6067,6 +6086,7 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
 	struct kvm_run *kvm_run = vcpu->run;
 
 	kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
+	kvm_run->flags = is_smm(vcpu) ? KVM_RUN_X86_SMM : 0;
 	kvm_run->cr8 = kvm_get_cr8(vcpu);
 	kvm_run->apic_base = kvm_get_apic_base(vcpu);
 	if (irqchip_in_kernel(vcpu->kvm))
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 4b60056776d1..cd51f0289e9f 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -202,7 +202,7 @@ struct kvm_run {
 	__u32 exit_reason;
 	__u8 ready_for_interrupt_injection;
 	__u8 if_flag;
-	__u8 padding2[2];
+	__u16 flags;
 
 	/* in (pre_kvm_run), out (post_kvm_run) */
 	__u64 cr8;
@@ -814,6 +814,7 @@ struct kvm_ppc_smmu_info {
 #define KVM_CAP_S390_INJECT_IRQ 113
 #define KVM_CAP_S390_IRQ_STATE 114
 #define KVM_CAP_PPC_HWRNG 115
+#define KVM_CAP_X86_SMM 120
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -1199,6 +1200,8 @@ struct kvm_s390_ucas_mapping {
 /* Available with KVM_CAP_S390_IRQ_STATE */
 #define KVM_S390_SET_IRQ_STATE	  _IOW(KVMIO, 0xb5, struct kvm_s390_irq_state)
 #define KVM_S390_GET_IRQ_STATE	  _IOW(KVMIO, 0xb6, struct kvm_s390_irq_state)
+/* Available with KVM_CAP_X86_SMM */
+#define KVM_SMI                   _IO(KVMIO,   0xb7)
 
 #define KVM_DEV_ASSIGN_ENABLE_IOMMU	(1 << 0)
 #define KVM_DEV_ASSIGN_PCI_2_3		(1 << 1)
-- 
1.8.3.1
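
A similar sketch for the other two additions in this patch: the smi.smm bit
exchanged through KVM_GET_VCPU_EVENTS / KVM_SET_VCPU_EVENTS, and the
KVM_RUN_X86_SMM bit mirrored into kvm_run->flags after KVM_RUN.  Again this
is illustrative only; vcpu_fd and the mmap'ed struct kvm_run are assumed to
have been set up in the usual way.

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Report whether the vcpu is currently in system management mode. */
static void report_smm_state(int vcpu_fd, struct kvm_run *run)
{
	struct kvm_vcpu_events events;

	memset(&events, 0, sizeof(events));
	if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events) == 0)
		printf("smi.smm = %u\n", (unsigned)events.smi.smm);

	/* After a KVM_RUN exit, the same state is mirrored in run->flags. */
	printf("KVM_RUN_X86_SMM is %s\n",
	       (run->flags & KVM_RUN_X86_SMM) ? "set" : "clear");
}

/* Take the vcpu out of SMM from userspace (e.g. on reset or migration). */
static int clear_smm_state(int vcpu_fd)
{
	struct kvm_vcpu_events events;

	memset(&events, 0, sizeof(events));
	if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0)
		return -1;
	events.smi.smm = 0;
	events.flags |= KVM_VCPUEVENT_VALID_SMM;
	return ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
}

Round-tripping the structure through KVM_GET_VCPU_EVENTS first matters
because, as the documentation above notes, only nmi.pending, sipi_vector and
smi.smm can be excluded from the update; the remaining fields are written
unconditionally by KVM_SET_VCPU_EVENTS.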



Thread overview: 56+ messages
2015-04-30 11:35 [RFC PATCH 00/13] KVM: x86: SMM support Paolo Bonzini
2015-04-30 11:36 ` [PATCH 01/13] KVM: MMU: fix for CR4.SMEP=1, CR0.WP=0? Paolo Bonzini
2015-05-08  2:52   ` Xiao Guangrong
2015-04-30 11:36 ` [PATCH 02/13] KVM: reuse memslot in kvm_write_guest_page Paolo Bonzini
2015-05-05 15:03   ` Bandan Das
2015-05-05 16:29     ` Radim Krčmář
2015-04-30 11:36 ` [PATCH 03/13] KVM: export __gfn_to_pfn_memslot, drop gfn_to_pfn_async Paolo Bonzini
2015-04-30 11:36 ` [PATCH 04/13] KVM: remove unnecessary arg from mark_page_dirty_in_slot, export it Paolo Bonzini
2015-04-30 11:36 ` [PATCH 05/13] KVM: x86: pass host_initiated to functions that read MSRs Paolo Bonzini
2015-05-04 14:01   ` Radim Krčmář
2015-05-04 16:04     ` Paolo Bonzini
2015-04-30 11:36 ` [PATCH 06/13] KVM: x86: pass the whole hflags field to emulator and back Paolo Bonzini
2015-05-05 15:47   ` Bandan Das
2015-05-05 16:16     ` Paolo Bonzini
2015-05-06 16:49       ` Bandan Das
2015-04-30 11:36 ` Paolo Bonzini [this message]
2015-05-04 15:37   ` [PATCH 07/13] KVM: x86: API changes for SMM support Radim Krčmář
2015-05-04 16:02     ` Paolo Bonzini
2015-05-05 16:36   ` Bandan Das
2015-05-05 16:45     ` Paolo Bonzini
2015-04-30 11:36 ` [PATCH 08/13] KVM: x86: stubs " Paolo Bonzini
2015-05-04 17:51   ` Radim Krčmář
2015-05-05  9:37     ` Paolo Bonzini
2015-05-05 18:38     ` Bandan Das
2015-05-05 18:48       ` Radim Krčmář
2015-04-30 11:36 ` [PATCH 09/13] KVM: x86: save/load state on SMM switch Paolo Bonzini
2015-05-04 19:59   ` Radim Krčmář
2015-05-05  9:37     ` Paolo Bonzini
2015-05-05 12:48       ` Radim Krčmář
2015-05-05 13:18         ` Paolo Bonzini
2015-05-05 20:44   ` Bandan Das
2015-05-06 10:39     ` Paolo Bonzini
2015-05-06 17:55       ` Bandan Das
2015-05-06 19:38         ` Paolo Bonzini
2015-05-12 23:56           ` Bandan Das
2015-05-13  6:58             ` Paolo Bonzini
2015-04-30 11:36 ` [PATCH 10/13] KVM: x86: add vcpu-specific functions to read/write/translate GFNs Paolo Bonzini
2015-04-30 11:36 ` [PATCH 11/13] KVM: x86: add SMM to the MMU role Paolo Bonzini
2015-04-30 11:36 ` [PATCH 12/13] KVM: x86: add KVM_MEM_X86_SMRAM memory slot flag Paolo Bonzini
2015-05-05 17:17   ` Radim Krčmář
2015-05-06  9:47     ` Paolo Bonzini
2015-05-06 16:24       ` Radim Krčmář
2015-05-06 18:15         ` Bandan Das
2015-05-06 19:43         ` Paolo Bonzini
2015-05-15 20:32   ` Avi Kivity
2015-05-18  8:31     ` Paolo Bonzini
2015-04-30 11:36 ` [PATCH 13/13] KVM: x86: advertise KVM_CAP_X86_SMM Paolo Bonzini
2015-05-05 18:40 ` [RFC PATCH 00/13] KVM: x86: SMM support Radim Krčmář
2015-05-06 11:18   ` Paolo Bonzini
2015-05-06 17:14     ` Radim Krčmář
2015-05-19 14:25 ` Zhang, Yang Z
2015-05-19 14:25   ` Zhang, Yang Z
2015-05-19 14:27   ` Paolo Bonzini
2015-05-20  1:03     ` Zhang, Yang Z
2015-05-20  1:03       ` Zhang, Yang Z
2015-05-20 15:22     ` Andi Kleen
