All of lore.kernel.org
 help / color / mirror / Atom feed
From: Kai Huang <kaih.linux@gmail.com>
To: pbonzini@redhat.com, rkrcmar@redhat.com, kvm@vger.kernel.org
Subject: [PATCH 09/10] kvm: vmx: handle ENCLS VMEXIT
Date: Mon,  8 May 2017 17:24:32 +1200	[thread overview]
Message-ID: <20170508052434.3627-10-kai.huang@linux.intel.com> (raw)
In-Reply-To: <20170508052434.3627-1-kai.huang@linux.intel.com>

This patch handles ENCLS VMEXIT. ENCLS VMEXIT doesn't need to be always turned
on; in fact it should not be turned on in most cases, as the guest can run ENCLS
perfectly well in non-root mode. However, there are some cases where we need to
trap and emulate ENCLS, because in those cases ENCLS in the guest may behave
differently than on native hardware (for example, when hardware supports SGX but
SGX is not exposed to the guest, and the guest runs ENCLS deliberately, it may
behave differently than it would natively).

In case of nested SGX support, we need to turn on ENCLS VMEXIT if L1 hypervisor
has turned on ENCLS VMEXIT, and such ENCLS VMEXIT from L2 (nested guest) will
be handled by L1 hypervisor.

Signed-off-by: Kai Huang <kai.huang@linux.intel.com>
---
 arch/x86/include/asm/vmx.h      |   2 +
 arch/x86/include/uapi/asm/vmx.h |   4 +-
 arch/x86/kvm/vmx.c              | 265 ++++++++++++++++++++++++++++++++++++++++
 3 files changed, 270 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index f7ac249ce83d..2f24290b7f9d 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -202,6 +202,8 @@ enum vmcs_field {
 	XSS_EXIT_BITMAP_HIGH            = 0x0000202D,
 	TSC_MULTIPLIER                  = 0x00002032,
 	TSC_MULTIPLIER_HIGH             = 0x00002033,
+	ENCLS_EXITING_BITMAP		= 0x0000202E,
+	ENCLS_EXITING_BITMAP_HIGH	= 0x0000202F,
 	GUEST_PHYSICAL_ADDRESS          = 0x00002400,
 	GUEST_PHYSICAL_ADDRESS_HIGH     = 0x00002401,
 	VMCS_LINK_POINTER               = 0x00002800,
diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
index 14458658e988..2bcd967d5c83 100644
--- a/arch/x86/include/uapi/asm/vmx.h
+++ b/arch/x86/include/uapi/asm/vmx.h
@@ -77,6 +77,7 @@
 #define EXIT_REASON_XSETBV              55
 #define EXIT_REASON_APIC_WRITE          56
 #define EXIT_REASON_INVPCID             58
+#define EXIT_REASON_ENCLS		60
 #define EXIT_REASON_PML_FULL            62
 #define EXIT_REASON_XSAVES              63
 #define EXIT_REASON_XRSTORS             64
@@ -130,7 +131,8 @@
 	{ EXIT_REASON_INVVPID,               "INVVPID" }, \
 	{ EXIT_REASON_INVPCID,               "INVPCID" }, \
 	{ EXIT_REASON_XSAVES,                "XSAVES" }, \
-	{ EXIT_REASON_XRSTORS,               "XRSTORS" }
+	{ EXIT_REASON_XRSTORS,               "XRSTORS" }, \
+	{ EXIT_REASON_ENCLS,		     "ENCLS" }
 
 #define VMX_ABORT_SAVE_GUEST_MSR_FAIL        1
 #define VMX_ABORT_LOAD_HOST_PDPTE_FAIL       2
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c96332b9dd44..b5f37982e975 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -254,6 +254,7 @@ struct __packed vmcs12 {
 	u64 eoi_exit_bitmap2;
 	u64 eoi_exit_bitmap3;
 	u64 xss_exit_bitmap;
+	u64 encls_exiting_bitmap;
 	u64 guest_physical_address;
 	u64 vmcs_link_pointer;
 	u64 guest_ia32_debugctl;
@@ -780,6 +781,7 @@ static const unsigned short vmcs_field_to_offset_table[] = {
 	FIELD64(EOI_EXIT_BITMAP2, eoi_exit_bitmap2),
 	FIELD64(EOI_EXIT_BITMAP3, eoi_exit_bitmap3),
 	FIELD64(XSS_EXIT_BITMAP, xss_exit_bitmap),
+	FIELD64(ENCLS_EXITING_BITMAP, encls_exiting_bitmap),
 	FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address),
 	FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer),
 	FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl),
@@ -1402,6 +1404,11 @@ static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12)
 	return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR;
 }
 
+static inline bool nested_cpu_has_encls_exit(struct vmcs12 *vmcs12)
+{
+	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENCLS_EXITING);
+}
+
 static inline bool is_nmi(u32 intr_info)
 {
 	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
@@ -2312,6 +2319,128 @@ static void vmx_sgx_lepubkeyhash_load(struct kvm_vcpu *vcpu)
 }
 
 /*
+ * Setup ENCLS VMEXIT on current VMCS according to encls_vmexit_bitmap.
+ * If encls_vmexit_bitmap is 0, we also disable ENCLS VMEXIT in secondary
+ * execution control. Otherwise we enable ENCLS VMEXIT.
+ *
+ * Must be called after vcpu is loaded.
+ */
+static void vmx_set_encls_vmexit_bitmap(struct kvm_vcpu *vcpu, u64
+		encls_vmexit_bitmap)
+{
+	u32 secondary_exec_ctl = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
+
+	if (encls_vmexit_bitmap)
+		secondary_exec_ctl |= SECONDARY_EXEC_ENCLS_EXITING;
+	else
+		secondary_exec_ctl &= ~SECONDARY_EXEC_ENCLS_EXITING;
+
+	vmcs_write64(ENCLS_EXITING_BITMAP, encls_vmexit_bitmap);
+	vmcs_write32(SECONDARY_VM_EXEC_CONTROL, secondary_exec_ctl);
+}
+
+static void vmx_enable_encls_vmexit_all(struct kvm_vcpu *vcpu)
+{
+	vmx_set_encls_vmexit_bitmap(vcpu, -1ULL);
+}
+
+/* Disable ENCLS VMEXIT on current VMCS. Must be called after vcpu is loaded. */
+static void vmx_disable_encls_vmexit(struct kvm_vcpu *vcpu)
+{
+	vmx_set_encls_vmexit_bitmap(vcpu, 0);
+}
+
+static bool vmx_sgx_enabled_in_bios(struct kvm_vcpu *vcpu)
+{
+	u32 sgx_opted_in = FEATURE_CONTROL_SGX_ENABLE | FEATURE_CONTROL_LOCKED;
+
+	return (to_vmx(vcpu)->msr_ia32_feature_control & sgx_opted_in) ==
+		sgx_opted_in;
+}
+
+static void vmx_update_encls_vmexit(struct kvm_vcpu *vcpu)
+{
+	/* Hardware doesn't support SGX */
+	if (!cpu_has_vmx_encls_vmexit())
+		return;
+
+	/*
+	 * ENCLS error check sequence:
+	 *
+	 * 1) IF CR0.PE = 0 (real mode), or RFLAGS.VM = 1 (virtual-8086 mode),
+	 *    or SMM mode, or CPUID.0x12.0x0:EAX.SGX1 = 0
+	 *	#UD
+	 *
+	 * 2) IF CPL > 0
+	 *	#UD
+	 *
+	 * 3) VMEXIT if enabled
+	 *
+	 * 4) IA32_FEATURE_CONTROL.LOCK, or IA32_FEATURE_CONTROL.SGX_ENABLE = 0
+	 *	#GP
+	 *
+	 * 5) IF RAX = invalid leaf function
+	 *	#GP
+	 *
+	 * 6) IF CR0.PG = 0 (paging disabled)
+	 *	#GP
+	 *
+	 * 7) IF not in 64-bit mode, and DS.type is expand-down data
+	 *	#GP
+	 *
+	 *    Note: non 64-bit mode (32-bit mode) means:
+	 *	- protected mode
+	 *	- IA32e mode's compatibility mode (IA32_EFER.LMA = 1, CS.L = 1)
+	 *
+	 *    Currently KVM doesn't do anything in terms of compatibility mode
+	 *    (SECONDARY_VM_EXEC_CONTROL[bit 2] (descriptor-table exiting) is not
+	 *    enabled, so KVM won't trap any segment register operation in
+	 *    guest). We don't have to trap ENCLS for compatibility mode as
+	 *    ENCLS will behave just the same in guest.
+	 *
+	 * So, to correctly emulate ENCLS, below ENCLS VMEXIT policy is applied:
+	 *
+	 * - For 1), in real mode, SMM mode, no need to trap ENCLS (we cannot
+	 *   actually, as this check happens before VMEXIT).
+	 *
+	 * - If SGX is not exposed to guest (guest_cpuid_has_sgx(vcpu) == 0), or
+	 *   SGX is not enabled in guest's BIOS (vmx->msr_ia32_feature_control
+	 *   doesn't have SGX_ENABLE or LOCK bit set), we need to turn on ENCLS
+	 *   VMEXIT for protected mode and long mode. The reason is, we need to
+	 *   inject #UD for the former and inject #GP for the latter. The
+	 *   hardware actually has SGX support and it is indeed enabled in
+	 *   physical BIOS, so ENCLS may behave differently from what the SDM
+	 *   describes when running in guest.
+	 *
+	 * - For 5), 6), 7), no need to trap ENCLS, as ENCLS will just cause
+	 *   #GP while running in guest.
+	 *
+	 * Most importantly:
+	 *
+	 * - If guest supports SGX, and SGX is enabled in guest's BIOS, on the
+	 *   contrary we don't want to turn on ENCLS VMEXIT, as ENCLS can
+	 *   perfectly run in guest while having the same hardware behavior.
+	 *   Trapping ENCLS from guest is meaningless and only hurts performance.
+	 */
+
+	/* It's pointless to update ENCLS VMEXIT while guest in real mode */
+	if (to_vmx(vcpu)->rmode.vm86_active)
+		return;
+
+	if (!guest_cpuid_has_sgx(vcpu) || !vmx_sgx_enabled_in_bios(vcpu)) {
+		vmx_enable_encls_vmexit_all(vcpu);
+		return;
+	}
+
+	/* If ENCLS VMEXIT is turned on nested, don't disable it */
+	if (nested && is_guest_mode(vcpu) &&
+			nested_cpu_has_encls_exit(get_vmcs12(vcpu)))
+		return;
+
+	vmx_disable_encls_vmexit(vcpu);
+}
+
+/*
  * Switches to specified vcpu, until a matching vcpu_put(), but assumes
  * vcpu mutex is already taken.
  */
@@ -3417,6 +3546,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		vmx->msr_ia32_feature_control = data;
 		if (msr_info->host_initiated && data == 0)
 			vmx_leave_nested(vcpu);
+
+		/* SGX may be enabled/disabled in guest's BIOS */
+		vmx_update_encls_vmexit(vcpu);
+
 		/*
 		 * If guest's FEATURE_CONTROL_SGX_ENABLE is disabled, shall
 		 * we also clear vcpu's SGX CPUID? SDM (chapter 37.7.7.1)
@@ -4131,6 +4264,9 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 		msr->data = efer & ~EFER_LME;
 	}
 	setup_msrs(vmx);
+
+	/* Possible mode change */
+	vmx_update_encls_vmexit(vcpu);
 }
 
 #ifdef CONFIG_X86_64
@@ -4337,6 +4473,9 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 
 	/* depends on vcpu->arch.cr0 to be set to a new value */
 	vmx->emulation_required = emulation_required(vcpu);
+
+	/* Possible mode change */
+	vmx_update_encls_vmexit(vcpu);
 }
 
 static u64 construct_eptp(unsigned long root_hpa)
@@ -4548,6 +4687,9 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu,
 
 out:
 	vmx->emulation_required = emulation_required(vcpu);
+
+	/* Possible mode change */
+	vmx_update_encls_vmexit(vcpu);
 }
 
 static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
@@ -7992,6 +8134,73 @@ static int handle_preemption_timer(struct kvm_vcpu *vcpu)
 	return 1;
 }
 
+static int nested_handle_encls_exit(struct kvm_vcpu *vcpu)
+{
+	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+
+	if (guest_cpuid_has_sgx(vcpu)) {
+		/*
+		 * Which means SGX is exposed to L1 but is disabled in
+		 * L1's BIOS. We should inject #GP according to SDM
+		 * (Chapter 37.7.1 Intel SGX Opt-in Configuration).
+		 *
+		 * nested_cpu_has_encls_exit cannot be true as in this
+		 * case we have allowed L1 to handle ENCLS VMEXIT.
+		 */
+		BUG_ON(nested_cpu_has_encls_exit(vmcs12));
+
+		kvm_inject_gp(vcpu, 0);
+	}
+	else {
+		/*
+		 * Which means we didn't expose SGX to L1 at all. Inject
+		 * #UD according to SDM.
+		 */
+		kvm_queue_exception(vcpu, UD_VECTOR);
+	}
+
+	return 1;
+}
+
+/*
+ * Handle ENCLS VMEXIT due to unexpected ENCLS from guest, including
+ * executing ENCLS when SGX is not exposed to guest, or SGX is disabled
+ * in guest BIOS.
+ *
+ * Return 1 if handled, 0 if not handled
+ */
+static int handle_unexpected_encls(struct kvm_vcpu *vcpu)
+{
+	if (guest_cpuid_has_sgx(vcpu) && vmx_sgx_enabled_in_bios(vcpu))
+		return 0;
+
+	if (!guest_cpuid_has_sgx(vcpu))
+		kvm_queue_exception(vcpu, UD_VECTOR);
+	else	/* !vmx_sgx_enabled_in_bios(vcpu) */
+		kvm_inject_gp(vcpu, 0);
+
+	kvm_x86_ops->skip_emulated_instruction(vcpu);
+
+	return 1;
+}
+
+static int handle_encls(struct kvm_vcpu *vcpu)
+{
+	/* Handle ENCLS VMEXIT from L2 */
+	if (is_guest_mode(vcpu))
+		return nested_handle_encls_exit(vcpu);
+
+	/*
+	 * Handle unexpected ENCLS VMEXIT. If successfully handled we can
+	 * just return to guest to run.
+	 */
+	if (handle_unexpected_encls(vcpu))
+		return 1;
+
+	/* So far ENCLS is not trapped in normal cases. */
+	return -EFAULT;
+}
+
 /*
  * The exit handlers return 1 if the exit was handled fully and guest execution
  * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
@@ -8043,6 +8252,7 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
 	[EXIT_REASON_XRSTORS]                 = handle_xrstors,
 	[EXIT_REASON_PML_FULL]		      = handle_pml_full,
 	[EXIT_REASON_PREEMPTION_TIMER]	      = handle_preemption_timer,
+	[EXIT_REASON_ENCLS]		      = handle_encls,
 };
 
 static const int kvm_vmx_max_exit_handlers =
@@ -8356,6 +8566,43 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
 	case EXIT_REASON_PML_FULL:
 		/* We don't expose PML support to L1. */
 		return false;
+	case EXIT_REASON_ENCLS:
+		/*
+		 * So far we don't trap ENCLS in normal case (meaning SGX is
+		 * exposed to guest and SGX is enabled in guest's BIOS).
+		 * If SGX is enabled in L1 hypervisor properly, L1 hypervisor
+		 * should take care of this ENCLS VMEXIT, otherwise L0
+		 * hypervisor should handle this ENCLS VMEXIT and inject proper
+		 * error (#UD or #GP) according to ENCLS behavior in abnormal
+		 * SGX environment.
+		 */
+		if (guest_cpuid_has_sgx(vcpu) &&
+				vmx_sgx_enabled_in_bios(vcpu)) {
+			/*
+			 * As explained above, if SGX in L1 hypervisor is
+			 * normal, ENCLS VMEXIT from L2 guest should be due
+			 * to L1 turned on ENCLS VMEXIT, as L0 won't turn on
+			 * ENCLS VMEXIT in this case. We don't want to handle
+			 * this case in L0 as we really don't know how to,
+			 * and instead, we depend on L1 hypervisor to handle.
+			 */
+			WARN_ON(!nested_cpu_has_encls_exit(vmcs12));
+			return true;
+		}
+		else if (guest_cpuid_has_sgx(vcpu)) {
+			/*
+			 * If SGX is exposed to L1 but SGX is not turned on
+			 * in L1's BIOS, then L1 may or may not turn on ENCLS
+			 * VMEXIT. If ENCLS VMEXIT is turned on in L1, VMEXIT
+			 * happens prior to FEATURE_CONTROL check, so we inject
+			 * ENCLS VMEXIT to L1. Otherwise we let L0 inject #GP
+			 * directly to L2.
+			 */
+			return nested_cpu_has_encls_exit(vmcs12);
+		}
+		else {
+			return false;
+		}
 	default:
 		return true;
 	}
@@ -9743,8 +9990,19 @@ static int vmx_cpuid_update(struct kvm_vcpu *vcpu)
 		if (guest_cpuid_has_sgx_launch_control(vcpu))
 			to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |=
 				FEATURE_CONTROL_SGX_LAUNCH_CONTROL_ENABLE;
+
+		/*
+		 * To reflect hardware behavior, we must allow guest to be able
+		 * to set ENCLS exiting if we expose SGX to guest.
+		 */
+		if (nested_vmx_allowed(vcpu))
+			to_vmx(vcpu)->nested.nested_vmx_secondary_ctls_high |=
+				SECONDARY_EXEC_ENCLS_EXITING;
 	}
 
+	/* SGX CPUID may be changed */
+	vmx_update_encls_vmexit(vcpu);
+
 	return 0;
 }
 
@@ -10491,6 +10749,13 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 		if (exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)
 			vmcs_write64(APIC_ACCESS_ADDR, -1ull);
 
+		/* If L1 has turned on ENCLS vmexit, we need to honor that. */
+		if (nested_cpu_has_encls_exit(vmcs12)) {
+			exec_control |= SECONDARY_EXEC_ENCLS_EXITING;
+			vmcs_write64(ENCLS_EXITING_BITMAP,
+					vmcs12->encls_exiting_bitmap);
+		}
+
 		vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
 	}
 
-- 
2.11.0

  parent reply	other threads:[~2017-05-08  5:25 UTC|newest]

Thread overview: 78+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2017-05-08  5:24 [RFC PATCH 00/10] Basic KVM SGX Virtualization support Kai Huang
2017-05-08  5:24 ` [PATCH 01/10] x86: add SGX Launch Control definition to cpufeature Kai Huang
2017-05-08  5:24 ` [PATCH 02/10] kvm: vmx: add ENCLS VMEXIT detection Kai Huang
2017-05-08  5:24 ` [PATCH 03/10] kvm: vmx: detect presence of host SGX driver Kai Huang
2017-05-08  5:24 ` [PATCH 04/10] kvm: sgx: new functions to init and destory SGX for guest Kai Huang
2017-05-08  5:24 ` [PATCH 05/10] kvm: x86: add KVM_GET_SUPPORTED_CPUID SGX support Kai Huang
2017-05-08  5:24 ` [PATCH 06/10] kvm: x86: add KVM_SET_CPUID2 " Kai Huang
2017-05-08  5:24 ` [PATCH 07/10] kvm: vmx: add SGX IA32_FEATURE_CONTROL MSR emulation Kai Huang
2017-05-08  5:24 ` [PATCH 08/10] kvm: vmx: add guest's IA32_SGXLEPUBKEYHASHn runtime switch support Kai Huang
2017-05-12  0:32   ` Huang, Kai
2017-05-12  3:28     ` [intel-sgx-kernel-dev] " Andy Lutomirski
2017-05-12  4:56       ` Huang, Kai
2017-05-12  6:11         ` Andy Lutomirski
2017-05-12 18:48           ` Christopherson, Sean J
2017-05-12 20:50             ` Christopherson, Sean J
2017-05-16  0:59             ` Huang, Kai
2017-05-16  1:22             ` Huang, Kai
2017-05-16  0:48           ` Huang, Kai
2017-05-16 14:21             ` Paolo Bonzini
2017-05-18  7:54               ` Huang, Kai
2017-05-18  8:58                 ` Paolo Bonzini
2017-05-17  0:09             ` Andy Lutomirski
2017-05-18  7:45               ` Huang, Kai
2017-06-06 20:52                 ` Huang, Kai
2017-06-06 21:22                   ` Andy Lutomirski
2017-06-06 22:51                     ` Huang, Kai
2017-06-07 14:45                       ` Cohen, Haim
2017-06-08 12:31                   ` Jarkko Sakkinen
2017-06-08 23:47                     ` Huang, Kai
2017-06-08 23:53                       ` Andy Lutomirski
2017-06-09 15:38                         ` Cohen, Haim
2017-06-10 12:23                       ` Jarkko Sakkinen
2017-06-11 22:45                         ` Huang, Kai
2017-06-12  8:36                           ` Jarkko Sakkinen
2017-06-12  9:53                             ` Huang, Kai
2017-06-12 16:24                               ` Andy Lutomirski
2017-06-12 22:08                                 ` Huang, Kai
2017-06-12 23:00                                   ` Andy Lutomirski
2017-06-16  3:46                                     ` Huang, Kai
2017-06-16  4:11                                       ` Andy Lutomirski
2017-06-16  4:33                                         ` Huang, Kai
2017-06-16  9:34                                           ` Huang, Kai
2017-06-16 16:03                                           ` Andy Lutomirski
2017-06-16 16:25                                           ` Andy Lutomirski
2017-06-16 16:31                                             ` Christopherson, Sean J
2017-06-16 16:43                                               ` Andy Lutomirski
2017-06-13 18:57                               ` Jarkko Sakkinen
2017-06-13 19:05                                 ` Jarkko Sakkinen
2017-06-13 20:13                                   ` Sean Christopherson
2017-06-14  9:37                                     ` Jarkko Sakkinen
2017-06-14 15:11                                       ` Christopherson, Sean J
2017-06-14 17:03                                         ` Jarkko Sakkinen
2017-06-13 23:28                                 ` Huang, Kai
2017-06-14  9:44                                   ` Jarkko Sakkinen
2017-07-19 15:04           ` Sean Christopherson
2017-05-15 12:46       ` Jarkko Sakkinen
2017-05-15 23:56         ` Huang, Kai
2017-05-16 14:23           ` Paolo Bonzini
2017-05-17 14:21           ` Sean Christopherson
2017-05-18  8:14             ` Huang, Kai
2017-05-20 21:55               ` Andy Lutomirski
2017-05-23  5:43                 ` Huang, Kai
2017-05-23  5:55                   ` Huang, Kai
2017-05-23 16:34                   ` Andy Lutomirski
2017-05-23 16:43                     ` Paolo Bonzini
2017-05-24  8:20                       ` Huang, Kai
2017-05-20 13:23           ` Jarkko Sakkinen
2017-05-08  5:24 ` Kai Huang [this message]
2017-05-08  8:08   ` [PATCH 09/10] kvm: vmx: handle ENCLS VMEXIT Paolo Bonzini
2017-05-10  1:30     ` Huang, Kai
2017-05-08  5:24 ` [PATCH 10/10] kvm: vmx: handle VMEXIT from SGX Enclave Kai Huang
2017-05-08  8:22   ` Paolo Bonzini
2017-05-11  9:34     ` Huang, Kai
2017-06-19  5:02       ` Huang, Kai
2017-06-27 15:29         ` Radim Krčmář
2017-06-28 22:22           ` Huang, Kai
2017-05-08  5:24 ` [PATCH 11/11] kvm: vmx: workaround FEATURE_CONTROL[17] is not set by BIOS Kai Huang
2017-05-08  5:29   ` Huang, Kai

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20170508052434.3627-10-kai.huang@linux.intel.com \
    --to=kaih.linux@gmail.com \
    --cc=kvm@vger.kernel.org \
    --cc=pbonzini@redhat.com \
    --cc=rkrcmar@redhat.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.