* [PATCH] kvm: nVMX: Remove superfluous VMX instruction fault checks
@ 2017-04-21 16:53 Jim Mattson
From: Jim Mattson @ 2017-04-21 16:53 UTC (permalink / raw)
  To: kvm; +Cc: Jim Mattson

According to the Intel SDM, "Certain exceptions have priority over VM
exits. These include invalid-opcode exceptions, faults based on
privilege level*, and general-protection exceptions that are based on
checking I/O permission bits in the task-state segment (TSS)."

There is no need to check for faulting conditions that the hardware
has already checked.

One of the constraints on the VMX instructions is that they are not
allowed in real-address mode. The hardware checks for this condition
as well, but when real-address mode is emulated (i.e. when unrestricted
guest is disabled), the faulting condition has to be checked in
software.

* These include faults generated by attempts to execute, in
  virtual-8086 mode, privileged instructions that are not recognized
  in that mode.
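
As an illustration (a sketch, not part of the patch itself): once the
hardware has raised #UD/#GP for the other conditions, the only
predicate left for software is "is the guest in emulated real-address
mode?". Using the same symbols the patch relies on
(enable_unrestricted_guest, kvm_read_cr0_bits), that boils down to
something like the following hypothetical helper:

	/* Hypothetical helper, for illustration only: the condition the
	 * commit message says still needs a software check, i.e. the
	 * guest sees real-address mode while real mode is being emulated
	 * (unrestricted guest disabled).
	 */
	static bool in_emulated_real_mode(struct kvm_vcpu *vcpu)
	{
		return !enable_unrestricted_guest &&
		       !kvm_read_cr0_bits(vcpu, X86_CR0_PE);
	}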

Signed-off-by: Jim Mattson <jmattson@google.com>
---
 arch/x86/kvm/vmx.c | 56 +++++++++++++++-----------------------------------------
 1 file changed, 15 insertions(+), 41 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 259e9b28ccf8..1a975e942b87 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -7115,25 +7115,16 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
 	/* The Intel VMX Instruction Reference lists a bunch of bits that
 	 * are prerequisite to running VMXON, most notably cr4.VMXE must be
 	 * set to 1 (see vmx_set_cr4() for when we allow the guest to set this).
-	 * Otherwise, we should fail with #UD. We test these now:
+	 * Otherwise, we should fail with #UD. Hardware has already checked
+	 * these conditions for us, except for the real-address mode check,
+	 * which must be done here when real-address mode is emulated.
 	 */
-	if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE) ||
-	    !kvm_read_cr0_bits(vcpu, X86_CR0_PE) ||
-	    (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) {
-		kvm_queue_exception(vcpu, UD_VECTOR);
-		return 1;
-	}
 
-	vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
-	if (is_long_mode(vcpu) && !cs.l) {
+	if (!enable_unrestricted_guest &&
+	    !kvm_read_cr0_bits(vcpu, X86_CR0_PE)) {
 		kvm_queue_exception(vcpu, UD_VECTOR);
 		return 1;
 	}
-
-	if (vmx_get_cpl(vcpu)) {
-		kvm_inject_gp(vcpu, 0);
-		return 1;
-	}
 
 	if (vmx->nested.vmxon) {
 		nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
@@ -7161,30 +7152,18 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
  * Intel's VMX Instruction Reference specifies a common set of prerequisites
  * for running VMX instructions (except VMXON, whose prerequisites are
  * slightly different). It also specifies what exception to inject otherwise.
+ * Note that many of these exceptions have priority over VM exits, so they
+ * don't have to be checked again here.
  */
-static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
+static bool nested_vmx_check_permission(struct kvm_vcpu *vcpu)
 {
-	struct kvm_segment cs;
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
-
-	if (!vmx->nested.vmxon) {
+	if (!to_vmx(vcpu)->nested.vmxon ||
+	    (!enable_unrestricted_guest &&
+	     !kvm_read_cr0_bits(vcpu, X86_CR0_PE))) {
 		kvm_queue_exception(vcpu, UD_VECTOR);
-		return 0;
-	}
-
-	vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
-	if ((vmx_get_rflags(vcpu) & X86_EFLAGS_VM) ||
-	    (is_long_mode(vcpu) && !cs.l)) {
-		kvm_queue_exception(vcpu, UD_VECTOR);
-		return 0;
-	}
-
-	if (vmx_get_cpl(vcpu)) {
-		kvm_inject_gp(vcpu, 0);
-		return 0;
+		return false;
 	}
-
-	return 1;
+	return true;
 }
 
 static inline void nested_release_vmcs12(struct vcpu_vmx *vmx)
@@ -7527,7 +7506,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
 		if (get_vmx_mem_address(vcpu, exit_qualification,
 				vmx_instruction_info, true, &gva))
 			return 1;
-		/* _system ok, as nested_vmx_check_permission verified cpl=0 */
+		/* _system ok, as hardware has verified cpl=0 */
 		kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva,
 			     &field_value, (is_long_mode(vcpu) ? 8 : 4), NULL);
 	}
@@ -7660,7 +7639,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
 	if (get_vmx_mem_address(vcpu, exit_qualification,
 			vmx_instruction_info, true, &vmcs_gva))
 		return 1;
-	/* ok to use *_system, as nested_vmx_check_permission verified cpl=0 */
+	/* ok to use *_system, as hardware has verified cpl=0 */
 	if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva,
 				 (void *)&to_vmx(vcpu)->nested.current_vmptr,
 				 sizeof(u64), &e)) {
@@ -7693,11 +7672,6 @@ static int handle_invept(struct kvm_vcpu *vcpu)
 	if (!nested_vmx_check_permission(vcpu))
 		return 1;
 
-	if (!kvm_read_cr0_bits(vcpu, X86_CR0_PE)) {
-		kvm_queue_exception(vcpu, UD_VECTOR);
-		return 1;
-	}
-
 	vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
 	type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
 
-- 
2.12.2.816.g2cccc81164-goog


Thread overview: 19+ messages
2017-04-21 16:53 [PATCH] kvm: nVMX: Remove superfluous VMX instruction fault checks Jim Mattson
2017-04-22  0:24 ` kbuild test robot
2017-04-22  1:35 ` kbuild test robot
2017-04-24 15:28 ` [PATCH v2] " Jim Mattson
2017-04-25 12:38   ` David Hildenbrand
2017-04-25 14:10     ` Jim Mattson
2017-04-25 16:30       ` David Hildenbrand
2017-04-26  9:26 ` [PATCH] " Paolo Bonzini
2017-04-26 15:38   ` [PATCH v3] " Jim Mattson
2017-04-26 15:46   ` [PATCH] " Jim Mattson
2017-04-26 15:53     ` [PATCH v4] " Jim Mattson
2017-04-27  8:29       ` David Hildenbrand
2017-04-27 11:25         ` Paolo Bonzini
2017-04-27 12:09           ` David Hildenbrand
2017-04-27 15:00           ` Jim Mattson
2017-04-27 15:31             ` Paolo Bonzini
2017-04-27 15:32             ` Jim Mattson
2017-04-27 15:49               ` Paolo Bonzini
2017-04-27 17:29                 ` Jim Mattson
