* [PATCH v2 1/2] KVM: nVMX: Properly handle kvm_read/write_guest_virt*() result
From: Vitaly Kuznetsov @ 2020-06-05 11:59 UTC (permalink / raw)
  To: kvm
  Cc: Paolo Bonzini, Sean Christopherson, Wanpeng Li, Jim Mattson,
	linux-kernel

Syzbot reports the following issue:

WARNING: CPU: 0 PID: 6819 at arch/x86/kvm/x86.c:618
 kvm_inject_emulated_page_fault+0x210/0x290 arch/x86/kvm/x86.c:618
...
Call Trace:
...
RIP: 0010:kvm_inject_emulated_page_fault+0x210/0x290 arch/x86/kvm/x86.c:618
...
 nested_vmx_get_vmptr+0x1f9/0x2a0 arch/x86/kvm/vmx/nested.c:4638
 handle_vmon arch/x86/kvm/vmx/nested.c:4767 [inline]
 handle_vmon+0x168/0x3a0 arch/x86/kvm/vmx/nested.c:4728
 vmx_handle_exit+0x29c/0x1260 arch/x86/kvm/vmx/vmx.c:6067

The 'exception' we're trying to inject with kvm_inject_emulated_page_fault()
comes from:

  nested_vmx_get_vmptr()
   kvm_read_guest_virt()
     kvm_read_guest_virt_helper()
       vcpu->arch.walk_mmu->gva_to_gpa()

but it is only filled in when the GVA to GPA conversion fails. When the
conversion succeeds but kvm_vcpu_read_guest_page() fails,
X86EMUL_IO_NEEDED is returned and nested_vmx_get_vmptr() calls
kvm_inject_emulated_page_fault() with an uninitialized 'exception'. This
happens when the argument is an MMIO address.
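
The problematic pattern (simplified from nested_vmx_get_vmptr(), shown
here as a sketch only) is:

  struct x86_exception e;  /* uninitialized stack memory */
  ...
  /*
   * Any non-zero return value is treated as a #PF, but 'e' is only
   * filled in for X86EMUL_PROPAGATE_FAULT; for X86EMUL_IO_NEEDED
   * (MMIO) it still holds stack garbage.
   */
  if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) {
          kvm_inject_emulated_page_fault(vcpu, &e);
          return 1;
  }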

Paolo also noticed that nested_vmx_get_vmptr() is not the only place in
KVM code where the kvm_read/write_guest_virt*() return value is
mishandled. All VMX instructions along with INVPCID/INVVPID/INVEPT have
the same issue. This was already noticed before, e.g. see commit
541ab2aeb282 ("KVM: x86: work around leak of uninitialized stack
contents"), but was never fully fixed.

KVM could've handled the request correctly by exiting to userspace and
performing I/O, but there doesn't seem to be a real use-case for such
requests in the first place.

Introduce vmx_handle_memory_failure() as an interim solution.

Note, nested_vmx_get_vmptr() now has three possible outcomes (OK, #PF
injected, KVM_EXIT_INTERNAL_ERROR) and callers need to know whether a
userspace exit is needed (for KVM_EXIT_INTERNAL_ERROR) in case of
failure. We don't seem to have a good enum describing this tristate, so
just add an "int *ret" parameter to the nested_vmx_get_vmptr() interface
to pass the information.
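
A sketch of the resulting caller contract (simplified, mirroring the
diff below):

  gpa_t vmptr;
  int ret;

  /*
   * Non-zero return means failure; 'ret' then holds what the exit
   * handler itself must return: 1 to resume the guest (#PF was
   * injected) or 0 to exit to userspace (KVM_EXIT_INTERNAL_ERROR).
   */
  if (nested_vmx_get_vmptr(vcpu, &vmptr, &ret))
          return ret;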

Reported-by: syzbot+2a7156e11dc199bdbd8a@syzkaller.appspotmail.com
Suggested-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
---
 arch/x86/kvm/vmx/nested.c | 78 +++++++++++++++++++++------------------
 arch/x86/kvm/vmx/vmx.c    | 34 +++++++++++++++--
 arch/x86/kvm/vmx/vmx.h    |  2 +
 3 files changed, 74 insertions(+), 40 deletions(-)

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 9c74a732b08d..bcb50724be38 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -4624,19 +4624,24 @@ void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
 	}
 }
 
-static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
+static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer,
+				int *ret)
 {
 	gva_t gva;
 	struct x86_exception e;
+	int r;
 
 	if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
 				vmcs_read32(VMX_INSTRUCTION_INFO), false,
-				sizeof(*vmpointer), &gva))
-		return 1;
+				sizeof(*vmpointer), &gva)) {
+		*ret = 1;
+		return -EINVAL;
+	}
 
-	if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) {
-		kvm_inject_emulated_page_fault(vcpu, &e);
-		return 1;
+	r = kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e);
+	if (r != X86EMUL_CONTINUE) {
+		*ret = vmx_handle_memory_failure(vcpu, r, &e);
+		return -EINVAL;
 	}
 
 	return 0;
@@ -4764,8 +4769,8 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
 		return 1;
 	}
 
-	if (nested_vmx_get_vmptr(vcpu, &vmptr))
-		return 1;
+	if (nested_vmx_get_vmptr(vcpu, &vmptr, &ret))
+		return ret;
 
 	/*
 	 * SDM 3: 24.11.5
@@ -4838,12 +4843,13 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
 	u32 zero = 0;
 	gpa_t vmptr;
 	u64 evmcs_gpa;
+	int r;
 
 	if (!nested_vmx_check_permission(vcpu))
 		return 1;
 
-	if (nested_vmx_get_vmptr(vcpu, &vmptr))
-		return 1;
+	if (nested_vmx_get_vmptr(vcpu, &vmptr, &r))
+		return r;
 
 	if (!page_address_valid(vcpu, vmptr))
 		return nested_vmx_failValid(vcpu,
@@ -4902,7 +4908,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
 	u64 value;
 	gva_t gva = 0;
 	short offset;
-	int len;
+	int len, r;
 
 	if (!nested_vmx_check_permission(vcpu))
 		return 1;
@@ -4943,10 +4949,9 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
 					instr_info, true, len, &gva))
 			return 1;
 		/* _system ok, nested_vmx_check_permission has verified cpl=0 */
-		if (kvm_write_guest_virt_system(vcpu, gva, &value, len, &e)) {
-			kvm_inject_emulated_page_fault(vcpu, &e);
-			return 1;
-		}
+		r = kvm_write_guest_virt_system(vcpu, gva, &value, len, &e);
+		if (r != X86EMUL_CONTINUE)
+			return vmx_handle_memory_failure(vcpu, r, &e);
 	}
 
 	return nested_vmx_succeed(vcpu);
@@ -4987,7 +4992,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
 	unsigned long field;
 	short offset;
 	gva_t gva;
-	int len;
+	int len, r;
 
 	/*
 	 * The value to write might be 32 or 64 bits, depending on L1's long
@@ -5017,10 +5022,9 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
 		if (get_vmx_mem_address(vcpu, exit_qualification,
 					instr_info, false, len, &gva))
 			return 1;
-		if (kvm_read_guest_virt(vcpu, gva, &value, len, &e)) {
-			kvm_inject_emulated_page_fault(vcpu, &e);
-			return 1;
-		}
+		r = kvm_read_guest_virt(vcpu, gva, &value, len, &e);
+		if (r != X86EMUL_CONTINUE)
+			return vmx_handle_memory_failure(vcpu, r, &e);
 	}
 
 	field = kvm_register_readl(vcpu, (((instr_info) >> 28) & 0xf));
@@ -5103,12 +5107,13 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	gpa_t vmptr;
+	int r;
 
 	if (!nested_vmx_check_permission(vcpu))
 		return 1;
 
-	if (nested_vmx_get_vmptr(vcpu, &vmptr))
-		return 1;
+	if (nested_vmx_get_vmptr(vcpu, &vmptr, &r))
+		return r;
 
 	if (!page_address_valid(vcpu, vmptr))
 		return nested_vmx_failValid(vcpu,
@@ -5170,6 +5175,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
 	gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr;
 	struct x86_exception e;
 	gva_t gva;
+	int r;
 
 	if (!nested_vmx_check_permission(vcpu))
 		return 1;
@@ -5181,11 +5187,11 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
 				true, sizeof(gpa_t), &gva))
 		return 1;
 	/* *_system ok, nested_vmx_check_permission has verified cpl=0 */
-	if (kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
-					sizeof(gpa_t), &e)) {
-		kvm_inject_emulated_page_fault(vcpu, &e);
-		return 1;
-	}
+	r = kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
+					sizeof(gpa_t), &e);
+	if (r != X86EMUL_CONTINUE)
+		return vmx_handle_memory_failure(vcpu, r, &e);
+
 	return nested_vmx_succeed(vcpu);
 }
 
@@ -5209,7 +5215,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
 	struct {
 		u64 eptp, gpa;
 	} operand;
-	int i;
+	int i, r;
 
 	if (!(vmx->nested.msrs.secondary_ctls_high &
 	      SECONDARY_EXEC_ENABLE_EPT) ||
@@ -5236,10 +5242,9 @@ static int handle_invept(struct kvm_vcpu *vcpu)
 	if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
 			vmx_instruction_info, false, sizeof(operand), &gva))
 		return 1;
-	if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
-		kvm_inject_emulated_page_fault(vcpu, &e);
-		return 1;
-	}
+	r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
+	if (r != X86EMUL_CONTINUE)
+		return vmx_handle_memory_failure(vcpu, r, &e);
 
 	/*
 	 * Nested EPT roots are always held through guest_mmu,
@@ -5291,6 +5296,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
 		u64 gla;
 	} operand;
 	u16 vpid02;
+	int r;
 
 	if (!(vmx->nested.msrs.secondary_ctls_high &
 	      SECONDARY_EXEC_ENABLE_VPID) ||
@@ -5318,10 +5324,10 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
 	if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
 			vmx_instruction_info, false, sizeof(operand), &gva))
 		return 1;
-	if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
-		kvm_inject_emulated_page_fault(vcpu, &e);
-		return 1;
-	}
+	r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
+	if (r != X86EMUL_CONTINUE)
+		return vmx_handle_memory_failure(vcpu, r, &e);
+
 	if (operand.vpid >> 16)
 		return nested_vmx_failValid(vcpu,
 			VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 170cc76a581f..d9083f80ec87 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1600,6 +1600,32 @@ static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
 	return 1;
 }
 
+/*
+ * Handles kvm_read/write_guest_virt*() result and either injects #PF or returns
+ * KVM_EXIT_INTERNAL_ERROR for cases not currently handled by KVM. Return value
+ * indicates whether exit to userspace is needed.
+ */
+int vmx_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
+			      struct x86_exception *e)
+{
+	if (r == X86EMUL_PROPAGATE_FAULT) {
+		kvm_inject_emulated_page_fault(vcpu, e);
+		return 1;
+	}
+
+	/*
+	 * In case kvm_read/write_guest_virt*() failed with X86EMUL_IO_NEEDED
+	 * while handling a VMX instruction KVM could've handled the request
+	 * correctly by exiting to userspace and performing I/O but there
+	 * doesn't seem to be a real use-case behind such requests, just return
+	 * KVM_EXIT_INTERNAL_ERROR for now.
+	 */
+	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+	vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+	vcpu->run->internal.ndata = 0;
+
+	return 0;
+}
 
 /*
  * Recognizes a pending MTF VM-exit and records the nested state for later
@@ -5486,6 +5512,7 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
 		u64 pcid;
 		u64 gla;
 	} operand;
+	int r;
 
 	if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) {
 		kvm_queue_exception(vcpu, UD_VECTOR);
@@ -5508,10 +5535,9 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
 				sizeof(operand), &gva))
 		return 1;
 
-	if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
-		kvm_inject_emulated_page_fault(vcpu, &e);
-		return 1;
-	}
+	r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
+	if (r != X86EMUL_CONTINUE)
+		return vmx_handle_memory_failure(vcpu, r, &e);
 
 	if (operand.pcid >> 12 != 0) {
 		kvm_inject_gp(vcpu, 0);
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 672c28f17e49..8a83b5edc820 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -355,6 +355,8 @@ struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr);
 void pt_update_intercept_for_msr(struct vcpu_vmx *vmx);
 void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
 int vmx_find_msr_index(struct vmx_msrs *m, u32 msr);
+int vmx_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
+			      struct x86_exception *e);
 
 #define POSTED_INTR_ON  0
 #define POSTED_INTR_SN  1
-- 
2.25.4



* [PATCH v2 2/2] Revert "KVM: x86: work around leak of uninitialized stack contents"
From: Vitaly Kuznetsov @ 2020-06-05 11:59 UTC (permalink / raw)
  To: kvm
  Cc: Paolo Bonzini, Sean Christopherson, Wanpeng Li, Jim Mattson,
	linux-kernel

handle_vmptrst()/handle_vmread() stopped injecting #PF unconditionally
and switched to vmx_handle_memory_failure(), which just kills the guest
with KVM_EXIT_INTERNAL_ERROR in case of an MMIO access, so zeroing
'exception' in kvm_write_guest_virt_system() is not needed anymore.

This reverts commit 541ab2aeb28251bf7135c7961f3a6080eebcc705.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
---
 arch/x86/kvm/x86.c | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9e41b5135340..0097a97d331f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5647,13 +5647,6 @@ int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
 	/* kvm_write_guest_virt_system can pull in tons of pages. */
 	vcpu->arch.l1tf_flush_l1d = true;
 
-	/*
-	 * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
-	 * is returned, but our callers are not ready for that and they blindly
-	 * call kvm_inject_page_fault.  Ensure that they at least do not leak
-	 * uninitialized kernel stack memory into cr2 and error code.
-	 */
-	memset(exception, 0, sizeof(*exception));
 	return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
 					   PFERR_WRITE_MASK, exception);
 }
-- 
2.25.4



* Re: [PATCH v2 1/2] KVM: nVMX: Properly handle kvm_read/write_guest_virt*() result
From: Vitaly Kuznetsov @ 2020-06-05 12:01 UTC (permalink / raw)
  To: kvm
  Cc: Paolo Bonzini, Sean Christopherson, Wanpeng Li, Jim Mattson,
	linux-kernel

Vitaly Kuznetsov <vkuznets@redhat.com> writes:

> Syzbot reports the following issue:

Noticed while sending: the prefix of the patch should be "KVM: VMX:" as
it is not only nested-related...

-- 
Vitaly



* Re: [PATCH v2 2/2] Revert "KVM: x86: work around leak of uninitialized stack contents"
From: Paolo Bonzini @ 2020-06-05 17:33 UTC (permalink / raw)
  To: Vitaly Kuznetsov, kvm
  Cc: Sean Christopherson, Wanpeng Li, Jim Mattson, linux-kernel

On 05/06/20 13:59, Vitaly Kuznetsov wrote:
> handle_vmptrst()/handle_vmread() stopped injecting #PF unconditionally
> and switched to vmx_handle_memory_failure(), which just kills the guest
> with KVM_EXIT_INTERNAL_ERROR in case of an MMIO access, so zeroing
> 'exception' in kvm_write_guest_virt_system() is not needed anymore.
> 
> This reverts commit 541ab2aeb28251bf7135c7961f3a6080eebcc705.
> 
> Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
> ---
>  arch/x86/kvm/x86.c | 7 -------
>  1 file changed, 7 deletions(-)
> 
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 9e41b5135340..0097a97d331f 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -5647,13 +5647,6 @@ int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
>  	/* kvm_write_guest_virt_system can pull in tons of pages. */
>  	vcpu->arch.l1tf_flush_l1d = true;
>  
> -	/*
> -	 * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
> -	 * is returned, but our callers are not ready for that and they blindly
> -	 * call kvm_inject_page_fault.  Ensure that they at least do not leak
> -	 * uninitialized kernel stack memory into cr2 and error code.
> -	 */
> -	memset(exception, 0, sizeof(*exception));
>  	return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
>  					   PFERR_WRITE_MASK, exception);
>  }
> 

Queued, thanks.

Paolo



* Re: [PATCH v2 1/2] KVM: nVMX: Properly handle kvm_read/write_guest_virt*() result
From: Sean Christopherson @ 2020-06-05 20:06 UTC (permalink / raw)
  To: Vitaly Kuznetsov
  Cc: kvm, Paolo Bonzini, Wanpeng Li, Jim Mattson, linux-kernel

On Fri, Jun 05, 2020 at 01:59:05PM +0200, Vitaly Kuznetsov wrote:
> Introduce vmx_handle_memory_failure() as an interim solution.

Heh, "interim".  I'll take the over on that :-D.

> Note, nested_vmx_get_vmptr() now has three possible outcomes: OK, PF,
> KVM_EXIT_INTERNAL_ERROR and callers need to know if userspace exit is
> needed (for KVM_EXIT_INTERNAL_ERROR) in case of failure. We don't seem
> to have a good enum describing this tristate, just add "int *ret" to
> nested_vmx_get_vmptr() interface to pass the information.
> 
> Reported-by: syzbot+2a7156e11dc199bdbd8a@syzkaller.appspotmail.com
> Suggested-by: Sean Christopherson <sean.j.christopherson@intel.com>
> Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
> ---

...

> +/*
> + * Handles kvm_read/write_guest_virt*() result and either injects #PF or returns
> + * KVM_EXIT_INTERNAL_ERROR for cases not currently handled by KVM. Return value
> + * indicates whether exit to userspace is needed.
> + */
> +int vmx_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
> +			      struct x86_exception *e)
> +{
> +	if (r == X86EMUL_PROPAGATE_FAULT) {
> +		kvm_inject_emulated_page_fault(vcpu, e);
> +		return 1;
> +	}
> +
> +	/*
> +	 * In case kvm_read/write_guest_virt*() failed with X86EMUL_IO_NEEDED
> +	 * while handling a VMX instruction KVM could've handled the request

A nit similar to your observation on the shortlog, this isn't limited to VMX
instructions.

> +	 * correctly by exiting to userspace and performing I/O but there
> +	 * doesn't seem to be a real use-case behind such requests, just return
> +	 * KVM_EXIT_INTERNAL_ERROR for now.
> +	 */
> +	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
> +	vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
> +	vcpu->run->internal.ndata = 0;
> +
> +	return 0;
> +}


* Re: [PATCH v2 1/2] KVM: nVMX: Properly handle kvm_read/write_guest_virt*() result
From: Vitaly Kuznetsov @ 2020-06-08  8:20 UTC (permalink / raw)
  To: Sean Christopherson
  Cc: kvm, Paolo Bonzini, Wanpeng Li, Jim Mattson, linux-kernel

Sean Christopherson <sean.j.christopherson@intel.com> writes:

> On Fri, Jun 05, 2020 at 01:59:05PM +0200, Vitaly Kuznetsov wrote:
>> Introduce vmx_handle_memory_failure() as an interim solution.
>
> Heh, "interim".  I'll take the over on that :-D.
>

We just need a crazy but real use-case to start acting :-)

>> Note, nested_vmx_get_vmptr() now has three possible outcomes: OK, PF,
>> KVM_EXIT_INTERNAL_ERROR and callers need to know if userspace exit is
>> needed (for KVM_EXIT_INTERNAL_ERROR) in case of failure. We don't seem
>> to have a good enum describing this tristate, just add "int *ret" to
>> nested_vmx_get_vmptr() interface to pass the information.

On a loosely related note, while writing this patch I was struggling
with our exit handlers' calling convention (the return value being '1'
- return to the guest, '0' - return to userspace successfully, '< 0' -
return to userspace with an error). This is intertwined with normal
int/bool functions and makes it hard to read. At the very minimum we can
introduce an enum for the 0/1 return values from exit handlers. Or, maybe,
we can introduce KVM_REQ_USERSPACE_EXIT/KVM_REQ_INTERNAL_ERROR/.. and
make all the exit handlers normal functions returning 0/error?
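
Something like the following, perhaps (just a sketch of the idea, all
names are made up):

  /* Hypothetical replacement for the 1/0/'< 0' convention. */
  enum exit_handler_result {
          EXIT_HANDLER_RESUME_GUEST,      /* today's '1' */
          EXIT_HANDLER_EXIT_USERSPACE,    /* today's '0' */
          EXIT_HANDLER_USERSPACE_ERROR,   /* today's '< 0' */
  };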

>> 
>> Reported-by: syzbot+2a7156e11dc199bdbd8a@syzkaller.appspotmail.com
>> Suggested-by: Sean Christopherson <sean.j.christopherson@intel.com>
>> Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
>> ---
>
> ...
>
>> +/*
>> + * Handles kvm_read/write_guest_virt*() result and either injects #PF or returns
>> + * KVM_EXIT_INTERNAL_ERROR for cases not currently handled by KVM. Return value
>> + * indicates whether exit to userspace is needed.
>> + */
>> +int vmx_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
>> +			      struct x86_exception *e)
>> +{
>> +	if (r == X86EMUL_PROPAGATE_FAULT) {
>> +		kvm_inject_emulated_page_fault(vcpu, e);
>> +		return 1;
>> +	}
>> +
>> +	/*
>> +	 * In case kvm_read/write_guest_virt*() failed with X86EMUL_IO_NEEDED
>> +	 * while handling a VMX instruction KVM could've handled the request
>
> A nit similar to your observation on the shortlog, this isn't limited to VMX
> instructions.
>

Yea, it all started with nested_vmx_get_vmptr(), then Paolo discovered
vmwrite/vmread/vmptrst/invept/invvpid, and then I discovered invpcid but
forgot to update the comment...

>> +	 * correctly by exiting to userspace and performing I/O but there
>> +	 * doesn't seem to be a real use-case behind such requests, just return
>> +	 * KVM_EXIT_INTERNAL_ERROR for now.
>> +	 */
>> +	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
>> +	vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
>> +	vcpu->run->internal.ndata = 0;
>> +
>> +	return 0;
>> +}
>

-- 
Vitaly


