All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 0/4] KVM: x86: fix bugs reported by Dan Carpenter
@ 2017-05-18 17:37 Radim Krčmář
  2017-05-18 17:37 ` [PATCH 1/4] KVM: nVMX: fix nested_vmx_check_vmptr failure paths under debugging Radim Krčmář
                   ` (4 more replies)
  0 siblings, 5 replies; 12+ messages in thread
From: Radim Krčmář @ 2017-05-18 17:37 UTC (permalink / raw)
  To: linux-kernel, kvm; +Cc: Paolo Bonzini, Dan Carpenter

It would be possible to make reproducers for the first three patches,
but they happen under circumstances too remote from normal use, so I
didn't test them like that. :)


Radim Krčmář (4):
  KVM: nVMX: fix nested_vmx_check_vmptr failure paths under debugging
  KVM: x86: zero base3 of unusable segments
  KVM: x86/vPMU: fix undefined shift in intel_pmu_refresh()
  KVM: x86: prevent uninitialized variable warning in check_svme()

 arch/x86/kvm/emulate.c   |  2 +-
 arch/x86/kvm/pmu_intel.c |  2 +-
 arch/x86/kvm/vmx.c       | 31 ++++++++++++++++++-------------
 arch/x86/kvm/x86.c       |  2 ++
 4 files changed, 22 insertions(+), 15 deletions(-)

-- 
2.13.0

^ permalink raw reply	[flat|nested] 12+ messages in thread

* [PATCH 1/4] KVM: nVMX: fix nested_vmx_check_vmptr failure paths under debugging
  2017-05-18 17:37 [PATCH 0/4] KVM: x86: fix bugs reported by Dan Carpenter Radim Krčmář
@ 2017-05-18 17:37 ` Radim Krčmář
  2017-05-19 13:48   ` [PATCH v2] " Radim Krčmář
  2017-05-18 17:37 ` [PATCH 2/4] KVM: x86: zero base3 of unusable segments Radim Krčmář
                   ` (3 subsequent siblings)
  4 siblings, 1 reply; 12+ messages in thread
From: Radim Krčmář @ 2017-05-18 17:37 UTC (permalink / raw)
  To: linux-kernel, kvm; +Cc: Paolo Bonzini, Dan Carpenter

kvm_skip_emulated_instruction() will return 0 if userspace is
single-stepping the guest.

kvm_skip_emulated_instruction() uses return status convention of exit
handler: 0 means "exit to userspace" and 1 means "continue vm entries".
The problem is that nested_vmx_check_vmptr() return status means
something else: 0 is ok, 1 is error.

This means we would continue executing after a failure.  Static checker
noticed it because vmptr was not initialized.

Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Fixes: 6affcbedcac7 ("KVM: x86: Add kvm_skip_emulated_instruction and use it.")
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
---
  TODO: add enum for exit handler return states.
---
 arch/x86/kvm/vmx.c | 31 ++++++++++++++++++-------------
 1 file changed, 18 insertions(+), 13 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 9ff1b04502c9..35e058ddd97a 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6955,19 +6955,19 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
 		 */
 		if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
 			nested_vmx_failInvalid(vcpu);
-			return kvm_skip_emulated_instruction(vcpu);
+			return 2;
 		}
 
 		page = nested_get_page(vcpu, vmptr);
 		if (page == NULL) {
 			nested_vmx_failInvalid(vcpu);
-			return kvm_skip_emulated_instruction(vcpu);
+			return 2;
 		}
 		if (*(u32 *)kmap(page) != VMCS12_REVISION) {
 			kunmap(page);
 			nested_release_page_clean(page);
 			nested_vmx_failInvalid(vcpu);
-			return kvm_skip_emulated_instruction(vcpu);
+			return 2;
 		}
 		kunmap(page);
 		nested_release_page_clean(page);
@@ -6977,26 +6977,26 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
 		if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
 			nested_vmx_failValid(vcpu,
 					     VMXERR_VMCLEAR_INVALID_ADDRESS);
-			return kvm_skip_emulated_instruction(vcpu);
+			return 2;
 		}
 
 		if (vmptr == vmx->nested.vmxon_ptr) {
 			nested_vmx_failValid(vcpu,
 					     VMXERR_VMCLEAR_VMXON_POINTER);
-			return kvm_skip_emulated_instruction(vcpu);
+			return 2;
 		}
 		break;
 	case EXIT_REASON_VMPTRLD:
 		if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
 			nested_vmx_failValid(vcpu,
 					     VMXERR_VMPTRLD_INVALID_ADDRESS);
-			return kvm_skip_emulated_instruction(vcpu);
+			return 2;
 		}
 
 		if (vmptr == vmx->nested.vmxon_ptr) {
 			nested_vmx_failValid(vcpu,
 					     VMXERR_VMPTRLD_VMXON_POINTER);
-			return kvm_skip_emulated_instruction(vcpu);
+			return 2;
 		}
 		break;
 	default:
@@ -7095,8 +7095,9 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
 		return 1;
 	}
 
-	if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMON, NULL))
-		return 1;
+	ret = nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMON, NULL);
+	if (ret)
+		return ret == 1 ? 1 : kvm_skip_emulated_instruction(vcpu);
  
 	ret = enter_vmx_operation(vcpu);
 	if (ret)
@@ -7209,12 +7210,14 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u32 zero = 0;
 	gpa_t vmptr;
+	int ret;
 
 	if (!nested_vmx_check_permission(vcpu))
 		return 1;
 
-	if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMCLEAR, &vmptr))
-		return 1;
+	ret = nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMCLEAR, &vmptr);
+	if (ret)
+		return ret == 1 ? 1 : kvm_skip_emulated_instruction(vcpu);
 
 	if (vmptr == vmx->nested.current_vmptr)
 		nested_release_vmcs12(vmx);
@@ -7541,12 +7544,14 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	gpa_t vmptr;
+	int ret;
 
 	if (!nested_vmx_check_permission(vcpu))
 		return 1;
 
-	if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMPTRLD, &vmptr))
-		return 1;
+	ret = nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMPTRLD, &vmptr);
+	if (ret)
+		return ret == 1 ? 1 : kvm_skip_emulated_instruction(vcpu);
 
 	if (vmx->nested.current_vmptr != vmptr) {
 		struct vmcs12 *new_vmcs12;
-- 
2.13.0

^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [PATCH 2/4] KVM: x86: zero base3 of unusable segments
  2017-05-18 17:37 [PATCH 0/4] KVM: x86: fix bugs reported by Dan Carpenter Radim Krčmář
  2017-05-18 17:37 ` [PATCH 1/4] KVM: nVMX: fix nested_vmx_check_vmptr failure paths under debugging Radim Krčmář
@ 2017-05-18 17:37 ` Radim Krčmář
  2017-05-19 13:30   ` David Hildenbrand
  2017-05-18 17:37 ` [PATCH 3/4] KVM: x86/vPMU: fix undefined shift in intel_pmu_refresh() Radim Krčmář
                   ` (2 subsequent siblings)
  4 siblings, 1 reply; 12+ messages in thread
From: Radim Krčmář @ 2017-05-18 17:37 UTC (permalink / raw)
  To: linux-kernel, kvm; +Cc: Paolo Bonzini, Dan Carpenter

Static checker noticed that base3 could be used uninitialized if the
segment was not present (unusable).  Random stack values probably would
not pass VMCS entry checks.

Reported-by:  Dan Carpenter <dan.carpenter@oracle.com>
Fixes: 1aa366163b8b ("KVM: x86 emulator: consolidate segment accessors")
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
---
 arch/x86/kvm/x86.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b54125b590e8..eed8272dd52e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5053,6 +5053,8 @@ static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
 
 	if (var.unusable) {
 		memset(desc, 0, sizeof(*desc));
+		if (base3)
+			*base3 = 0;
 		return false;
 	}
 
-- 
2.13.0

^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [PATCH 3/4] KVM: x86/vPMU: fix undefined shift in intel_pmu_refresh()
  2017-05-18 17:37 [PATCH 0/4] KVM: x86: fix bugs reported by Dan Carpenter Radim Krčmář
  2017-05-18 17:37 ` [PATCH 1/4] KVM: nVMX: fix nested_vmx_check_vmptr failure paths under debugging Radim Krčmář
  2017-05-18 17:37 ` [PATCH 2/4] KVM: x86: zero base3 of unusable segments Radim Krčmář
@ 2017-05-18 17:37 ` Radim Krčmář
  2017-05-19 13:31   ` David Hildenbrand
  2017-05-18 17:37 ` [PATCH 4/4] KVM: x86: prevent uninitialized variable warning in check_svme() Radim Krčmář
  2017-05-18 18:52 ` [PATCH 0/4] KVM: x86: fix bugs reported by Dan Carpenter Paolo Bonzini
  4 siblings, 1 reply; 12+ messages in thread
From: Radim Krčmář @ 2017-05-18 17:37 UTC (permalink / raw)
  To: linux-kernel, kvm; +Cc: Paolo Bonzini, Dan Carpenter

Static analysis noticed that pmu->nr_arch_gp_counters can be 32
(INTEL_PMC_MAX_GENERIC) and therefore cannot be used to shift 'int'.

I didn't add BUILD_BUG_ON for it as we have a better checker.

Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Fixes: 25462f7f5295 ("KVM: x86/vPMU: Define kvm_pmu_ops to support vPMU function dispatch")
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
---
 arch/x86/kvm/pmu_intel.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/pmu_intel.c
index 9d4a8504a95a..5ab4a364348e 100644
--- a/arch/x86/kvm/pmu_intel.c
+++ b/arch/x86/kvm/pmu_intel.c
@@ -294,7 +294,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 			((u64)1 << edx.split.bit_width_fixed) - 1;
 	}
 
-	pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) |
+	pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) |
 		(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
 	pmu->global_ctrl_mask = ~pmu->global_ctrl;
 
-- 
2.13.0

^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [PATCH 4/4] KVM: x86: prevent uninitialized variable warning in check_svme()
  2017-05-18 17:37 [PATCH 0/4] KVM: x86: fix bugs reported by Dan Carpenter Radim Krčmář
                   ` (2 preceding siblings ...)
  2017-05-18 17:37 ` [PATCH 3/4] KVM: x86/vPMU: fix undefined shift in intel_pmu_refresh() Radim Krčmář
@ 2017-05-18 17:37 ` Radim Krčmář
  2017-05-19 13:32   ` David Hildenbrand
  2017-05-18 18:52 ` [PATCH 0/4] KVM: x86: fix bugs reported by Dan Carpenter Paolo Bonzini
  4 siblings, 1 reply; 12+ messages in thread
From: Radim Krčmář @ 2017-05-18 17:37 UTC (permalink / raw)
  To: linux-kernel, kvm; +Cc: Paolo Bonzini, Dan Carpenter

get_msr() of MSR_EFER is currently always going to succeed, but static
checker doesn't see that far.

Don't complicate stuff and just use 0 for the fallback -- it means that
the feature is not present.

Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
---
 arch/x86/kvm/emulate.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index c25cfaf584e7..0816ab2e8adc 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -4173,7 +4173,7 @@ static int check_dr_write(struct x86_emulate_ctxt *ctxt)
 
 static int check_svme(struct x86_emulate_ctxt *ctxt)
 {
-	u64 efer;
+	u64 efer = 0;
 
 	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
 
-- 
2.13.0

^ permalink raw reply related	[flat|nested] 12+ messages in thread

* Re: [PATCH 0/4] KVM: x86: fix bugs reported by Dan Carpenter
  2017-05-18 17:37 [PATCH 0/4] KVM: x86: fix bugs reported by Dan Carpenter Radim Krčmář
                   ` (3 preceding siblings ...)
  2017-05-18 17:37 ` [PATCH 4/4] KVM: x86: prevent uninitialized variable warning in check_svme() Radim Krčmář
@ 2017-05-18 18:52 ` Paolo Bonzini
  2017-05-18 19:25   ` Radim Krčmář
  4 siblings, 1 reply; 12+ messages in thread
From: Paolo Bonzini @ 2017-05-18 18:52 UTC (permalink / raw)
  To: Radim Krčmář, linux-kernel, kvm; +Cc: Dan Carpenter



On 18/05/2017 19:37, Radim Krčmář wrote:
> It would be possible to make reproducers for the first three patches,
> but they happen under circumstances too remote from normal use, so I
> didn't test them like that. :)
> 
> 
> Radim Krčmář (4):
>   KVM: nVMX: fix nested_vmx_check_vmptr failure paths under debugging
>   KVM: x86: zero base3 of unusable segments
>   KVM: x86/vPMU: fix undefined shift in intel_pmu_refresh()
>   KVM: x86: prevent uninitialized variable warning in check_svme()
> 
>  arch/x86/kvm/emulate.c   |  2 +-
>  arch/x86/kvm/pmu_intel.c |  2 +-
>  arch/x86/kvm/vmx.c       | 31 ++++++++++++++++++-------------
>  arch/x86/kvm/x86.c       |  2 ++
>  4 files changed, 22 insertions(+), 15 deletions(-)
> 

Patch 1 is ugly, but I don't have any better idea.

Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 0/4] KVM: x86: fix bugs reported by Dan Carpenter
  2017-05-18 18:52 ` [PATCH 0/4] KVM: x86: fix bugs reported by Dan Carpenter Paolo Bonzini
@ 2017-05-18 19:25   ` Radim Krčmář
  0 siblings, 0 replies; 12+ messages in thread
From: Radim Krčmář @ 2017-05-18 19:25 UTC (permalink / raw)
  To: Paolo Bonzini; +Cc: linux-kernel, kvm, Dan Carpenter

2017-05-18 20:52+0200, Paolo Bonzini:
> On 18/05/2017 19:37, Radim Krčmář wrote:
> > It would be possible to make reproducers for the first three patches,
> > but they happen under circumstances too remote from normal use, so I
> > didn't test them like that. :)
> > 
> > 
> > Radim Krčmář (4):
> >   KVM: nVMX: fix nested_vmx_check_vmptr failure paths under debugging
> >   KVM: x86: zero base3 of unusable segments
> >   KVM: x86/vPMU: fix undefined shift in intel_pmu_refresh()
> >   KVM: x86: prevent uninitialized variable warning in check_svme()
> > 
> >  arch/x86/kvm/emulate.c   |  2 +-
> >  arch/x86/kvm/pmu_intel.c |  2 +-
> >  arch/x86/kvm/vmx.c       | 31 ++++++++++++++++++-------------
> >  arch/x86/kvm/x86.c       |  2 ++
> >  4 files changed, 22 insertions(+), 15 deletions(-)
> > 
> 
> Patch 1 is ugly, but I don't have any better idea.

I agree.  Adding another argument was clearly worse, but I almost chose
to keep the skip in nested_vmx_check_vmptr() and return it +1, to signal
an error, and then subtract 1 before returning from the exit handler.

> Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>

Thanks.

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 2/4] KVM: x86: zero base3 of unusable segments
  2017-05-18 17:37 ` [PATCH 2/4] KVM: x86: zero base3 of unusable segments Radim Krčmář
@ 2017-05-19 13:30   ` David Hildenbrand
  0 siblings, 0 replies; 12+ messages in thread
From: David Hildenbrand @ 2017-05-19 13:30 UTC (permalink / raw)
  To: Radim Krčmář, linux-kernel, kvm
  Cc: Paolo Bonzini, Dan Carpenter

On 18.05.2017 19:37, Radim Krčmář wrote:
> Static checker noticed that base3 could be used uninitialized if the
> segment was not present (unusable).  Random stack values probably would
> not pass VMCS entry checks.
> 
> Reported-by:  Dan Carpenter <dan.carpenter@oracle.com>
> Fixes: 1aa366163b8b ("KVM: x86 emulator: consolidate segment accessors")
> Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
> ---
>  arch/x86/kvm/x86.c | 2 ++
>  1 file changed, 2 insertions(+)
> 
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index b54125b590e8..eed8272dd52e 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -5053,6 +5053,8 @@ static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
>  
>  	if (var.unusable) {
>  		memset(desc, 0, sizeof(*desc));
> +		if (base3)
> +			*base3 = 0;
>  		return false;
>  	}
>  
> 

Reviewed-by: David Hildenbrand <david@redhat.com>

-- 

Thanks,

David

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 3/4] KVM: x86/vPMU: fix undefined shift in intel_pmu_refresh()
  2017-05-18 17:37 ` [PATCH 3/4] KVM: x86/vPMU: fix undefined shift in intel_pmu_refresh() Radim Krčmář
@ 2017-05-19 13:31   ` David Hildenbrand
  0 siblings, 0 replies; 12+ messages in thread
From: David Hildenbrand @ 2017-05-19 13:31 UTC (permalink / raw)
  To: Radim Krčmář, linux-kernel, kvm
  Cc: Paolo Bonzini, Dan Carpenter

On 18.05.2017 19:37, Radim Krčmář wrote:
> Static analysis noticed that pmu->nr_arch_gp_counters can be 32
> (INTEL_PMC_MAX_GENERIC) and therefore cannot be used to shift 'int'.
> 
> I didn't add BUILD_BUG_ON for it as we have a better checker.
> 
> Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
> Fixes: 25462f7f5295 ("KVM: x86/vPMU: Define kvm_pmu_ops to support vPMU function dispatch")
> Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
> ---
>  arch/x86/kvm/pmu_intel.c | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
> 
> diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/pmu_intel.c
> index 9d4a8504a95a..5ab4a364348e 100644
> --- a/arch/x86/kvm/pmu_intel.c
> +++ b/arch/x86/kvm/pmu_intel.c
> @@ -294,7 +294,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
>  			((u64)1 << edx.split.bit_width_fixed) - 1;
>  	}
>  
> -	pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) |
> +	pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) |
>  		(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
>  	pmu->global_ctrl_mask = ~pmu->global_ctrl;
>  
> 

Reviewed-by: David Hildenbrand <david@redhat.com>

-- 

Thanks,

David

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 4/4] KVM: x86: prevent uninitialized variable warning in check_svme()
  2017-05-18 17:37 ` [PATCH 4/4] KVM: x86: prevent uninitialized variable warning in check_svme() Radim Krčmář
@ 2017-05-19 13:32   ` David Hildenbrand
  0 siblings, 0 replies; 12+ messages in thread
From: David Hildenbrand @ 2017-05-19 13:32 UTC (permalink / raw)
  To: Radim Krčmář, linux-kernel, kvm
  Cc: Paolo Bonzini, Dan Carpenter

On 18.05.2017 19:37, Radim Krčmář wrote:
> get_msr() of MSR_EFER is currently always going to succeed, but static
> checker doesn't see that far.
> 
> Don't complicate stuff and just use 0 for the fallback -- it means that
> the feature is not present.
> 
> Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
> Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
> ---
>  arch/x86/kvm/emulate.c | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
> 
> diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
> index c25cfaf584e7..0816ab2e8adc 100644
> --- a/arch/x86/kvm/emulate.c
> +++ b/arch/x86/kvm/emulate.c
> @@ -4173,7 +4173,7 @@ static int check_dr_write(struct x86_emulate_ctxt *ctxt)
>  
>  static int check_svme(struct x86_emulate_ctxt *ctxt)
>  {
> -	u64 efer;
> +	u64 efer = 0;
>  
>  	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
>  
> 

Reviewed-by: David Hildenbrand <david@redhat.com>

-- 

Thanks,

David

^ permalink raw reply	[flat|nested] 12+ messages in thread

* [PATCH v2] KVM: nVMX: fix nested_vmx_check_vmptr failure paths under debugging
  2017-05-18 17:37 ` [PATCH 1/4] KVM: nVMX: fix nested_vmx_check_vmptr failure paths under debugging Radim Krčmář
@ 2017-05-19 13:48   ` Radim Krčmář
  2017-05-30 14:34     ` Paolo Bonzini
  0 siblings, 1 reply; 12+ messages in thread
From: Radim Krčmář @ 2017-05-19 13:48 UTC (permalink / raw)
  To: linux-kernel, kvm; +Cc: Paolo Bonzini, Dan Carpenter

kvm_skip_emulated_instruction() will return 0 if userspace is
single-stepping the guest.

kvm_skip_emulated_instruction() uses return status convention of exit
handler: 0 means "exit to userspace" and 1 means "continue vm entries".
The problem is that nested_vmx_check_vmptr() return status means
something else: 0 is ok, 1 is error.

This means we would continue executing after a failure.  Static checker
noticed it because vmptr was not initialized.

Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Fixes: 6affcbedcac7 ("KVM: x86: Add kvm_skip_emulated_instruction and use it.")
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
---
 Second try -- moves a lot of code around to make it less ugly and keep
 the same behavior as v1, hopefully.

 arch/x86/kvm/vmx.c | 140 ++++++++++++++++++++++-------------------------------
 1 file changed, 57 insertions(+), 83 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 9ff1b04502c9..dd274db9bf77 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6914,97 +6914,21 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-/*
- * This function performs the various checks including
- * - if it's 4KB aligned
- * - No bits beyond the physical address width are set
- * - Returns 0 on success or else 1
- * (Intel SDM Section 30.3)
- */
-static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
-				  gpa_t *vmpointer)
+static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
 {
 	gva_t gva;
-	gpa_t vmptr;
 	struct x86_exception e;
-	struct page *page;
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	int maxphyaddr = cpuid_maxphyaddr(vcpu);
 
 	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
 			vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
 		return 1;
 
-	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
-				sizeof(vmptr), &e)) {
+	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, vmpointer,
+				sizeof(*vmpointer), &e)) {
 		kvm_inject_page_fault(vcpu, &e);
 		return 1;
 	}
 
-	switch (exit_reason) {
-	case EXIT_REASON_VMON:
-		/*
-		 * SDM 3: 24.11.5
-		 * The first 4 bytes of VMXON region contain the supported
-		 * VMCS revision identifier
-		 *
-		 * Note - IA32_VMX_BASIC[48] will never be 1
-		 * for the nested case;
-		 * which replaces physical address width with 32
-		 *
-		 */
-		if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
-			nested_vmx_failInvalid(vcpu);
-			return kvm_skip_emulated_instruction(vcpu);
-		}
-
-		page = nested_get_page(vcpu, vmptr);
-		if (page == NULL) {
-			nested_vmx_failInvalid(vcpu);
-			return kvm_skip_emulated_instruction(vcpu);
-		}
-		if (*(u32 *)kmap(page) != VMCS12_REVISION) {
-			kunmap(page);
-			nested_release_page_clean(page);
-			nested_vmx_failInvalid(vcpu);
-			return kvm_skip_emulated_instruction(vcpu);
-		}
-		kunmap(page);
-		nested_release_page_clean(page);
-		vmx->nested.vmxon_ptr = vmptr;
-		break;
-	case EXIT_REASON_VMCLEAR:
-		if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
-			nested_vmx_failValid(vcpu,
-					     VMXERR_VMCLEAR_INVALID_ADDRESS);
-			return kvm_skip_emulated_instruction(vcpu);
-		}
-
-		if (vmptr == vmx->nested.vmxon_ptr) {
-			nested_vmx_failValid(vcpu,
-					     VMXERR_VMCLEAR_VMXON_POINTER);
-			return kvm_skip_emulated_instruction(vcpu);
-		}
-		break;
-	case EXIT_REASON_VMPTRLD:
-		if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
-			nested_vmx_failValid(vcpu,
-					     VMXERR_VMPTRLD_INVALID_ADDRESS);
-			return kvm_skip_emulated_instruction(vcpu);
-		}
-
-		if (vmptr == vmx->nested.vmxon_ptr) {
-			nested_vmx_failValid(vcpu,
-					     VMXERR_VMPTRLD_VMXON_POINTER);
-			return kvm_skip_emulated_instruction(vcpu);
-		}
-		break;
-	default:
-		return 1; /* shouldn't happen */
-	}
-
-	if (vmpointer)
-		*vmpointer = vmptr;
 	return 0;
 }
 
@@ -7066,6 +6990,8 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu)
 static int handle_vmon(struct kvm_vcpu *vcpu)
 {
 	int ret;
+	gpa_t vmptr;
+	struct page *page;
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
 		| FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
@@ -7095,9 +7021,37 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
 		return 1;
 	}
 
-	if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMON, NULL))
+	if (nested_vmx_get_vmptr(vcpu, &vmptr))
 		return 1;
- 
+
+	/*
+	 * SDM 3: 24.11.5
+	 * The first 4 bytes of VMXON region contain the supported
+	 * VMCS revision identifier
+	 *
+	 * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case;
+	 * which replaces physical address width with 32
+	 */
+	if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
+		nested_vmx_failInvalid(vcpu);
+		return kvm_skip_emulated_instruction(vcpu);
+	}
+
+	page = nested_get_page(vcpu, vmptr);
+	if (page == NULL) {
+		nested_vmx_failInvalid(vcpu);
+		return kvm_skip_emulated_instruction(vcpu);
+	}
+	if (*(u32 *)kmap(page) != VMCS12_REVISION) {
+		kunmap(page);
+		nested_release_page_clean(page);
+		nested_vmx_failInvalid(vcpu);
+		return kvm_skip_emulated_instruction(vcpu);
+	}
+	kunmap(page);
+	nested_release_page_clean(page);
+
+	vmx->nested.vmxon_ptr = vmptr;
 	ret = enter_vmx_operation(vcpu);
 	if (ret)
 		return ret;
@@ -7213,9 +7167,19 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
 	if (!nested_vmx_check_permission(vcpu))
 		return 1;
 
-	if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMCLEAR, &vmptr))
+	if (nested_vmx_get_vmptr(vcpu, &vmptr))
 		return 1;
 
+	if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
+		nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);
+		return kvm_skip_emulated_instruction(vcpu);
+	}
+
+	if (vmptr == vmx->nested.vmxon_ptr) {
+		nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_VMXON_POINTER);
+		return kvm_skip_emulated_instruction(vcpu);
+	}
+
 	if (vmptr == vmx->nested.current_vmptr)
 		nested_release_vmcs12(vmx);
 
@@ -7545,9 +7509,19 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
 	if (!nested_vmx_check_permission(vcpu))
 		return 1;
 
-	if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMPTRLD, &vmptr))
+	if (nested_vmx_get_vmptr(vcpu, &vmptr))
 		return 1;
 
+	if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
+		nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS);
+		return kvm_skip_emulated_instruction(vcpu);
+	}
+
+	if (vmptr == vmx->nested.vmxon_ptr) {
+		nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_VMXON_POINTER);
+		return kvm_skip_emulated_instruction(vcpu);
+	}
+
 	if (vmx->nested.current_vmptr != vmptr) {
 		struct vmcs12 *new_vmcs12;
 		struct page *page;
-- 
2.13.0

^ permalink raw reply related	[flat|nested] 12+ messages in thread

* Re: [PATCH v2] KVM: nVMX: fix nested_vmx_check_vmptr failure paths under debugging
  2017-05-19 13:48   ` [PATCH v2] " Radim Krčmář
@ 2017-05-30 14:34     ` Paolo Bonzini
  0 siblings, 0 replies; 12+ messages in thread
From: Paolo Bonzini @ 2017-05-30 14:34 UTC (permalink / raw)
  To: Radim Krčmář, linux-kernel, kvm; +Cc: Dan Carpenter



On 19/05/2017 15:48, Radim Krčmář wrote:
> kvm_skip_emulated_instruction() will return 0 if userspace is
> single-stepping the guest.
> 
> kvm_skip_emulated_instruction() uses return status convention of exit
> handler: 0 means "exit to userspace" and 1 means "continue vm entries".
> The problem is that nested_vmx_check_vmptr() return status means
> something else: 0 is ok, 1 is error.
> 
> This means we would continue executing after a failure.  Static checker
> noticed it because vmptr was not initialized.
> 
> Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
> Fixes: 6affcbedcac7 ("KVM: x86: Add kvm_skip_emulated_instruction and use it.")
> Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
> ---
>  Second try -- moves a lot of code around to make it less ugly and keep
>  the same behavior as v1, hopefully.
> 
>  arch/x86/kvm/vmx.c | 140 ++++++++++++++++++++++-------------------------------
>  1 file changed, 57 insertions(+), 83 deletions(-)
> 
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 9ff1b04502c9..dd274db9bf77 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -6914,97 +6914,21 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
>  	return 0;
>  }
>  
> -/*
> - * This function performs the various checks including
> - * - if it's 4KB aligned
> - * - No bits beyond the physical address width are set
> - * - Returns 0 on success or else 1
> - * (Intel SDM Section 30.3)
> - */
> -static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
> -				  gpa_t *vmpointer)
> +static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
>  {
>  	gva_t gva;
> -	gpa_t vmptr;
>  	struct x86_exception e;
> -	struct page *page;
> -	struct vcpu_vmx *vmx = to_vmx(vcpu);
> -	int maxphyaddr = cpuid_maxphyaddr(vcpu);
>  
>  	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
>  			vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
>  		return 1;
>  
> -	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
> -				sizeof(vmptr), &e)) {
> +	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, vmpointer,
> +				sizeof(*vmpointer), &e)) {
>  		kvm_inject_page_fault(vcpu, &e);
>  		return 1;
>  	}
>  
> -	switch (exit_reason) {
> -	case EXIT_REASON_VMON:
> -		/*
> -		 * SDM 3: 24.11.5
> -		 * The first 4 bytes of VMXON region contain the supported
> -		 * VMCS revision identifier
> -		 *
> -		 * Note - IA32_VMX_BASIC[48] will never be 1
> -		 * for the nested case;
> -		 * which replaces physical address width with 32
> -		 *
> -		 */
> -		if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
> -			nested_vmx_failInvalid(vcpu);
> -			return kvm_skip_emulated_instruction(vcpu);
> -		}
> -
> -		page = nested_get_page(vcpu, vmptr);
> -		if (page == NULL) {
> -			nested_vmx_failInvalid(vcpu);
> -			return kvm_skip_emulated_instruction(vcpu);
> -		}
> -		if (*(u32 *)kmap(page) != VMCS12_REVISION) {
> -			kunmap(page);
> -			nested_release_page_clean(page);
> -			nested_vmx_failInvalid(vcpu);
> -			return kvm_skip_emulated_instruction(vcpu);
> -		}
> -		kunmap(page);
> -		nested_release_page_clean(page);
> -		vmx->nested.vmxon_ptr = vmptr;
> -		break;
> -	case EXIT_REASON_VMCLEAR:
> -		if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
> -			nested_vmx_failValid(vcpu,
> -					     VMXERR_VMCLEAR_INVALID_ADDRESS);
> -			return kvm_skip_emulated_instruction(vcpu);
> -		}
> -
> -		if (vmptr == vmx->nested.vmxon_ptr) {
> -			nested_vmx_failValid(vcpu,
> -					     VMXERR_VMCLEAR_VMXON_POINTER);
> -			return kvm_skip_emulated_instruction(vcpu);
> -		}
> -		break;
> -	case EXIT_REASON_VMPTRLD:
> -		if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
> -			nested_vmx_failValid(vcpu,
> -					     VMXERR_VMPTRLD_INVALID_ADDRESS);
> -			return kvm_skip_emulated_instruction(vcpu);
> -		}
> -
> -		if (vmptr == vmx->nested.vmxon_ptr) {
> -			nested_vmx_failValid(vcpu,
> -					     VMXERR_VMPTRLD_VMXON_POINTER);
> -			return kvm_skip_emulated_instruction(vcpu);
> -		}
> -		break;
> -	default:
> -		return 1; /* shouldn't happen */
> -	}
> -
> -	if (vmpointer)
> -		*vmpointer = vmptr;
>  	return 0;
>  }
>  
> @@ -7066,6 +6990,8 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu)
>  static int handle_vmon(struct kvm_vcpu *vcpu)
>  {
>  	int ret;
> +	gpa_t vmptr;
> +	struct page *page;
>  	struct vcpu_vmx *vmx = to_vmx(vcpu);
>  	const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
>  		| FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
> @@ -7095,9 +7021,37 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
>  		return 1;
>  	}
>  
> -	if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMON, NULL))
> +	if (nested_vmx_get_vmptr(vcpu, &vmptr))
>  		return 1;
> - 
> +
> +	/*
> +	 * SDM 3: 24.11.5
> +	 * The first 4 bytes of VMXON region contain the supported
> +	 * VMCS revision identifier
> +	 *
> +	 * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case;
> +	 * which replaces physical address width with 32
> +	 */
> +	if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
> +		nested_vmx_failInvalid(vcpu);
> +		return kvm_skip_emulated_instruction(vcpu);
> +	}
> +
> +	page = nested_get_page(vcpu, vmptr);
> +	if (page == NULL) {
> +		nested_vmx_failInvalid(vcpu);
> +		return kvm_skip_emulated_instruction(vcpu);
> +	}
> +	if (*(u32 *)kmap(page) != VMCS12_REVISION) {
> +		kunmap(page);
> +		nested_release_page_clean(page);
> +		nested_vmx_failInvalid(vcpu);
> +		return kvm_skip_emulated_instruction(vcpu);
> +	}
> +	kunmap(page);
> +	nested_release_page_clean(page);
> +
> +	vmx->nested.vmxon_ptr = vmptr;
>  	ret = enter_vmx_operation(vcpu);
>  	if (ret)
>  		return ret;
> @@ -7213,9 +7167,19 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
>  	if (!nested_vmx_check_permission(vcpu))
>  		return 1;
>  
> -	if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMCLEAR, &vmptr))
> +	if (nested_vmx_get_vmptr(vcpu, &vmptr))
>  		return 1;
>  
> +	if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
> +		nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);
> +		return kvm_skip_emulated_instruction(vcpu);
> +	}
> +
> +	if (vmptr == vmx->nested.vmxon_ptr) {
> +		nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_VMXON_POINTER);
> +		return kvm_skip_emulated_instruction(vcpu);
> +	}
> +
>  	if (vmptr == vmx->nested.current_vmptr)
>  		nested_release_vmcs12(vmx);
>  
> @@ -7545,9 +7509,19 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
>  	if (!nested_vmx_check_permission(vcpu))
>  		return 1;
>  
> -	if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMPTRLD, &vmptr))
> +	if (nested_vmx_get_vmptr(vcpu, &vmptr))
>  		return 1;
>  
> +	if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
> +		nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS);
> +		return kvm_skip_emulated_instruction(vcpu);
> +	}
> +
> +	if (vmptr == vmx->nested.vmxon_ptr) {
> +		nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_VMXON_POINTER);
> +		return kvm_skip_emulated_instruction(vcpu);
> +	}
> +
>  	if (vmx->nested.current_vmptr != vmptr) {
>  		struct vmcs12 *new_vmcs12;
>  		struct page *page;
> 

Nice, diffstat speaks for itself.  Queuing it.

Thanks,

Paolo

^ permalink raw reply	[flat|nested] 12+ messages in thread

end of thread, other threads:[~2017-05-30 14:34 UTC | newest]

Thread overview: 12+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-05-18 17:37 [PATCH 0/4] KVM: x86: fix bugs reported by Dan Carpenter Radim Krčmář
2017-05-18 17:37 ` [PATCH 1/4] KVM: nVMX: fix nested_vmx_check_vmptr failure paths under debugging Radim Krčmář
2017-05-19 13:48   ` [PATCH v2] " Radim Krčmář
2017-05-30 14:34     ` Paolo Bonzini
2017-05-18 17:37 ` [PATCH 2/4] KVM: x86: zero base3 of unusable segments Radim Krčmář
2017-05-19 13:30   ` David Hildenbrand
2017-05-18 17:37 ` [PATCH 3/4] KVM: x86/vPMU: fix undefined shift in intel_pmu_refresh() Radim Krčmář
2017-05-19 13:31   ` David Hildenbrand
2017-05-18 17:37 ` [PATCH 4/4] KVM: x86: prevent uninitialized variable warning in check_svme() Radim Krčmář
2017-05-19 13:32   ` David Hildenbrand
2017-05-18 18:52 ` [PATCH 0/4] KVM: x86: fix bugs reported by Dan Carpenter Paolo Bonzini
2017-05-18 19:25   ` Radim Krčmář

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.