* [PATCH v3 1/3] kvm: nVMX: VMWRITE checks VMCS-link pointer before VMCS field
From: Jim Mattson @ 2019-12-06 23:46 UTC
  To: kvm
  Cc: Jim Mattson, Liran Alon, Paolo Bonzini, Vitaly Kuznetsov,
	Peter Shier, Oliver Upton, Jon Cargille

According to the SDM, a VMWRITE in VMX non-root operation with an
invalid VMCS-link pointer results in VMfailInvalid before the validity
of the VMCS field in the secondary source operand is checked.

For consistency, modify both handle_vmwrite and handle_vmread, even
though there was no problem with the latter.
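
As an illustrative sketch only (simplified, hypothetical names rather
than the actual KVM helpers), the architectural ordering is:

    /* VMfailInvalid is raised first, before the field is decoded... */
    if (current_vmptr == -1ull ||
        (in_vmx_non_root_operation && vmcs_link_pointer == -1ull))
        return VMfailInvalid;

    /* ...and only then is the VMCS-field operand validated. */
    field = decode_field(instr_info);
    if (!is_supported_field(field))
        return VMfailValid(VMXERR_UNSUPPORTED_VMCS_COMPONENT);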

Fixes: 6d894f498f5d1 ("KVM: nVMX: vmread/vmwrite: Use shadow vmcs12 if running L2")
Signed-off-by: Jim Mattson <jmattson@google.com>
Cc: Liran Alon <liran.alon@oracle.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
Reviewed-by: Peter Shier <pshier@google.com>
Reviewed-by: Oliver Upton <oupton@google.com>
Reviewed-by: Jon Cargille <jcargill@google.com>
---
v1 -> v2:
 * Split the invalid VMCS-link pointer check from the conditional call to
   copy_vmcs02_to_vmcs12_rare.
 * Hoisted the assignment of vmcs12 to its declaration.
 * Modified handle_vmread to maintain consistency with handle_vmwrite.
v2 -> v3:
 * No changes

 arch/x86/kvm/vmx/nested.c | 59 +++++++++++++++++----------------------
 1 file changed, 25 insertions(+), 34 deletions(-)

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 4aea7d304beb..ee1bf9710e86 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -4753,32 +4753,28 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
 {
 	unsigned long field;
 	u64 field_value;
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
 	u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
 	int len;
 	gva_t gva = 0;
-	struct vmcs12 *vmcs12;
+	struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
+						    : get_vmcs12(vcpu);
 	struct x86_exception e;
 	short offset;
 
 	if (!nested_vmx_check_permission(vcpu))
 		return 1;
 
-	if (to_vmx(vcpu)->nested.current_vmptr == -1ull)
+	/*
+	 * In VMX non-root operation, when the VMCS-link pointer is -1ull,
+	 * any VMREAD sets the ALU flags for VMfailInvalid.
+	 */
+	if (vmx->nested.current_vmptr == -1ull ||
+	    (is_guest_mode(vcpu) &&
+	     get_vmcs12(vcpu)->vmcs_link_pointer == -1ull))
 		return nested_vmx_failInvalid(vcpu);
 
-	if (!is_guest_mode(vcpu))
-		vmcs12 = get_vmcs12(vcpu);
-	else {
-		/*
-		 * When vmcs->vmcs_link_pointer is -1ull, any VMREAD
-		 * to shadowed-field sets the ALU flags for VMfailInvalid.
-		 */
-		if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
-			return nested_vmx_failInvalid(vcpu);
-		vmcs12 = get_shadow_vmcs12(vcpu);
-	}
-
 	/* Decode instruction info and find the field to read */
 	field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
 
@@ -4855,13 +4851,20 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
 	 */
 	u64 field_value = 0;
 	struct x86_exception e;
-	struct vmcs12 *vmcs12;
+	struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
+						    : get_vmcs12(vcpu);
 	short offset;
 
 	if (!nested_vmx_check_permission(vcpu))
 		return 1;
 
-	if (vmx->nested.current_vmptr == -1ull)
+	/*
+	 * In VMX non-root operation, when the VMCS-link pointer is -1ull,
+	 * any VMWRITE sets the ALU flags for VMfailInvalid.
+	 */
+	if (vmx->nested.current_vmptr == -1ull ||
+	    (is_guest_mode(vcpu) &&
+	     get_vmcs12(vcpu)->vmcs_link_pointer == -1ull))
 		return nested_vmx_failInvalid(vcpu);
 
 	if (vmx_instruction_info & (1u << 10))
@@ -4889,24 +4892,12 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
 		return nested_vmx_failValid(vcpu,
 			VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
 
-	if (!is_guest_mode(vcpu)) {
-		vmcs12 = get_vmcs12(vcpu);
-
-		/*
-		 * Ensure vmcs12 is up-to-date before any VMWRITE that dirties
-		 * vmcs12, else we may crush a field or consume a stale value.
-		 */
-		if (!is_shadow_field_rw(field))
-			copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
-	} else {
-		/*
-		 * When vmcs->vmcs_link_pointer is -1ull, any VMWRITE
-		 * to shadowed-field sets the ALU flags for VMfailInvalid.
-		 */
-		if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
-			return nested_vmx_failInvalid(vcpu);
-		vmcs12 = get_shadow_vmcs12(vcpu);
-	}
+	/*
+	 * Ensure vmcs12 is up-to-date before any VMWRITE that dirties
+	 * vmcs12, else we may crush a field or consume a stale value.
+	 */
+	if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field))
+		copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
 
 	offset = vmcs_field_to_offset(field);
 	if (offset < 0)
-- 
2.24.0.393.g34dc348eaf-goog



* [PATCH v3 2/3] kvm: nVMX: VMWRITE checks unsupported field before read-only field
From: Jim Mattson @ 2019-12-06 23:46 UTC
  To: kvm; +Cc: Jim Mattson, Paolo Bonzini, Peter Shier, Oliver Upton, Jon Cargille

According to the SDM, VMWRITE checks whether the secondary source
operand corresponds to an unsupported VMCS field before it checks
whether that operand corresponds to a VM-exit information field on a
processor that does not support writing to VM-exit information
fields.
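
As a sketch of that ordering (illustrative only; the predicate names
here are hypothetical, not the actual KVM helpers):

    if (!is_supported_field(field))        /* checked first */
        return VMfailValid(VMXERR_UNSUPPORTED_VMCS_COMPONENT);
    if (is_vmexit_info_field(field) && !cpu_has_vmwrite_any_field)
        return VMfailValid(VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);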

Fixes: 49f705c5324aa ("KVM: nVMX: Implement VMREAD and VMWRITE")
Signed-off-by: Jim Mattson <jmattson@google.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Peter Shier <pshier@google.com>
Reviewed-by: Oliver Upton <oupton@google.com>
Reviewed-by: Jon Cargille <jcargill@google.com>
---
v1 -> v2:
 * New commit in v2.
v2 -> v3:
 * No changes.

 arch/x86/kvm/vmx/nested.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index ee1bf9710e86..94ec089d6d1a 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -4883,6 +4883,12 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
 
 
 	field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
+
+	offset = vmcs_field_to_offset(field);
+	if (offset < 0)
+		return nested_vmx_failValid(vcpu,
+			VMXERR_UNSUPPORTED_VMCS_COMPONENT);
+
 	/*
 	 * If the vCPU supports "VMWRITE to any supported field in the
 	 * VMCS," then the "read-only" fields are actually read/write.
@@ -4899,11 +4905,6 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
 	if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field))
 		copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
 
-	offset = vmcs_field_to_offset(field);
-	if (offset < 0)
-		return nested_vmx_failValid(vcpu,
-			VMXERR_UNSUPPORTED_VMCS_COMPONENT);
-
 	/*
 	 * Some Intel CPUs intentionally drop the reserved bits of the AR byte
 	 * fields on VMWRITE.  Emulate this behavior to ensure consistent KVM
-- 
2.24.0.393.g34dc348eaf-goog



* [PATCH v3 3/3] kvm: nVMX: Aesthetic cleanup of handle_vmread and handle_vmwrite
From: Jim Mattson @ 2019-12-06 23:46 UTC
  To: kvm
  Cc: Jim Mattson, Paolo Bonzini, Sean Christopherson, Peter Shier,
	Oliver Upton, Jon Cargille

Apply reverse fir tree declaration order, shorten some variable names
to avoid line wrap, reformat a block comment, delete an extra blank
line, and use BIT(10) instead of (1u << 10).
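
For reference, reverse fir tree order sorts a function's local
declarations from the longest line down to the shortest, e.g.
(illustrative only):

    struct vmcs12 *vmcs12 = get_vmcs12(vcpu);    /* longest line first */
    struct x86_exception e;
    unsigned long field;
    short offset;
    int len;                                     /* shortest line last */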

Signed-off-by: Jim Mattson <jmattson@google.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Sean Christopherson <sean.j.christopherson@intel.com>
Reviewed-by: Peter Shier <pshier@google.com>
Reviewed-by: Oliver Upton <oupton@google.com>
Reviewed-by: Jon Cargille <jcargill@google.com>
---
v1 -> v2:
 * New commit in v2.
v2 -> v3:
 * Shortened some variable names instead of wrapping long lines.
 * Changed BIT_ULL to BIT.

 arch/x86/kvm/vmx/nested.c | 70 +++++++++++++++++++--------------------
 1 file changed, 34 insertions(+), 36 deletions(-)

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 94ec089d6d1a..336fe366a25f 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -4751,17 +4751,17 @@ static int handle_vmresume(struct kvm_vcpu *vcpu)
 
 static int handle_vmread(struct kvm_vcpu *vcpu)
 {
-	unsigned long field;
-	u64 field_value;
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
-	u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
-	int len;
-	gva_t gva = 0;
 	struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
 						    : get_vmcs12(vcpu);
+	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+	u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct x86_exception e;
+	unsigned long field;
+	u64 value;
+	gva_t gva = 0;
 	short offset;
+	int len;
 
 	if (!nested_vmx_check_permission(vcpu))
 		return 1;
@@ -4776,7 +4776,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
 		return nested_vmx_failInvalid(vcpu);
 
 	/* Decode instruction info and find the field to read */
-	field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
+	field = kvm_register_readl(vcpu, (((instr_info) >> 28) & 0xf));
 
 	offset = vmcs_field_to_offset(field);
 	if (offset < 0)
@@ -4786,24 +4786,23 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
 	if (!is_guest_mode(vcpu) && is_vmcs12_ext_field(field))
 		copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
 
-	/* Read the field, zero-extended to a u64 field_value */
-	field_value = vmcs12_read_any(vmcs12, field, offset);
+	/* Read the field, zero-extended to a u64 value */
+	value = vmcs12_read_any(vmcs12, field, offset);
 
 	/*
 	 * Now copy part of this value to register or memory, as requested.
 	 * Note that the number of bits actually copied is 32 or 64 depending
 	 * on the guest's mode (32 or 64 bit), not on the given field's length.
 	 */
-	if (vmx_instruction_info & (1u << 10)) {
-		kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf),
-			field_value);
+	if (instr_info & BIT(10)) {
+		kvm_register_writel(vcpu, (((instr_info) >> 3) & 0xf), value);
 	} else {
 		len = is_64_bit_mode(vcpu) ? 8 : 4;
 		if (get_vmx_mem_address(vcpu, exit_qualification,
-				vmx_instruction_info, true, len, &gva))
+					instr_info, true, len, &gva))
 			return 1;
 		/* _system ok, nested_vmx_check_permission has verified cpl=0 */
-		if (kvm_write_guest_virt_system(vcpu, gva, &field_value, len, &e))
+		if (kvm_write_guest_virt_system(vcpu, gva, &value, len, &e))
 			kvm_inject_page_fault(vcpu, &e);
 	}
 
@@ -4836,24 +4835,25 @@ static bool is_shadow_field_ro(unsigned long field)
 
 static int handle_vmwrite(struct kvm_vcpu *vcpu)
 {
+	struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
+						    : get_vmcs12(vcpu);
+	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+	u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	struct x86_exception e;
 	unsigned long field;
-	int len;
+	short offset;
 	gva_t gva;
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
-	u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+	int len;
 
-	/* The value to write might be 32 or 64 bits, depending on L1's long
+	/*
+	 * The value to write might be 32 or 64 bits, depending on L1's long
 	 * mode, and eventually we need to write that into a field of several
 	 * possible lengths. The code below first zero-extends the value to 64
-	 * bit (field_value), and then copies only the appropriate number of
+	 * bit (value), and then copies only the appropriate number of
 	 * bits into the vmcs12 field.
 	 */
-	u64 field_value = 0;
-	struct x86_exception e;
-	struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
-						    : get_vmcs12(vcpu);
-	short offset;
+	u64 value = 0;
 
 	if (!nested_vmx_check_permission(vcpu))
 		return 1;
@@ -4867,22 +4867,20 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
 	     get_vmcs12(vcpu)->vmcs_link_pointer == -1ull))
 		return nested_vmx_failInvalid(vcpu);
 
-	if (vmx_instruction_info & (1u << 10))
-		field_value = kvm_register_readl(vcpu,
-			(((vmx_instruction_info) >> 3) & 0xf));
+	if (instr_info & BIT(10))
+		value = kvm_register_readl(vcpu, (((instr_info) >> 3) & 0xf));
 	else {
 		len = is_64_bit_mode(vcpu) ? 8 : 4;
 		if (get_vmx_mem_address(vcpu, exit_qualification,
-				vmx_instruction_info, false, len, &gva))
+					instr_info, false, len, &gva))
 			return 1;
-		if (kvm_read_guest_virt(vcpu, gva, &field_value, len, &e)) {
+		if (kvm_read_guest_virt(vcpu, gva, &value, len, &e)) {
 			kvm_inject_page_fault(vcpu, &e);
 			return 1;
 		}
 	}
 
-
-	field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
+	field = kvm_register_readl(vcpu, (((instr_info) >> 28) & 0xf));
 
 	offset = vmcs_field_to_offset(field);
 	if (offset < 0)
@@ -4914,9 +4912,9 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
 	 * the stripped down value, L2 sees the full value as stored by KVM).
 	 */
 	if (field >= GUEST_ES_AR_BYTES && field <= GUEST_TR_AR_BYTES)
-		field_value &= 0x1f0ff;
+		value &= 0x1f0ff;
 
-	vmcs12_write_any(vmcs12, field, offset, field_value);
+	vmcs12_write_any(vmcs12, field, offset, value);
 
 	/*
 	 * Do not track vmcs12 dirty-state if in guest-mode as we actually
@@ -4933,7 +4931,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
 			preempt_disable();
 			vmcs_load(vmx->vmcs01.shadow_vmcs);
 
-			__vmcs_writel(field, field_value);
+			__vmcs_writel(field, value);
 
 			vmcs_clear(vmx->vmcs01.shadow_vmcs);
 			vmcs_load(vmx->loaded_vmcs->vmcs);
-- 
2.24.0.393.g34dc348eaf-goog



* Re: [PATCH v3 3/3] kvm: nVMX: Aesthetic cleanup of handle_vmread and handle_vmwrite
From: Paolo Bonzini @ 2019-12-09 15:25 UTC
  To: Jim Mattson, kvm
  Cc: Sean Christopherson, Peter Shier, Oliver Upton, Jon Cargille

On 07/12/19 00:46, Jim Mattson wrote:
> Apply reverse fir tree declaration order, shorten some variable names
> to avoid line wrap, reformat a block comment, delete an extra blank
> line, and use BIT(10) instead of (1u << 10).
> 
> Signed-off-by: Jim Mattson <jmattson@google.com>
> Cc: Paolo Bonzini <pbonzini@redhat.com>
> Cc: Sean Christopherson <sean.j.christopherson@intel.com>
> Reviewed-by: Peter Shier <pshier@google.com>
> Reviewed-by: Oliver Upton <oupton@google.com>
> Reviewed-by: Jon Cargille <jcargill@google.com>
> ---
> v1 -> v2:
>  * New commit in v2.
> v2 -> v3:
>  * Shortened some variable names instead of wrapping long lines.
>  * Changed BIT_ULL to BIT.
> 
>  arch/x86/kvm/vmx/nested.c | 70 +++++++++++++++++++--------------------
>  1 file changed, 34 insertions(+), 36 deletions(-)
> 

Queued, thanks.

Paolo

