kvm.vger.kernel.org archive mirror
* [PATCH v2 1/3] kvm: nVMX: VMWRITE checks VMCS-link pointer before VMCS field
@ 2019-12-06 19:31 Jim Mattson
  2019-12-06 19:31 ` [PATCH v2 2/3] kvm: nVMX: VMWRITE checks unsupported field before read-only field Jim Mattson
  2019-12-06 19:31 ` [PATCH v2 3/3] kvm: nVMX: Aesthetic cleanup of handle_vmread and handle_vmwrite Jim Mattson
  0 siblings, 2 replies; 4+ messages in thread
From: Jim Mattson @ 2019-12-06 19:31 UTC (permalink / raw)
  To: kvm
  Cc: Jim Mattson, Peter Shier, Oliver Upton, Jon Cargille, Liran Alon,
	Paolo Bonzini, Vitaly Kuznetsov

According to the SDM, a VMWRITE in VMX non-root operation with an
invalid VMCS-link pointer results in VMfailInvalid before the validity
of the VMCS field in the secondary source operand is checked.

For consistency, modify both handle_vmwrite and handle_vmread, even
though there was no problem with the latter.
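
As an illustration of the mandated ordering (a hypothetical guest-side
test helper, not part of this patch or of KVM): with the VMCS-link
pointer left at -1ull, a VMWRITE executed in VMX non-root operation
must set CF (VMfailInvalid) even when the field operand is garbage.

	static inline int vmwrite_sets_cf(unsigned long field,
					  unsigned long value)
	{
		unsigned char cf;

		/* AT&T operand order: vmwrite <value>, <field> */
		asm volatile("vmwrite %2, %1; setc %0"
			     : "=q"(cf)
			     : "r"(field), "r"(value)
			     : "cc");
		return cf;	/* 1 => VMfailInvalid */
	}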

Fixes: 6d894f498f5d1 ("KVM: nVMX: vmread/vmwrite: Use shadow vmcs12 if running L2")
Signed-off-by: Jim Mattson <jmattson@google.com>
Reviewed-by: Peter Shier <pshier@google.com>
Reviewed-by: Oliver Upton <oupton@google.com>
Reviewed-by: Jon Cargille <jcargill@google.com>
Cc: Liran Alon <liran.alon@oracle.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
---
v1 -> v2:
 * Split the invalid VMCS-link pointer check from the conditional call to
   copy_vmcs02_to_vmcs12_rare.
 * Hoisted the assignment of vmcs12 to its declaration.
 * Modified handle_vmread to maintain consistency with handle_vmwrite.

 arch/x86/kvm/vmx/nested.c | 59 +++++++++++++++++----------------------
 1 file changed, 25 insertions(+), 34 deletions(-)

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 4aea7d304beb..ee1bf9710e86 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -4753,32 +4753,28 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
 {
 	unsigned long field;
 	u64 field_value;
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
 	u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
 	int len;
 	gva_t gva = 0;
-	struct vmcs12 *vmcs12;
+	struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
+						    : get_vmcs12(vcpu);
 	struct x86_exception e;
 	short offset;
 
 	if (!nested_vmx_check_permission(vcpu))
 		return 1;
 
-	if (to_vmx(vcpu)->nested.current_vmptr == -1ull)
+	/*
+	 * In VMX non-root operation, when the VMCS-link pointer is -1ull,
+	 * any VMREAD sets the ALU flags for VMfailInvalid.
+	 */
+	if (vmx->nested.current_vmptr == -1ull ||
+	    (is_guest_mode(vcpu) &&
+	     get_vmcs12(vcpu)->vmcs_link_pointer == -1ull))
 		return nested_vmx_failInvalid(vcpu);
 
-	if (!is_guest_mode(vcpu))
-		vmcs12 = get_vmcs12(vcpu);
-	else {
-		/*
-		 * When vmcs->vmcs_link_pointer is -1ull, any VMREAD
-		 * to shadowed-field sets the ALU flags for VMfailInvalid.
-		 */
-		if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
-			return nested_vmx_failInvalid(vcpu);
-		vmcs12 = get_shadow_vmcs12(vcpu);
-	}
-
 	/* Decode instruction info and find the field to read */
 	field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
 
@@ -4855,13 +4851,20 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
 	 */
 	u64 field_value = 0;
 	struct x86_exception e;
-	struct vmcs12 *vmcs12;
+	struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
+						    : get_vmcs12(vcpu);
 	short offset;
 
 	if (!nested_vmx_check_permission(vcpu))
 		return 1;
 
-	if (vmx->nested.current_vmptr == -1ull)
+	/*
+	 * In VMX non-root operation, when the VMCS-link pointer is -1ull,
+	 * any VMWRITE sets the ALU flags for VMfailInvalid.
+	 */
+	if (vmx->nested.current_vmptr == -1ull ||
+	    (is_guest_mode(vcpu) &&
+	     get_vmcs12(vcpu)->vmcs_link_pointer == -1ull))
 		return nested_vmx_failInvalid(vcpu);
 
 	if (vmx_instruction_info & (1u << 10))
@@ -4889,24 +4892,12 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
 		return nested_vmx_failValid(vcpu,
 			VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
 
-	if (!is_guest_mode(vcpu)) {
-		vmcs12 = get_vmcs12(vcpu);
-
-		/*
-		 * Ensure vmcs12 is up-to-date before any VMWRITE that dirties
-		 * vmcs12, else we may crush a field or consume a stale value.
-		 */
-		if (!is_shadow_field_rw(field))
-			copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
-	} else {
-		/*
-		 * When vmcs->vmcs_link_pointer is -1ull, any VMWRITE
-		 * to shadowed-field sets the ALU flags for VMfailInvalid.
-		 */
-		if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
-			return nested_vmx_failInvalid(vcpu);
-		vmcs12 = get_shadow_vmcs12(vcpu);
-	}
+	/*
+	 * Ensure vmcs12 is up-to-date before any VMWRITE that dirties
+	 * vmcs12, else we may crush a field or consume a stale value.
+	 */
+	if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field))
+		copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
 
 	offset = vmcs_field_to_offset(field);
 	if (offset < 0)
-- 
2.24.0.393.g34dc348eaf-goog


* [PATCH v2 2/3] kvm: nVMX: VMWRITE checks unsupported field before read-only field
  2019-12-06 19:31 [PATCH v2 1/3] kvm: nVMX: VMWRITE checks VMCS-link pointer before VMCS field Jim Mattson
@ 2019-12-06 19:31 ` Jim Mattson
  2019-12-06 19:31 ` [PATCH v2 3/3] kvm: nVMX: Aesthetic cleanup of handle_vmread and handle_vmwrite Jim Mattson
  1 sibling, 0 replies; 4+ messages in thread
From: Jim Mattson @ 2019-12-06 19:31 UTC (permalink / raw)
  To: kvm; +Cc: Jim Mattson, Peter Shier, Oliver Upton, Jon Cargille, Paolo Bonzini

According to the SDM, VMWRITE checks whether the secondary source
operand corresponds to an unsupported VMCS field before it checks
whether that operand corresponds to a VM-exit information field on a
processor that does not support writing to VM-exit information
fields.
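
For context, the two checks test different things (the sketch below is
illustrative, following the field-encoding layout in the SDM, not a
copy of KVM's helpers): a field is "unsupported" when the
implementation has no backing storage for it (in KVM,
vmcs_field_to_offset() returns a negative offset), whereas "read-only"
falls out of bits 11:10 of the encoding itself.

	/*
	 * Bits 11:10 of a VMCS field encoding give the field type;
	 * type 1 is a (read-only) VM-exit information field.
	 */
	#define VMCS_FIELD_TYPE(field)	(((field) >> 10) & 0x3)

	static bool is_vmexit_info_field(unsigned long field)
	{
		return VMCS_FIELD_TYPE(field) == 1;
	}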

Fixes: 49f705c5324aa ("KVM: nVMX: Implement VMREAD and VMWRITE")
Signed-off-by: Jim Mattson <jmattson@google.com>
Reviewed-by: Peter Shier <pshier@google.com>
Reviewed-by: Oliver Upton <oupton@google.com>
Reviewed-by: Jon Cargille <jcargill@google.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/vmx/nested.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index ee1bf9710e86..94ec089d6d1a 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -4883,6 +4883,12 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
 
 
 	field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
+
+	offset = vmcs_field_to_offset(field);
+	if (offset < 0)
+		return nested_vmx_failValid(vcpu,
+			VMXERR_UNSUPPORTED_VMCS_COMPONENT);
+
 	/*
 	 * If the vCPU supports "VMWRITE to any supported field in the
 	 * VMCS," then the "read-only" fields are actually read/write.
@@ -4899,11 +4905,6 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
 	if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field))
 		copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
 
-	offset = vmcs_field_to_offset(field);
-	if (offset < 0)
-		return nested_vmx_failValid(vcpu,
-			VMXERR_UNSUPPORTED_VMCS_COMPONENT);
-
 	/*
 	 * Some Intel CPUs intentionally drop the reserved bits of the AR byte
 	 * fields on VMWRITE.  Emulate this behavior to ensure consistent KVM
-- 
2.24.0.393.g34dc348eaf-goog


* [PATCH v2 3/3] kvm: nVMX: Aesthetic cleanup of handle_vmread and handle_vmwrite
  2019-12-06 19:31 [PATCH v2 1/3] kvm: nVMX: VMWRITE checks VMCS-link pointer before VMCS field Jim Mattson
  2019-12-06 19:31 ` [PATCH v2 2/3] kvm: nVMX: VMWRITE checks unsupported field before read-only field Jim Mattson
@ 2019-12-06 19:31 ` Jim Mattson
  2019-12-06 21:23   ` Sean Christopherson
  1 sibling, 1 reply; 4+ messages in thread
From: Jim Mattson @ 2019-12-06 19:31 UTC (permalink / raw)
  To: kvm; +Cc: Jim Mattson, Peter Shier, Oliver Upton, Jon Cargille, Paolo Bonzini

Apply reverse fir tree declaration order, wrap long lines, reformat a
block comment, delete an extra blank line, and use BIT_ULL(10) instead
of (1u << 10).
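
For reference, BIT_ULL() comes from include/linux/bits.h; give or take
the integer-constant wrappers there, the two macros expand to:

	#define BIT(nr)		(1UL << (nr))
	#define BIT_ULL(nr)	(1ULL << (nr))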

Signed-off-by: Jim Mattson <jmattson@google.com>
Reviewed-by: Peter Shier <pshier@google.com>
Reviewed-by: Oliver Upton <oupton@google.com>
Reviewed-by: Jon Cargille <jcargill@google.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/vmx/nested.c | 47 +++++++++++++++++++++------------------
 1 file changed, 25 insertions(+), 22 deletions(-)

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 94ec089d6d1a..aff163192369 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -4751,17 +4751,17 @@ static int handle_vmresume(struct kvm_vcpu *vcpu)
 
 static int handle_vmread(struct kvm_vcpu *vcpu)
 {
-	unsigned long field;
-	u64 field_value;
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
-	u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
-	int len;
-	gva_t gva = 0;
 	struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
 						    : get_vmcs12(vcpu);
+	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+	u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct x86_exception e;
+	unsigned long field;
+	u64 field_value;
+	gva_t gva = 0;
 	short offset;
+	int len;
 
 	if (!nested_vmx_check_permission(vcpu))
 		return 1;
@@ -4776,7 +4776,8 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
 		return nested_vmx_failInvalid(vcpu);
 
 	/* Decode instruction info and find the field to read */
-	field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
+	field = kvm_register_readl(vcpu,
+				   (((vmx_instruction_info) >> 28) & 0xf));
 
 	offset = vmcs_field_to_offset(field);
 	if (offset < 0)
@@ -4794,7 +4795,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
 	 * Note that the number of bits actually copied is 32 or 64 depending
 	 * on the guest's mode (32 or 64 bit), not on the given field's length.
 	 */
-	if (vmx_instruction_info & (1u << 10)) {
+	if (vmx_instruction_info & BIT_ULL(10)) {
 		kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf),
 			field_value);
 	} else {
@@ -4803,7 +4804,8 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
 				vmx_instruction_info, true, len, &gva))
 			return 1;
 		/* _system ok, nested_vmx_check_permission has verified cpl=0 */
-		if (kvm_write_guest_virt_system(vcpu, gva, &field_value, len, &e))
+		if (kvm_write_guest_virt_system(vcpu, gva, &field_value,
+						len, &e))
 			kvm_inject_page_fault(vcpu, &e);
 	}
 
@@ -4836,24 +4838,25 @@ static bool is_shadow_field_ro(unsigned long field)
 
 static int handle_vmwrite(struct kvm_vcpu *vcpu)
 {
-	unsigned long field;
-	int len;
-	gva_t gva;
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
+						    : get_vmcs12(vcpu);
 	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
 	u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	struct x86_exception e;
+	unsigned long field;
+	short offset;
+	gva_t gva;
+	int len;
 
-	/* The value to write might be 32 or 64 bits, depending on L1's long
+	/*
+	 * The value to write might be 32 or 64 bits, depending on L1's long
 	 * mode, and eventually we need to write that into a field of several
 	 * possible lengths. The code below first zero-extends the value to 64
 	 * bit (field_value), and then copies only the appropriate number of
 	 * bits into the vmcs12 field.
 	 */
 	u64 field_value = 0;
-	struct x86_exception e;
-	struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
-						    : get_vmcs12(vcpu);
-	short offset;
 
 	if (!nested_vmx_check_permission(vcpu))
 		return 1;
@@ -4867,7 +4870,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
 	     get_vmcs12(vcpu)->vmcs_link_pointer == -1ull))
 		return nested_vmx_failInvalid(vcpu);
 
-	if (vmx_instruction_info & (1u << 10))
+	if (vmx_instruction_info & BIT_ULL(10))
 		field_value = kvm_register_readl(vcpu,
 			(((vmx_instruction_info) >> 3) & 0xf));
 	else {
@@ -4881,8 +4884,8 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
 		}
 	}
 
-
-	field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
+	field = kvm_register_readl(vcpu,
+				   (((vmx_instruction_info) >> 28) & 0xf));
 
 	offset = vmcs_field_to_offset(field);
 	if (offset < 0)
-- 
2.24.0.393.g34dc348eaf-goog


* Re: [PATCH v2 3/3] kvm: nVMX: Aesthetic cleanup of handle_vmread and handle_vmwrite
  2019-12-06 19:31 ` [PATCH v2 3/3] kvm: nVMX: Aesthetic cleanup of handle_vmread and handle_vmwrite Jim Mattson
@ 2019-12-06 21:23   ` Sean Christopherson
  0 siblings, 0 replies; 4+ messages in thread
From: Sean Christopherson @ 2019-12-06 21:23 UTC (permalink / raw)
  To: Jim Mattson; +Cc: kvm, Peter Shier, Oliver Upton, Jon Cargille, Paolo Bonzini

On Fri, Dec 06, 2019 at 11:31:44AM -0800, Jim Mattson wrote:
> Apply reverse fir tree declaration order, wrap long lines, reformat a
> block comment, delete an extra blank line, and use BIT_ULL(10) instead
> of (1u << 10).
> 
> Signed-off-by: Jim Mattson <jmattson@google.com>
> Reviewed-by: Peter Shier <pshier@google.com>
> Reviewed-by: Oliver Upton <oupton@google.com>
> Reviewed-by: Jon Cargille <jcargill@google.com>
> Cc: Paolo Bonzini <pbonzini@redhat.com>
> ---
>  arch/x86/kvm/vmx/nested.c | 47 +++++++++++++++++++++------------------
>  1 file changed, 25 insertions(+), 22 deletions(-)
> 
> diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
> index 94ec089d6d1a..aff163192369 100644
> --- a/arch/x86/kvm/vmx/nested.c
> +++ b/arch/x86/kvm/vmx/nested.c
> @@ -4751,17 +4751,17 @@ static int handle_vmresume(struct kvm_vcpu *vcpu)
>  
>  static int handle_vmread(struct kvm_vcpu *vcpu)
>  {
> -	unsigned long field;
> -	u64 field_value;
> -	struct vcpu_vmx *vmx = to_vmx(vcpu);
> -	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
> -	u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
> -	int len;
> -	gva_t gva = 0;
>  	struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
>  						    : get_vmcs12(vcpu);
> +	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
> +	u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
> +	struct vcpu_vmx *vmx = to_vmx(vcpu);
>  	struct x86_exception e;
> +	unsigned long field;
> +	u64 field_value;
> +	gva_t gva = 0;
>  	short offset;
> +	int len;
>  
>  	if (!nested_vmx_check_permission(vcpu))
>  		return 1;
> @@ -4776,7 +4776,8 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
>  		return nested_vmx_failInvalid(vcpu);
>  
>  	/* Decode instruction info and find the field to read */
> -	field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
> +	field = kvm_register_readl(vcpu,
> +				   (((vmx_instruction_info) >> 28) & 0xf));

I find the current version to be more readable.  Alternatively, rename the
local variable to instr_info and eliminate several of these in one shot.
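
E.g. (untested sketch):

	u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
	...
	field = kvm_register_readl(vcpu, (instr_info >> 28) & 0xf);

which fits in 80 columns without the wrap.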

>  
>  	offset = vmcs_field_to_offset(field);
>  	if (offset < 0)
> @@ -4794,7 +4795,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
>  	 * Note that the number of bits actually copied is 32 or 64 depending
>  	 * on the guest's mode (32 or 64 bit), not on the given field's length.
>  	 */
> -	if (vmx_instruction_info & (1u << 10)) {
> +	if (vmx_instruction_info & BIT_ULL(10)) {

BIT_ULL() is overkill and inaccurate in a sense since instr_info is a
32-bit value.
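
I.e.

	if (vmx_instruction_info & BIT(10)) {

is sufficient here.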

>  		kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf),
>  			field_value);
>  	} else {
> @@ -4803,7 +4804,8 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
>  				vmx_instruction_info, true, len, &gva))
>  			return 1;
>  		/* _system ok, nested_vmx_check_permission has verified cpl=0 */
> -		if (kvm_write_guest_virt_system(vcpu, gva, &field_value, len, &e))
> +		if (kvm_write_guest_virt_system(vcpu, gva, &field_value,
> +						len, &e))

I'd prefer to let this poke out.

>  			kvm_inject_page_fault(vcpu, &e);
>  	}
>  
> @@ -4836,24 +4838,25 @@ static bool is_shadow_field_ro(unsigned long field)
>  
>  static int handle_vmwrite(struct kvm_vcpu *vcpu)
>  {
> -	unsigned long field;
> -	int len;
> -	gva_t gva;
> -	struct vcpu_vmx *vmx = to_vmx(vcpu);
> +	struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
> +						    : get_vmcs12(vcpu);
>  	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
>  	u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
> +	struct vcpu_vmx *vmx = to_vmx(vcpu);
> +	struct x86_exception e;
> +	unsigned long field;
> +	short offset;
> +	gva_t gva;
> +	int len;
>  
> -	/* The value to write might be 32 or 64 bits, depending on L1's long
> +	/*
> +	 * The value to write might be 32 or 64 bits, depending on L1's long
>  	 * mode, and eventually we need to write that into a field of several
>  	 * possible lengths. The code below first zero-extends the value to 64
>  	 * bit (field_value), and then copies only the appropriate number of
>  	 * bits into the vmcs12 field.
>  	 */
>  	u64 field_value = 0;
> -	struct x86_exception e;
> -	struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
> -						    : get_vmcs12(vcpu);
> -	short offset;
>  
>  	if (!nested_vmx_check_permission(vcpu))
>  		return 1;
> @@ -4867,7 +4870,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
>  	     get_vmcs12(vcpu)->vmcs_link_pointer == -1ull))
>  		return nested_vmx_failInvalid(vcpu);
>  
> -	if (vmx_instruction_info & (1u << 10))
> +	if (vmx_instruction_info & BIT_ULL(10))

Same thing here, BIT() is sufficient.

>  		field_value = kvm_register_readl(vcpu,
>  			(((vmx_instruction_info) >> 3) & 0xf));
>  	else {
> @@ -4881,8 +4884,8 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
>  		}
>  	}
>  
> -
> -	field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
> +	field = kvm_register_readl(vcpu,
> +				   (((vmx_instruction_info) >> 28) & 0xf));
>  
>  	offset = vmcs_field_to_offset(field);
>  	if (offset < 0)
> -- 
> 2.24.0.393.g34dc348eaf-goog
> 
