* [PATCH] kvm: nVMX: Track active shadow VMCSs
@ 2016-07-15 17:39 Jim Mattson
  2016-07-16  0:50 ` Bandan Das
  0 siblings, 1 reply; 18+ messages in thread
From: Jim Mattson @ 2016-07-15 17:39 UTC (permalink / raw)
  To: kvm; +Cc: dmatlack, pfeiner, jmattson

If a shadow VMCS is referenced by the VMCS link pointer in the
current VMCS, then VM-entry makes the shadow VMCS active on the
current processor. No VMCS should ever be active on more than one
processor. If a VMCS is to be migrated from one processor to
another, software should execute a VMCLEAR for the VMCS on the
first processor before the VMCS is made active on the second
processor.

We already keep track of ordinary VMCSs that are made active by
VMPTRLD. Extend that tracking to handle shadow VMCSs that are
made active by VM-entry to their parents.

Signed-off-by: Jim Mattson <jmattson@google.com>

 arch/x86/kvm/vmx.c | 73 +++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 53 insertions(+), 20 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 64a79f2..dd38521 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -398,7 +398,7 @@ struct nested_vmx {
 	/* The host-usable pointer to the above */
 	struct page *current_vmcs12_page;
 	struct vmcs12 *current_vmcs12;
-	struct vmcs *current_shadow_vmcs;
+	struct loaded_vmcs current_shadow_vmcs;
 	/*
 	 * Indicates if the shadow vmcs must be updated with the
 	 * data hold by vmcs12
@@ -2113,6 +2113,15 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
 			new.control) != old.control);
 }
 
+static void record_loaded_vmcs(struct loaded_vmcs *loaded_vmcs, int cpu)
+{
+       if (loaded_vmcs->vmcs) {
+	       list_add(&loaded_vmcs->loaded_vmcss_on_cpu_link,
+			&per_cpu(loaded_vmcss_on_cpu, cpu));
+	       loaded_vmcs->cpu = cpu;
+       }
+}
+
 /*
  * Switches to specified vcpu, until a matching vcpu_put(), but assumes
  * vcpu mutex is already taken.
@@ -2124,8 +2133,11 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 	if (!vmm_exclusive)
 		kvm_cpu_vmxon(phys_addr);
-	else if (vmx->loaded_vmcs->cpu != cpu)
+	else if (vmx->loaded_vmcs->cpu != cpu) {
 		loaded_vmcs_clear(vmx->loaded_vmcs);
+		if (vmx->nested.current_shadow_vmcs.vmcs)
+			loaded_vmcs_clear(&vmx->nested.current_shadow_vmcs);
+	}
 
 	if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
 		per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
@@ -2147,8 +2159,8 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		 */
 		smp_rmb();
 
-		list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
-			 &per_cpu(loaded_vmcss_on_cpu, cpu));
+		record_loaded_vmcs(vmx->loaded_vmcs, cpu);
+		record_loaded_vmcs(&vmx->nested.current_shadow_vmcs, cpu);
 		crash_enable_local_vmclear(cpu);
 		local_irq_enable();
 
@@ -2161,8 +2173,6 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 		rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
 		vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
-
-		vmx->loaded_vmcs->cpu = cpu;
 	}
 
 	/* Setup TSC multiplier */
@@ -6812,6 +6822,34 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
 	return 0;
 }
 
+static int setup_shadow_vmcs(struct vcpu_vmx *vmx)
+{
+       struct vmcs *shadow_vmcs;
+       int cpu;
+
+       shadow_vmcs = alloc_vmcs();
+       if (!shadow_vmcs)
+	       return -ENOMEM;
+
+       /* mark vmcs as shadow */
+       shadow_vmcs->revision_id |= (1u << 31);
+       /* init shadow vmcs */
+       vmx->nested.current_shadow_vmcs.vmcs = shadow_vmcs;
+       loaded_vmcs_init(&vmx->nested.current_shadow_vmcs);
+
+       cpu = get_cpu();
+       local_irq_disable();
+       crash_disable_local_vmclear(cpu);
+
+       record_loaded_vmcs(&vmx->nested.current_shadow_vmcs, cpu);
+
+       crash_enable_local_vmclear(cpu);
+       local_irq_enable();
+       put_cpu();
+
+       return 0;
+}
+
 /*
  * Emulate the VMXON instruction.
  * Currently, we just remember that VMX is active, and do not save or even
@@ -6824,7 +6862,6 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
 {
 	struct kvm_segment cs;
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	struct vmcs *shadow_vmcs;
 	const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
 		| FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
 
@@ -6867,14 +6904,9 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
 	}
 
 	if (enable_shadow_vmcs) {
-		shadow_vmcs = alloc_vmcs();
-		if (!shadow_vmcs)
-			return -ENOMEM;
-		/* mark vmcs as shadow */
-		shadow_vmcs->revision_id |= (1u << 31);
-		/* init shadow vmcs */
-		vmcs_clear(shadow_vmcs);
-		vmx->nested.current_shadow_vmcs = shadow_vmcs;
+		int ret = setup_shadow_vmcs(vmx);
+		if (ret)
+			return ret;
 	}
 
 	INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool));
@@ -6959,7 +6991,7 @@ static void free_nested(struct vcpu_vmx *vmx)
 	free_vpid(vmx->nested.vpid02);
 	nested_release_vmcs12(vmx);
 	if (enable_shadow_vmcs)
-		free_vmcs(vmx->nested.current_shadow_vmcs);
+		free_loaded_vmcs(&vmx->nested.current_shadow_vmcs);
 	/* Unpin physical memory we referred to in current vmcs02 */
 	if (vmx->nested.apic_access_page) {
 		nested_release_page(vmx->nested.apic_access_page);
@@ -7135,7 +7167,7 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
 	int i;
 	unsigned long field;
 	u64 field_value;
-	struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs;
+	struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs.vmcs;
 	const unsigned long *fields = shadow_read_write_fields;
 	const int num_fields = max_shadow_read_write_fields;
 
@@ -7184,7 +7216,7 @@ static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
 	int i, q;
 	unsigned long field;
 	u64 field_value = 0;
-	struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs;
+	struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs.vmcs;
 
 	vmcs_load(shadow_vmcs);
 
@@ -7364,10 +7396,11 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
 		vmx->nested.current_vmcs12 = new_vmcs12;
 		vmx->nested.current_vmcs12_page = page;
 		if (enable_shadow_vmcs) {
+			struct vmcs *shadow_vmcs;
+			shadow_vmcs = vmx->nested.current_shadow_vmcs.vmcs;
 			vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
 				      SECONDARY_EXEC_SHADOW_VMCS);
-			vmcs_write64(VMCS_LINK_POINTER,
-				     __pa(vmx->nested.current_shadow_vmcs));
+			vmcs_write64(VMCS_LINK_POINTER, __pa(shadow_vmcs));
 			vmx->nested.sync_shadow_vmcs = true;
 		}
 	}

* Re: [PATCH] kvm: nVMX: Track active shadow VMCSs
  2016-07-15 17:39 [PATCH] kvm: nVMX: Track active shadow VMCSs Jim Mattson
@ 2016-07-16  0:50 ` Bandan Das
  2016-07-16 14:50   ` Paolo Bonzini
  2016-07-18 21:30   ` [PATCH] kvm: nVMX: Track active shadow VMCSs Jim Mattson
  0 siblings, 2 replies; 18+ messages in thread
From: Bandan Das @ 2016-07-16  0:50 UTC (permalink / raw)
  To: Jim Mattson; +Cc: kvm, dmatlack, pfeiner


Hi Jim,

Some comments below.

Jim Mattson <jmattson@google.com> writes:

> If a shadow VMCS is referenced by the VMCS link pointer in the
> current VMCS, then VM-entry makes the shadow VMCS active on the
> current processor. No VMCS should ever be active on more than one
> processor. If a VMCS is to be migrated from one processor to
> another, software should execute a VMCLEAR for the VMCS on the
> first processor before the VMCS is made active on the second
> processor.
>
> We already keep track of ordinary VMCSs that are made active by
> VMPTRLD. Extend that tracking to handle shadow VMCSs that are
> made active by VM-entry to their parents.
>
> Signed-off-by: Jim Mattson <jmattson@google.com>
>
>  arch/x86/kvm/vmx.c | 73 +++++++++++++++++++++++++++++++++++++++---------------
>  1 file changed, 53 insertions(+), 20 deletions(-)
>
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 64a79f2..dd38521 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -398,7 +398,7 @@ struct nested_vmx {
>  	/* The host-usable pointer to the above */
>  	struct page *current_vmcs12_page;
>  	struct vmcs12 *current_vmcs12;
> -	struct vmcs *current_shadow_vmcs;
> +	struct loaded_vmcs current_shadow_vmcs;
>  	/*
>  	 * Indicates if the shadow vmcs must be updated with the
>  	 * data hold by vmcs12
> @@ -2113,6 +2113,15 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
>  			new.control) != old.control);
>  }
>  
> +static void record_loaded_vmcs(struct loaded_vmcs *loaded_vmcs, int cpu)
> +{
> +       if (loaded_vmcs->vmcs) {
> +	       list_add(&loaded_vmcs->loaded_vmcss_on_cpu_link,
> +			&per_cpu(loaded_vmcss_on_cpu, cpu));
> +	       loaded_vmcs->cpu = cpu;
> +       }
> +}
> +
>  /*
>   * Switches to specified vcpu, until a matching vcpu_put(), but assumes
>   * vcpu mutex is already taken.
> @@ -2124,8 +2133,11 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>  
>  	if (!vmm_exclusive)
>  		kvm_cpu_vmxon(phys_addr);
> -	else if (vmx->loaded_vmcs->cpu != cpu)
> +	else if (vmx->loaded_vmcs->cpu != cpu) {
>  		loaded_vmcs_clear(vmx->loaded_vmcs);
> +		if (vmx->nested.current_shadow_vmcs.vmcs)
> +			loaded_vmcs_clear(&vmx->nested.current_shadow_vmcs);
> +	}
>  
>  	if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
>  		per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
> @@ -2147,8 +2159,8 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>  		 */
>  		smp_rmb();
>  
> -		list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
> -			 &per_cpu(loaded_vmcss_on_cpu, cpu));
> +		record_loaded_vmcs(vmx->loaded_vmcs, cpu);
> +		record_loaded_vmcs(&vmx->nested.current_shadow_vmcs, cpu);
>  		crash_enable_local_vmclear(cpu);
>  		local_irq_enable();
>  
> @@ -2161,8 +2173,6 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>  
>  		rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
>  		vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
> -
> -		vmx->loaded_vmcs->cpu = cpu;
>  	}

Is the order of setting loaded_vmcs->cpu important here? Your patch changed
it; I can't think of anything wrong with it, however...

>  	/* Setup TSC multiplier */
> @@ -6812,6 +6822,34 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
>  	return 0;
>  }
>  
> +static int setup_shadow_vmcs(struct vcpu_vmx *vmx)
> +{
> +       struct vmcs *shadow_vmcs;
> +       int cpu;
> +
> +       shadow_vmcs = alloc_vmcs();
> +       if (!shadow_vmcs)
> +	       return -ENOMEM;
> +
> +       /* mark vmcs as shadow */
> +       shadow_vmcs->revision_id |= (1u << 31);
> +       /* init shadow vmcs */
> +       vmx->nested.current_shadow_vmcs.vmcs = shadow_vmcs;
> +       loaded_vmcs_init(&vmx->nested.current_shadow_vmcs);
> +
> +       cpu = get_cpu();
> +       local_irq_disable();
> +       crash_disable_local_vmclear(cpu);
> +
> +       record_loaded_vmcs(&vmx->nested.current_shadow_vmcs, cpu);
> +
> +       crash_enable_local_vmclear(cpu);
> +       local_irq_enable();
> +       put_cpu();
> +
> +       return 0;
> +}
> +
>  /*
>   * Emulate the VMXON instruction.
>   * Currently, we just remember that VMX is active, and do not save or even
> @@ -6824,7 +6862,6 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
>  {
>  	struct kvm_segment cs;
>  	struct vcpu_vmx *vmx = to_vmx(vcpu);
> -	struct vmcs *shadow_vmcs;
>  	const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
>  		| FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
>  
> @@ -6867,14 +6904,9 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
>  	}
>  
>  	if (enable_shadow_vmcs) {
> -		shadow_vmcs = alloc_vmcs();
> -		if (!shadow_vmcs)
> -			return -ENOMEM;
> -		/* mark vmcs as shadow */
> -		shadow_vmcs->revision_id |= (1u << 31);
> -		/* init shadow vmcs */
> -		vmcs_clear(shadow_vmcs);
> -		vmx->nested.current_shadow_vmcs = shadow_vmcs;
> +		int ret = setup_shadow_vmcs(vmx);
> +		if (ret)
> +			return ret;
Nit:
             if (setup_shadow_vmcs(vmx))
                       return -ENOMEM;

Also, isn't it better to actually add the shadow vmcs to the list
in handle_vmptrld than in handle_vmon? That is where shadowing is actually
enabled and the link pointer is set.

>  	}
>  
>  	INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool));
> @@ -6959,7 +6991,7 @@ static void free_nested(struct vcpu_vmx *vmx)
>  	free_vpid(vmx->nested.vpid02);
>  	nested_release_vmcs12(vmx);
>  	if (enable_shadow_vmcs)
> -		free_vmcs(vmx->nested.current_shadow_vmcs);
> +		free_loaded_vmcs(&vmx->nested.current_shadow_vmcs);
>  	/* Unpin physical memory we referred to in current vmcs02 */
>  	if (vmx->nested.apic_access_page) {
>  		nested_release_page(vmx->nested.apic_access_page);
> @@ -7135,7 +7167,7 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
>  	int i;
>  	unsigned long field;
>  	u64 field_value;
> -	struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs;
> +	struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs.vmcs;
>  	const unsigned long *fields = shadow_read_write_fields;
>  	const int num_fields = max_shadow_read_write_fields;
>  
> @@ -7184,7 +7216,7 @@ static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
>  	int i, q;
>  	unsigned long field;
>  	u64 field_value = 0;
> -	struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs;
> +	struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs.vmcs;
>  
>  	vmcs_load(shadow_vmcs);
>  
> @@ -7364,10 +7396,11 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
>  		vmx->nested.current_vmcs12 = new_vmcs12;
>  		vmx->nested.current_vmcs12_page = page;
>  		if (enable_shadow_vmcs) {
> +			struct vmcs *shadow_vmcs;
> +			shadow_vmcs = vmx->nested.current_shadow_vmcs.vmcs;

Nit: struct vmcs *shadow_vmcs =
                 vmx->nested.current_shadow_vmcs.vmcs;

Bandan

>  			vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
>  				      SECONDARY_EXEC_SHADOW_VMCS);
> -			vmcs_write64(VMCS_LINK_POINTER,
> -				     __pa(vmx->nested.current_shadow_vmcs));
> +			vmcs_write64(VMCS_LINK_POINTER, __pa(shadow_vmcs));
>  			vmx->nested.sync_shadow_vmcs = true;
>  		}
>  	}

* Re: [PATCH] kvm: nVMX: Track active shadow VMCSs
  2016-07-16  0:50 ` Bandan Das
@ 2016-07-16 14:50   ` Paolo Bonzini
  2016-07-16 19:48     ` Bandan Das
  2016-07-18 21:30   ` [PATCH] kvm: nVMX: Track active shadow VMCSs Jim Mattson
  1 sibling, 1 reply; 18+ messages in thread
From: Paolo Bonzini @ 2016-07-16 14:50 UTC (permalink / raw)
  To: Bandan Das, Jim Mattson; +Cc: kvm, dmatlack, pfeiner



On 16/07/2016 02:50, Bandan Das wrote:
>> > +		int ret = setup_shadow_vmcs(vmx);
>> > +		if (ret)
>> > +			return ret;
> Nit:
>              if (setup_shadow_vmcs(vmx))
>                        return -ENOMEM;

Nope, -errno is a much better return value than bool, because you never
know if bool means success or failure.  Jim's patch got it right. :)
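
For example, propagating the -errno keeps the call site correct even if
setup_shadow_vmcs() later grows failure modes other than -ENOMEM; just a
sketch of the convention:

	int ret = setup_shadow_vmcs(vmx);
	if (ret)
		return ret;	/* whatever -errno the helper chose */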

Paolo

* Re: [PATCH] kvm: nVMX: Track active shadow VMCSs
  2016-07-16 14:50   ` Paolo Bonzini
@ 2016-07-16 19:48     ` Bandan Das
  2016-07-19 16:22       ` Jim Mattson
  0 siblings, 1 reply; 18+ messages in thread
From: Bandan Das @ 2016-07-16 19:48 UTC (permalink / raw)
  To: Paolo Bonzini; +Cc: Jim Mattson, kvm, dmatlack, pfeiner

Hi Paolo,

Paolo Bonzini <pbonzini@redhat.com> writes:

> On 16/07/2016 02:50, Bandan Das wrote:
>>> > +		int ret = setup_shadow_vmcs(vmx);
>>> > +		if (ret)
>>> > +			return ret;
>> Nit:
>>              if (setup_shadow_vmcs(vmx))
>>                        return -ENOMEM;
>
> Nope, -errno is a much better return value than bool, because you never
> know if bool means success or failure.  Jim's patch got it right. :)

This doesn't seem to be ambiguous in the context of setup_shadow_vmcs since
it has one error path and it always returns 0 on success. Just a nit to get rid of
that local variable...

> Paolo

* Re: [PATCH] kvm: nVMX: Track active shadow VMCSs
  2016-07-16  0:50 ` Bandan Das
  2016-07-16 14:50   ` Paolo Bonzini
@ 2016-07-18 21:30   ` Jim Mattson
  2016-07-18 23:36     ` Bandan Das
  1 sibling, 1 reply; 18+ messages in thread
From: Jim Mattson @ 2016-07-18 21:30 UTC (permalink / raw)
  To: Bandan Das; +Cc: kvm, David Matlack, Peter Feiner

My replies are inline.

On Fri, Jul 15, 2016 at 5:50 PM, Bandan Das <bsd@redhat.com> wrote:
>
> Hi Jim,
>
> Some comments below.
>
> Jim Mattson <jmattson@google.com> writes:
>
>> If a shadow VMCS is referenced by the VMCS link pointer in the
>> current VMCS, then VM-entry makes the shadow VMCS active on the
>> current processor. No VMCS should ever be active on more than one
>> processor. If a VMCS is to be migrated from one processor to
>> another, software should execute a VMCLEAR for the VMCS on the
>> first processor before the VMCS is made active on the second
>> processor.
>>
>> We already keep track of ordinary VMCSs that are made active by
>> VMPTRLD. Extend that tracking to handle shadow VMCSs that are
>> made active by VM-entry to their parents.
>>
>> Signed-off-by: Jim Mattson <jmattson@google.com>
>>
>>  arch/x86/kvm/vmx.c | 73 +++++++++++++++++++++++++++++++++++++++---------------
>>  1 file changed, 53 insertions(+), 20 deletions(-)
>>
>> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
>> index 64a79f2..dd38521 100644
>> --- a/arch/x86/kvm/vmx.c
>> +++ b/arch/x86/kvm/vmx.c
>> @@ -398,7 +398,7 @@ struct nested_vmx {
>>       /* The host-usable pointer to the above */
>>       struct page *current_vmcs12_page;
>>       struct vmcs12 *current_vmcs12;
>> -     struct vmcs *current_shadow_vmcs;
>> +     struct loaded_vmcs current_shadow_vmcs;
>>       /*
>>        * Indicates if the shadow vmcs must be updated with the
>>        * data hold by vmcs12
>> @@ -2113,6 +2113,15 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
>>                       new.control) != old.control);
>>  }
>>
>> +static void record_loaded_vmcs(struct loaded_vmcs *loaded_vmcs, int cpu)
>> +{
>> +       if (loaded_vmcs->vmcs) {
>> +            list_add(&loaded_vmcs->loaded_vmcss_on_cpu_link,
>> +                     &per_cpu(loaded_vmcss_on_cpu, cpu));
>> +            loaded_vmcs->cpu = cpu;
>> +       }
>> +}
>> +
>>  /*
>>   * Switches to specified vcpu, until a matching vcpu_put(), but assumes
>>   * vcpu mutex is already taken.
>> @@ -2124,8 +2133,11 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>>
>>       if (!vmm_exclusive)
>>               kvm_cpu_vmxon(phys_addr);
>> -     else if (vmx->loaded_vmcs->cpu != cpu)
>> +     else if (vmx->loaded_vmcs->cpu != cpu) {
>>               loaded_vmcs_clear(vmx->loaded_vmcs);
>> +             if (vmx->nested.current_shadow_vmcs.vmcs)
>> +                     loaded_vmcs_clear(&vmx->nested.current_shadow_vmcs);
>> +     }
>>
>>       if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
>>               per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
>> @@ -2147,8 +2159,8 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>>                */
>>               smp_rmb();
>>
>> -             list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
>> -                      &per_cpu(loaded_vmcss_on_cpu, cpu));
>> +             record_loaded_vmcs(vmx->loaded_vmcs, cpu);
>> +             record_loaded_vmcs(&vmx->nested.current_shadow_vmcs, cpu);
>>               crash_enable_local_vmclear(cpu);
>>               local_irq_enable();
>>
>> @@ -2161,8 +2173,6 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>>
>>               rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
>>               vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
>> -
>> -             vmx->loaded_vmcs->cpu = cpu;
>>       }
>
> Is the order of setting loaded_vmcs->cpu important here? Your patch changed
> it; I can't think of anything wrong with it, however...
>

I can't find any references to loaded_vmcs->cpu between the new
assignment and the original assignment. I believe this change is
okay.

However, on a related note, I have preserved the existing ordering of
VMPTRLD followed by adding the VMCS to the loaded_vmcss_on_cpu. Is
this ordering safe in the event of a KEXEC?
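
For context, the window in question looks roughly like this (a simplified
sketch of the existing vmx_vcpu_load() flow, not the literal code):

	per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
	vmcs_load(vmx->loaded_vmcs->vmcs);  /* VMPTRLD: VMCS is active now */
	/*
	 * A crash/kexec in this window would VMCLEAR only the VMCSs found
	 * on loaded_vmcss_on_cpu and miss the VMCS just made active,
	 * since it has not been added to the list yet.
	 */
	list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
		 &per_cpu(loaded_vmcss_on_cpu, cpu));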

>>       /* Setup TSC multiplier */
>> @@ -6812,6 +6822,34 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
>>       return 0;
>>  }
>>
>> +static int setup_shadow_vmcs(struct vcpu_vmx *vmx)
>> +{
>> +       struct vmcs *shadow_vmcs;
>> +       int cpu;
>> +
>> +       shadow_vmcs = alloc_vmcs();
>> +       if (!shadow_vmcs)
>> +            return -ENOMEM;
>> +
>> +       /* mark vmcs as shadow */
>> +       shadow_vmcs->revision_id |= (1u << 31);
>> +       /* init shadow vmcs */
>> +       vmx->nested.current_shadow_vmcs.vmcs = shadow_vmcs;
>> +       loaded_vmcs_init(&vmx->nested.current_shadow_vmcs);
>> +
>> +       cpu = get_cpu();
>> +       local_irq_disable();
>> +       crash_disable_local_vmclear(cpu);
>> +
>> +       record_loaded_vmcs(&vmx->nested.current_shadow_vmcs, cpu);
>> +
>> +       crash_enable_local_vmclear(cpu);
>> +       local_irq_enable();
>> +       put_cpu();
>> +
>> +       return 0;
>> +}
>> +
>>  /*
>>   * Emulate the VMXON instruction.
>>   * Currently, we just remember that VMX is active, and do not save or even
>> @@ -6824,7 +6862,6 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
>>  {
>>       struct kvm_segment cs;
>>       struct vcpu_vmx *vmx = to_vmx(vcpu);
>> -     struct vmcs *shadow_vmcs;
>>       const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
>>               | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
>>
>> @@ -6867,14 +6904,9 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
>>       }
>>
>>       if (enable_shadow_vmcs) {
>> -             shadow_vmcs = alloc_vmcs();
>> -             if (!shadow_vmcs)
>> -                     return -ENOMEM;
>> -             /* mark vmcs as shadow */
>> -             shadow_vmcs->revision_id |= (1u << 31);
>> -             /* init shadow vmcs */
>> -             vmcs_clear(shadow_vmcs);
>> -             vmx->nested.current_shadow_vmcs = shadow_vmcs;
>> +             int ret = setup_shadow_vmcs(vmx);
>> +             if (ret)
>> +                     return ret;
> Nit:
>              if (setup_shadow_vmcs(vmx))
>                        return -ENOMEM;
>
> Also, isn't it better to actually add the shadow vmcs to the list
> in handle_vmptrld than in handle_vmon? That is where shadowing is actually
> enabled and the link pointer is set.
>

Are you suggesting that the
shadow VMCS is on the loaded_vmcss_on_cpu list iff it is
referenced by the link pointer in the parent? Maintaining that
invariant will require some extra work. The shadow VMCS will have
to be removed from the loaded_vmcss_on_cpu list in
nested_release_vmcs12, and vmx_vcpu_load will have to check
vmx->nested.current_vmptr before the loaded_vmcss_on_cpu
operations involving the shadow VMCS.
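
To make the cost concrete, a hypothetical sketch of that extra work
(placement and details are illustrative only):

	/*
	 * In nested_release_vmcs12(): the parent no longer links the
	 * shadow VMCS, so VMCLEAR it and take it off loaded_vmcss_on_cpu
	 * (loaded_vmcs_clear() unlinks it via __loaded_vmcs_clear()).
	 */
	if (enable_shadow_vmcs)
		loaded_vmcs_clear(&vmx->nested.current_shadow_vmcs);

	/* In vmx_vcpu_load(): track the shadow VMCS only while linked. */
	if (vmx->nested.current_vmptr != -1ull)
		record_loaded_vmcs(&vmx->nested.current_shadow_vmcs, cpu);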

>>       }
>>
>>       INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool));
>> @@ -6959,7 +6991,7 @@ static void free_nested(struct vcpu_vmx *vmx)
>>       free_vpid(vmx->nested.vpid02);
>>       nested_release_vmcs12(vmx);
>>       if (enable_shadow_vmcs)
>> -             free_vmcs(vmx->nested.current_shadow_vmcs);
>> +             free_loaded_vmcs(&vmx->nested.current_shadow_vmcs);
>>       /* Unpin physical memory we referred to in current vmcs02 */
>>       if (vmx->nested.apic_access_page) {
>>               nested_release_page(vmx->nested.apic_access_page);
>> @@ -7135,7 +7167,7 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
>>       int i;
>>       unsigned long field;
>>       u64 field_value;
>> -     struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs;
>> +     struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs.vmcs;
>>       const unsigned long *fields = shadow_read_write_fields;
>>       const int num_fields = max_shadow_read_write_fields;
>>
>> @@ -7184,7 +7216,7 @@ static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
>>       int i, q;
>>       unsigned long field;
>>       u64 field_value = 0;
>> -     struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs;
>> +     struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs.vmcs;
>>
>>       vmcs_load(shadow_vmcs);
>>
>> @@ -7364,10 +7396,11 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
>>               vmx->nested.current_vmcs12 = new_vmcs12;
>>               vmx->nested.current_vmcs12_page = page;
>>               if (enable_shadow_vmcs) {
>> +                     struct vmcs *shadow_vmcs;
>> +                     shadow_vmcs = vmx->nested.current_shadow_vmcs.vmcs;
>
> Nit: struct vmcs *shadow_vmcs =
>                  vmx->nested.current_shadow_vmcs.vmcs;
>
> Bandan
>
>>                       vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
>>                                     SECONDARY_EXEC_SHADOW_VMCS);
>> -                     vmcs_write64(VMCS_LINK_POINTER,
>> -                                  __pa(vmx->nested.current_shadow_vmcs));
>> +                     vmcs_write64(VMCS_LINK_POINTER, __pa(shadow_vmcs));
>>                       vmx->nested.sync_shadow_vmcs = true;
>>               }
>>       }

* Re: [PATCH] kvm: nVMX: Track active shadow VMCSs
  2016-07-18 21:30   ` [PATCH] kvm: nVMX: Track active shadow VMCSs Jim Mattson
@ 2016-07-18 23:36     ` Bandan Das
  2016-07-19  8:41       ` Paolo Bonzini
  0 siblings, 1 reply; 18+ messages in thread
From: Bandan Das @ 2016-07-18 23:36 UTC (permalink / raw)
  To: Jim Mattson; +Cc: kvm, David Matlack, Peter Feiner, Paolo Bonzini

Jim Mattson <jmattson@google.com> writes:

> My replies are inline.
>
> On Fri, Jul 15, 2016 at 5:50 PM, Bandan Das <bsd@redhat.com> wrote:
>>
>> Hi Jim,
>>
>> Some comments below.
>>
>> Jim Mattson <jmattson@google.com> writes:
>>
>>> If a shadow VMCS is referenced by the VMCS link pointer in the
>>> current VMCS, then VM-entry makes the shadow VMCS active on the
>>> current processor. No VMCS should ever be active on more than one
>>> processor. If a VMCS is to be migrated from one processor to
>>> another, software should execute a VMCLEAR for the VMCS on the
>>> first processor before the VMCS is made active on the second
>>> processor.
>>>
>>> We already keep track of ordinary VMCSs that are made active by
>>> VMPTRLD. Extend that tracking to handle shadow VMCSs that are
>>> made active by VM-entry to their parents.
>>>
>>> Signed-off-by: Jim Mattson <jmattson@google.com>
>>>
>>>  arch/x86/kvm/vmx.c | 73 +++++++++++++++++++++++++++++++++++++++---------------
>>>  1 file changed, 53 insertions(+), 20 deletions(-)
>>>
>>> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
>>> index 64a79f2..dd38521 100644
>>> --- a/arch/x86/kvm/vmx.c
>>> +++ b/arch/x86/kvm/vmx.c
>>> @@ -398,7 +398,7 @@ struct nested_vmx {
>>>       /* The host-usable pointer to the above */
>>>       struct page *current_vmcs12_page;
>>>       struct vmcs12 *current_vmcs12;
>>> -     struct vmcs *current_shadow_vmcs;
>>> +     struct loaded_vmcs current_shadow_vmcs;
>>>       /*
>>>        * Indicates if the shadow vmcs must be updated with the
>>>        * data hold by vmcs12
>>> @@ -2113,6 +2113,15 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
>>>                       new.control) != old.control);
>>>  }
>>>
>>> +static void record_loaded_vmcs(struct loaded_vmcs *loaded_vmcs, int cpu)
>>> +{
>>> +       if (loaded_vmcs->vmcs) {
>>> +            list_add(&loaded_vmcs->loaded_vmcss_on_cpu_link,
>>> +                     &per_cpu(loaded_vmcss_on_cpu, cpu));
>>> +            loaded_vmcs->cpu = cpu;
>>> +       }
>>> +}
>>> +
>>>  /*
>>>   * Switches to specified vcpu, until a matching vcpu_put(), but assumes
>>>   * vcpu mutex is already taken.
>>> @@ -2124,8 +2133,11 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>>>
>>>       if (!vmm_exclusive)
>>>               kvm_cpu_vmxon(phys_addr);
>>> -     else if (vmx->loaded_vmcs->cpu != cpu)
>>> +     else if (vmx->loaded_vmcs->cpu != cpu) {
>>>               loaded_vmcs_clear(vmx->loaded_vmcs);
>>> +             if (vmx->nested.current_shadow_vmcs.vmcs)
>>> +                     loaded_vmcs_clear(&vmx->nested.current_shadow_vmcs);
>>> +     }
>>>
>>>       if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
>>>               per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
>>> @@ -2147,8 +2159,8 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>>>                */
>>>               smp_rmb();
>>>
>>> -             list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
>>> -                      &per_cpu(loaded_vmcss_on_cpu, cpu));
>>> +             record_loaded_vmcs(vmx->loaded_vmcs, cpu);
>>> +             record_loaded_vmcs(&vmx->nested.current_shadow_vmcs, cpu);
>>>               crash_enable_local_vmclear(cpu);
>>>               local_irq_enable();
>>>
>>> @@ -2161,8 +2173,6 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>>>
>>>               rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
>>>               vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
>>> -
>>> -             vmx->loaded_vmcs->cpu = cpu;
>>>       }
>>
>> Is the order of setting loaded_vmcs->cpu important here? Your patch changed
>> it; I can't think of anything wrong with it, however...
>>
>
> I can't find any references to loaded_vmcs->cpu between the new
> assignment and the original assignment. I believe this change is
> okay.
>
> However, on a related note, I have preserved the existing ordering of
> VMPTRLD followed by adding the VMCS to the loaded_vmcss_on_cpu. Is
> this ordering safe in the event of a KEXEC?

So, there might be the possibility that kexec doesn't clear the recently
loaded vmcs, since it was not yet on the list? Looks like an issue to me,
but I have cc-ed Paolo to comment/confirm.

>>>       /* Setup TSC multiplier */
>>> @@ -6812,6 +6822,34 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
>>>       return 0;
>>>  }
>>>
>>> +static int setup_shadow_vmcs(struct vcpu_vmx *vmx)
>>> +{
>>> +       struct vmcs *shadow_vmcs;
>>> +       int cpu;
>>> +
>>> +       shadow_vmcs = alloc_vmcs();
>>> +       if (!shadow_vmcs)
>>> +            return -ENOMEM;
>>> +
>>> +       /* mark vmcs as shadow */
>>> +       shadow_vmcs->revision_id |= (1u << 31);
>>> +       /* init shadow vmcs */
>>> +       vmx->nested.current_shadow_vmcs.vmcs = shadow_vmcs;
>>> +       loaded_vmcs_init(&vmx->nested.current_shadow_vmcs);
>>> +
>>> +       cpu = get_cpu();
>>> +       local_irq_disable();
>>> +       crash_disable_local_vmclear(cpu);
>>> +
>>> +       record_loaded_vmcs(&vmx->nested.current_shadow_vmcs, cpu);
>>> +
>>> +       crash_enable_local_vmclear(cpu);
>>> +       local_irq_enable();
>>> +       put_cpu();
>>> +
>>> +       return 0;
>>> +}
>>> +
>>>  /*
>>>   * Emulate the VMXON instruction.
>>>   * Currently, we just remember that VMX is active, and do not save or even
>>> @@ -6824,7 +6862,6 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
>>>  {
>>>       struct kvm_segment cs;
>>>       struct vcpu_vmx *vmx = to_vmx(vcpu);
>>> -     struct vmcs *shadow_vmcs;
>>>       const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
>>>               | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
>>>
>>> @@ -6867,14 +6904,9 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
>>>       }
>>>
>>>       if (enable_shadow_vmcs) {
>>> -             shadow_vmcs = alloc_vmcs();
>>> -             if (!shadow_vmcs)
>>> -                     return -ENOMEM;
>>> -             /* mark vmcs as shadow */
>>> -             shadow_vmcs->revision_id |= (1u << 31);
>>> -             /* init shadow vmcs */
>>> -             vmcs_clear(shadow_vmcs);
>>> -             vmx->nested.current_shadow_vmcs = shadow_vmcs;
>>> +             int ret = setup_shadow_vmcs(vmx);
>>> +             if (ret)
>>> +                     return ret;
>> Nit:
>>              if (setup_shadow_vmcs(vmx))
>>                        return -ENOMEM;
>>
>> Also, isn't it better to actually add the shadow vmcs to the list
>> in handle_vmptrld than in handle_vmon? That is where shadowing is actually
>> enabled and the link pointer is set.
>>
>
> Are you suggesting that the
> shadow VMCS is on the loaded_vmcss_on_cpu list iff it is
> referenced by the link pointer in the parent? Maintaining that
> invariant will require some extra work. The shadow VMCS will have
> to be removed from the loaded_vmcss_on_cpu list in
> nested_release_vmcs12, and vmx_vcpu_load will have to check
> vmx->nested.current_vmptr before the loaded_vmcss_on_cpu
> operations involving the shadow VMCS.

Hmm... It just seemed like managing the shadow vmcs is more of a vmptrld/vmclear
operation, but that also means removing/adding vmcss every time vmclear is called,
and iirc the current code does a good job of reusing vmcss. So, I think this is
fine.

Bandan

>>>       }
>>>
>>>       INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool));
>>> @@ -6959,7 +6991,7 @@ static void free_nested(struct vcpu_vmx *vmx)
>>>       free_vpid(vmx->nested.vpid02);
>>>       nested_release_vmcs12(vmx);
>>>       if (enable_shadow_vmcs)
>>> -             free_vmcs(vmx->nested.current_shadow_vmcs);
>>> +             free_loaded_vmcs(&vmx->nested.current_shadow_vmcs);
>>>       /* Unpin physical memory we referred to in current vmcs02 */
>>>       if (vmx->nested.apic_access_page) {
>>>               nested_release_page(vmx->nested.apic_access_page);
>>> @@ -7135,7 +7167,7 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
>>>       int i;
>>>       unsigned long field;
>>>       u64 field_value;
>>> -     struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs;
>>> +     struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs.vmcs;
>>>       const unsigned long *fields = shadow_read_write_fields;
>>>       const int num_fields = max_shadow_read_write_fields;
>>>
>>> @@ -7184,7 +7216,7 @@ static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
>>>       int i, q;
>>>       unsigned long field;
>>>       u64 field_value = 0;
>>> -     struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs;
>>> +     struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs.vmcs;
>>>
>>>       vmcs_load(shadow_vmcs);
>>>
>>> @@ -7364,10 +7396,11 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
>>>               vmx->nested.current_vmcs12 = new_vmcs12;
>>>               vmx->nested.current_vmcs12_page = page;
>>>               if (enable_shadow_vmcs) {
>>> +                     struct vmcs *shadow_vmcs;
>>> +                     shadow_vmcs = vmx->nested.current_shadow_vmcs.vmcs;
>>
>> Nit: struct vmcs *shadow_vmcs =
>>                  vmx->nested.current_shadow_vmcs.vmcs;
>>
>> Bandan
>>
>>>                       vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
>>>                                     SECONDARY_EXEC_SHADOW_VMCS);
>>> -                     vmcs_write64(VMCS_LINK_POINTER,
>>> -                                  __pa(vmx->nested.current_shadow_vmcs));
>>> +                     vmcs_write64(VMCS_LINK_POINTER, __pa(shadow_vmcs));
>>>                       vmx->nested.sync_shadow_vmcs = true;
>>>               }
>>>       }

* Re: [PATCH] kvm: nVMX: Track active shadow VMCSs
  2016-07-18 23:36     ` Bandan Das
@ 2016-07-19  8:41       ` Paolo Bonzini
  2016-07-19 16:13         ` Jim Mattson
  0 siblings, 1 reply; 18+ messages in thread
From: Paolo Bonzini @ 2016-07-19  8:41 UTC (permalink / raw)
  To: Bandan Das, Jim Mattson; +Cc: kvm, David Matlack, Peter Feiner



On 19/07/2016 01:36, Bandan Das wrote:
>> However, on a related note, I have preserved the existing ordering
>> of VMPTRLD followed by adding the VMCS to the loaded_vmcss_on_cpu.
>> Is this ordering safe in the event of a KEXEC?
> 
> So, there might be the possibility that kexec doesn't clear the
> recently loaded vmcs, since it was not yet on the list? Looks like an
> issue to me, but I have cc-ed Paolo to comment/confirm.

Indeed it would be better to order them the other way round.

>> Are you suggesting that the shadow
>> VMCS is on the loaded_vmcss_on_cpu list iff it is referenced by the
>> link pointer in the parent? Maintaining that invariant will require
>> some extra work. The shadow VMCS will have to be removed from the
>> loaded_vmcss_on_cpu list in nested_release_vmcs12, and
>> vmx_vcpu_load will have to check vmx->nested.current_vmptr before
>> the loaded_vmcss_on_cpu operations involving the shadow VMCS.
> 
> Hmm... It just seemed like managing the shadow vmcs is more of a
> vmptrld/vmclear operation, but that also means removing/adding vmcss
> every time vmclear is called, and iirc the current code does a good job
> of reusing vmcss. So, I think this is fine.

Yeah, it's extra work for little reason.  Doing an extra vmclear on
kexec is not an issue.

Paolo

* Re: [PATCH] kvm: nVMX: Track active shadow VMCSs
  2016-07-19  8:41       ` Paolo Bonzini
@ 2016-07-19 16:13         ` Jim Mattson
  0 siblings, 0 replies; 18+ messages in thread
From: Jim Mattson @ 2016-07-19 16:13 UTC (permalink / raw)
  To: Paolo Bonzini; +Cc: Bandan Das, kvm, David Matlack, Peter Feiner

On Tue, Jul 19, 2016 at 1:41 AM, Paolo Bonzini <pbonzini@redhat.com> wrote:
>
> Indeed it would be better to order them the other way round.

I'll have a revised patch out shortly to handle the reordering.

> Yeah, it's extra work for little reason.  Doing an extra vmclear on
> kexec is not an issue.

There is also an extra VMCLEAR on processor migration, if the vCPU is
in VMX root operation without a current VMCS. But I still favor the
simpler approach of the original patch.

* Re: [PATCH] kvm: nVMX: Track active shadow VMCSs
  2016-07-16 19:48     ` Bandan Das
@ 2016-07-19 16:22       ` Jim Mattson
  2016-07-22 18:25         ` [PATCH v2] KVM: " Jim Mattson
  2016-10-28 15:29         ` [PATCH v2] kvm: nVMX: VMCLEAR an active shadow VMCS after last use Jim Mattson
  0 siblings, 2 replies; 18+ messages in thread
From: Jim Mattson @ 2016-07-19 16:22 UTC (permalink / raw)
  To: Bandan Das; +Cc: Paolo Bonzini, kvm, David Matlack, Peter Feiner

On Sat, Jul 16, 2016 at 12:48 PM, Bandan Das <bsd@redhat.com> wrote:
>
> This doesn't seem to be ambiguous in the context of setup_shadow_vmcs since
> it has one error path and it always returns 0 on success. Just a nit to get rid of
> that local variable...

I think it's slightly better to keep the -ENOMEM close to the
allocation failure, but I don't feel strongly either way.

* [PATCH v2] KVM: nVMX: Track active shadow VMCSs
  2016-07-19 16:22       ` Jim Mattson
@ 2016-07-22 18:25         ` Jim Mattson
  2016-07-27 20:45           ` Radim Krčmář
  2016-10-28 15:29         ` [PATCH v2] kvm: nVMX: VMCLEAR an active shadow VMCS after last use Jim Mattson
  1 sibling, 1 reply; 18+ messages in thread
From: Jim Mattson @ 2016-07-22 18:25 UTC (permalink / raw)
  To: kvm, pbonzini, bsd, dmatlack, pfeiner; +Cc: Jim Mattson

If a shadow VMCS is referenced by the VMCS link pointer in the
current VMCS, then VM-entry makes the shadow VMCS active on the
current processor. No VMCS should ever be active on more than one
processor. If a VMCS is to be migrated from one processor to
another, software should execute a VMCLEAR for the VMCS on the
first processor before the VMCS is made active on the second
processor.

We already keep track of ordinary VMCSs that are made active by
VMPTRLD. Extend that tracking to handle shadow VMCSs that are
made active by VM-entry to their parents.

Signed-off-by: Jim Mattson <jmattson@google.com>
---
 arch/x86/kvm/vmx.c | 82 ++++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 58 insertions(+), 24 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 64a79f2..4d3a332 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -398,7 +398,7 @@ struct nested_vmx {
 	/* The host-usable pointer to the above */
 	struct page *current_vmcs12_page;
 	struct vmcs12 *current_vmcs12;
-	struct vmcs *current_shadow_vmcs;
+	struct loaded_vmcs current_shadow_vmcs;
 	/*
 	 * Indicates if the shadow vmcs must be updated with the
 	 * data hold by vmcs12
@@ -2113,6 +2113,15 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
 			new.control) != old.control);
 }
 
+static void record_loaded_vmcs(struct loaded_vmcs *loaded_vmcs, int cpu)
+{
+	if (loaded_vmcs->vmcs) {
+		list_add(&loaded_vmcs->loaded_vmcss_on_cpu_link,
+			 &per_cpu(loaded_vmcss_on_cpu, cpu));
+		loaded_vmcs->cpu = cpu;
+	}
+}
+
 /*
  * Switches to specified vcpu, until a matching vcpu_put(), but assumes
  * vcpu mutex is already taken.
@@ -2124,15 +2133,13 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 	if (!vmm_exclusive)
 		kvm_cpu_vmxon(phys_addr);
-	else if (vmx->loaded_vmcs->cpu != cpu)
+	else if (vmx->loaded_vmcs->cpu != cpu) {
 		loaded_vmcs_clear(vmx->loaded_vmcs);
+		if (vmx->nested.current_shadow_vmcs.vmcs)
+			loaded_vmcs_clear(&vmx->nested.current_shadow_vmcs);
+        }
 
 	if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
-		per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
-		vmcs_load(vmx->loaded_vmcs->vmcs);
-	}
-
-	if (vmx->loaded_vmcs->cpu != cpu) {
 		struct desc_ptr *gdt = this_cpu_ptr(&host_gdt);
 		unsigned long sysenter_esp;
 
@@ -2147,11 +2154,15 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		 */
 		smp_rmb();
 
-		list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
-			 &per_cpu(loaded_vmcss_on_cpu, cpu));
+		record_loaded_vmcs(vmx->loaded_vmcs, cpu);
+		record_loaded_vmcs(&vmx->nested.current_shadow_vmcs, cpu);
+
 		crash_enable_local_vmclear(cpu);
 		local_irq_enable();
 
+		per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
+		vmcs_load(vmx->loaded_vmcs->vmcs);
+
 		/*
 		 * Linux uses per-cpu TSS and GDT, so set these when switching
 		 * processors.
@@ -2161,8 +2172,6 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 		rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
 		vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
-
-		vmx->loaded_vmcs->cpu = cpu;
 	}
 
 	/* Setup TSC multiplier */
@@ -6812,6 +6821,34 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
 	return 0;
 }
 
+static int setup_shadow_vmcs(struct vcpu_vmx *vmx)
+{
+	struct vmcs *shadow_vmcs;
+	int cpu;
+
+	shadow_vmcs = alloc_vmcs();
+	if (!shadow_vmcs)
+		return -ENOMEM;
+
+	/* mark vmcs as shadow */
+	shadow_vmcs->revision_id |= (1u << 31);
+	/* init shadow vmcs */
+	vmx->nested.current_shadow_vmcs.vmcs = shadow_vmcs;
+	loaded_vmcs_init(&vmx->nested.current_shadow_vmcs);
+
+	cpu = get_cpu();
+	local_irq_disable();
+	crash_disable_local_vmclear(cpu);
+
+	record_loaded_vmcs(&vmx->nested.current_shadow_vmcs, cpu);
+
+	crash_enable_local_vmclear(cpu);
+	local_irq_enable();
+	put_cpu();
+
+	return 0;
+}
+
 /*
  * Emulate the VMXON instruction.
  * Currently, we just remember that VMX is active, and do not save or even
@@ -6824,7 +6861,6 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
 {
 	struct kvm_segment cs;
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	struct vmcs *shadow_vmcs;
 	const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
 		| FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
 
@@ -6867,14 +6903,9 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
 	}
 
 	if (enable_shadow_vmcs) {
-		shadow_vmcs = alloc_vmcs();
-		if (!shadow_vmcs)
-			return -ENOMEM;
-		/* mark vmcs as shadow */
-		shadow_vmcs->revision_id |= (1u << 31);
-		/* init shadow vmcs */
-		vmcs_clear(shadow_vmcs);
-		vmx->nested.current_shadow_vmcs = shadow_vmcs;
+		int ret = setup_shadow_vmcs(vmx);
+		if (ret < 0)
+			return ret;
 	}
 
 	INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool));
@@ -6959,7 +6990,7 @@ static void free_nested(struct vcpu_vmx *vmx)
 	free_vpid(vmx->nested.vpid02);
 	nested_release_vmcs12(vmx);
 	if (enable_shadow_vmcs)
-		free_vmcs(vmx->nested.current_shadow_vmcs);
+		free_loaded_vmcs(&vmx->nested.current_shadow_vmcs);
 	/* Unpin physical memory we referred to in current vmcs02 */
 	if (vmx->nested.apic_access_page) {
 		nested_release_page(vmx->nested.apic_access_page);
@@ -7135,7 +7166,7 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
 	int i;
 	unsigned long field;
 	u64 field_value;
-	struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs;
+	struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs.vmcs;
 	const unsigned long *fields = shadow_read_write_fields;
 	const int num_fields = max_shadow_read_write_fields;
 
@@ -7184,7 +7215,7 @@ static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
 	int i, q;
 	unsigned long field;
 	u64 field_value = 0;
-	struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs;
+	struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs.vmcs;
 
 	vmcs_load(shadow_vmcs);
 
@@ -7364,10 +7395,13 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
 		vmx->nested.current_vmcs12 = new_vmcs12;
 		vmx->nested.current_vmcs12_page = page;
 		if (enable_shadow_vmcs) {
+			struct vmcs *shadow_vmcs;
+
+			shadow_vmcs = vmx->nested.current_shadow_vmcs.vmcs;
 			vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
 				      SECONDARY_EXEC_SHADOW_VMCS);
 			vmcs_write64(VMCS_LINK_POINTER,
-				     __pa(vmx->nested.current_shadow_vmcs));
+				     __pa(shadow_vmcs));
 			vmx->nested.sync_shadow_vmcs = true;
 		}
 	}
-- 
2.8.0.rc3.226.g39d4020


* Re: [PATCH v2] KVM: nVMX: Track active shadow VMCSs
  2016-07-22 18:25         ` [PATCH v2] KVM: " Jim Mattson
@ 2016-07-27 20:45           ` Radim Krčmář
  2016-07-27 21:53             ` Bandan Das
  0 siblings, 1 reply; 18+ messages in thread
From: Radim Krčmář @ 2016-07-27 20:45 UTC (permalink / raw)
  To: Jim Mattson; +Cc: kvm, pbonzini, bsd, dmatlack, pfeiner

2016-07-22 11:25-0700, Jim Mattson:
> If a shadow VMCS is referenced by the VMCS link pointer in the
> current VMCS, then VM-entry makes the shadow VMCS active on the
> current processor. No VMCS should ever be active on more than one
> processor. If a VMCS is to be migrated from one processor to
> another, software should execute a VMCLEAR for the VMCS on the
> first processor before the VMCS is made active on the second
> processor.
> 
> We already keep track of ordinary VMCSs that are made active by
> VMPTRLD. Extend that tracking to handle shadow VMCSs that are
> made active by VM-entry to their parents.
> 
> Signed-off-by: Jim Mattson <jmattson@google.com>
> ---
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> @@ -2113,6 +2113,15 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
>  			new.control) != old.control);
>  }
>  
> +static void record_loaded_vmcs(struct loaded_vmcs *loaded_vmcs, int cpu)
> +{
> +	if (loaded_vmcs->vmcs) {
> +		list_add(&loaded_vmcs->loaded_vmcss_on_cpu_link,
> +			 &per_cpu(loaded_vmcss_on_cpu, cpu));
> +		loaded_vmcs->cpu = cpu;
> +	}
> +}
> +
>  /*
>   * Switches to specified vcpu, until a matching vcpu_put(), but assumes
>   * vcpu mutex is already taken.
> @@ -2124,15 +2133,13 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>  
>  	if (!vmm_exclusive)
>  		kvm_cpu_vmxon(phys_addr);
> -	else if (vmx->loaded_vmcs->cpu != cpu)
> +	else if (vmx->loaded_vmcs->cpu != cpu) {
>  		loaded_vmcs_clear(vmx->loaded_vmcs);
> +		if (vmx->nested.current_shadow_vmcs.vmcs)
> +			loaded_vmcs_clear(&vmx->nested.current_shadow_vmcs);

loaded_vmcs_clear() uses an expensive IPI, so we would want to do both in
one call in future patches, as they are always active on the same CPU.
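
A rough sketch of the single-IPI variant (the helper is hypothetical; it
reuses the existing __loaded_vmcs_clear()):

	static void __vmx_vcpu_vmcs_clear(void *arg)
	{
		struct vcpu_vmx *vmx = arg;

		__loaded_vmcs_clear(vmx->loaded_vmcs);
		if (vmx->nested.current_shadow_vmcs.vmcs)
			__loaded_vmcs_clear(&vmx->nested.current_shadow_vmcs);
	}

	/* one IPI to the old CPU clears both the ordinary and shadow VMCS */
	smp_call_function_single(vmx->loaded_vmcs->cpu,
				 __vmx_vcpu_vmcs_clear, vmx, 1);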

Another possible complication is marking current_shadow_vmcs as active
on a cpu only after a successful vmlaunch.
(We don't seem to ever vmptrld shadow vmcs explicitly.)

> +        }

Incorrect whitespace for indentation.

>  
>  	if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
> -		per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
> -		vmcs_load(vmx->loaded_vmcs->vmcs);
> -	}
> -
> -	if (vmx->loaded_vmcs->cpu != cpu) {

This condition is nice for performance because a non-current vmcs is
usually already active on the same CPU, so we skip all the code below.

(This is the only thing that has to be fixed as it regresses non-nested,
 the rest are mostly ideas for simplifications.)

>  		struct desc_ptr *gdt = this_cpu_ptr(&host_gdt);
>  		unsigned long sysenter_esp;
>  
> @@ -2147,11 +2154,15 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>  		 */
>  		smp_rmb();
>  
> -		list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
> -			 &per_cpu(loaded_vmcss_on_cpu, cpu));
> +		record_loaded_vmcs(vmx->loaded_vmcs, cpu);

Adding an element to a list multiple times seems invalid, which is what the
condition was also guarding against.

> +		record_loaded_vmcs(&vmx->nested.current_shadow_vmcs, cpu);

current_shadow_vmcs is always active on the same cpu as loaded_vmcs ...
I think we could skip the list and just clear current_shadow_vmcs when
clearing its loaded_vmcs.

>  		crash_enable_local_vmclear(cpu);
>  		local_irq_enable();
>  
> +		per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
> +		vmcs_load(vmx->loaded_vmcs->vmcs);
> +
>  		/*
>  		 * Linux uses per-cpu TSS and GDT, so set these when switching
>  		 * processors.
> @@ -2161,8 +2172,6 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>  
>  		rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
>  		vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
> -
> -		vmx->loaded_vmcs->cpu = cpu;
>  	}
>  
>  	/* Setup TSC multiplier */
> @@ -6812,6 +6821,34 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
>  	return 0;
>  }
>  
> +static int setup_shadow_vmcs(struct vcpu_vmx *vmx)
> +{
> +	struct vmcs *shadow_vmcs;
> +	int cpu;
> +
> +	shadow_vmcs = alloc_vmcs();
> +	if (!shadow_vmcs)
> +		return -ENOMEM;
> +
> +	/* mark vmcs as shadow */
> +	shadow_vmcs->revision_id |= (1u << 31);
> +	/* init shadow vmcs */
> +	vmx->nested.current_shadow_vmcs.vmcs = shadow_vmcs;
> +	loaded_vmcs_init(&vmx->nested.current_shadow_vmcs);
> +
> +	cpu = get_cpu();
> +	local_irq_disable();
> +	crash_disable_local_vmclear(cpu);
> +
> +	record_loaded_vmcs(&vmx->nested.current_shadow_vmcs, cpu);

This could be avoided if we assumed that the shadow vmcs is always active
on the same CPU as its parent.  The assumption looks rock-solid, because
the shadow vmcs is
activated on vmlaunch and its linking vmcs must be active (and current)
on the same CPU.

> +
> +	crash_enable_local_vmclear(cpu);
> +	local_irq_enable();
> +	put_cpu();
> +
> +	return 0;
> +}
> +
>  /*
>   * Emulate the VMXON instruction.
>   * Currently, we just remember that VMX is active, and do not save or even
> @@ -6867,14 +6903,9 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
>  	}
>  
>  	if (enable_shadow_vmcs) {
> -		shadow_vmcs = alloc_vmcs();
> -		if (!shadow_vmcs)
> -			return -ENOMEM;
> -		/* mark vmcs as shadow */
> -		shadow_vmcs->revision_id |= (1u << 31);
> -		/* init shadow vmcs */
> -		vmcs_clear(shadow_vmcs);
> -		vmx->nested.current_shadow_vmcs = shadow_vmcs;
> +		int ret = setup_shadow_vmcs(vmx);
> +		if (ret < 0)
> +			return ret;
>  	}
>  
>  	INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool));

* Re: [PATCH v2] KVM: nVMX: Track active shadow VMCSs
  2016-07-27 20:45           ` Radim Krčmář
@ 2016-07-27 21:53             ` Bandan Das
  2016-07-28 13:30               ` Jim Mattson
  0 siblings, 1 reply; 18+ messages in thread
From: Bandan Das @ 2016-07-27 21:53 UTC (permalink / raw)
  To: Radim Krčmář; +Cc: Jim Mattson, kvm, pbonzini, dmatlack, pfeiner

Radim Krčmář <rkrcmar@redhat.com> writes:

> 2016-07-22 11:25-0700, Jim Mattson:
>> If a shadow VMCS is referenced by the VMCS link pointer in the
>> current VMCS, then VM-entry makes the shadow VMCS active on the
>> current processor. No VMCS should ever be active on more than one
>> processor. If a VMCS is to be migrated from one processor to
>> another, software should execute a VMCLEAR for the VMCS on the
>> first processor before the VMCS is made active on the second
>> processor.
>> 
>> We already keep track of ordinary VMCSs that are made active by
>> VMPTRLD. Extend that tracking to handle shadow VMCSs that are
>> made active by VM-entry to their parents.
>> 
>> Signed-off-by: Jim Mattson <jmattson@google.com>
>> ---
>> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
>> @@ -2113,6 +2113,15 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
>>  			new.control) != old.control);
>>  }
>>  
>> +static void record_loaded_vmcs(struct loaded_vmcs *loaded_vmcs, int cpu)
>> +{
>> +	if (loaded_vmcs->vmcs) {
>> +		list_add(&loaded_vmcs->loaded_vmcss_on_cpu_link,
>> +			 &per_cpu(loaded_vmcss_on_cpu, cpu));
>> +		loaded_vmcs->cpu = cpu;
>> +	}
>> +}
>> +
>>  /*
>>   * Switches to specified vcpu, until a matching vcpu_put(), but assumes
>>   * vcpu mutex is already taken.
>> @@ -2124,15 +2133,13 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>>  
>>  	if (!vmm_exclusive)
>>  		kvm_cpu_vmxon(phys_addr);
>> -	else if (vmx->loaded_vmcs->cpu != cpu)
>> +	else if (vmx->loaded_vmcs->cpu != cpu) {
>>  		loaded_vmcs_clear(vmx->loaded_vmcs);
>> +		if (vmx->nested.current_shadow_vmcs.vmcs)
>> +			loaded_vmcs_clear(&vmx->nested.current_shadow_vmcs);
>
> loaded_vmcs_clear() uses an expensive IPI, so we would want to do both in
> one call in future patches, as they are always active on the same CPU.

Maybe just move the check for an active shadow vmcs to loaded_vmcs_clear
and clear it there unconditionally.

> Another possible complication is marking current_shadow_vmcs as active
> on a cpu only after a successful vmlaunch.
> (We don't seem to ever vmptrld shadow vmcs explicitly.)
>
>> +        }
>
> Incorrect whitespace for indentation.
>
>>  
>>  	if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
>> -		per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
>> -		vmcs_load(vmx->loaded_vmcs->vmcs);
>> -	}
>> -
>> -	if (vmx->loaded_vmcs->cpu != cpu) {
>
> This condition is nice for performance because a non-current vmcs is
> usually already active on the same CPU, so we skip all the code below.
>
> (This is the only thing that has to be fixed as it regresses non-nested,
>  the rest are mostly ideas for simplifications.)

I think he wanted to make sure to call vmcs_load after the call
to crash_disable_local_vmclear(), but that should be possible without
removing the check.
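
Something like this sketch, keeping the per-cpu current_vmcs check while
doing the list update under crash-vmclear protection first (structure is
illustrative only):

	if (vmx->loaded_vmcs->cpu != cpu) {
		local_irq_disable();
		crash_disable_local_vmclear(cpu);
		smp_rmb();
		record_loaded_vmcs(vmx->loaded_vmcs, cpu);
		record_loaded_vmcs(&vmx->nested.current_shadow_vmcs, cpu);
		crash_enable_local_vmclear(cpu);
		local_irq_enable();
	}

	/* the check stays: skip VMPTRLD when this VMCS is already current */
	if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
		per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
		vmcs_load(vmx->loaded_vmcs->vmcs);
	}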

Bandan

>>  		struct desc_ptr *gdt = this_cpu_ptr(&host_gdt);
>>  		unsigned long sysenter_esp;
>>  
>> @@ -2147,11 +2154,15 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>>  		 */
>>  		smp_rmb();
>>  
>> -		list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
>> -			 &per_cpu(loaded_vmcss_on_cpu, cpu));
>> +		record_loaded_vmcs(vmx->loaded_vmcs, cpu);
>
> Adding an element to a list multiple times seems invalid, which is what
> the condition was also guarding against.
>
>> +		record_loaded_vmcs(&vmx->nested.current_shadow_vmcs, cpu);
>
> current_shadow_vmcs is always active on the same cpu as loaded_vmcs ...
> I think we could skip the list and just clear current_shadow_vmcs when
> clearing its loaded_vmcs.
>
>>  		crash_enable_local_vmclear(cpu);
>>  		local_irq_enable();
>>  
>> +		per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
>> +		vmcs_load(vmx->loaded_vmcs->vmcs);
>> +
>>  		/*
>>  		 * Linux uses per-cpu TSS and GDT, so set these when switching
>>  		 * processors.
>> @@ -2161,8 +2172,6 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>>  
>>  		rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
>>  		vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
>> -
>> -		vmx->loaded_vmcs->cpu = cpu;
>>  	}
>>  
>>  	/* Setup TSC multiplier */
>> @@ -6812,6 +6821,34 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
>>  	return 0;
>>  }
>>  
>> +static int setup_shadow_vmcs(struct vcpu_vmx *vmx)
>> +{
>> +	struct vmcs *shadow_vmcs;
>> +	int cpu;
>> +
>> +	shadow_vmcs = alloc_vmcs();
>> +	if (!shadow_vmcs)
>> +		return -ENOMEM;
>> +
>> +	/* mark vmcs as shadow */
>> +	shadow_vmcs->revision_id |= (1u << 31);
>> +	/* init shadow vmcs */
>> +	vmx->nested.current_shadow_vmcs.vmcs = shadow_vmcs;
>> +	loaded_vmcs_init(&vmx->nested.current_shadow_vmcs);
>> +
>> +	cpu = get_cpu();
>> +	local_irq_disable();
>> +	crash_disable_local_vmclear(cpu);
>> +
>> +	record_loaded_vmcs(&vmx->nested.current_shadow_vmcs, cpu);
>
> This could be avoided if we assumed that the shadow vmcs is always active
> on the same cpu as its loaded_vmcs.  The assumption looks rock-solid,
> because the shadow vmcs is activated on vmlaunch and its linking vmcs must
> be active (and current) on the same CPU.
>
>> +
>> +	crash_enable_local_vmclear(cpu);
>> +	local_irq_enable();
>> +	put_cpu();
>> +
>> +	return 0;
>> +}
>> +
>>  /*
>>   * Emulate the VMXON instruction.
>>   * Currently, we just remember that VMX is active, and do not save or even
>> @@ -6867,14 +6903,9 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
>>  	}
>>  
>>  	if (enable_shadow_vmcs) {
>> -		shadow_vmcs = alloc_vmcs();
>> -		if (!shadow_vmcs)
>> -			return -ENOMEM;
>> -		/* mark vmcs as shadow */
>> -		shadow_vmcs->revision_id |= (1u << 31);
>> -		/* init shadow vmcs */
>> -		vmcs_clear(shadow_vmcs);
>> -		vmx->nested.current_shadow_vmcs = shadow_vmcs;
>> +		int ret = setup_shadow_vmcs(vmx);
>> +		if (ret < 0)
>> +			return ret;
>>  	}
>>  
>>  	INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool));


* Re: [PATCH v2] KVM: nVMX: Track active shadow VMCSs
  2016-07-27 21:53             ` Bandan Das
@ 2016-07-28 13:30               ` Jim Mattson
  2016-07-28 14:01                 ` Radim Krčmář
  0 siblings, 1 reply; 18+ messages in thread
From: Jim Mattson @ 2016-07-28 13:30 UTC (permalink / raw)
  To: Bandan Das
  Cc: Radim Krčmář,
	kvm, Paolo Bonzini, David Matlack, Peter Feiner

I'm going to be on vacation next week, but I'll address these concerns
when I return. If time permits, I'll send out a patch this week that
just addresses the issue of doing a VMPTRLD before putting the VMCS on
the loaded VMCSs list.


* Re: [PATCH v2] KVM: nVMX: Track active shadow VMCSs
  2016-07-28 13:30               ` Jim Mattson
@ 2016-07-28 14:01                 ` Radim Krčmář
  0 siblings, 0 replies; 18+ messages in thread
From: Radim Krčmář @ 2016-07-28 14:01 UTC (permalink / raw)
  To: Jim Mattson; +Cc: Bandan Das, kvm, Paolo Bonzini, David Matlack, Peter Feiner

2016-07-28 06:30-0700, Jim Mattson:
> I'm going to be on vacation next week, but I'll address these concerns
> when I return. If time permits, I'll send out a patch this week that
> just addresses the issue of doing a VMPTRLD before putting the VMCS on
> the loaded VMCSs list.

Ah, I missed the point of that hunk.  It is not related to nVMX so
fixing it in a separate patch would be best.

Thanks.


* [PATCH v2] kvm: nVMX: VMCLEAR an active shadow VMCS after last use
  2016-07-19 16:22       ` Jim Mattson
  2016-07-22 18:25         ` [PATCH v2] KVM: " Jim Mattson
@ 2016-10-28 15:29         ` Jim Mattson
  2016-11-02 17:23           ` Paolo Bonzini
  1 sibling, 1 reply; 18+ messages in thread
From: Jim Mattson @ 2016-10-28 15:29 UTC (permalink / raw)
  To: kvm, Bandan Das, Paolo Bonzini, David Matlack, Peter Feiner; +Cc: Jim Mattson

After a successful VM-entry with the "VMCS shadowing" VM-execution
control set, the shadow VMCS referenced by the VMCS link pointer field
in the current VMCS becomes active on the logical processor.

A VMCS that is made active on more than one logical processor may become
corrupted. Therefore, before an active VMCS can be migrated to another
logical processor, the first logical processor must execute a VMCLEAR
for the active VMCS. VMCLEAR both ensures that all VMCS data are written
to memory and makes the VMCS inactive.

Signed-off-by: Jim Mattson <jmattson@google.com>
Reviewed-By: David Matlack <dmatlack@google.com>
---
 arch/x86/kvm/vmx.c | 22 +++++++++++++++-------
 1 file changed, 15 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index cf0b7be..046b03f 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -187,6 +187,7 @@ struct vmcs {
  */
 struct loaded_vmcs {
 	struct vmcs *vmcs;
+	struct vmcs *shadow_vmcs;
 	int cpu;
 	int launched;
 	struct list_head loaded_vmcss_on_cpu_link;
@@ -411,7 +412,6 @@ struct nested_vmx {
 	 * memory during VMXOFF, VMCLEAR, VMPTRLD.
 	 */
 	struct vmcs12 *cached_vmcs12;
-	struct vmcs *current_shadow_vmcs;
 	/*
 	 * Indicates if the shadow vmcs must be updated with the
 	 * data hold by vmcs12
@@ -1419,6 +1419,8 @@ static void vmcs_clear(struct vmcs *vmcs)
 static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
 {
 	vmcs_clear(loaded_vmcs->vmcs);
+	if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
+		vmcs_clear(loaded_vmcs->shadow_vmcs);
 	loaded_vmcs->cpu = -1;
 	loaded_vmcs->launched = 0;
 }
@@ -3562,6 +3564,7 @@ static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
 	loaded_vmcs_clear(loaded_vmcs);
 	free_vmcs(loaded_vmcs->vmcs);
 	loaded_vmcs->vmcs = NULL;
+	WARN_ON(loaded_vmcs->shadow_vmcs != NULL);
 }
 
 static void free_kvm_area(void)
@@ -6696,6 +6699,7 @@ static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx)
 	if (!item)
 		return NULL;
 	item->vmcs02.vmcs = alloc_vmcs();
+	item->vmcs02.shadow_vmcs = NULL;
 	if (!item->vmcs02.vmcs) {
 		kfree(item);
 		return NULL;
@@ -7072,7 +7076,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
 		shadow_vmcs->revision_id |= (1u << 31);
 		/* init shadow vmcs */
 		vmcs_clear(shadow_vmcs);
-		vmx->nested.current_shadow_vmcs = shadow_vmcs;
+		vmx->vmcs01.shadow_vmcs = shadow_vmcs;
 	}
 
 	INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool));
@@ -7174,8 +7178,11 @@ static void free_nested(struct vcpu_vmx *vmx)
 		free_page((unsigned long)vmx->nested.msr_bitmap);
 		vmx->nested.msr_bitmap = NULL;
 	}
-	if (enable_shadow_vmcs)
-		free_vmcs(vmx->nested.current_shadow_vmcs);
+	if (enable_shadow_vmcs) {
+		vmcs_clear(vmx->vmcs01.shadow_vmcs);
+		free_vmcs(vmx->vmcs01.shadow_vmcs);
+		vmx->vmcs01.shadow_vmcs = NULL;
+	}
 	kfree(vmx->nested.cached_vmcs12);
 	/* Unpin physical memory we referred to in current vmcs02 */
 	if (vmx->nested.apic_access_page) {
@@ -7352,7 +7359,7 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
 	int i;
 	unsigned long field;
 	u64 field_value;
-	struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs;
+	struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
 	const unsigned long *fields = shadow_read_write_fields;
 	const int num_fields = max_shadow_read_write_fields;
 
@@ -7401,7 +7408,7 @@ static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
 	int i, q;
 	unsigned long field;
 	u64 field_value = 0;
-	struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs;
+	struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
 
 	vmcs_load(shadow_vmcs);
 
@@ -7591,7 +7598,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
 			vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
 				      SECONDARY_EXEC_SHADOW_VMCS);
 			vmcs_write64(VMCS_LINK_POINTER,
-				     __pa(vmx->nested.current_shadow_vmcs));
+				     __pa(vmx->vmcs01.shadow_vmcs));
 			vmx->nested.sync_shadow_vmcs = true;
 		}
 	}
@@ -9156,6 +9163,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 
 	vmx->loaded_vmcs = &vmx->vmcs01;
 	vmx->loaded_vmcs->vmcs = alloc_vmcs();
+	vmx->loaded_vmcs->shadow_vmcs = NULL;
 	if (!vmx->loaded_vmcs->vmcs)
 		goto free_msrs;
 	if (!vmm_exclusive)
-- 
2.8.0.rc3.226.g39d4020



* Re: [PATCH v2] kvm: nVMX: VMCLEAR an active shadow VMCS after last use
  2016-10-28 15:29         ` [PATCH v2] kvm: nVMX: VMCLEAR an active shadow VMCS after last use Jim Mattson
@ 2016-11-02 17:23           ` Paolo Bonzini
  2016-11-02 17:56             ` Jim Mattson
  0 siblings, 1 reply; 18+ messages in thread
From: Paolo Bonzini @ 2016-11-02 17:23 UTC (permalink / raw)
  To: Jim Mattson, kvm, Bandan Das, David Matlack, Peter Feiner

On 28/10/2016 17:29, Jim Mattson wrote:
> After a successful VM-entry with the "VMCS shadowing" VM-execution
> control set, the shadow VMCS referenced by the VMCS link pointer field
> in the current VMCS becomes active on the logical processor.
> 
> A VMCS that is made active on more than one logical processor may become
> corrupted. Therefore, before an active VMCS can be migrated to another
> logical processor, the first logical processor must execute a VMCLEAR
> for the active VMCS. VMCLEAR both ensures that all VMCS data are written
> to memory and makes the VMCS inactive.
> 
> Signed-off-by: Jim Mattson <jmattson@google.com>
> Reviewed-By: David Matlack <dmatlack@google.com>

The patch looks good but---sorry for the possibly stupid
question---where was v1?

Paolo


* Re: [PATCH v2] kvm: nVMX: VMCLEAR an active shadow VMCS after last use
  2016-11-02 17:23           ` Paolo Bonzini
@ 2016-11-02 17:56             ` Jim Mattson
  2016-11-02 18:04               ` Paolo Bonzini
  0 siblings, 1 reply; 18+ messages in thread
From: Jim Mattson @ 2016-11-02 17:56 UTC (permalink / raw)
  To: Paolo Bonzini; +Cc: kvm, Bandan Das, David Matlack, Peter Feiner

Sorry; I changed the title after reworking the patch. V1 was "[PATCH]
kvm: nVMX: Track active shadow VMCSs" (15 July 2016).


* Re: [PATCH v2] kvm: nVMX: VMCLEAR an active shadow VMCS after last use
  2016-11-02 17:56             ` Jim Mattson
@ 2016-11-02 18:04               ` Paolo Bonzini
  0 siblings, 0 replies; 18+ messages in thread
From: Paolo Bonzini @ 2016-11-02 18:04 UTC (permalink / raw)
  To: Jim Mattson; +Cc: kvm, Bandan Das, David Matlack, Peter Feiner



On 02/11/2016 18:56, Jim Mattson wrote:
> Sorry; I changed the title after reworking the patch. V1 was "[PATCH]
> kvm: nVMX: Track active shadow VMCSs" (15 July 2016).

Aha, now it makes sense.  I'm queuing it for -rc4.

Thanks,

Paolo



Thread overview: 18+ messages
2016-07-15 17:39 [PATCH] kvm: nVMX: Track active shadow VMCSs Jim Mattson
2016-07-16  0:50 ` Bandan Das
2016-07-16 14:50   ` Paolo Bonzini
2016-07-16 19:48     ` Bandan Das
2016-07-19 16:22       ` Jim Mattson
2016-07-22 18:25         ` [PATCH v2] KVM: " Jim Mattson
2016-07-27 20:45           ` Radim Krčmář
2016-07-27 21:53             ` Bandan Das
2016-07-28 13:30               ` Jim Mattson
2016-07-28 14:01                 ` Radim Krčmář
2016-10-28 15:29         ` [PATCH v2] kvm: nVMX: VMCLEAR an active shadow VMCS after last use Jim Mattson
2016-11-02 17:23           ` Paolo Bonzini
2016-11-02 17:56             ` Jim Mattson
2016-11-02 18:04               ` Paolo Bonzini
2016-07-18 21:30   ` [PATCH] kvm: nVMX: Track active shadow VMCSs Jim Mattson
2016-07-18 23:36     ` Bandan Das
2016-07-19  8:41       ` Paolo Bonzini
2016-07-19 16:13         ` Jim Mattson
