All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 0/4] KVM: SVM: Virtual VMLOAD VMSAVE
@ 2017-07-05 16:48 Janakarajan Natarajan
  2017-07-05 16:48 ` [PATCH 1/4] KVM: SVM: Prepare for new bit definition in lbr_ctl Janakarajan Natarajan
                   ` (3 more replies)
  0 siblings, 4 replies; 6+ messages in thread
From: Janakarajan Natarajan @ 2017-07-05 16:48 UTC (permalink / raw)
  To: kvm; +Cc: Paolo Bonzini, Radim Krcmar, Joerg Roedel, Janakarajan Natarajan

This patchset adds support for the Virtual VMLOAD VMSAVE feature. This
feature allows the VMLOAD and VMSAVE instructions to be executed in
Guest mode without requiring a #VMEXIT. The value in RAX for VMLOAD
and VMSAVE is treated as a guest physical address and is translated to
the host physical address, and then the instruction performs its normal
operation.

This feature is enabled by setting bit 1 at position B8h in the VMCB. This
bit is set only if the hypervisor has nested paging enabled, is in 64-bit
mode and has support for the Virtual VMLOAD VMSAVE feature. Any page
faults during this will result in a normal #VMEXIT with nested page fault
as the exit code.

The advantage of this feature is a greatly reduced number of world
switches needed by the outermost hypervisor, at Current Privilege Level
(CPL) 0, to support the VMLOAD and VMSAVE instructions.

This has been tested with Xen, Hyper-V and KVM as the nested hypervisors.

Janakarajan Natarajan (4):
  KVM: SVM: Prepare for new bit definition in lbr_ctl
  KVM: SVM: Rename lbr_ctl field in the vmcb control area
  KVM: SVM: Add Virtual VMLOAD VMSAVE feature definition
  KVM: SVM: Enable Virtual VMLOAD VMSAVE feature

 arch/x86/include/asm/cpufeatures.h |  1 +
 arch/x86/include/asm/svm.h         |  5 ++++-
 arch/x86/kvm/svm.c                 | 31 ++++++++++++++++++++++++++-----
 3 files changed, 31 insertions(+), 6 deletions(-)

-- 
2.7.4

^ permalink raw reply	[flat|nested] 6+ messages in thread

* [PATCH 1/4] KVM: SVM: Prepare for new bit definition in lbr_ctl
  2017-07-05 16:48 [PATCH 0/4] KVM: SVM: Virtual VMLOAD VMSAVE Janakarajan Natarajan
@ 2017-07-05 16:48 ` Janakarajan Natarajan
  2017-07-05 16:48 ` [PATCH 2/4] KVM: SVM: Rename lbr_ctl field in the vmcb control area Janakarajan Natarajan
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 6+ messages in thread
From: Janakarajan Natarajan @ 2017-07-05 16:48 UTC (permalink / raw)
  To: kvm; +Cc: Paolo Bonzini, Radim Krcmar, Joerg Roedel, Janakarajan Natarajan

The lbr_ctl variable in the vmcb control area is used to enable or
disable Last Branch Record (LBR) virtualization. However, this is to be
done using only bit 0 of the variable. To correct this and to prepare
for a new feature, change the current usage to work only on a particular
bit.

Signed-off-by: Janakarajan Natarajan <Janakarajan.Natarajan@amd.com>
---
 arch/x86/include/asm/svm.h | 2 ++
 arch/x86/kvm/svm.c         | 4 ++--
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 14824fc..d1163f6 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -119,6 +119,8 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
 #define AVIC_ENABLE_SHIFT 31
 #define AVIC_ENABLE_MASK (1 << AVIC_ENABLE_SHIFT)
 
+#define LBR_CTL_ENABLE_MASK BIT_ULL(0)
+
 #define SVM_INTERRUPT_SHADOW_MASK 1
 
 #define SVM_IOIO_STR_SHIFT 2
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index ba9891a..219ac81 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -945,7 +945,7 @@ static void svm_enable_lbrv(struct vcpu_svm *svm)
 {
 	u32 *msrpm = svm->msrpm;
 
-	svm->vmcb->control.lbr_ctl = 1;
+	svm->vmcb->control.lbr_ctl |= LBR_CTL_ENABLE_MASK;
 	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
 	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
 	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
@@ -956,7 +956,7 @@ static void svm_disable_lbrv(struct vcpu_svm *svm)
 {
 	u32 *msrpm = svm->msrpm;
 
-	svm->vmcb->control.lbr_ctl = 0;
+	svm->vmcb->control.lbr_ctl &= ~LBR_CTL_ENABLE_MASK;
 	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
 	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
 	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 6+ messages in thread

* [PATCH 2/4] KVM: SVM: Rename lbr_ctl field in the vmcb control area
  2017-07-05 16:48 [PATCH 0/4] KVM: SVM: Virtual VMLOAD VMSAVE Janakarajan Natarajan
  2017-07-05 16:48 ` [PATCH 1/4] KVM: SVM: Prepare for new bit definition in lbr_ctl Janakarajan Natarajan
@ 2017-07-05 16:48 ` Janakarajan Natarajan
  2017-07-05 16:48 ` [PATCH 3/4] KVM: SVM: Add Virtual VMLOAD VMSAVE feature definition Janakarajan Natarajan
  2017-07-05 16:48 ` [PATCH 4/4] KVM: SVM: Enable Virtual VMLOAD VMSAVE feature Janakarajan Natarajan
  3 siblings, 0 replies; 6+ messages in thread
From: Janakarajan Natarajan @ 2017-07-05 16:48 UTC (permalink / raw)
  To: kvm; +Cc: Paolo Bonzini, Radim Krcmar, Joerg Roedel, Janakarajan Natarajan

Rename the lbr_ctl variable to better reflect the purpose of the field:
it provides support for virtualization extensions.

Signed-off-by: Janakarajan Natarajan <Janakarajan.Natarajan@amd.com>
---
 arch/x86/include/asm/svm.h |  2 +-
 arch/x86/kvm/svm.c         | 10 +++++-----
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index d1163f6..74d1393 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -83,7 +83,7 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
 	u32 event_inj;
 	u32 event_inj_err;
 	u64 nested_cr3;
-	u64 lbr_ctl;
+	u64 virt_ext;
 	u32 clean;
 	u32 reserved_5;
 	u64 next_rip;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 219ac81..eadecee 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -945,7 +945,7 @@ static void svm_enable_lbrv(struct vcpu_svm *svm)
 {
 	u32 *msrpm = svm->msrpm;
 
-	svm->vmcb->control.lbr_ctl |= LBR_CTL_ENABLE_MASK;
+	svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
 	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
 	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
 	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
@@ -956,7 +956,7 @@ static void svm_disable_lbrv(struct vcpu_svm *svm)
 {
 	u32 *msrpm = svm->msrpm;
 
-	svm->vmcb->control.lbr_ctl &= ~LBR_CTL_ENABLE_MASK;
+	svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
 	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
 	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
 	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
@@ -2649,7 +2649,7 @@ static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *fr
 	dst->event_inj            = from->event_inj;
 	dst->event_inj_err        = from->event_inj_err;
 	dst->nested_cr3           = from->nested_cr3;
-	dst->lbr_ctl              = from->lbr_ctl;
+	dst->virt_ext              = from->virt_ext;
 }
 
 static int nested_svm_vmexit(struct vcpu_svm *svm)
@@ -2955,7 +2955,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
 	/* We don't want to see VMMCALLs from a nested guest */
 	clr_intercept(svm, INTERCEPT_VMMCALL);
 
-	svm->vmcb->control.lbr_ctl = nested_vmcb->control.lbr_ctl;
+	svm->vmcb->control.virt_ext = nested_vmcb->control.virt_ext;
 	svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
 	svm->vmcb->control.int_state = nested_vmcb->control.int_state;
 	svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
@@ -4064,7 +4064,7 @@ static void dump_vmcb(struct kvm_vcpu *vcpu)
 	pr_err("%-20s%016llx\n", "avic_vapic_bar:", control->avic_vapic_bar);
 	pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
 	pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
-	pr_err("%-20s%lld\n", "lbr_ctl:", control->lbr_ctl);
+	pr_err("%-20s%lld\n", "virt_ext:", control->virt_ext);
 	pr_err("%-20s%016llx\n", "next_rip:", control->next_rip);
 	pr_err("%-20s%016llx\n", "avic_backing_page:", control->avic_backing_page);
 	pr_err("%-20s%016llx\n", "avic_logical_id:", control->avic_logical_id);
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 6+ messages in thread

* [PATCH 3/4] KVM: SVM: Add Virtual VMLOAD VMSAVE feature definition
  2017-07-05 16:48 [PATCH 0/4] KVM: SVM: Virtual VMLOAD VMSAVE Janakarajan Natarajan
  2017-07-05 16:48 ` [PATCH 1/4] KVM: SVM: Prepare for new bit definition in lbr_ctl Janakarajan Natarajan
  2017-07-05 16:48 ` [PATCH 2/4] KVM: SVM: Rename lbr_ctl field in the vmcb control area Janakarajan Natarajan
@ 2017-07-05 16:48 ` Janakarajan Natarajan
  2017-07-05 16:48 ` [PATCH 4/4] KVM: SVM: Enable Virtual VMLOAD VMSAVE feature Janakarajan Natarajan
  3 siblings, 0 replies; 6+ messages in thread
From: Janakarajan Natarajan @ 2017-07-05 16:48 UTC (permalink / raw)
  To: kvm; +Cc: Paolo Bonzini, Radim Krcmar, Joerg Roedel, Janakarajan Natarajan

Define a new cpufeature definition for Virtual VMLOAD VMSAVE.

Signed-off-by: Janakarajan Natarajan <Janakarajan.Natarajan@amd.com>
---
 arch/x86/include/asm/cpufeatures.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 2701e5f..ca3c48c 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -286,6 +286,7 @@
 #define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */
 #define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */
 #define X86_FEATURE_AVIC	(15*32+13) /* Virtual Interrupt Controller */
+#define X86_FEATURE_VIRTUAL_VMLOAD_VMSAVE (15*32+15) /* Virtual VMLOAD VMSAVE */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */
 #define X86_FEATURE_AVX512VBMI  (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 6+ messages in thread

* [PATCH 4/4] KVM: SVM: Enable Virtual VMLOAD VMSAVE feature
  2017-07-05 16:48 [PATCH 0/4] KVM: SVM: Virtual VMLOAD VMSAVE Janakarajan Natarajan
                   ` (2 preceding siblings ...)
  2017-07-05 16:48 ` [PATCH 3/4] KVM: SVM: Add Virtual VMLOAD VMSAVE feature definition Janakarajan Natarajan
@ 2017-07-05 16:48 ` Janakarajan Natarajan
  2017-07-05 16:53   ` Paolo Bonzini
  3 siblings, 1 reply; 6+ messages in thread
From: Janakarajan Natarajan @ 2017-07-05 16:48 UTC (permalink / raw)
  To: kvm; +Cc: Paolo Bonzini, Radim Krcmar, Joerg Roedel, Janakarajan Natarajan

Enable the Virtual VMLOAD VMSAVE feature. This is done by setting bit 1
at position B8h in the vmcb.

The processor must have nested paging enabled, be in 64-bit mode and
have support for the Virtual VMLOAD VMSAVE feature for the bit to be set
in the vmcb.

Signed-off-by: Janakarajan Natarajan <Janakarajan.Natarajan@amd.com>
---
 arch/x86/include/asm/svm.h |  1 +
 arch/x86/kvm/svm.c         | 21 +++++++++++++++++++++
 2 files changed, 22 insertions(+)

diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 74d1393..58fffe7 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -120,6 +120,7 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
 #define AVIC_ENABLE_MASK (1 << AVIC_ENABLE_SHIFT)
 
 #define LBR_CTL_ENABLE_MASK BIT_ULL(0)
+#define VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK BIT_ULL(1)
 
 #define SVM_INTERRUPT_SHADOW_MASK 1
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index eadecee..ae73e7c 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -275,6 +275,9 @@ static int avic;
 module_param(avic, int, S_IRUGO);
 #endif
 
+/* enable/disable Virtual VMLOAD VMSAVE */
+static bool has_vls = false;
+
 /* AVIC VM ID bit masks and lock */
 static DECLARE_BITMAP(avic_vm_id_bitmap, AVIC_VM_ID_NR);
 static DEFINE_SPINLOCK(avic_vm_id_lock);
@@ -1079,6 +1082,14 @@ static __init int svm_hardware_setup(void)
 		}
 	}
 
+	if (npt_enabled) {
+		if (boot_cpu_has(X86_FEATURE_VIRTUAL_VMLOAD_VMSAVE) &&
+		    IS_ENABLED(CONFIG_X86_64)) {
+			pr_info("Virtual VMLOAD VMSAVE supported\n");
+			has_vls = true;
+		}
+	}
+
 	return 0;
 
 err:
@@ -1266,6 +1277,16 @@ static void init_vmcb(struct vcpu_svm *svm)
 	if (avic)
 		avic_init_vmcb(svm);
 
+	/*
+	 * If hardware supports Virtual VMLOAD VMSAVE then enable it
+	 * in VMCB and clear intercepts to avoid #VMEXIT.
+	 */
+	if (has_vls) {
+		clr_intercept(svm, INTERCEPT_VMLOAD);
+		clr_intercept(svm, INTERCEPT_VMSAVE);
+		svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
+	}
+
 	mark_all_dirty(svm->vmcb);
 
 	enable_gif(svm);
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 6+ messages in thread

* Re: [PATCH 4/4] KVM: SVM: Enable Virtual VMLOAD VMSAVE feature
  2017-07-05 16:48 ` [PATCH 4/4] KVM: SVM: Enable Virtual VMLOAD VMSAVE feature Janakarajan Natarajan
@ 2017-07-05 16:53   ` Paolo Bonzini
  0 siblings, 0 replies; 6+ messages in thread
From: Paolo Bonzini @ 2017-07-05 16:53 UTC (permalink / raw)
  To: Janakarajan Natarajan, kvm; +Cc: Radim Krcmar, Joerg Roedel



On 05/07/2017 18:48, Janakarajan Natarajan wrote:
> Enable the Virtual VMLOAD VMSAVE feature. This is done by setting bit 1
> at position B8h in the vmcb.
> 
> The processor must have nested paging enabled, be in 64-bit mode and
> have support for the Virtual VMLOAD VMSAVE feature for the bit to be set
> in the vmcb.
> 
> Signed-off-by: Janakarajan Natarajan <Janakarajan.Natarajan@amd.com>
> ---
>  arch/x86/include/asm/svm.h |  1 +
>  arch/x86/kvm/svm.c         | 21 +++++++++++++++++++++
>  2 files changed, 22 insertions(+)
> 
> diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
> index 74d1393..58fffe7 100644
> --- a/arch/x86/include/asm/svm.h
> +++ b/arch/x86/include/asm/svm.h
> @@ -120,6 +120,7 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
>  #define AVIC_ENABLE_MASK (1 << AVIC_ENABLE_SHIFT)
>  
>  #define LBR_CTL_ENABLE_MASK BIT_ULL(0)
> +#define VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK BIT_ULL(1)
>  
>  #define SVM_INTERRUPT_SHADOW_MASK 1
>  
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index eadecee..ae73e7c 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -275,6 +275,9 @@ static int avic;
>  module_param(avic, int, S_IRUGO);
>  #endif
>  
> +/* enable/disable Virtual VMLOAD VMSAVE */
> +static bool has_vls = false;

Please make this a module parameter.  Initialize the variable to true...

>  /* AVIC VM ID bit masks and lock */
>  static DECLARE_BITMAP(avic_vm_id_bitmap, AVIC_VM_ID_NR);
>  static DEFINE_SPINLOCK(avic_vm_id_lock);
> @@ -1079,6 +1082,14 @@ static __init int svm_hardware_setup(void)
>  		}
>  	}
>  
> +	if (npt_enabled) {
> +		if (boot_cpu_has(X86_FEATURE_VIRTUAL_VMLOAD_VMSAVE) &&
> +		    IS_ENABLED(CONFIG_X86_64)) {

... and reset it here if !npt_enabled ||
!boot_cpu_has(X86_FEATURE_VIRTUAL_VMLOAD_VMSAVE) ||
!IS_ENABLED(CONFIG_X86_64).

Paolo

> +			pr_info("Virtual VMLOAD VMSAVE supported\n");
> +			has_vls = true;
> +		}
> +	}
> +
>  	return 0;
>  
>  err:
> @@ -1266,6 +1277,16 @@ static void init_vmcb(struct vcpu_svm *svm)
>  	if (avic)
>  		avic_init_vmcb(svm);
>  
> +	/*
> +	 * If hardware supports Virtual VMLOAD VMSAVE then enable it
> +	 * in VMCB and clear intercepts to avoid #VMEXIT.
> +	 */
> +	if (has_vls) {
> +		clr_intercept(svm, INTERCEPT_VMLOAD);
> +		clr_intercept(svm, INTERCEPT_VMSAVE);
> +		svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
> +	}
> +
>  	mark_all_dirty(svm->vmcb);
>  
>  	enable_gif(svm);
> 

^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2017-07-05 16:53 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-07-05 16:48 [PATCH 0/4] KVM: SVM: Virtual VMLOAD VMSAVE Janakarajan Natarajan
2017-07-05 16:48 ` [PATCH 1/4] KVM: SVM: Prepare for new bit definition in lbr_ctl Janakarajan Natarajan
2017-07-05 16:48 ` [PATCH 2/4] KVM: SVM: Rename lbr_ctl field in the vmcb control area Janakarajan Natarajan
2017-07-05 16:48 ` [PATCH 3/4] KVM: SVM: Add Virtual VMLOAD VMSAVE feature definition Janakarajan Natarajan
2017-07-05 16:48 ` [PATCH 4/4] KVM: SVM: Enable Virtual VMLOAD VMSAVE feature Janakarajan Natarajan
2017-07-05 16:53   ` Paolo Bonzini

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.