* [Patch 1/6] KVM: VMX: Remove unneeded check for X86_FEATURE_XSAVE
@ 2019-10-09  0:41 Aaron Lewis
  2019-10-09  0:41 ` [Patch 2/6] KVM: VMX: Use wrmsr for switching between guest and host IA32_XSS Aaron Lewis
                   ` (4 more replies)
  0 siblings, 5 replies; 25+ messages in thread
From: Aaron Lewis @ 2019-10-09  0:41 UTC (permalink / raw)
  To: Babu Moger, Yang Weijiang, Sebastian Andrzej Siewior, kvm
  Cc: Paolo Bonzini, Aaron Lewis, Jim Mattson

The SDM says that IA32_XSS is supported "If(CPUID.(0DH, 1):EAX.[3] = 1)",
so only the X86_FEATURE_XSAVES check is necessary.

Fixes: 4d763b168e9c5 ("KVM: VMX: check CPUID before allowing read/write of IA32_XSS")
Reviewed-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Aaron Lewis <aaronlewis@google.com>
---
 arch/x86/kvm/vmx/vmx.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index e7970a2e8eae..409e9a7323f1 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1823,8 +1823,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_IA32_XSS:
 		if (!vmx_xsaves_supported() ||
 		    (!msr_info->host_initiated &&
-		     !(guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
-		       guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))))
+		     !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES)))
 			return 1;
 		msr_info->data = vcpu->arch.ia32_xss;
 		break;
@@ -2066,8 +2065,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_IA32_XSS:
 		if (!vmx_xsaves_supported() ||
 		    (!msr_info->host_initiated &&
-		     !(guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
-		       guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))))
+		     !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES)))
 			return 1;
 		/*
 		 * The only supported bit as of Skylake is bit 8, but
-- 
2.23.0.581.g78d2f28ef7-goog



* [Patch 2/6] KVM: VMX: Use wrmsr for switching between guest and host IA32_XSS
  2019-10-09  0:41 [Patch 1/6] KVM: VMX: Remove unneeded check for X86_FEATURE_XSAVE Aaron Lewis
@ 2019-10-09  0:41 ` Aaron Lewis
  2019-10-09  4:49   ` Yang Weijiang
                     ` (2 more replies)
  2019-10-09  0:41 ` [Patch 3/6] kvm: svm: Add support for XSAVES on AMD Aaron Lewis
                   ` (3 subsequent siblings)
  4 siblings, 3 replies; 25+ messages in thread
From: Aaron Lewis @ 2019-10-09  0:41 UTC (permalink / raw)
  To: Babu Moger, Yang Weijiang, Sebastian Andrzej Siewior, kvm
  Cc: Paolo Bonzini, Aaron Lewis, Jim Mattson

Set IA32_XSS for the guest and host during VM Enter and VM Exit
transitions rather than by using the MSR-load areas.

Reviewed-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Aaron Lewis <aaronlewis@google.com>
---
 arch/x86/kvm/svm.c     |  4 ++--
 arch/x86/kvm/vmx/vmx.c | 14 ++------------
 arch/x86/kvm/x86.c     | 25 +++++++++++++++++++++----
 arch/x86/kvm/x86.h     |  4 ++--
 4 files changed, 27 insertions(+), 20 deletions(-)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index f8ecb6df5106..e2d7a7738c76 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -5628,7 +5628,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	svm->vmcb->save.cr2 = vcpu->arch.cr2;
 
 	clgi();
-	kvm_load_guest_xcr0(vcpu);
+	kvm_load_guest_xsave_controls(vcpu);
 
 	if (lapic_in_kernel(vcpu) &&
 		vcpu->arch.apic->lapic_timer.timer_advance_ns)
@@ -5778,7 +5778,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
 		kvm_before_interrupt(&svm->vcpu);
 
-	kvm_put_guest_xcr0(vcpu);
+	kvm_load_host_xsave_controls(vcpu);
 	stgi();
 
 	/* Any pending NMI will happen here */
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 409e9a7323f1..ff5ba28abecb 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -106,8 +106,6 @@ module_param(enable_apicv, bool, S_IRUGO);
 static bool __read_mostly nested = 1;
 module_param(nested, bool, S_IRUGO);
 
-static u64 __read_mostly host_xss;
-
 bool __read_mostly enable_pml = 1;
 module_param_named(pml, enable_pml, bool, S_IRUGO);
 
@@ -2074,11 +2072,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (data != 0)
 			return 1;
 		vcpu->arch.ia32_xss = data;
-		if (vcpu->arch.ia32_xss != host_xss)
-			add_atomic_switch_msr(vmx, MSR_IA32_XSS,
-				vcpu->arch.ia32_xss, host_xss, false);
-		else
-			clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
 		break;
 	case MSR_IA32_RTIT_CTL:
 		if ((pt_mode != PT_MODE_HOST_GUEST) ||
@@ -6540,7 +6533,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
 		vmx_set_interrupt_shadow(vcpu, 0);
 
-	kvm_load_guest_xcr0(vcpu);
+	kvm_load_guest_xsave_controls(vcpu);
 
 	if (static_cpu_has(X86_FEATURE_PKU) &&
 	    kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
@@ -6647,7 +6640,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 			__write_pkru(vmx->host_pkru);
 	}
 
-	kvm_put_guest_xcr0(vcpu);
+	kvm_load_host_xsave_controls(vcpu);
 
 	vmx->nested.nested_run_pending = 0;
 	vmx->idt_vectoring_info = 0;
@@ -7599,9 +7592,6 @@ static __init int hardware_setup(void)
 		WARN_ONCE(host_bndcfgs, "KVM: BNDCFGS in host will be lost");
 	}
 
-	if (boot_cpu_has(X86_FEATURE_XSAVES))
-		rdmsrl(MSR_IA32_XSS, host_xss);
-
 	if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() ||
 	    !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global()))
 		enable_vpid = 0;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 661e2bf38526..e90e658fd8a9 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -176,6 +176,8 @@ struct kvm_shared_msrs {
 static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
 static struct kvm_shared_msrs __percpu *shared_msrs;
 
+static u64 __read_mostly host_xss;
+
 struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "pf_fixed", VCPU_STAT(pf_fixed) },
 	{ "pf_guest", VCPU_STAT(pf_guest) },
@@ -812,27 +814,39 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
 }
 EXPORT_SYMBOL_GPL(kvm_lmsw);
 
-void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
+void kvm_load_guest_xsave_controls(struct kvm_vcpu *vcpu)
 {
 	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
 			!vcpu->guest_xcr0_loaded) {
 		/* kvm_set_xcr() also depends on this */
 		if (vcpu->arch.xcr0 != host_xcr0)
 			xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
+
+		if (kvm_x86_ops->xsaves_supported() &&
+		    guest_cpuid_has(vcpu, X86_FEATURE_XSAVES) &&
+		    vcpu->arch.ia32_xss != host_xss)
+			wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss);
+
 		vcpu->guest_xcr0_loaded = 1;
 	}
 }
-EXPORT_SYMBOL_GPL(kvm_load_guest_xcr0);
+EXPORT_SYMBOL_GPL(kvm_load_guest_xsave_controls);
 
-void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
+void kvm_load_host_xsave_controls(struct kvm_vcpu *vcpu)
 {
 	if (vcpu->guest_xcr0_loaded) {
 		if (vcpu->arch.xcr0 != host_xcr0)
 			xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
+
+		if (kvm_x86_ops->xsaves_supported() &&
+		    guest_cpuid_has(vcpu, X86_FEATURE_XSAVES) &&
+		    vcpu->arch.ia32_xss != host_xss)
+			wrmsrl(MSR_IA32_XSS, host_xss);
+
 		vcpu->guest_xcr0_loaded = 0;
 	}
 }
-EXPORT_SYMBOL_GPL(kvm_put_guest_xcr0);
+EXPORT_SYMBOL_GPL(kvm_load_host_xsave_controls);
 
 static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 {
@@ -9293,6 +9307,9 @@ int kvm_arch_hardware_setup(void)
 		kvm_default_tsc_scaling_ratio = 1ULL << kvm_tsc_scaling_ratio_frac_bits;
 	}
 
+	if (boot_cpu_has(X86_FEATURE_XSAVES))
+		rdmsrl(MSR_IA32_XSS, host_xss);
+
 	kvm_init_msr_list();
 	return 0;
 }
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index dbf7442a822b..0d04e865665b 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -366,7 +366,7 @@ static inline bool kvm_pat_valid(u64 data)
 	return (data | ((data & 0x0202020202020202ull) << 1)) == data;
 }
 
-void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu);
-void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu);
+void kvm_load_guest_xsave_controls(struct kvm_vcpu *vcpu);
+void kvm_load_host_xsave_controls(struct kvm_vcpu *vcpu);
 
 #endif
-- 
2.23.0.581.g78d2f28ef7-goog



* [Patch 3/6] kvm: svm: Add support for XSAVES on AMD
  2019-10-09  0:41 [Patch 1/6] KVM: VMX: Remove unneeded check for X86_FEATURE_XSAVE Aaron Lewis
  2019-10-09  0:41 ` [Patch 2/6] KVM: VMX: Use wrmsr for switching between guest and host IA32_XSS Aaron Lewis
@ 2019-10-09  0:41 ` Aaron Lewis
  2019-10-09  6:44   ` Sebastian Andrzej Siewior
  2019-10-09  7:01   ` Paolo Bonzini
  2019-10-09  0:41 ` [Patch 4/6] kvm: svm: Enumerate XSAVES in guest CPUID when it is available to the guest Aaron Lewis
                   ` (2 subsequent siblings)
  4 siblings, 2 replies; 25+ messages in thread
From: Aaron Lewis @ 2019-10-09  0:41 UTC (permalink / raw)
  To: Babu Moger, Yang Weijiang, Sebastian Andrzej Siewior, kvm
  Cc: Paolo Bonzini, Aaron Lewis, Jim Mattson

Hoist support for IA32_XSS so it can be used for both AMD and Intel,
instead of for just Intel.

AMD has no equivalent of Intel's "Enable XSAVES/XRSTORS" VM-execution
control. Instead, XSAVES is always available to the guest when supported
on the host.

Reviewed-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Aaron Lewis <aaronlewis@google.com>
---
 arch/x86/kvm/svm.c     |  2 +-
 arch/x86/kvm/vmx/vmx.c | 20 --------------------
 arch/x86/kvm/x86.c     | 16 ++++++++++++++++
 3 files changed, 17 insertions(+), 21 deletions(-)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index e2d7a7738c76..65223827c675 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -5962,7 +5962,7 @@ static bool svm_mpx_supported(void)
 
 static bool svm_xsaves_supported(void)
 {
-	return false;
+	return boot_cpu_has(X86_FEATURE_XSAVES);
 }
 
 static bool svm_umip_emulated(void)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index ff5ba28abecb..bd4ce33bd52f 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1818,13 +1818,6 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			return 1;
 		return vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index,
 				       &msr_info->data);
-	case MSR_IA32_XSS:
-		if (!vmx_xsaves_supported() ||
-		    (!msr_info->host_initiated &&
-		     !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES)))
-			return 1;
-		msr_info->data = vcpu->arch.ia32_xss;
-		break;
 	case MSR_IA32_RTIT_CTL:
 		if (pt_mode != PT_MODE_HOST_GUEST)
 			return 1;
@@ -2060,19 +2053,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (!nested_vmx_allowed(vcpu))
 			return 1;
 		return vmx_set_vmx_msr(vcpu, msr_index, data);
-	case MSR_IA32_XSS:
-		if (!vmx_xsaves_supported() ||
-		    (!msr_info->host_initiated &&
-		     !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES)))
-			return 1;
-		/*
-		 * The only supported bit as of Skylake is bit 8, but
-		 * it is not supported on KVM.
-		 */
-		if (data != 0)
-			return 1;
-		vcpu->arch.ia32_xss = data;
-		break;
 	case MSR_IA32_RTIT_CTL:
 		if ((pt_mode != PT_MODE_HOST_GUEST) ||
 			vmx_rtit_ctl_check(vcpu, data) ||
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e90e658fd8a9..77f2e8c05047 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2702,6 +2702,15 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_IA32_TSC:
 		kvm_write_tsc(vcpu, msr_info);
 		break;
+	case MSR_IA32_XSS:
+		if (!kvm_x86_ops->xsaves_supported() ||
+		    (!msr_info->host_initiated &&
+		     !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES)))
+			return 1;
+		if (data != 0)
+			return 1;
+		vcpu->arch.ia32_xss = data;
+		break;
 	case MSR_SMI_COUNT:
 		if (!msr_info->host_initiated)
 			return 1;
@@ -3032,6 +3041,13 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
 		return get_msr_mce(vcpu, msr_info->index, &msr_info->data,
 				   msr_info->host_initiated);
+	case MSR_IA32_XSS:
+		if (!kvm_x86_ops->xsaves_supported() ||
+		    (!msr_info->host_initiated &&
+		     !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES)))
+			return 1;
+		msr_info->data = vcpu->arch.ia32_xss;
+		break;
 	case MSR_K7_CLK_CTL:
 		/*
 		 * Provide expected ramp-up count for K7. All other
-- 
2.23.0.581.g78d2f28ef7-goog



* [Patch 4/6] kvm: svm: Enumerate XSAVES in guest CPUID when it is available to the guest
  2019-10-09  0:41 [Patch 1/6] KVM: VMX: Remove unneeded check for X86_FEATURE_XSAVE Aaron Lewis
  2019-10-09  0:41 ` [Patch 2/6] KVM: VMX: Use wrmsr for switching between guest and host IA32_XSS Aaron Lewis
  2019-10-09  0:41 ` [Patch 3/6] kvm: svm: Add support for XSAVES on AMD Aaron Lewis
@ 2019-10-09  0:41 ` Aaron Lewis
  2019-10-09  1:42   ` Yang Weijiang
  2019-10-09  0:41 ` [Patch 5/6] kvm: x86: Add IA32_XSS to the emulated_msrs list Aaron Lewis
  2019-10-09  0:41 ` [Patch 6/6] kvm: tests: Add test to verify MSR_IA32_XSS Aaron Lewis
  4 siblings, 1 reply; 25+ messages in thread
From: Aaron Lewis @ 2019-10-09  0:41 UTC (permalink / raw)
  To: Babu Moger, Yang Weijiang, Sebastian Andrzej Siewior, kvm
  Cc: Paolo Bonzini, Aaron Lewis, Jim Mattson

Add the function guest_cpuid_set() to allow a bit in the guest cpuid to
be set.  This is complementary to the guest_cpuid_clear() function.

Also, set the XSAVES bit in the guest cpuid if the host has the same bit
set and the guest has the XSAVE bit set.  This is to ensure that XSAVES
will be enumerated in the guest CPUID if XSAVES can be used in the guest.

Reviewed-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Aaron Lewis <aaronlewis@google.com>
---
 arch/x86/kvm/cpuid.h | 9 +++++++++
 arch/x86/kvm/svm.c   | 4 ++++
 2 files changed, 13 insertions(+)

diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index d78a61408243..420ceea02fd1 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -113,6 +113,15 @@ static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu, unsigned x8
 		*reg &= ~bit(x86_feature);
 }
 
+static __always_inline void guest_cpuid_set(struct kvm_vcpu *vcpu, unsigned x86_feature)
+{
+	int *reg;
+
+	reg = guest_cpuid_get_register(vcpu, x86_feature);
+	if (reg)
+		*reg |= ~bit(x86_feature);
+}
+
 static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 65223827c675..2522a467bbc0 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -5887,6 +5887,10 @@ static void svm_cpuid_update(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
+	if (guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
+	    boot_cpu_has(X86_FEATURE_XSAVES))
+		guest_cpuid_set(vcpu, X86_FEATURE_XSAVES);
+
 	/* Update nrips enabled cache */
 	svm->nrips_enabled = !!guest_cpuid_has(&svm->vcpu, X86_FEATURE_NRIPS);
 
-- 
2.23.0.581.g78d2f28ef7-goog



* [Patch 5/6] kvm: x86: Add IA32_XSS to the emulated_msrs list
  2019-10-09  0:41 [Patch 1/6] KVM: VMX: Remove unneeded check for X86_FEATURE_XSAVE Aaron Lewis
                   ` (2 preceding siblings ...)
  2019-10-09  0:41 ` [Patch 4/6] kvm: svm: Enumerate XSAVES in guest CPUID when it is available to the guest Aaron Lewis
@ 2019-10-09  0:41 ` Aaron Lewis
  2019-10-09  0:41 ` [Patch 6/6] kvm: tests: Add test to verify MSR_IA32_XSS Aaron Lewis
  4 siblings, 0 replies; 25+ messages in thread
From: Aaron Lewis @ 2019-10-09  0:41 UTC (permalink / raw)
  To: Babu Moger, Yang Weijiang, Sebastian Andrzej Siewior, kvm
  Cc: Paolo Bonzini, Aaron Lewis, Jim Mattson

Add IA32_XSS to the list of emulated MSRs if it is supported in the
guest.

Reviewed-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Aaron Lewis <aaronlewis@google.com>
---
 arch/x86/kvm/svm.c     | 12 +++++++-----
 arch/x86/kvm/vmx/vmx.c |  2 ++
 arch/x86/kvm/x86.c     |  1 +
 3 files changed, 10 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 2522a467bbc0..8de6705ac30d 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -498,6 +498,11 @@ static inline bool avic_vcpu_is_running(struct kvm_vcpu *vcpu)
 	return (READ_ONCE(*entry) & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
 }
 
+static bool svm_xsaves_supported(void)
+{
+	return boot_cpu_has(X86_FEATURE_XSAVES);
+}
+
 static void recalc_intercepts(struct vcpu_svm *svm)
 {
 	struct vmcb_control_area *c, *h;
@@ -5871,6 +5876,8 @@ static bool svm_has_emulated_msr(int index)
 	case MSR_IA32_MCG_EXT_CTL:
 	case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
 		return false;
+	case MSR_IA32_XSS:
+		return svm_xsaves_supported();
 	default:
 		break;
 	}
@@ -5964,11 +5971,6 @@ static bool svm_mpx_supported(void)
 	return false;
 }
 
-static bool svm_xsaves_supported(void)
-{
-	return boot_cpu_has(X86_FEATURE_XSAVES);
-}
-
 static bool svm_umip_emulated(void)
 {
 	return false;
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index bd4ce33bd52f..c28461385c2b 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6270,6 +6270,8 @@ static bool vmx_has_emulated_msr(int index)
 	case MSR_AMD64_VIRT_SPEC_CTRL:
 		/* This is AMD only.  */
 		return false;
+	case MSR_IA32_XSS:
+		return vmx_xsaves_supported();
 	default:
 		return true;
 	}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 77f2e8c05047..243c6df12d81 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1229,6 +1229,7 @@ static u32 emulated_msrs[] = {
 	MSR_MISC_FEATURES_ENABLES,
 	MSR_AMD64_VIRT_SPEC_CTRL,
 	MSR_IA32_POWER_CTL,
+	MSR_IA32_XSS,
 
 	/*
 	 * The following list leaves out MSRs whose values are determined
-- 
2.23.0.581.g78d2f28ef7-goog



* [Patch 6/6] kvm: tests: Add test to verify MSR_IA32_XSS
  2019-10-09  0:41 [Patch 1/6] KVM: VMX: Remove unneeded check for X86_FEATURE_XSAVE Aaron Lewis
                   ` (3 preceding siblings ...)
  2019-10-09  0:41 ` [Patch 5/6] kvm: x86: Add IA32_XSS to the emulated_msrs list Aaron Lewis
@ 2019-10-09  0:41 ` Aaron Lewis
  2019-10-09  6:30   ` Paolo Bonzini
  4 siblings, 1 reply; 25+ messages in thread
From: Aaron Lewis @ 2019-10-09  0:41 UTC (permalink / raw)
  To: Babu Moger, Yang Weijiang, Sebastian Andrzej Siewior, kvm
  Cc: Paolo Bonzini, Aaron Lewis, Jim Mattson

Verify that calling get and set for MSR_IA32_XSS returns expected results.

Reviewed-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Aaron Lewis <aaronlewis@google.com>
---
 tools/testing/selftests/kvm/.gitignore        |  1 +
 tools/testing/selftests/kvm/Makefile          |  1 +
 .../selftests/kvm/include/x86_64/processor.h  |  7 +-
 .../selftests/kvm/lib/x86_64/processor.c      | 69 ++++++++++++++++---
 .../selftests/kvm/x86_64/xss_msr_test.c       | 65 +++++++++++++++++
 5 files changed, 134 insertions(+), 9 deletions(-)
 create mode 100644 tools/testing/selftests/kvm/x86_64/xss_msr_test.c

diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore
index b35da375530a..6e9ec34f8124 100644
--- a/tools/testing/selftests/kvm/.gitignore
+++ b/tools/testing/selftests/kvm/.gitignore
@@ -11,6 +11,7 @@
 /x86_64/vmx_close_while_nested_test
 /x86_64/vmx_set_nested_state_test
 /x86_64/vmx_tsc_adjust_test
+/x86_64/xss_msr_test
 /clear_dirty_log_test
 /dirty_log_test
 /kvm_create_max_vcpus
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index c5ec868fa1e5..3138a916574a 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -25,6 +25,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_dirty_log_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_set_nested_state_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test
+TEST_GEN_PROGS_x86_64 += x86_64/xss_msr_test
 TEST_GEN_PROGS_x86_64 += clear_dirty_log_test
 TEST_GEN_PROGS_x86_64 += dirty_log_test
 TEST_GEN_PROGS_x86_64 += kvm_create_max_vcpus
diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index ff234018219c..1dc55eea756a 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -308,6 +308,8 @@ struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid);
 void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid,
 		     struct kvm_x86_state *state);
 
+struct kvm_msr_list *kvm_get_msr_index_list(void);
+
 struct kvm_cpuid2 *kvm_get_supported_cpuid(void);
 void vcpu_set_cpuid(struct kvm_vm *vm, uint32_t vcpuid,
 		    struct kvm_cpuid2 *cpuid);
@@ -324,8 +326,11 @@ kvm_get_supported_cpuid_entry(uint32_t function)
 uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index);
 void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
 	  	  uint64_t msr_value);
+void vcpu_set_msr_expect_result(struct kvm_vm *vm, uint32_t vcpuid,
+		  uint64_t msr_index, uint64_t msr_value, int result);
 
-uint32_t kvm_get_cpuid_max(void);
+uint32_t kvm_get_cpuid_max_basic(void);
+uint32_t kvm_get_cpuid_max_extended(void);
 void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits);
 
 /*
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index 6698cb741e10..425262e15afa 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -869,13 +869,14 @@ uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index)
 	return buffer.entry.data;
 }
 
-/* VCPU Set MSR
+/* VCPU Set MSR Expect Result
  *
  * Input Args:
  *   vm - Virtual Machine
  *   vcpuid - VCPU ID
  *   msr_index - Index of MSR
  *   msr_value - New value of MSR
+ *   result - The expected result of KVM_SET_MSRS
  *
  * Output Args: None
  *
@@ -883,8 +884,9 @@ uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index)
  *
  * Set value of MSR for VCPU.
  */
-void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
-	uint64_t msr_value)
+void vcpu_set_msr_expect_result(struct kvm_vm *vm, uint32_t vcpuid,
+				uint64_t msr_index, uint64_t msr_value,
+				int result)
 {
 	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
 	struct {
@@ -899,10 +901,30 @@ void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
 	buffer.entry.index = msr_index;
 	buffer.entry.data = msr_value;
 	r = ioctl(vcpu->fd, KVM_SET_MSRS, &buffer.header);
-	TEST_ASSERT(r == 1, "KVM_SET_MSRS IOCTL failed,\n"
+	TEST_ASSERT(r == result, "KVM_SET_MSRS IOCTL failed,\n"
 		"  rc: %i errno: %i", r, errno);
 }
 
+/* VCPU Set MSR
+ *
+ * Input Args:
+ *   vm - Virtual Machine
+ *   vcpuid - VCPU ID
+ *   msr_index - Index of MSR
+ *   msr_value - New value of MSR
+ *
+ * Output Args: None
+ *
+ * Return: On success, nothing. On failure a TEST_ASSERT is produced.
+ *
+ * Set value of MSR for VCPU.
+ */
+void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
+	uint64_t msr_value)
+{
+	vcpu_set_msr_expect_result(vm, vcpuid, msr_index, msr_value, 1);
+}
+
 /* VM VCPU Args Set
  *
  * Input Args:
@@ -1000,19 +1022,45 @@ struct kvm_x86_state {
 	struct kvm_msrs msrs;
 };
 
-static int kvm_get_num_msrs(struct kvm_vm *vm)
+static int kvm_get_num_msrs_fd(int kvm_fd)
 {
 	struct kvm_msr_list nmsrs;
 	int r;
 
 	nmsrs.nmsrs = 0;
-	r = ioctl(vm->kvm_fd, KVM_GET_MSR_INDEX_LIST, &nmsrs);
+	r = ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, &nmsrs);
 	TEST_ASSERT(r == -1 && errno == E2BIG, "Unexpected result from KVM_GET_MSR_INDEX_LIST probe, r: %i",
 		r);
 
 	return nmsrs.nmsrs;
 }
 
+static int kvm_get_num_msrs(struct kvm_vm *vm)
+{
+	return kvm_get_num_msrs_fd(vm->kvm_fd);
+}
+
+struct kvm_msr_list *kvm_get_msr_index_list(void)
+{
+	struct kvm_msr_list *list;
+	int nmsrs, r, kvm_fd;
+
+	kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
+	if (kvm_fd < 0)
+		exit(KSFT_SKIP);
+
+	nmsrs = kvm_get_num_msrs_fd(kvm_fd);
+	list = malloc(sizeof(*list) + nmsrs * sizeof(list->indices[0]));
+	list->nmsrs = nmsrs;
+	r = ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list);
+	close(kvm_fd);
+
+	TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_MSR_INDEX_LIST, r: %i",
+		r);
+
+	return list;
+}
+
 struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid)
 {
 	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
@@ -1158,7 +1206,12 @@ bool is_intel_cpu(void)
 	return (ebx == chunk[0] && edx == chunk[1] && ecx == chunk[2]);
 }
 
-uint32_t kvm_get_cpuid_max(void)
+uint32_t kvm_get_cpuid_max_basic(void)
+{
+	return kvm_get_supported_cpuid_entry(0)->eax;
+}
+
+uint32_t kvm_get_cpuid_max_extended(void)
 {
 	return kvm_get_supported_cpuid_entry(0x80000000)->eax;
 }
@@ -1169,7 +1222,7 @@ void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits)
 	bool pae;
 
 	/* SDM 4.1.4 */
-	if (kvm_get_cpuid_max() < 0x80000008) {
+	if (kvm_get_cpuid_max_extended() < 0x80000008) {
 		pae = kvm_get_supported_cpuid_entry(1)->edx & (1 << 6);
 		*pa_bits = pae ? 36 : 32;
 		*va_bits = 32;
diff --git a/tools/testing/selftests/kvm/x86_64/xss_msr_test.c b/tools/testing/selftests/kvm/x86_64/xss_msr_test.c
new file mode 100644
index 000000000000..47060eff06ce
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/xss_msr_test.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019, Google LLC.
+ *
+ * Tests for the IA32_XSS MSR.
+ */
+
+#define _GNU_SOURCE /* for program_invocation_short_name */
+#include <sys/ioctl.h>
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "vmx.h"
+
+#define VCPU_ID	      1
+
+#define X86_FEATURE_XSAVES	(1<<3)
+
+int main(int argc, char *argv[])
+{
+	struct kvm_cpuid_entry2 *entry;
+	struct kvm_msr_list *list;
+	bool found_xss = false;
+	struct kvm_vm *vm;
+	uint64_t xss_val;
+	int i;
+
+	/* Create VM */
+	vm = vm_create_default(VCPU_ID, 0, 0);
+
+	list = kvm_get_msr_index_list();
+	for (i = 0; i < list->nmsrs; ++i) {
+		if (list->indices[i] == MSR_IA32_XSS) {
+			found_xss = true;
+			break;
+		}
+	}
+
+	if (kvm_get_cpuid_max_basic() < 0xd) {
+		printf("XSAVES is not supported by the CPU.\n");
+		exit(KSFT_SKIP);
+	}
+
+	entry = kvm_get_supported_cpuid_index(0xd, 1);
+	TEST_ASSERT(found_xss == !!(entry->eax & X86_FEATURE_XSAVES),
+		    "Support for IA32_XSS and support for XSAVES do not match.\n");
+
+	if (!found_xss) {
+		printf("IA32_XSS and XSAVES are not supported.  Skipping test.\n");
+		exit(KSFT_SKIP);
+	}
+
+	xss_val = vcpu_get_msr(vm, VCPU_ID, MSR_IA32_XSS);
+	TEST_ASSERT(xss_val == 0, "MSR_IA32_XSS should always be zero\n");
+
+	vcpu_set_msr(vm, VCPU_ID, MSR_IA32_XSS, xss_val);
+	/*
+	 * At present, KVM only supports a guest IA32_XSS value of 0. Verify
+	 * that trying to set the guest IA32_XSS to an unsupported value fails.
+	 */
+	vcpu_set_msr_expect_result(vm, VCPU_ID, MSR_IA32_XSS, ~0ull, 0);
+
+	free(list);
+	kvm_vm_free(vm);
+}
-- 
2.23.0.581.g78d2f28ef7-goog



* Re: [Patch 4/6] kvm: svm: Enumerate XSAVES in guest CPUID when it is available to the guest
  2019-10-09  0:41 ` [Patch 4/6] kvm: svm: Enumerate XSAVES in guest CPUID when it is available to the guest Aaron Lewis
@ 2019-10-09  1:42   ` Yang Weijiang
  2019-10-09  6:32     ` Paolo Bonzini
  0 siblings, 1 reply; 25+ messages in thread
From: Yang Weijiang @ 2019-10-09  1:42 UTC (permalink / raw)
  To: Aaron Lewis
  Cc: Babu Moger, Yang Weijiang, Sebastian Andrzej Siewior, kvm,
	Paolo Bonzini, Jim Mattson

On Tue, Oct 08, 2019 at 05:41:40PM -0700, Aaron Lewis wrote:
> Add the function guest_cpuid_set() to allow a bit in the guest cpuid to
> be set.  This is complementary to the guest_cpuid_clear() function.
> 
> Also, set the XSAVES bit in the guest cpuid if the host has the same bit
> set and the guest has the XSAVE bit set.  This is to ensure that XSAVES
> will be enumerated in the guest CPUID if XSAVES can be used in the guest.
> 
> Reviewed-by: Jim Mattson <jmattson@google.com>
> Signed-off-by: Aaron Lewis <aaronlewis@google.com>
> ---
>  arch/x86/kvm/cpuid.h | 9 +++++++++
>  arch/x86/kvm/svm.c   | 4 ++++
>  2 files changed, 13 insertions(+)
> 
> diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
> index d78a61408243..420ceea02fd1 100644
> --- a/arch/x86/kvm/cpuid.h
> +++ b/arch/x86/kvm/cpuid.h
> @@ -113,6 +113,15 @@ static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu, unsigned x8
>  		*reg &= ~bit(x86_feature);
>  }
>  
> +static __always_inline void guest_cpuid_set(struct kvm_vcpu *vcpu, unsigned x86_feature)
> +{
> +	int *reg;
> +
> +	reg = guest_cpuid_get_register(vcpu, x86_feature);
> +	if (reg)
> +		*reg |= ~bit(x86_feature);
> +}
> +
This looks like it's meant to set the bit; shouldn't it be: *reg |= bit(x86_feature)?
>  static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
>  {
>  	struct kvm_cpuid_entry2 *best;
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index 65223827c675..2522a467bbc0 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -5887,6 +5887,10 @@ static void svm_cpuid_update(struct kvm_vcpu *vcpu)
>  {
>  	struct vcpu_svm *svm = to_svm(vcpu);
>  
> +	if (guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
> +	    boot_cpu_has(X86_FEATURE_XSAVES))
> +		guest_cpuid_set(vcpu, X86_FEATURE_XSAVES);
> +
>  	/* Update nrips enabled cache */
>  	svm->nrips_enabled = !!guest_cpuid_has(&svm->vcpu, X86_FEATURE_NRIPS);
>  
> -- 
> 2.23.0.581.g78d2f28ef7-goog


* Re: [Patch 2/6] KVM: VMX: Use wrmsr for switching between guest and host IA32_XSS
  2019-10-09  0:41 ` [Patch 2/6] KVM: VMX: Use wrmsr for switching between guest and host IA32_XSS Aaron Lewis
@ 2019-10-09  4:49   ` Yang Weijiang
  2019-10-09  6:30   ` Paolo Bonzini
  2019-10-09  6:31   ` Sebastian Andrzej Siewior
  2 siblings, 0 replies; 25+ messages in thread
From: Yang Weijiang @ 2019-10-09  4:49 UTC (permalink / raw)
  To: Aaron Lewis
  Cc: Babu Moger, Yang Weijiang, Sebastian Andrzej Siewior, kvm,
	Paolo Bonzini, Jim Mattson

On Tue, Oct 08, 2019 at 05:41:38PM -0700, Aaron Lewis wrote:
> Set IA32_XSS for the guest and host during VM Enter and VM Exit
> transitions rather than by using the MSR-load areas.
> 
> Reviewed-by: Jim Mattson <jmattson@google.com>
> Signed-off-by: Aaron Lewis <aaronlewis@google.com>
> ---
>  arch/x86/kvm/svm.c     |  4 ++--
>  arch/x86/kvm/vmx/vmx.c | 14 ++------------
>  arch/x86/kvm/x86.c     | 25 +++++++++++++++++++++----
>  arch/x86/kvm/x86.h     |  4 ++--
>  4 files changed, 27 insertions(+), 20 deletions(-)
> 
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index f8ecb6df5106..e2d7a7738c76 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -5628,7 +5628,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
>  	svm->vmcb->save.cr2 = vcpu->arch.cr2;
>  
>  	clgi();
> -	kvm_load_guest_xcr0(vcpu);
> +	kvm_load_guest_xsave_controls(vcpu);
>  
>  	if (lapic_in_kernel(vcpu) &&
>  		vcpu->arch.apic->lapic_timer.timer_advance_ns)
> @@ -5778,7 +5778,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
>  	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
>  		kvm_before_interrupt(&svm->vcpu);
>  
> -	kvm_put_guest_xcr0(vcpu);
> +	kvm_load_host_xsave_controls(vcpu);
>  	stgi();
>  
>  	/* Any pending NMI will happen here */
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index 409e9a7323f1..ff5ba28abecb 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -106,8 +106,6 @@ module_param(enable_apicv, bool, S_IRUGO);
>  static bool __read_mostly nested = 1;
>  module_param(nested, bool, S_IRUGO);
>  
> -static u64 __read_mostly host_xss;
> -
>  bool __read_mostly enable_pml = 1;
>  module_param_named(pml, enable_pml, bool, S_IRUGO);
>  
> @@ -2074,11 +2072,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>  		if (data != 0)
>  			return 1;
>  		vcpu->arch.ia32_xss = data;
> -		if (vcpu->arch.ia32_xss != host_xss)
> -			add_atomic_switch_msr(vmx, MSR_IA32_XSS,
> -				vcpu->arch.ia32_xss, host_xss, false);
> -		else
> -			clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
>  		break;
>  	case MSR_IA32_RTIT_CTL:
>  		if ((pt_mode != PT_MODE_HOST_GUEST) ||
> @@ -6540,7 +6533,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
>  	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
>  		vmx_set_interrupt_shadow(vcpu, 0);
>  
> -	kvm_load_guest_xcr0(vcpu);
> +	kvm_load_guest_xsave_controls(vcpu);
>  
>  	if (static_cpu_has(X86_FEATURE_PKU) &&
>  	    kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
> @@ -6647,7 +6640,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
>  			__write_pkru(vmx->host_pkru);
>  	}
>  
> -	kvm_put_guest_xcr0(vcpu);
> +	kvm_load_host_xsave_controls(vcpu);
>  
>  	vmx->nested.nested_run_pending = 0;
>  	vmx->idt_vectoring_info = 0;
> @@ -7599,9 +7592,6 @@ static __init int hardware_setup(void)
>  		WARN_ONCE(host_bndcfgs, "KVM: BNDCFGS in host will be lost");
>  	}
>  
> -	if (boot_cpu_has(X86_FEATURE_XSAVES))
> -		rdmsrl(MSR_IA32_XSS, host_xss);
> -
>  	if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() ||
>  	    !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global()))
>  		enable_vpid = 0;
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 661e2bf38526..e90e658fd8a9 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -176,6 +176,8 @@ struct kvm_shared_msrs {
>  static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
>  static struct kvm_shared_msrs __percpu *shared_msrs;
>  
> +static u64 __read_mostly host_xss;
> +
>  struct kvm_stats_debugfs_item debugfs_entries[] = {
>  	{ "pf_fixed", VCPU_STAT(pf_fixed) },
>  	{ "pf_guest", VCPU_STAT(pf_guest) },
> @@ -812,27 +814,39 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
>  }
>  EXPORT_SYMBOL_GPL(kvm_lmsw);
>  
> -void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
> +void kvm_load_guest_xsave_controls(struct kvm_vcpu *vcpu)
>  {
>  	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
>  			!vcpu->guest_xcr0_loaded) {
>  		/* kvm_set_xcr() also depends on this */
>  		if (vcpu->arch.xcr0 != host_xcr0)
>  			xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
> +
> +		if (kvm_x86_ops->xsaves_supported() &&
> +		    guest_cpuid_has(vcpu, X86_FEATURE_XSAVES) &&
> +		    vcpu->arch.ia32_xss != host_xss)
> +			wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss);
> +
>  		vcpu->guest_xcr0_loaded = 1;
>  	}
>  }
> -EXPORT_SYMBOL_GPL(kvm_load_guest_xcr0);
> +EXPORT_SYMBOL_GPL(kvm_load_guest_xsave_controls);
>  
> -void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
> +void kvm_load_host_xsave_controls(struct kvm_vcpu *vcpu)
>  {
>  	if (vcpu->guest_xcr0_loaded) {
>  		if (vcpu->arch.xcr0 != host_xcr0)
>  			xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
> +
> +		if (kvm_x86_ops->xsaves_supported() &&
> +		    guest_cpuid_has(vcpu, X86_FEATURE_XSAVES) &&
Should it check guest/VMX XSAVES support before loading the *host*
XSS state?
> +		    vcpu->arch.ia32_xss != host_xss)
> +			wrmsrl(MSR_IA32_XSS, host_xss);
> +
>  		vcpu->guest_xcr0_loaded = 0;
>  	}
>  }
> -EXPORT_SYMBOL_GPL(kvm_put_guest_xcr0);
> +EXPORT_SYMBOL_GPL(kvm_load_host_xsave_controls);
>  
>  static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
>  {
> @@ -9293,6 +9307,9 @@ int kvm_arch_hardware_setup(void)
>  		kvm_default_tsc_scaling_ratio = 1ULL << kvm_tsc_scaling_ratio_frac_bits;
>  	}
>  
> +	if (boot_cpu_has(X86_FEATURE_XSAVES))
> +		rdmsrl(MSR_IA32_XSS, host_xss);
> +
>  	kvm_init_msr_list();
>  	return 0;
>  }
> diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
> index dbf7442a822b..0d04e865665b 100644
> --- a/arch/x86/kvm/x86.h
> +++ b/arch/x86/kvm/x86.h
> @@ -366,7 +366,7 @@ static inline bool kvm_pat_valid(u64 data)
>  	return (data | ((data & 0x0202020202020202ull) << 1)) == data;
>  }
>  
> -void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu);
> -void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu);
> +void kvm_load_guest_xsave_controls(struct kvm_vcpu *vcpu);
> +void kvm_load_host_xsave_controls(struct kvm_vcpu *vcpu);
>  
>  #endif
> -- 
> 2.23.0.581.g78d2f28ef7-goog


* Re: [Patch 6/6] kvm: tests: Add test to verify MSR_IA32_XSS
  2019-10-09  0:41 ` [Patch 6/6] kvm: tests: Add test to verify MSR_IA32_XSS Aaron Lewis
@ 2019-10-09  6:30   ` Paolo Bonzini
  2019-10-09 23:44     ` Jim Mattson
  0 siblings, 1 reply; 25+ messages in thread
From: Paolo Bonzini @ 2019-10-09  6:30 UTC (permalink / raw)
  To: Aaron Lewis, Babu Moger, Yang Weijiang, Sebastian Andrzej Siewior, kvm
  Cc: Jim Mattson

On 09/10/19 02:41, Aaron Lewis wrote:
>   * Set value of MSR for VCPU.
>   */
> -void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
> -	uint64_t msr_value)
> +void vcpu_set_msr_expect_result(struct kvm_vm *vm, uint32_t vcpuid,
> +				uint64_t msr_index, uint64_t msr_value,
> +				int result)
>  {
>  	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
>  	struct {
> @@ -899,10 +901,30 @@ void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
>  	buffer.entry.index = msr_index;
>  	buffer.entry.data = msr_value;
>  	r = ioctl(vcpu->fd, KVM_SET_MSRS, &buffer.header);
> -	TEST_ASSERT(r == 1, "KVM_SET_MSRS IOCTL failed,\n"
> +	TEST_ASSERT(r == result, "KVM_SET_MSRS IOCTL failed,\n"
>  		"  rc: %i errno: %i", r, errno);
>  }

This is a library, so the functions to some extent should make sense
even outside tests.  Please make a function _vcpu_set_msr that returns
the result of the ioctl; it can still be used in vcpu_set_msr, and the
tests can TEST_ASSERT what they want.
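
For instance, a minimal sketch of that split (untested, and reusing the
buffer setup already in the patch):

int _vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
		  uint64_t msr_value)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	struct {
		struct kvm_msrs header;
		struct kvm_msr_entry entry;
	} buffer = {};

	/* Return the raw KVM_SET_MSRS result; callers decide what to assert. */
	buffer.header.nmsrs = 1;
	buffer.entry.index = msr_index;
	buffer.entry.data = msr_value;
	return ioctl(vcpu->fd, KVM_SET_MSRS, &buffer.header);
}

void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
		  uint64_t msr_value)
{
	int r = _vcpu_set_msr(vm, vcpuid, msr_index, msr_value);

	TEST_ASSERT(r == 1, "KVM_SET_MSRS IOCTL failed,\n"
		"  rc: %i errno: %i", r, errno);
}

That way the assert stays in the test-facing wrapper and the library
itself imposes no policy.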

> +uint32_t kvm_get_cpuid_max_basic(void)
> +{
> +	return kvm_get_supported_cpuid_entry(0)->eax;
> +}
> +
> +uint32_t kvm_get_cpuid_max_extended(void)

I would leave the existing function aside, and call this one
kvm_get_cpuid_max_amd() since CPUID leaves at 0x80000000 are allocated
by AMD.
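
That is, keeping the body from the patch, just renamed per the
suggestion (illustrative):

uint32_t kvm_get_cpuid_max_amd(void)
{
	/* CPUID leaf 0x80000000 returns the maximum extended leaf in EAX. */
	return kvm_get_supported_cpuid_entry(0x80000000)->eax;
}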

Otherwise looks good.

Paolo



* Re: [Patch 2/6] KVM: VMX: Use wrmsr for switching between guest and host IA32_XSS
  2019-10-09  0:41 ` [Patch 2/6] KVM: VMX: Use wrmsr for switching between guest and host IA32_XSS Aaron Lewis
  2019-10-09  4:49   ` Yang Weijiang
@ 2019-10-09  6:30   ` Paolo Bonzini
  2019-10-09 12:34     ` Vitaly Kuznetsov
  2019-10-09  6:31   ` Sebastian Andrzej Siewior
  2 siblings, 1 reply; 25+ messages in thread
From: Paolo Bonzini @ 2019-10-09  6:30 UTC (permalink / raw)
  To: Aaron Lewis, Babu Moger, Yang Weijiang, Sebastian Andrzej Siewior, kvm
  Cc: Jim Mattson

On 09/10/19 02:41, Aaron Lewis wrote:
> Set IA32_XSS for the guest and host during VM Enter and VM Exit
> transitions rather than by using the MSR-load areas.
> 
> Reviewed-by: Jim Mattson <jmattson@google.com>
> Signed-off-by: Aaron Lewis <aaronlewis@google.com>

This commit message is missing an explanation of why this is a good thing.

Also, the series is missing a cover letter that explains a bit more of
the overall picture.  I have no problem with no cover letter for a
two-patch series, but at six patches it is definitely a requirement.

So I'm replying to this patch as a proxy for the whole series, and
asking: why is it useful to enable XSAVES (on AMD or anywhere) if
IA32_XSS is limited to zero anyway?

On AMD, we do have the problem that XSAVES is essentially a WRMSR with
no exit, albeit confined to the MSRs included in the set bits of
IA32_XSS.  But while that would be a (good) argument for writing 0 to
IA32_XSS around AMD vmentry, it shouldn't require making XSAVES
available to the guests.
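
(Concretely, that alternative would look something like this around the
SVM vmentry; illustrative only, not a proposal:)

	/* Quiesce IA32_XSS so an un-intercepted guest XSAVES cannot
	 * touch host-managed supervisor state. */
	if (host_xss)
		wrmsrl(MSR_IA32_XSS, 0);

	/* ... enter and exit the guest ... */

	if (host_xss)
		wrmsrl(MSR_IA32_XSS, host_xss);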

Thanks,

Paolo

> ---
>  arch/x86/kvm/svm.c     |  4 ++--
>  arch/x86/kvm/vmx/vmx.c | 14 ++------------
>  arch/x86/kvm/x86.c     | 25 +++++++++++++++++++++----
>  arch/x86/kvm/x86.h     |  4 ++--
>  4 files changed, 27 insertions(+), 20 deletions(-)
> 
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index f8ecb6df5106..e2d7a7738c76 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -5628,7 +5628,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
>  	svm->vmcb->save.cr2 = vcpu->arch.cr2;
>  
>  	clgi();
> -	kvm_load_guest_xcr0(vcpu);
> +	kvm_load_guest_xsave_controls(vcpu);
>  
>  	if (lapic_in_kernel(vcpu) &&
>  		vcpu->arch.apic->lapic_timer.timer_advance_ns)
> @@ -5778,7 +5778,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
>  	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
>  		kvm_before_interrupt(&svm->vcpu);
>  
> -	kvm_put_guest_xcr0(vcpu);
> +	kvm_load_host_xsave_controls(vcpu);
>  	stgi();
>  
>  	/* Any pending NMI will happen here */
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index 409e9a7323f1..ff5ba28abecb 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -106,8 +106,6 @@ module_param(enable_apicv, bool, S_IRUGO);
>  static bool __read_mostly nested = 1;
>  module_param(nested, bool, S_IRUGO);
>  
> -static u64 __read_mostly host_xss;
> -
>  bool __read_mostly enable_pml = 1;
>  module_param_named(pml, enable_pml, bool, S_IRUGO);
>  
> @@ -2074,11 +2072,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>  		if (data != 0)
>  			return 1;
>  		vcpu->arch.ia32_xss = data;
> -		if (vcpu->arch.ia32_xss != host_xss)
> -			add_atomic_switch_msr(vmx, MSR_IA32_XSS,
> -				vcpu->arch.ia32_xss, host_xss, false);
> -		else
> -			clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
>  		break;
>  	case MSR_IA32_RTIT_CTL:
>  		if ((pt_mode != PT_MODE_HOST_GUEST) ||
> @@ -6540,7 +6533,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
>  	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
>  		vmx_set_interrupt_shadow(vcpu, 0);
>  
> -	kvm_load_guest_xcr0(vcpu);
> +	kvm_load_guest_xsave_controls(vcpu);
>  
>  	if (static_cpu_has(X86_FEATURE_PKU) &&
>  	    kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
> @@ -6647,7 +6640,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
>  			__write_pkru(vmx->host_pkru);
>  	}
>  
> -	kvm_put_guest_xcr0(vcpu);
> +	kvm_load_host_xsave_controls(vcpu);
>  
>  	vmx->nested.nested_run_pending = 0;
>  	vmx->idt_vectoring_info = 0;
> @@ -7599,9 +7592,6 @@ static __init int hardware_setup(void)
>  		WARN_ONCE(host_bndcfgs, "KVM: BNDCFGS in host will be lost");
>  	}
>  
> -	if (boot_cpu_has(X86_FEATURE_XSAVES))
> -		rdmsrl(MSR_IA32_XSS, host_xss);
> -
>  	if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() ||
>  	    !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global()))
>  		enable_vpid = 0;
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 661e2bf38526..e90e658fd8a9 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -176,6 +176,8 @@ struct kvm_shared_msrs {
>  static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
>  static struct kvm_shared_msrs __percpu *shared_msrs;
>  
> +static u64 __read_mostly host_xss;
> +
>  struct kvm_stats_debugfs_item debugfs_entries[] = {
>  	{ "pf_fixed", VCPU_STAT(pf_fixed) },
>  	{ "pf_guest", VCPU_STAT(pf_guest) },
> @@ -812,27 +814,39 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
>  }
>  EXPORT_SYMBOL_GPL(kvm_lmsw);
>  
> -void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
> +void kvm_load_guest_xsave_controls(struct kvm_vcpu *vcpu)
>  {
>  	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
>  			!vcpu->guest_xcr0_loaded) {
>  		/* kvm_set_xcr() also depends on this */
>  		if (vcpu->arch.xcr0 != host_xcr0)
>  			xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
> +
> +		if (kvm_x86_ops->xsaves_supported() &&
> +		    guest_cpuid_has(vcpu, X86_FEATURE_XSAVES) &&
> +		    vcpu->arch.ia32_xss != host_xss)
> +			wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss);
> +
>  		vcpu->guest_xcr0_loaded = 1;
>  	}
>  }
> -EXPORT_SYMBOL_GPL(kvm_load_guest_xcr0);
> +EXPORT_SYMBOL_GPL(kvm_load_guest_xsave_controls);
>  
> -void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
> +void kvm_load_host_xsave_controls(struct kvm_vcpu *vcpu)
>  {
>  	if (vcpu->guest_xcr0_loaded) {
>  		if (vcpu->arch.xcr0 != host_xcr0)
>  			xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
> +
> +		if (kvm_x86_ops->xsaves_supported() &&
> +		    guest_cpuid_has(vcpu, X86_FEATURE_XSAVES) &&
> +		    vcpu->arch.ia32_xss != host_xss)
> +			wrmsrl(MSR_IA32_XSS, host_xss);
> +
>  		vcpu->guest_xcr0_loaded = 0;
>  	}
>  }
> -EXPORT_SYMBOL_GPL(kvm_put_guest_xcr0);
> +EXPORT_SYMBOL_GPL(kvm_load_host_xsave_controls);
>  
>  static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
>  {
> @@ -9293,6 +9307,9 @@ int kvm_arch_hardware_setup(void)
>  		kvm_default_tsc_scaling_ratio = 1ULL << kvm_tsc_scaling_ratio_frac_bits;
>  	}
>  
> +	if (boot_cpu_has(X86_FEATURE_XSAVES))
> +		rdmsrl(MSR_IA32_XSS, host_xss);
> +
>  	kvm_init_msr_list();
>  	return 0;
>  }
> diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
> index dbf7442a822b..0d04e865665b 100644
> --- a/arch/x86/kvm/x86.h
> +++ b/arch/x86/kvm/x86.h
> @@ -366,7 +366,7 @@ static inline bool kvm_pat_valid(u64 data)
>  	return (data | ((data & 0x0202020202020202ull) << 1)) == data;
>  }
>  
> -void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu);
> -void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu);
> +void kvm_load_guest_xsave_controls(struct kvm_vcpu *vcpu);
> +void kvm_load_host_xsave_controls(struct kvm_vcpu *vcpu);
>  
>  #endif
> 



* Re: [Patch 2/6] KVM: VMX: Use wrmsr for switching between guest and host IA32_XSS
  2019-10-09  0:41 ` [Patch 2/6] KVM: VMX: Use wrmsr for switching between guest and host IA32_XSS Aaron Lewis
  2019-10-09  4:49   ` Yang Weijiang
  2019-10-09  6:30   ` Paolo Bonzini
@ 2019-10-09  6:31   ` Sebastian Andrzej Siewior
  2 siblings, 0 replies; 25+ messages in thread
From: Sebastian Andrzej Siewior @ 2019-10-09  6:31 UTC (permalink / raw)
  To: Aaron Lewis; +Cc: Babu Moger, Yang Weijiang, kvm, Paolo Bonzini, Jim Mattson

On 2019-10-08 17:41:38 [-0700], Aaron Lewis wrote:
> Set IA32_XSS for the guest and host during VM Enter and VM Exit
> transitions rather than by using the MSR-load areas.

Could you please explain the *why* as part of the message?

> Reviewed-by: Jim Mattson <jmattson@google.com>
> Signed-off-by: Aaron Lewis <aaronlewis@google.com>

Sebastian


* Re: [Patch 4/6] kvm: svm: Enumerate XSAVES in guest CPUID when it is available to the guest
  2019-10-09  1:42   ` Yang Weijiang
@ 2019-10-09  6:32     ` Paolo Bonzini
  2019-10-09 23:35       ` Jim Mattson
  0 siblings, 1 reply; 25+ messages in thread
From: Paolo Bonzini @ 2019-10-09  6:32 UTC (permalink / raw)
  To: Yang Weijiang, Aaron Lewis
  Cc: Babu Moger, Sebastian Andrzej Siewior, kvm, Jim Mattson

On 09/10/19 03:42, Yang Weijiang wrote:
> +	if (guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
> +	    boot_cpu_has(X86_FEATURE_XSAVES))
> +		guest_cpuid_set(vcpu, X86_FEATURE_XSAVES);
> +

This is incorrect, as it would cause a change in the guest ABI when
migrating from an XSAVES-enabled processor to one that doesn't have it.

As long as IA32_XSS is 0, XSAVES is indistinguishable from XSAVEC, so
it's okay if the guest "tries" to run it despite the bit being clear in
CPUID.

Paolo



* Re: [Patch 3/6] kvm: svm: Add support for XSAVES on AMD
  2019-10-09  0:41 ` [Patch 3/6] kvm: svm: Add support for XSAVES on AMD Aaron Lewis
@ 2019-10-09  6:44   ` Sebastian Andrzej Siewior
  2019-10-10 14:42     ` Aaron Lewis
  2019-10-09  7:01   ` Paolo Bonzini
  1 sibling, 1 reply; 25+ messages in thread
From: Sebastian Andrzej Siewior @ 2019-10-09  6:44 UTC (permalink / raw)
  To: Aaron Lewis; +Cc: Babu Moger, Yang Weijiang, kvm, Paolo Bonzini, Jim Mattson

On 2019-10-08 17:41:39 [-0700], Aaron Lewis wrote:
> Hoist support for IA32_XSS so it can be used for both AMD and Intel,

Hoist?

> instead of for just Intel.
> 
> AMD has no equivalent of Intel's "Enable XSAVES/XRSTORS" VM-execution
> control. Instead, XSAVES is always available to the guest when supported
> on the host.

You could add that this implements the XSAVES check based on the host's
features and moves the MSR_IA32_XSS MSR R/W from Intel-only code to the
common code.

…
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index e90e658fd8a9..77f2e8c05047 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -2702,6 +2702,15 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>  	case MSR_IA32_TSC:
>  		kvm_write_tsc(vcpu, msr_info);
>  		break;
> +	case MSR_IA32_XSS:
> +		if (!kvm_x86_ops->xsaves_supported() ||
> +		    (!msr_info->host_initiated &&
> +		     !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES)))
> +			return 1;

I wouldn't ditch the comment. You could explain why only zero is allowed
to be written. The Skylake remark is not true for both vendors, but that
probably requires an explanation.
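
For example, something along these lines (wording is only a suggestion):

		/*
		 * KVM does not yet support any XSS-based supervisor
		 * state, on either Intel or AMD, so the only value
		 * that can be written is zero.
		 */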

> +		if (data != 0)
> +			return 1;
> +		vcpu->arch.ia32_xss = data;
> +		break;
>  	case MSR_SMI_COUNT:
>  		if (!msr_info->host_initiated)
>  			return 1;


* Re: [Patch 3/6] kvm: svm: Add support for XSAVES on AMD
  2019-10-09  0:41 ` [Patch 3/6] kvm: svm: Add support for XSAVES on AMD Aaron Lewis
  2019-10-09  6:44   ` Sebastian Andrzej Siewior
@ 2019-10-09  7:01   ` Paolo Bonzini
  2019-10-09 21:29     ` Jim Mattson
  1 sibling, 1 reply; 25+ messages in thread
From: Paolo Bonzini @ 2019-10-09  7:01 UTC (permalink / raw)
  To: Aaron Lewis, Babu Moger, Yang Weijiang, Sebastian Andrzej Siewior, kvm
  Cc: Jim Mattson, Luwei Kang

On 09/10/19 02:41, Aaron Lewis wrote:
> -		/*
> -		 * The only supported bit as of Skylake is bit 8, but
> -		 * it is not supported on KVM.
> -		 */
> -		if (data != 0)
> -			return 1;

This comment is actually not true anymore; Intel supports PT (bit 8) on
Cascade Lake, so it could be changed to something like

	/*
	 * We do support PT (bit 8) if kvm_x86_ops->pt_supported(), but
	 * guests will have to configure it using WRMSR rather than
	 * XSAVES.
	 */

Paolo



* Re: [Patch 2/6] KVM: VMX: Use wrmsr for switching between guest and host IA32_XSS
  2019-10-09  6:30   ` Paolo Bonzini
@ 2019-10-09 12:34     ` Vitaly Kuznetsov
  0 siblings, 0 replies; 25+ messages in thread
From: Vitaly Kuznetsov @ 2019-10-09 12:34 UTC (permalink / raw)
  To: Paolo Bonzini
  Cc: Jim Mattson, Aaron Lewis, Babu Moger, Yang Weijiang,
	Sebastian Andrzej Siewior, kvm

Paolo Bonzini <pbonzini@redhat.com> writes:

> On 09/10/19 02:41, Aaron Lewis wrote:
>> Set IA32_XSS for the guest and host during VM Enter and VM Exit
>> transitions rather than by using the MSR-load areas.
>> 
>> Reviewed-by: Jim Mattson <jmattson@google.com>
>> Signed-off-by: Aaron Lewis <aaronlewis@google.com>
>
> This commit message is missing an explanation of why this is a good thing.
>
> Also, the series is missing a cover letter that explains a bit more of
> the overall picture.  I have no problem with no cover letter for
> two-patch series, but at six it is definitely a requirement.
>
> So I'm replying to this patch as a proxy for the whole series, and
> asking: why is it useful to enable XSAVES (on AMD or anywhere) if anyway
> IA32_XSS is limited to zero?

I know at least one good reason to do so: Hyper-V 2016 Gen1 (but not
Gen2!) doesn't seem to be able to boot without XSAVES (don't ask me
why). I'm not particularly sure if Aaron is aiming at fixing this one
though.

-- 
Vitaly


* Re: [Patch 3/6] kvm: svm: Add support for XSAVES on AMD
  2019-10-09  7:01   ` Paolo Bonzini
@ 2019-10-09 21:29     ` Jim Mattson
  2019-10-09 21:40       ` Paolo Bonzini
  0 siblings, 1 reply; 25+ messages in thread
From: Jim Mattson @ 2019-10-09 21:29 UTC (permalink / raw)
  To: Paolo Bonzini
  Cc: Aaron Lewis, Babu Moger, Yang Weijiang,
	Sebastian Andrzej Siewior, kvm list, Luwei Kang

On Wed, Oct 9, 2019 at 12:02 AM Paolo Bonzini <pbonzini@redhat.com> wrote:
>
> On 09/10/19 02:41, Aaron Lewis wrote:
> > -             /*
> > -              * The only supported bit as of Skylake is bit 8, but
> > -              * it is not supported on KVM.
> > -              */
> > -             if (data != 0)
> > -                     return 1;
>
> This comment is actually not true anymore; Intel supports PT (bit 8) on
> Cascade Lake, so it could be changed to something like
>
>         /*
>          * We do support PT (bit 8) if kvm_x86_ops->pt_supported(), but
>          * guests will have to configure it using WRMSR rather than
>          * XSAVES.
>          */
>
> Paolo

Isn't it necessary for the host to set IA32_XSS to a superset of the
guest IA32_XSS for proper host-level context-switching?

arch/x86/kernel/fpu/xstate.c has this comment:

 * Note that we do not currently set any bits on IA32_XSS so
 * 'XCR0 | IA32_XSS == XCR0' for now.


* Re: [Patch 3/6] kvm: svm: Add support for XSAVES on AMD
  2019-10-09 21:29     ` Jim Mattson
@ 2019-10-09 21:40       ` Paolo Bonzini
  2019-10-09 21:58         ` Jim Mattson
  0 siblings, 1 reply; 25+ messages in thread
From: Paolo Bonzini @ 2019-10-09 21:40 UTC (permalink / raw)
  To: Jim Mattson
  Cc: Aaron Lewis, Babu Moger, Yang Weijiang,
	Sebastian Andrzej Siewior, kvm list, Luwei Kang

On 09/10/19 23:29, Jim Mattson wrote:
> On Wed, Oct 9, 2019 at 12:02 AM Paolo Bonzini <pbonzini@redhat.com> wrote:
>>
>> On 09/10/19 02:41, Aaron Lewis wrote:
>>> -             /*
>>> -              * The only supported bit as of Skylake is bit 8, but
>>> -              * it is not supported on KVM.
>>> -              */
>>> -             if (data != 0)
>>> -                     return 1;
>>
>> This comment is actually not true anymore; Intel supports PT (bit 8) on
>> Cascade Lake, so it could be changed to something like
>>
>>         /*
>>          * We do support PT (bit 8) if kvm_x86_ops->pt_supported(), but
>>          * guests will have to configure it using WRMSR rather than
>>          * XSAVES.
>>          */
>>
>> Paolo
> 
> Isn't it necessary for the host to set IA32_XSS to a superset of the
> guest IA32_XSS for proper host-level context-switching?

Yes, this is why we cannot allow the guest to set bit 8.  But the
comment is obsolete:

1) of course Skylake is not the newest model

2) processor tracing was not supported at all when the comment was
written; but on CascadeLake, guest PT is now supported---just not the
processor tracing XSAVES component.

Paolo

> arch/x86/kernel/fpu/xstate.c has this comment:
> 
>  * Note that we do not currently set any bits on IA32_XSS so
>  * 'XCR0 | IA32_XSS == XCR0' for now.
> 



* Re: [Patch 3/6] kvm: svm: Add support for XSAVES on AMD
  2019-10-09 21:40       ` Paolo Bonzini
@ 2019-10-09 21:58         ` Jim Mattson
  2019-10-09 22:49           ` Paolo Bonzini
  0 siblings, 1 reply; 25+ messages in thread
From: Jim Mattson @ 2019-10-09 21:58 UTC (permalink / raw)
  To: Paolo Bonzini
  Cc: Aaron Lewis, Babu Moger, Yang Weijiang,
	Sebastian Andrzej Siewior, kvm list, Luwei Kang

On Wed, Oct 9, 2019 at 2:40 PM Paolo Bonzini <pbonzini@redhat.com> wrote:
>
> On 09/10/19 23:29, Jim Mattson wrote:
> > On Wed, Oct 9, 2019 at 12:02 AM Paolo Bonzini <pbonzini@redhat.com> wrote:
> >>
> >> On 09/10/19 02:41, Aaron Lewis wrote:
> >>> -             /*
> >>> -              * The only supported bit as of Skylake is bit 8, but
> >>> -              * it is not supported on KVM.
> >>> -              */
> >>> -             if (data != 0)
> >>> -                     return 1;
> >>
> >> This comment is actually not true anymore; Intel supports PT (bit 8) on
> >> Cascade Lake, so it could be changed to something like
> >>
> >>         /*
> >>          * We do support PT (bit 8) if kvm_x86_ops->pt_supported(), but
> >>          * guests will have to configure it using WRMSR rather than
> >>          * XSAVES.
> >>          */
> >>
> >> Paolo
> >
> > Isn't it necessary for the host to set IA32_XSS to a superset of the
> > guest IA32_XSS for proper host-level context-switching?
>
> Yes, this is why we cannot allow the guest to set bit 8.  But the
> comment is obsolete:
>
> 1) of course Skylake is not the newest model
>
> 2) processor tracing was not supported at all when the comment was
> written; but on CascadeLake, guest PT is now supported---just not the
> processor tracing XSAVES component.

I think we're on the same page. I was just confused by your wording;
it sounded like you were saying that KVM supported bit 8.

How about:

/*
 * We do support PT if kvm_x86_ops->pt_supported(), but we do not
 * support IA32_XSS[bit 8]. Guests will have to use WRMSR rather than
 * XSAVES/XRSTORS to save/restore PT MSRs.
 */

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [Patch 3/6] kvm: svm: Add support for XSAVES on AMD
  2019-10-09 21:58         ` Jim Mattson
@ 2019-10-09 22:49           ` Paolo Bonzini
  2019-10-10  0:42             ` Kang, Luwei
  0 siblings, 1 reply; 25+ messages in thread
From: Paolo Bonzini @ 2019-10-09 22:49 UTC (permalink / raw)
  To: Jim Mattson
  Cc: Aaron Lewis, Babu Moger, Yang Weijiang,
	Sebastian Andrzej Siewior, kvm list, Luwei Kang

On 09/10/19 23:58, Jim Mattson wrote:
> I was just confused by your wording;
> it sounded like you were saying that KVM supported bit 8.
> 
> How about:
> 
> /*
>  * We do support PT if kvm_x86_ops->pt_supported(), but we do not
>  * support IA32_XSS[bit 8]. Guests will have to use WRMSR rather than
>  * XSAVES/XRSTORS to save/restore PT MSRs.
>  */

Good!

Paolo


^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [Patch 4/6] kvm: svm: Enumerate XSAVES in guest CPUID when it is available to the guest
  2019-10-09  6:32     ` Paolo Bonzini
@ 2019-10-09 23:35       ` Jim Mattson
  0 siblings, 0 replies; 25+ messages in thread
From: Jim Mattson @ 2019-10-09 23:35 UTC (permalink / raw)
  To: Paolo Bonzini
  Cc: Yang Weijiang, Aaron Lewis, Babu Moger,
	Sebastian Andrzej Siewior, kvm list

On Tue, Oct 8, 2019 at 11:32 PM Paolo Bonzini <pbonzini@redhat.com> wrote:
>
> On 09/10/19 03:42, Yang Weijiang wrote:
> > +     if (guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
> > +         boot_cpu_has(X86_FEATURE_XSAVES))
> > +             guest_cpuid_set(vcpu, X86_FEATURE_XSAVES);
> > +
>
> This is incorrect, as it would cause a change in the guest ABI when
> migrating from an XSAVES-enabled processor to one that doesn't have it.
>
> As long as IA32_XSS is 0, XSAVES is indistinguishable from XSAVEC, so
> it's okay if the guest "tries" to run it despite the bit being clear in
> CPUID.

My bad. When Aaron and I were discussing this, I expressed concern
about guest behavior on a future day when (a) AMD supports a non-zero
IA32_XSS, and (b) Linux supports a non-zero IA32_XSS.
One could argue that if XSAVES isn't enumerated, the guest gets what
it deserves for trying it anyway. Or, one could argue that the
decision to swap guest/host IA32_XSS values on VM-entry/VM-exit
shouldn't be predicated on whether or not the guest CPUID enumerates
XSAVES. I'm going to suggest the latter. How would you feel about
having {vmx,svm}_cpuid_update set a boolean in kvm_vcpu_arch that
indicates whether or not the guest can execute XSAVES, regardless of
what is enumerated in the guest CPUID? The wrmsr(IA32_XSS) in the
VM-entry/VM-exit path would then query that boolean rather than
guest_cpuid_has(XSAVES).
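
Something like the following sketch, perhaps (the field name is
hypothetical; nothing here is settled):

static void svm_cpuid_update(struct kvm_vcpu *vcpu)
{
	/*
	 * AMD has no XSAVES intercept control, so the guest can execute
	 * XSAVES whenever the host supports it, regardless of what guest
	 * CPUID enumerates.
	 */
	vcpu->arch.xsaves_enabled = boot_cpu_has(X86_FEATURE_XSAVES);
}

static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
{
	/*
	 * On Intel, XSAVES is only usable by the guest when the "Enable
	 * XSAVES/XRSTORS" VM-execution control is set, which KVM ties to
	 * guest CPUID.
	 */
	vcpu->arch.xsaves_enabled = vmx_xsaves_supported() &&
		guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
		guest_cpuid_has(vcpu, X86_FEATURE_XSAVES);
}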

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [Patch 6/6] kvm: tests: Add test to verify MSR_IA32_XSS
  2019-10-09  6:30   ` Paolo Bonzini
@ 2019-10-09 23:44     ` Jim Mattson
  2019-10-10 16:01       ` Paolo Bonzini
  0 siblings, 1 reply; 25+ messages in thread
From: Jim Mattson @ 2019-10-09 23:44 UTC (permalink / raw)
  To: Paolo Bonzini
  Cc: Aaron Lewis, Babu Moger, Yang Weijiang,
	Sebastian Andrzej Siewior, kvm list

On Tue, Oct 8, 2019 at 11:32 PM Paolo Bonzini <pbonzini@redhat.com> wrote:
>
> On 09/10/19 02:41, Aaron Lewis wrote:
> >   * Set value of MSR for VCPU.
> >   */
> > -void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
> > -     uint64_t msr_value)
> > +void vcpu_set_msr_expect_result(struct kvm_vm *vm, uint32_t vcpuid,
> > +                             uint64_t msr_index, uint64_t msr_value,
> > +                             int result)
> >  {
> >       struct vcpu *vcpu = vcpu_find(vm, vcpuid);
> >       struct {
> > @@ -899,10 +901,30 @@ void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
> >       buffer.entry.index = msr_index;
> >       buffer.entry.data = msr_value;
> >       r = ioctl(vcpu->fd, KVM_SET_MSRS, &buffer.header);
> > -     TEST_ASSERT(r == 1, "KVM_SET_MSRS IOCTL failed,\n"
> > +     TEST_ASSERT(r == result, "KVM_SET_MSRS IOCTL failed,\n"
> >               "  rc: %i errno: %i", r, errno);
> >  }
>
> This is a library, so the functions to some extent should make sense
> even outside tests.  Please make a function _vcpu_set_msr that returns
> the result of the ioctl; it can still be used in vcpu_set_msr, and the
> tests can TEST_ASSERT what they want.
>
> > +uint32_t kvm_get_cpuid_max_basic(void)
> > +{
> > +     return kvm_get_supported_cpuid_entry(0)->eax;
> > +}
> > +
> > +uint32_t kvm_get_cpuid_max_extended(void)
>
> I would leave the existing function aside, and call this one
> kvm_get_cpuid_max_amd() since CPUID leaves at 0x80000000 are allocated
> by AMD.

The existing function *is* the one that gives the largest
AMD-allocated leaf. Note that Intel documents CPUID.80000000:EAX as
"Maximum Input Value for Extended Function CPUID Information," and AMD
documents this as "Largest extended function."
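
Presumably the new accessor just mirrors the basic one on the extended
range; a sketch, since the patch body is snipped in the quote:

uint32_t kvm_get_cpuid_max_extended(void)
{
	return kvm_get_supported_cpuid_entry(0x80000000)->eax;
}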

> Otherwise looks good.
>
> Paolo
>
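
The split Paolo asks for might look like this (a sketch based on the
quoted hunk; exact signatures are assumptions, not the final library
code):

int _vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
		  uint64_t msr_value)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	struct {
		struct kvm_msrs header;
		struct kvm_msr_entry entry;
	} buffer = {};

	buffer.header.nmsrs = 1;
	buffer.entry.index = msr_index;
	buffer.entry.data = msr_value;
	return ioctl(vcpu->fd, KVM_SET_MSRS, &buffer.header);
}

void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
		  uint64_t msr_value)
{
	int r = _vcpu_set_msr(vm, vcpuid, msr_index, msr_value);

	TEST_ASSERT(r == 1, "KVM_SET_MSRS IOCTL failed,\n"
		"  rc: %i errno: %i", r, errno);
}

Tests can then call _vcpu_set_msr() directly and assert whatever result
they expect.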

^ permalink raw reply	[flat|nested] 25+ messages in thread

* RE: [Patch 3/6] kvm: svm: Add support for XSAVES on AMD
  2019-10-09 22:49           ` Paolo Bonzini
@ 2019-10-10  0:42             ` Kang, Luwei
  0 siblings, 0 replies; 25+ messages in thread
From: Kang, Luwei @ 2019-10-10  0:42 UTC (permalink / raw)
  To: Paolo Bonzini, Jim Mattson
  Cc: Aaron Lewis, Babu Moger, Yang, Weijiang,
	Sebastian Andrzej Siewior, kvm list

> > I was just confused by your wording;
> > it sounded like you were saying that KVM supported bit 8.
> >
> > How about:
> >
> > /*
> >  * We do support PT if kvm_x86_ops->pt_supported(), but we do not
> >  * support IA32_XSS[bit 8]. Guests will have to use WRMSR rather than
> >  * XSAVES/XRSTORS to save/restore PT MSRs.
> >  */
> 
> Good!

Looks good to me.

Thanks,
Luwei Kang

> 
> Paolo


^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [Patch 3/6] kvm: svm: Add support for XSAVES on AMD
  2019-10-09  6:44   ` Sebastian Andrzej Siewior
@ 2019-10-10 14:42     ` Aaron Lewis
  2019-10-10 16:04       ` Paolo Bonzini
  0 siblings, 1 reply; 25+ messages in thread
From: Aaron Lewis @ 2019-10-10 14:42 UTC (permalink / raw)
  To: Sebastian Andrzej Siewior
  Cc: Babu Moger, Yang Weijiang, kvm, Paolo Bonzini, Jim Mattson

On Tue, Oct 8, 2019 at 11:44 PM Sebastian Andrzej Siewior
<bigeasy@linutronix.de> wrote:
>
> On 2019-10-08 17:41:39 [-0700], Aaron Lewis wrote:
> > Hoist support for IA32_XSS so it can be used for both AMD and Intel,
>
> Hoist
>
> > instead of for just Intel.
> >
> > AMD has no equivalent of Intel's "Enable XSAVES/XRSTORS" VM-execution
> > control. Instead, XSAVES is always available to the guest when supported
> > on the host.
>
> You could add that the XSAVES check is implemented based on the host's
> features and that the MSR_IA32_XSS MSR R/W moves from Intel-only code
> to common code.

Isn't this covered by my comments?  I mention that we are hoisting
IA32_XSS to common code in the first comment, then in the second
comment I say that XSAVES is available in the guest when supported on
the host.

>
> …
> > diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> > index e90e658fd8a9..77f2e8c05047 100644
> > --- a/arch/x86/kvm/x86.c
> > +++ b/arch/x86/kvm/x86.c
> > @@ -2702,6 +2702,15 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
> >       case MSR_IA32_TSC:
> >               kvm_write_tsc(vcpu, msr_info);
> >               break;
> > +     case MSR_IA32_XSS:
> > +             if (!kvm_x86_ops->xsaves_supported() ||
> > +                 (!msr_info->host_initiated &&
> > +                  !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES)))
> > +                     return 1;
>
> I wouldn't ditch the comment. You could explain why only zero is allowed
> to be written. The Skylake comment is not true for both vendors, so this
> probably requires an explanation.
>
> > +             if (data != 0)
> > +                     return 1;
> > +             vcpu->arch.ia32_xss = data;
> > +             break;
> >       case MSR_SMI_COUNT:
> >               if (!msr_info->host_initiated)
> >                       return 1;

Resending as plain text!
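
For reference, combining the quoted hunk with the comment Jim and Paolo
settle on elsewhere in the thread, the common-code handler would read
roughly as follows (a sketch, not the final committed code):

	case MSR_IA32_XSS:
		if (!kvm_x86_ops->xsaves_supported() ||
		    (!msr_info->host_initiated &&
		     !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES)))
			return 1;
		/*
		 * We do support PT if kvm_x86_ops->pt_supported(), but we
		 * do not support IA32_XSS[bit 8].  Guests will have to use
		 * WRMSR rather than XSAVES/XRSTORS to save/restore PT MSRs.
		 */
		if (data != 0)
			return 1;
		vcpu->arch.ia32_xss = data;
		break;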

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [Patch 6/6] kvm: tests: Add test to verify MSR_IA32_XSS
  2019-10-09 23:44     ` Jim Mattson
@ 2019-10-10 16:01       ` Paolo Bonzini
  0 siblings, 0 replies; 25+ messages in thread
From: Paolo Bonzini @ 2019-10-10 16:01 UTC (permalink / raw)
  To: Jim Mattson
  Cc: Aaron Lewis, Babu Moger, Yang Weijiang,
	Sebastian Andrzej Siewior, kvm list

On 10/10/19 01:44, Jim Mattson wrote:
>>> +uint32_t kvm_get_cpuid_max_basic(void)
>>> +{
>>> +     return kvm_get_supported_cpuid_entry(0)->eax;
>>> +}
>>> +
>>> +uint32_t kvm_get_cpuid_max_extended(void)
>> I would leave the existing function aside, and call this one
>> kvm_get_cpuid_max_amd() since CPUID leaves at 0x80000000 are allocated
>> by AMD.
> The existing function *is* the one that gives the largest
> AMD-allocated leaf.

Sorry about that---my fault for snipping the patch and assuming some
"git diff" craziness.

> Note that Intel documents CPUID.80000000:EAX as
> "Maximum Input Value for Extended Function CPUID Information," and AMD
> documents this as "Largest extended function."

Objection taken back, then!

Paolo


^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [Patch 3/6] kvm: svm: Add support for XSAVES on AMD
  2019-10-10 14:42     ` Aaron Lewis
@ 2019-10-10 16:04       ` Paolo Bonzini
  0 siblings, 0 replies; 25+ messages in thread
From: Paolo Bonzini @ 2019-10-10 16:04 UTC (permalink / raw)
  To: Aaron Lewis, Sebastian Andrzej Siewior
  Cc: Babu Moger, Yang Weijiang, kvm, Jim Mattson

On 10/10/19 16:42, Aaron Lewis wrote:
>>> Hoist support for IA32_XSS so it can be used for both AMD and Intel,
>>> instead of for just Intel.
>>>
>>> AMD has no equivalent of Intel's "Enable XSAVES/XRSTORS" VM-execution
>>> control. Instead, XSAVES is always available to the guest when supported
>>> on the host.
>>
>> You could add that the XSAVES check is implemented based on the host's
>> features and that the MSR_IA32_XSS MSR R/W moves from Intel-only code
>> to common code.
>
> Isn't this covered by my comments?  I mention that we are hoisting
> IA32_XSS to common code in the first comment, then in the second
> comment I say that XSAVES is available in the guest when supported on
> the host.

Yes, I agree.

Perhaps you can add something like "Fortunately, right now Linux does
not use MSR_IA32_XSS so the guest's usage of XSAVES will be a glorified
XSAVEC, and cannot bypass vmexits for MSR loads and stores".

Paolo


^ permalink raw reply	[flat|nested] 25+ messages in thread

end of thread

Thread overview: 25+ messages
2019-10-09  0:41 [Patch 1/6] KVM: VMX: Remove unneeded check for X86_FEATURE_XSAVE Aaron Lewis
2019-10-09  0:41 ` [Patch 2/6] KVM: VMX: Use wrmsr for switching between guest and host IA32_XSS Aaron Lewis
2019-10-09  4:49   ` Yang Weijiang
2019-10-09  6:30   ` Paolo Bonzini
2019-10-09 12:34     ` Vitaly Kuznetsov
2019-10-09  6:31   ` Sebastian Andrzej Siewior
2019-10-09  0:41 ` [Patch 3/6] kvm: svm: Add support for XSAVES on AMD Aaron Lewis
2019-10-09  6:44   ` Sebastian Andrzej Siewior
2019-10-10 14:42     ` Aaron Lewis
2019-10-10 16:04       ` Paolo Bonzini
2019-10-09  7:01   ` Paolo Bonzini
2019-10-09 21:29     ` Jim Mattson
2019-10-09 21:40       ` Paolo Bonzini
2019-10-09 21:58         ` Jim Mattson
2019-10-09 22:49           ` Paolo Bonzini
2019-10-10  0:42             ` Kang, Luwei
2019-10-09  0:41 ` [Patch 4/6] kvm: svm: Enumerate XSAVES in guest CPUID when it is available to the guest Aaron Lewis
2019-10-09  1:42   ` Yang Weijiang
2019-10-09  6:32     ` Paolo Bonzini
2019-10-09 23:35       ` Jim Mattson
2019-10-09  0:41 ` [Patch 5/6] kvm: x86: Add IA32_XSS to the emulated_msrs list Aaron Lewis
2019-10-09  0:41 ` [Patch 6/6] kvm: tests: Add test to verify MSR_IA32_XSS Aaron Lewis
2019-10-09  6:30   ` Paolo Bonzini
2019-10-09 23:44     ` Jim Mattson
2019-10-10 16:01       ` Paolo Bonzini
