* [PATCH RFC 0/2] KVM: x86: generalize guest cpuid helpers
@ 2017-08-02 20:41 Radim Krčmář
  2017-08-02 20:41 ` [PATCH RFC 1/2] KVM: x86: generalize guest_cpuid_has_ helpers Radim Krčmář
                   ` (2 more replies)
  0 siblings, 3 replies; 8+ messages in thread
From: Radim Krčmář @ 2017-08-02 20:41 UTC (permalink / raw)
  To: linux-kernel, kvm; +Cc: Paolo Bonzini, David Hildenbrand

This series does the generalization that we've spoken about recently.
It might be a good time to change the function names as well.
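
For example (illustrative only, not lifted verbatim from the patches), a
call site changes roughly like this:

	guest_cpuid_has_xsave(vcpu)               /* old */
	guest_cpuid_has(vcpu, X86_FEATURE_XSAVE)  /* new */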


Radim Krčmář (2):
  KVM: x86: generalize guest_cpuid_has_ helpers
  KVM: x86: use general helpers for some cpuid manipulation

 arch/x86/kvm/cpuid.h | 205 ++++++++++++++++++---------------------------------
 arch/x86/kvm/mmu.c   |   7 +-
 arch/x86/kvm/mtrr.c  |   2 +-
 arch/x86/kvm/svm.c   |   7 +-
 arch/x86/kvm/vmx.c   |  33 ++++-----
 arch/x86/kvm/x86.c   |  52 ++++++-------
 6 files changed, 116 insertions(+), 190 deletions(-)

-- 
2.13.3


* [PATCH RFC 1/2] KVM: x86: generalize guest_cpuid_has_ helpers
  2017-08-02 20:41 [PATCH RFC 0/2] KVM: x86: generalize guest cpuid helpers Radim Krčmář
@ 2017-08-02 20:41 ` Radim Krčmář
  2017-08-04 15:20   ` David Hildenbrand
  2017-08-04 15:40   ` Paolo Bonzini
  2017-08-02 20:41 ` [PATCH RFC 2/2] KVM: x86: use general helpers for some cpuid manipulation Radim Krčmář
  2017-08-03 11:53 ` [PATCH RFC 0/2] KVM: x86: generalize guest cpuid helpers David Hildenbrand
  2 siblings, 2 replies; 8+ messages in thread
From: Radim Krčmář @ 2017-08-02 20:41 UTC (permalink / raw)
  To: linux-kernel, kvm; +Cc: Paolo Bonzini, David Hildenbrand

This patch turns guest_cpuid_has_XYZ(cpuid) into guest_cpuid_has(cpuid,
X86_FEATURE_XYZ), which gets rid of many very similar helpers.

When seeing an X86_FEATURE_*, we can know which CPUID leaf it belongs to, but
this information isn't in common code, so we recreate it for KVM.
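
As a sketch of the mapping (assuming the usual cpufeatures.h layout, where
X86_FEATURE_MPX sits in word CPUID_7_0_EBX):

	x86_feature_cpuid(X86_FEATURE_MPX)
		/* x86_leaf = X86_FEATURE_MPX / 32 = CPUID_7_0_EBX       */
		/* -> reverse_cpuid[CPUID_7_0_EBX] = {7, 0, CPUID_EBX}   */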

Add some BUILD_BUG_ONs to make sure that it runs nicely.

Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
---
 arch/x86/kvm/cpuid.h | 202 +++++++++++++++++----------------------------------
 arch/x86/kvm/mmu.c   |   7 +-
 arch/x86/kvm/mtrr.c  |   2 +-
 arch/x86/kvm/svm.c   |   2 +-
 arch/x86/kvm/vmx.c   |  26 +++----
 arch/x86/kvm/x86.c   |  38 +++++-----
 6 files changed, 105 insertions(+), 172 deletions(-)

diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index da6728383052..3b17d915b608 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -3,6 +3,7 @@
 
 #include "x86.h"
 #include <asm/cpu.h>
+#include <asm/processor.h>
 
 int kvm_update_cpuid(struct kvm_vcpu *vcpu);
 bool kvm_mpx_supported(void);
@@ -29,95 +30,78 @@ static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
 	return vcpu->arch.maxphyaddr;
 }
 
-static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
+struct cpuid_reg {
+	u32 function;
+	u32 index;
+	int reg;
+};
+
+static const struct cpuid_reg reverse_cpuid[] = {
+	[CPUID_1_EDX]         = {         1, 0, CPUID_EDX},
+	[CPUID_8000_0001_EDX] = {0x80000001, 0, CPUID_EDX},
+	[CPUID_8086_0001_EDX] = {0x80860001, 0, CPUID_EDX},
+	[CPUID_1_ECX]         = {         1, 0, CPUID_ECX},
+	[CPUID_C000_0001_EDX] = {0xc0000001, 0, CPUID_EDX},
+	[CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX},
+	[CPUID_7_0_EBX]       = {         7, 0, CPUID_EBX},
+	[CPUID_D_1_EAX]       = {       0xd, 1, CPUID_EAX},
+	[CPUID_F_0_EDX]       = {       0xf, 0, CPUID_EDX},
+	[CPUID_F_1_EDX]       = {       0xf, 1, CPUID_EDX},
+	[CPUID_8000_0008_EBX] = {0x80000008, 0, CPUID_EBX},
+	[CPUID_6_EAX]         = {         6, 0, CPUID_EAX},
+	[CPUID_8000_000A_EDX] = {0x8000000a, 0, CPUID_EDX},
+	[CPUID_7_ECX]         = {         7, 0, CPUID_ECX},
+	[CPUID_8000_0007_EBX] = {0x80000007, 0, CPUID_EBX},
+};
+
+static inline struct cpuid_reg x86_feature_cpuid(unsigned x86_feature)
+{
+	unsigned x86_leaf = x86_feature / 32;
+
+	BUILD_BUG_ON(!__builtin_constant_p(x86_leaf));
+	BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid));
+	BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);
+
+	return reverse_cpuid[x86_leaf];
+}
+
+static inline int *guest_cpuid_get_register(struct kvm_vcpu *vcpu, unsigned x86_feature)
 {
 	struct kvm_cpuid_entry2 *best;
+	struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
 
-	if (!static_cpu_has(X86_FEATURE_XSAVE))
+	best = kvm_find_cpuid_entry(vcpu, cpuid.function, cpuid.index);
+	if (!best)
+		return NULL;
+
+	switch (cpuid.reg) {
+	case CPUID_EAX:
+		return &best->eax;
+	case CPUID_EBX:
+		return &best->ebx;
+	case CPUID_ECX:
+		return &best->ecx;
+	case CPUID_EDX:
+		return &best->edx;
+	default:
+		BUILD_BUG();
+		return NULL;
+	}
+}
+
+static inline bool guest_cpuid_has(struct kvm_vcpu *vcpu, unsigned x86_feature)
+{
+	int *reg;
+
+	if (x86_feature == X86_FEATURE_XSAVE &&
+			!static_cpu_has(X86_FEATURE_XSAVE))
 		return false;
 
-	best = kvm_find_cpuid_entry(vcpu, 1, 0);
-	return best && (best->ecx & bit(X86_FEATURE_XSAVE));
-}
+	reg = guest_cpuid_get_register(vcpu, x86_feature);
+	if (!reg)
+		return false;
 
-static inline bool guest_cpuid_has_mtrr(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 1, 0);
-	return best && (best->edx & bit(X86_FEATURE_MTRR));
-}
-
-static inline bool guest_cpuid_has_tsc_adjust(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 7, 0);
-	return best && (best->ebx & bit(X86_FEATURE_TSC_ADJUST));
-}
-
-static inline bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 7, 0);
-	return best && (best->ebx & bit(X86_FEATURE_SMEP));
-}
-
-static inline bool guest_cpuid_has_smap(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 7, 0);
-	return best && (best->ebx & bit(X86_FEATURE_SMAP));
-}
-
-static inline bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 7, 0);
-	return best && (best->ebx & bit(X86_FEATURE_FSGSBASE));
-}
-
-static inline bool guest_cpuid_has_pku(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 7, 0);
-	return best && (best->ecx & bit(X86_FEATURE_PKU));
-}
-
-static inline bool guest_cpuid_has_longmode(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
-	return best && (best->edx & bit(X86_FEATURE_LM));
-}
-
-static inline bool guest_cpuid_has_osvw(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
-	return best && (best->ecx & bit(X86_FEATURE_OSVW));
-}
-
-static inline bool guest_cpuid_has_pcid(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 1, 0);
-	return best && (best->ecx & bit(X86_FEATURE_PCID));
-}
-
-static inline bool guest_cpuid_has_x2apic(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 1, 0);
-	return best && (best->ecx & bit(X86_FEATURE_X2APIC));
+	return *reg & bit(x86_feature);
 }
 
 static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
@@ -128,58 +112,6 @@ static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
 	return best && best->ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx;
 }
 
-static inline bool guest_cpuid_has_gbpages(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
-	return best && (best->edx & bit(X86_FEATURE_GBPAGES));
-}
-
-static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 7, 0);
-	return best && (best->ebx & bit(X86_FEATURE_RTM));
-}
-
-static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 7, 0);
-	return best && (best->ebx & bit(X86_FEATURE_MPX));
-}
-
-static inline bool guest_cpuid_has_rdtscp(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
-	return best && (best->edx & bit(X86_FEATURE_RDTSCP));
-}
-
-/*
- * NRIPS is provided through cpuidfn 0x8000000a.edx bit 3
- */
-#define BIT_NRIPS	3
-
-static inline bool guest_cpuid_has_nrips(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 0x8000000a, 0);
-
-	/*
-	 * NRIPS is a scattered cpuid feature, so we can't use
-	 * X86_FEATURE_NRIPS here (X86_FEATURE_NRIPS would be bit
-	 * position 8, not 3).
-	 */
-	return best && (best->edx & bit(BIT_NRIPS));
-}
-#undef BIT_NRIPS
-
 static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 9b1dd114956a..2fac6f78c420 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4052,7 +4052,8 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
 {
 	__reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check,
 				cpuid_maxphyaddr(vcpu), context->root_level,
-				context->nx, guest_cpuid_has_gbpages(vcpu),
+				context->nx,
+				guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
 				is_pse(vcpu), guest_cpuid_is_amd(vcpu));
 }
 
@@ -4114,8 +4115,8 @@ reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
 	__reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
 				boot_cpu_data.x86_phys_bits,
 				context->shadow_root_level, uses_nx,
-				guest_cpuid_has_gbpages(vcpu), is_pse(vcpu),
-				true);
+				guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
+				is_pse(vcpu), true);
 }
 EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask);
 
diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
index 0149ac59c273..e9ea2d45ae66 100644
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -130,7 +130,7 @@ static u8 mtrr_disabled_type(struct kvm_vcpu *vcpu)
 	 * enable MTRRs and it is obviously undesirable to run the
 	 * guest entirely with UC memory and we use WB.
 	 */
-	if (guest_cpuid_has_mtrr(vcpu))
+	if (guest_cpuid_has(vcpu, X86_FEATURE_MTRR))
 		return MTRR_TYPE_UNCACHABLE;
 	else
 		return MTRR_TYPE_WRBACK;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 4d8141e533c3..fcdc1412792e 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -5068,7 +5068,7 @@ static void svm_cpuid_update(struct kvm_vcpu *vcpu)
 	struct kvm_cpuid_entry2 *entry;
 
 	/* Update nrips enabled cache */
-	svm->nrips_enabled = !!guest_cpuid_has_nrips(&svm->vcpu);
+	svm->nrips_enabled = !!guest_cpuid_has(&svm->vcpu, X86_FEATURE_NRIPS);
 
 	if (!kvm_vcpu_apicv_active(vcpu))
 		return;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 29fd8af5c347..8367f901d681 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2568,7 +2568,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
 		if (index >= 0)
 			move_msr_up(vmx, index, save_nmsrs++);
 		index = __find_msr_index(vmx, MSR_TSC_AUX);
-		if (index >= 0 && guest_cpuid_has_rdtscp(&vmx->vcpu))
+		if (index >= 0 && guest_cpuid_has(&vmx->vcpu, X86_FEATURE_MPX))
 			move_msr_up(vmx, index, save_nmsrs++);
 		/*
 		 * MSR_STAR is only needed on long mode guests, and only
@@ -2628,12 +2628,6 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 	}
 }
 
-static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 1, 0);
-	return best && (best->ecx & (1 << (X86_FEATURE_VMX & 31)));
-}
-
 /*
  * nested_vmx_allowed() checks whether a guest should be allowed to use VMX
  * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for
@@ -2642,7 +2636,7 @@ static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
  */
 static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu)
 {
-	return nested && guest_cpuid_has_vmx(vcpu);
+	return nested && guest_cpuid_has(vcpu, X86_FEATURE_VMX);
 }
 
 /*
@@ -3224,7 +3218,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		break;
 	case MSR_IA32_BNDCFGS:
 		if (!kvm_mpx_supported() ||
-		    (!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu)))
+		    (!msr_info->host_initiated &&
+		     !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
 			return 1;
 		msr_info->data = vmcs_read64(GUEST_BNDCFGS);
 		break;
@@ -3248,7 +3243,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		msr_info->data = vcpu->arch.ia32_xss;
 		break;
 	case MSR_TSC_AUX:
-		if (!guest_cpuid_has_rdtscp(vcpu) && !msr_info->host_initiated)
+		if (!msr_info->host_initiated &&
+		    !guest_cpuid_has(vcpu, X86_FEATURE_MPX))
 			return 1;
 		/* Otherwise falls through */
 	default:
@@ -3307,7 +3303,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		break;
 	case MSR_IA32_BNDCFGS:
 		if (!kvm_mpx_supported() ||
-		    (!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu)))
+		    (!msr_info->host_initiated &&
+		     !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
 			return 1;
 		if (is_noncanonical_address(data & PAGE_MASK) ||
 		    (data & MSR_IA32_BNDCFGS_RSVD))
@@ -3370,7 +3367,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
 		break;
 	case MSR_TSC_AUX:
-		if (!guest_cpuid_has_rdtscp(vcpu) && !msr_info->host_initiated)
+		if (!msr_info->host_initiated &&
+		    !guest_cpuid_has(vcpu, X86_FEATURE_MPX))
 			return 1;
 		/* Check reserved bit, higher 32 bits should be zero */
 		if ((data >> 32) != 0)
@@ -9383,7 +9381,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
 	u32 secondary_exec_ctl = vmx_secondary_exec_control(vmx);
 
 	if (vmx_rdtscp_supported()) {
-		bool rdtscp_enabled = guest_cpuid_has_rdtscp(vcpu);
+		bool rdtscp_enabled = guest_cpuid_has(vcpu, X86_FEATURE_MPX);
 		if (!rdtscp_enabled)
 			secondary_exec_ctl &= ~SECONDARY_EXEC_RDTSCP;
 
@@ -9401,7 +9399,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
 	best = kvm_find_cpuid_entry(vcpu, 0x7, 0);
 	if (vmx_invpcid_supported() &&
 	    (!best || !(best->ebx & bit(X86_FEATURE_INVPCID)) ||
-	    !guest_cpuid_has_pcid(vcpu))) {
+	    !guest_cpuid_has(vcpu, X86_FEATURE_PCID))) {
 		secondary_exec_ctl &= ~SECONDARY_EXEC_ENABLE_INVPCID;
 
 		if (best)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 82a63c59f77b..d75997ba65b9 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -310,8 +310,8 @@ int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		(MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
 	u64 new_state = msr_info->data &
 		(MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
-	u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) |
-		0x2ff | (guest_cpuid_has_x2apic(vcpu) ? 0 : X2APIC_ENABLE);
+	u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) | 0x2ff |
+		(guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) ? 0 : X2APIC_ENABLE);
 
 	if (!msr_info->host_initiated &&
 	    ((msr_info->data & reserved_bits) != 0 ||
@@ -754,19 +754,19 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	if (cr4 & CR4_RESERVED_BITS)
 		return 1;
 
-	if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE))
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && (cr4 & X86_CR4_OSXSAVE))
 		return 1;
 
-	if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP))
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_SMEP) && (cr4 & X86_CR4_SMEP))
 		return 1;
 
-	if (!guest_cpuid_has_smap(vcpu) && (cr4 & X86_CR4_SMAP))
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_SMAP) && (cr4 & X86_CR4_SMAP))
 		return 1;
 
-	if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_FSGSBASE))
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_FSGSBASE) && (cr4 & X86_CR4_FSGSBASE))
 		return 1;
 
-	if (!guest_cpuid_has_pku(vcpu) && (cr4 & X86_CR4_PKE))
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_PKU) && (cr4 & X86_CR4_PKE))
 		return 1;
 
 	if (is_long_mode(vcpu)) {
@@ -779,7 +779,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 		return 1;
 
 	if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) {
-		if (!guest_cpuid_has_pcid(vcpu))
+		if (!guest_cpuid_has(vcpu, X86_FEATURE_PCID))
 			return 1;
 
 		/* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */
@@ -883,7 +883,7 @@ static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
 {
 	u64 fixed = DR6_FIXED_1;
 
-	if (!guest_cpuid_has_rtm(vcpu))
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_RTM))
 		fixed |= DR6_RTM;
 	return fixed;
 }
@@ -1533,8 +1533,9 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 	vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
 	vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
 
-	if (guest_cpuid_has_tsc_adjust(vcpu) && !msr->host_initiated)
+	if (!msr->host_initiated && guest_cpuid_has(vcpu, X86_FEATURE_TSC_ADJUST))
 		update_ia32_tsc_adjust_msr(vcpu, offset);
+
 	kvm_vcpu_write_tsc_offset(vcpu, offset);
 	raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
 
@@ -2184,7 +2185,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		kvm_set_lapic_tscdeadline_msr(vcpu, data);
 		break;
 	case MSR_IA32_TSC_ADJUST:
-		if (guest_cpuid_has_tsc_adjust(vcpu)) {
+		if (guest_cpuid_has(vcpu, X86_FEATURE_TSC_ADJUST)) {
 			if (!msr_info->host_initiated) {
 				s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
 				adjust_tsc_offset_guest(vcpu, adj);
@@ -2306,12 +2307,12 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n", msr, data);
 		break;
 	case MSR_AMD64_OSVW_ID_LENGTH:
-		if (!guest_cpuid_has_osvw(vcpu))
+		if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
 			return 1;
 		vcpu->arch.osvw.length = data;
 		break;
 	case MSR_AMD64_OSVW_STATUS:
-		if (!guest_cpuid_has_osvw(vcpu))
+		if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
 			return 1;
 		vcpu->arch.osvw.status = data;
 		break;
@@ -2536,12 +2537,12 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		msr_info->data = 0xbe702111;
 		break;
 	case MSR_AMD64_OSVW_ID_LENGTH:
-		if (!guest_cpuid_has_osvw(vcpu))
+		if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
 			return 1;
 		msr_info->data = vcpu->arch.osvw.length;
 		break;
 	case MSR_AMD64_OSVW_STATUS:
-		if (!guest_cpuid_has_osvw(vcpu))
+		if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
 			return 1;
 		msr_info->data = vcpu->arch.osvw.status;
 		break;
@@ -6601,7 +6602,7 @@ static void enter_smm(struct kvm_vcpu *vcpu)
 	trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
 	vcpu->arch.hflags |= HF_SMM_MASK;
 	memset(buf, 0, 512);
-	if (guest_cpuid_has_longmode(vcpu))
+	if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
 		enter_smm_save_state_64(vcpu, buf);
 	else
 		enter_smm_save_state_32(vcpu, buf);
@@ -6653,7 +6654,7 @@ static void enter_smm(struct kvm_vcpu *vcpu)
 	kvm_set_segment(vcpu, &ds, VCPU_SREG_GS);
 	kvm_set_segment(vcpu, &ds, VCPU_SREG_SS);
 
-	if (guest_cpuid_has_longmode(vcpu))
+	if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
 		kvm_x86_ops->set_efer(vcpu, 0);
 
 	kvm_update_cpuid(vcpu);
@@ -7419,7 +7420,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	int pending_vec, max_bits, idx;
 	struct desc_ptr dt;
 
-	if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE))
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
+			(sregs->cr4 & X86_CR4_OSXSAVE))
 		return -EINVAL;
 
 	dt.size = sregs->idt.limit;
-- 
2.13.3


* [PATCH RFC 2/2] KVM: x86: use general helpers for some cpuid manipulation
  2017-08-02 20:41 [PATCH RFC 0/2] KVM: x86: generalize guest cpuid helpers Radim Krčmář
  2017-08-02 20:41 ` [PATCH RFC 1/2] KVM: x86: generalize guest_cpuid_has_ helpers Radim Krčmář
@ 2017-08-02 20:41 ` Radim Krčmář
  2017-08-03 11:53 ` [PATCH RFC 0/2] KVM: x86: generalize guest cpuid helpers David Hildenbrand
  2 siblings, 0 replies; 8+ messages in thread
From: Radim Krčmář @ 2017-08-02 20:41 UTC (permalink / raw)
  To: linux-kernel, kvm; +Cc: Paolo Bonzini, David Hildenbrand

Add guest_cpuid_clear() and use it instead of kvm_find_cpuid_entry().
Also replace some uses of kvm_find_cpuid_entry() with guest_cpuid_has().
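
For instance (illustrative), clearing a bit in the guest's CPUID becomes

	guest_cpuid_clear(vcpu, X86_FEATURE_X2APIC);

instead of finding the leaf 1 entry and masking the bit out of ->ecx by hand.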

Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
---
 arch/x86/kvm/cpuid.h |  9 +++++++++
 arch/x86/kvm/svm.c   |  5 +----
 arch/x86/kvm/vmx.c   |  9 +++------
 arch/x86/kvm/x86.c   | 14 ++------------
 4 files changed, 15 insertions(+), 22 deletions(-)

diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index 3b17d915b608..650b5c80c5a0 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -104,6 +104,15 @@ static inline bool guest_cpuid_has(struct kvm_vcpu *vcpu, unsigned x86_feature)
 	return *reg & bit(x86_feature);
 }
 
+static inline void guest_cpuid_clear(struct kvm_vcpu *vcpu, unsigned x86_feature)
+{
+	int *reg;
+
+	reg = guest_cpuid_get_register(vcpu, x86_feature);
+	if (reg)
+		*reg &= ~bit(x86_feature);
+}
+
 static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index fcdc1412792e..5ceb99ff145b 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -5065,7 +5065,6 @@ static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
 static void svm_cpuid_update(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
-	struct kvm_cpuid_entry2 *entry;
 
 	/* Update nrips enabled cache */
 	svm->nrips_enabled = !!guest_cpuid_has(&svm->vcpu, X86_FEATURE_NRIPS);
@@ -5073,9 +5072,7 @@ static void svm_cpuid_update(struct kvm_vcpu *vcpu)
 	if (!kvm_vcpu_apicv_active(vcpu))
 		return;
 
-	entry = kvm_find_cpuid_entry(vcpu, 1, 0);
-	if (entry)
-		entry->ecx &= ~bit(X86_FEATURE_X2APIC);
+	guest_cpuid_clear(vcpu, X86_FEATURE_X2APIC);
 }
 
 static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 8367f901d681..e34373838b31 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -9376,7 +9376,6 @@ static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu)
 
 static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
 {
-	struct kvm_cpuid_entry2 *best;
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u32 secondary_exec_ctl = vmx_secondary_exec_control(vmx);
 
@@ -9396,14 +9395,12 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
 	}
 
 	/* Exposing INVPCID only when PCID is exposed */
-	best = kvm_find_cpuid_entry(vcpu, 0x7, 0);
 	if (vmx_invpcid_supported() &&
-	    (!best || !(best->ebx & bit(X86_FEATURE_INVPCID)) ||
-	    !guest_cpuid_has(vcpu, X86_FEATURE_PCID))) {
+	    (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID) ||
+	     !guest_cpuid_has(vcpu, X86_FEATURE_PCID))) {
 		secondary_exec_ctl &= ~SECONDARY_EXEC_ENABLE_INVPCID;
 
-		if (best)
-			best->ebx &= ~bit(X86_FEATURE_INVPCID);
+		guest_cpuid_clear(vcpu, X86_FEATURE_INVPCID);
 	}
 
 	if (cpu_has_secondary_exec_ctrls())
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index d75997ba65b9..9bba971fb51e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1021,21 +1021,11 @@ bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
 	if (efer & efer_reserved_bits)
 		return false;
 
-	if (efer & EFER_FFXSR) {
-		struct kvm_cpuid_entry2 *feat;
-
-		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
-		if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
+	if (efer & EFER_FFXSR && !guest_cpuid_has(vcpu, X86_FEATURE_FXSR_OPT))
 			return false;
-	}
 
-	if (efer & EFER_SVME) {
-		struct kvm_cpuid_entry2 *feat;
-
-		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
-		if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
+	if (efer & EFER_SVME && !guest_cpuid_has(vcpu, X86_FEATURE_SVM))
 			return false;
-	}
 
 	return true;
 }
-- 
2.13.3


* Re: [PATCH RFC 0/2] KVM: x86: generalize guest cpuid helpers
  2017-08-02 20:41 [PATCH RFC 0/2] KVM: x86: generalize guest cpuid helpers Radim Krčmář
  2017-08-02 20:41 ` [PATCH RFC 1/2] KVM: x86: generalize guest_cpuid_has_ helpers Radim Krčmář
  2017-08-02 20:41 ` [PATCH RFC 2/2] KVM: x86: use general helpers for some cpuid manipulation Radim Krčmář
@ 2017-08-03 11:53 ` David Hildenbrand
  2 siblings, 0 replies; 8+ messages in thread
From: David Hildenbrand @ 2017-08-03 11:53 UTC (permalink / raw)
  To: Radim Krčmář, linux-kernel, kvm; +Cc: Paolo Bonzini

On 02.08.2017 22:41, Radim Krčmář wrote:
> This series does the generalization that we've spoken about recently.
> It might be a good time to change the function names as well.
> 
> 
> Radim Krčmář (2):
>   KVM: x86: generalize guest_cpuid_has_ helpers
>   KVM: x86: use general helpers for some cpuid manipulation
> 
>  arch/x86/kvm/cpuid.h | 205 ++++++++++++++++++---------------------------------
>  arch/x86/kvm/mmu.c   |   7 +-
>  arch/x86/kvm/mtrr.c  |   2 +-
>  arch/x86/kvm/svm.c   |   7 +-
>  arch/x86/kvm/vmx.c   |  33 ++++-----
>  arch/x86/kvm/x86.c   |  52 ++++++-------
>  6 files changed, 116 insertions(+), 190 deletions(-)
> 

These numbers speak for themselves.

I really like this cleanup!

Had a quick look over the patches, nothing jumped at me.

-- 

Thanks,

David


* Re: [PATCH RFC 1/2] KVM: x86: generalize guest_cpuid_has_ helpers
  2017-08-02 20:41 ` [PATCH RFC 1/2] KVM: x86: generalize guest_cpuid_has_ helpers Radim Krčmář
@ 2017-08-04 15:20   ` David Hildenbrand
  2017-08-04 20:44     ` Radim Krčmář
  2017-08-04 15:40   ` Paolo Bonzini
  1 sibling, 1 reply; 8+ messages in thread
From: David Hildenbrand @ 2017-08-04 15:20 UTC (permalink / raw)
  To: Radim Krčmář, linux-kernel, kvm; +Cc: Paolo Bonzini

On 02.08.2017 22:41, Radim Krčmář wrote:
> This patch turns guest_cpuid_has_XYZ(cpuid) into guest_cpuid_has(cpuid,
> X86_FEATURE_XYZ), which gets rid of many very similar helpers.
> 
> > When seeing an X86_FEATURE_*, we can know which CPUID leaf it belongs to, but
> this information isn't in common code, so we recreate it for KVM.
> 
> Add some BUILD_BUG_ONs to make sure that it runs nicely.
> 
> Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
> ---
>  arch/x86/kvm/cpuid.h | 202 +++++++++++++++++----------------------------------
>  arch/x86/kvm/mmu.c   |   7 +-
>  arch/x86/kvm/mtrr.c  |   2 +-
>  arch/x86/kvm/svm.c   |   2 +-
>  arch/x86/kvm/vmx.c   |  26 +++----
>  arch/x86/kvm/x86.c   |  38 +++++-----
>  6 files changed, 105 insertions(+), 172 deletions(-)
> 
> diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
> index da6728383052..3b17d915b608 100644
> --- a/arch/x86/kvm/cpuid.h
> +++ b/arch/x86/kvm/cpuid.h
> @@ -3,6 +3,7 @@
>  
>  #include "x86.h"
>  #include <asm/cpu.h>
> +#include <asm/processor.h>
>  
>  int kvm_update_cpuid(struct kvm_vcpu *vcpu);
>  bool kvm_mpx_supported(void);
> @@ -29,95 +30,78 @@ static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
>  	return vcpu->arch.maxphyaddr;
>  }
>  
> -static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
> +struct cpuid_reg {
> +	u32 function;
> +	u32 index;
> +	int reg;
> +};
> +
> +static const struct cpuid_reg reverse_cpuid[] = {
> +	[CPUID_1_EDX]         = {         1, 0, CPUID_EDX},
> +	[CPUID_8000_0001_EDX] = {0x80000001, 0, CPUID_EDX},
> +	[CPUID_8086_0001_EDX] = {0x80860001, 0, CPUID_EDX},
> +	[CPUID_1_ECX]         = {         1, 0, CPUID_ECX},
> +	[CPUID_C000_0001_EDX] = {0xc0000001, 0, CPUID_EDX},
> +	[CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX},
> +	[CPUID_7_0_EBX]       = {         7, 0, CPUID_EBX},
> +	[CPUID_D_1_EAX]       = {       0xd, 1, CPUID_EAX},
> +	[CPUID_F_0_EDX]       = {       0xf, 0, CPUID_EDX},
> +	[CPUID_F_1_EDX]       = {       0xf, 1, CPUID_EDX},
> +	[CPUID_8000_0008_EBX] = {0x80000008, 0, CPUID_EBX},
> +	[CPUID_6_EAX]         = {         6, 0, CPUID_EAX},
> +	[CPUID_8000_000A_EDX] = {0x8000000a, 0, CPUID_EDX},
> +	[CPUID_7_ECX]         = {         7, 0, CPUID_ECX},
> +	[CPUID_8000_0007_EBX] = {0x80000007, 0, CPUID_EBX},
> +};
> +
> +static inline struct cpuid_reg x86_feature_cpuid(unsigned x86_feature)
> +{
> +	unsigned x86_leaf = x86_feature / 32;
> +
> +	BUILD_BUG_ON(!__builtin_constant_p(x86_leaf));
> +	BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid));
> +	BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);
> +
> +	return reverse_cpuid[x86_leaf];
> +}
> +
> +static inline int *guest_cpuid_get_register(struct kvm_vcpu *vcpu, unsigned x86_feature)
>  {
>  	struct kvm_cpuid_entry2 *best;

somehow I don't like the name best. entry?

> +	struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);

you could make this const.
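
Something like (untested):

	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);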

>  
> -	if (!static_cpu_has(X86_FEATURE_XSAVE))
> +	best = kvm_find_cpuid_entry(vcpu, cpuid.function, cpuid.index);
> +	if (!best)
> +		return NULL;
> +
> +	switch (cpuid.reg) {
> +	case CPUID_EAX:
> +		return &best->eax;
> +	case CPUID_EBX:
> +		return &best->ebx;
> +	case CPUID_ECX:
> +		return &best->ecx;
> +	case CPUID_EDX:
> +		return &best->edx;
> +	default:
> +		BUILD_BUG();
> +		return NULL;
> +	}
> +}
> +

[...]

> -/*
> - * NRIPS is provided through cpuidfn 0x8000000a.edx bit 3
> - */
> -#define BIT_NRIPS	3
> -
> -static inline bool guest_cpuid_has_nrips(struct kvm_vcpu *vcpu)
> -{
> -	struct kvm_cpuid_entry2 *best;
> -
> -	best = kvm_find_cpuid_entry(vcpu, 0x8000000a, 0);
> -
> -	/*
> -	 * NRIPS is a scattered cpuid feature, so we can't use
> -	 * X86_FEATURE_NRIPS here (X86_FEATURE_NRIPS would be bit
> -	 * position 8, not 3).
> -	 */

Is it okay to ignore that comment and use X86_FEATURE_NRIPS in the
calling code?

> -	return best && (best->edx & bit(BIT_NRIPS));
> -}
> -#undef BIT_NRIPS
> -
>  static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
>  {
>  	struct kvm_cpuid_entry2 *best;


> -		if (index >= 0 && guest_cpuid_has_rdtscp(&vmx->vcpu))
> +		if (index >= 0 && guest_cpuid_has(&vmx->vcpu, X86_FEATURE_MPX))

X86_FEATURE_RDTSCP ? (or is there an implication I don't know?)

>  			move_msr_up(vmx, index, save_nmsrs++);
>  		/*
>  		 * MSR_STAR is only needed on long mode guests, and only


> -		if (!guest_cpuid_has_rdtscp(vcpu) && !msr_info->host_initiated)
> +		if (!msr_info->host_initiated &&
> +		    !guest_cpuid_has(vcpu, X86_FEATURE_MPX))

X86_FEATURE_RDTSCP ?

>  			return 1;
>  		/* Otherwise falls through */
>  	default:
> @@ -3307,7 +3303,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>  		break;
>  	case MSR_IA32_BNDCFGS:
>  		if (!kvm_mpx_supported() ||
> -		    (!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu)))
> +		    (!msr_info->host_initiated &&
> +		     !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
>  			return 1;
>  		if (is_noncanonical_address(data & PAGE_MASK) ||
>  		    (data & MSR_IA32_BNDCFGS_RSVD))
> @@ -3370,7 +3367,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>  			clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
>  		break;
>  	case MSR_TSC_AUX:
> -		if (!guest_cpuid_has_rdtscp(vcpu) && !msr_info->host_initiated)
> +		if (!msr_info->host_initiated &&
> +		    !guest_cpuid_has(vcpu, X86_FEATURE_MPX))

ditto

>  			return 1;
>  		/* Check reserved bit, higher 32 bits should be zero */
>  		if ((data >> 32) != 0)
> @@ -9383,7 +9381,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
>  	u32 secondary_exec_ctl = vmx_secondary_exec_control(vmx);
>  
>  	if (vmx_rdtscp_supported()) {
> -		bool rdtscp_enabled = guest_cpuid_has_rdtscp(vcpu);
> +		bool rdtscp_enabled = guest_cpuid_has(vcpu, X86_FEATURE_MPX);

ditto

>  		if (!rdtscp_enabled)


-- 

Thanks,

David


* Re: [PATCH RFC 1/2] KVM: x86: generalize guest_cpuid_has_ helpers
  2017-08-02 20:41 ` [PATCH RFC 1/2] KVM: x86: generalize guest_cpuid_has_ helpers Radim Krčmář
  2017-08-04 15:20   ` David Hildenbrand
@ 2017-08-04 15:40   ` Paolo Bonzini
  2017-08-04 20:48     ` Radim Krčmář
  1 sibling, 1 reply; 8+ messages in thread
From: Paolo Bonzini @ 2017-08-04 15:40 UTC (permalink / raw)
  To: Radim Krčmář, linux-kernel, kvm; +Cc: David Hildenbrand

On 02/08/2017 22:41, Radim Krčmář wrote:
> This patch turns guest_cpuid_has_XYZ(cpuid) into guest_cpuid_has(cpuid,
> X86_FEATURE_XYZ), which gets rid of many very similar helpers.
> 
> > When seeing an X86_FEATURE_*, we can know which CPUID leaf it belongs to, but
> this information isn't in common code, so we recreate it for KVM.
> 
> Add some BUILD_BUG_ONs to make sure that it runs nicely.
> 
> Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
> ---
>  arch/x86/kvm/cpuid.h | 202 +++++++++++++++++----------------------------------
>  arch/x86/kvm/mmu.c   |   7 +-
>  arch/x86/kvm/mtrr.c  |   2 +-
>  arch/x86/kvm/svm.c   |   2 +-
>  arch/x86/kvm/vmx.c   |  26 +++----
>  arch/x86/kvm/x86.c   |  38 +++++-----
>  6 files changed, 105 insertions(+), 172 deletions(-)
> 
> diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
> index da6728383052..3b17d915b608 100644
> --- a/arch/x86/kvm/cpuid.h
> +++ b/arch/x86/kvm/cpuid.h
> @@ -3,6 +3,7 @@
>  
>  #include "x86.h"
>  #include <asm/cpu.h>
> +#include <asm/processor.h>
>  
>  int kvm_update_cpuid(struct kvm_vcpu *vcpu);
>  bool kvm_mpx_supported(void);
> @@ -29,95 +30,78 @@ static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
>  	return vcpu->arch.maxphyaddr;
>  }
>  
> -static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
> +struct cpuid_reg {
> +	u32 function;
> +	u32 index;
> +	int reg;
> +};
> +
> +static const struct cpuid_reg reverse_cpuid[] = {
> +	[CPUID_1_EDX]         = {         1, 0, CPUID_EDX},
> +	[CPUID_8000_0001_EDX] = {0x80000001, 0, CPUID_EDX},
> +	[CPUID_8086_0001_EDX] = {0x80860001, 0, CPUID_EDX},
> +	[CPUID_1_ECX]         = {         1, 0, CPUID_ECX},
> +	[CPUID_C000_0001_EDX] = {0xc0000001, 0, CPUID_EDX},
> +	[CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX},
> +	[CPUID_7_0_EBX]       = {         7, 0, CPUID_EBX},
> +	[CPUID_D_1_EAX]       = {       0xd, 1, CPUID_EAX},
> +	[CPUID_F_0_EDX]       = {       0xf, 0, CPUID_EDX},
> +	[CPUID_F_1_EDX]       = {       0xf, 1, CPUID_EDX},
> +	[CPUID_8000_0008_EBX] = {0x80000008, 0, CPUID_EBX},
> +	[CPUID_6_EAX]         = {         6, 0, CPUID_EAX},
> +	[CPUID_8000_000A_EDX] = {0x8000000a, 0, CPUID_EDX},
> +	[CPUID_7_ECX]         = {         7, 0, CPUID_ECX},
> +	[CPUID_8000_0007_EBX] = {0x80000007, 0, CPUID_EBX},
> +};
> +
> +static inline struct cpuid_reg x86_feature_cpuid(unsigned x86_feature)
> +{
> +	unsigned x86_leaf = x86_feature / 32;
> +
> +	BUILD_BUG_ON(!__builtin_constant_p(x86_leaf));
> +	BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid));
> +	BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);
> +
> +	return reverse_cpuid[x86_leaf];
> +}
> +
> +static inline int *guest_cpuid_get_register(struct kvm_vcpu *vcpu, unsigned x86_feature)
>  {
>  	struct kvm_cpuid_entry2 *best;
> +	struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
>  
> -	if (!static_cpu_has(X86_FEATURE_XSAVE))
> +	best = kvm_find_cpuid_entry(vcpu, cpuid.function, cpuid.index);
> +	if (!best)
> +		return NULL;
> +
> +	switch (cpuid.reg) {
> +	case CPUID_EAX:
> +		return &best->eax;
> +	case CPUID_EBX:
> +		return &best->ebx;
> +	case CPUID_ECX:
> +		return &best->ecx;
> +	case CPUID_EDX:
> +		return &best->edx;
> +	default:
> +		BUILD_BUG();
> +		return NULL;
> +	}
> +}

Wow, I didn't expect the compiler to be able to inline all of this and
even do BUILD_BUG_ON()s on array lookups.  Maybe change inline to
__always_inline just to be safe?

If anybody complains we can make it just a BUG.
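
That is, roughly (hypothetical, not in this patch), something like

	BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid));

instead of the compile-time checks.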

Paolo


* Re: [PATCH RFC 1/2] KVM: x86: generalize guest_cpuid_has_ helpers
  2017-08-04 15:20   ` David Hildenbrand
@ 2017-08-04 20:44     ` Radim Krčmář
  0 siblings, 0 replies; 8+ messages in thread
From: Radim Krčmář @ 2017-08-04 20:44 UTC (permalink / raw)
  To: David Hildenbrand; +Cc: linux-kernel, kvm, Paolo Bonzini

2017-08-04 17:20+0200, David Hildenbrand:
> On 02.08.2017 22:41, Radim Krčmář wrote:
> > This patch turns guest_cpuid_has_XYZ(cpuid) into guest_cpuid_has(cpuid,
> > X86_FEATURE_XYZ), which gets rid of many very similar helpers.
> > 
> > > When seeing an X86_FEATURE_*, we can know which CPUID leaf it belongs to, but
> > this information isn't in common code, so we recreate it for KVM.
> > 
> > Add some BUILD_BUG_ONs to make sure that it runs nicely.
> > 
> > Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
> > ---
> > diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
> > +static inline int *guest_cpuid_get_register(struct kvm_vcpu *vcpu, unsigned x86_feature)
> >  {
> >  	struct kvm_cpuid_entry2 *best;
> 
> somehow I don't like the name best. entry?

Sure.  This function always returns the entry we wanted, so best is
unfortunate ...

> > +	struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
> 
> you could make this const.

Ok.

> > -/*
> > - * NRIPS is provided through cpuidfn 0x8000000a.edx bit 3
> > - */
> > -#define BIT_NRIPS	3
> > -
> > -static inline bool guest_cpuid_has_nrips(struct kvm_vcpu *vcpu)
> > -{
> > -	struct kvm_cpuid_entry2 *best;
> > -
> > -	best = kvm_find_cpuid_entry(vcpu, 0x8000000a, 0);
> > -
> > -	/*
> > -	 * NRIPS is a scattered cpuid feature, so we can't use
> > -	 * X86_FEATURE_NRIPS here (X86_FEATURE_NRIPS would be bit
> > -	 * position 8, not 3).
> > -	 */
> 
> Is it okay to ignore that comment and use X86_FEATURE_NRIPS in the
> calling code?

X86_FEATURE_NRIPS is not scattered anymore, but I'll mention it in the
commit message. (A scattered feature would trip the BUILD_BUG_ON.)
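
For example (hypothetical, assuming the usual cpufeatures.h layout), a bit
from a Linux-defined software word has no reverse_cpuid[] entry, so

	guest_cpuid_has(vcpu, X86_FEATURE_CONSTANT_TSC);

would fail to build on BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0).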
> 
> > -	return best && (best->edx & bit(BIT_NRIPS));
> > -}
> > -#undef BIT_NRIPS
> > -
> >  static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
> >  {
> >  	struct kvm_cpuid_entry2 *best;
> 
> 
> > -		if (index >= 0 && guest_cpuid_has_rdtscp(&vmx->vcpu))
> > +		if (index >= 0 && guest_cpuid_has(&vmx->vcpu, X86_FEATURE_MPX))
> 
> X86_FEATURE_RDTSCP ? (or is there an implication I don't know?)

Ugh, copy-paste error ... thanks.


* Re: [PATCH RFC 1/2] KVM: x86: generalize guest_cpuid_has_ helpers
  2017-08-04 15:40   ` Paolo Bonzini
@ 2017-08-04 20:48     ` Radim Krčmář
  0 siblings, 0 replies; 8+ messages in thread
From: Radim Krčmář @ 2017-08-04 20:48 UTC (permalink / raw)
  To: Paolo Bonzini; +Cc: linux-kernel, kvm, David Hildenbrand

2017-08-04 17:40+0200, Paolo Bonzini:
> Wow, I didn't expect the compiler to be able to inline all of this and
> even do BUILD_BUG_ON()s on array lookups.  Maybe change inline to
> __always_inline just to be safe?
> 
> If anybody complains we can make it just a BUG.

Good point, i386 wanted to have non-inlined guest_cpuid_get_register(),
but I hope that __always_inline is going to be enough for all.
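
I.e. something along these lines (untested):

	static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned x86_feature)
	static __always_inline int *guest_cpuid_get_register(struct kvm_vcpu *vcpu,
							     unsigned x86_feature)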

Thanks.

