From: Andrew Cooper <andrew.cooper3@citrix.com>
Subject: [PATCH v2 17/30] x86/cpu: Common infrastructure for levelling context switching
Date: Fri, 5 Feb 2016 13:42:10 +0000
Message-ID: <1454679743-18133-18-git-send-email-andrew.cooper3@citrix.com>
In-Reply-To: <1454679743-18133-1-git-send-email-andrew.cooper3@citrix.com>
References: <1454679743-18133-1-git-send-email-andrew.cooper3@citrix.com>
To: Xen-devel <xen-devel@lists.xenproject.org>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>, Jan Beulich

This change is purely scaffolding to reduce the complexity of the following
three patches.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich

v2: s/cpumasks/cpuidmasks/
---
 xen/arch/x86/cpu/common.c        |  6 ++++++
 xen/include/asm-x86/cpufeature.h |  1 +
 xen/include/asm-x86/processor.h  | 28 ++++++++++++++++++++++++++++
 3 files changed, 35 insertions(+)

diff --git a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c
index 46d93a6..3fdae96 100644
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -36,6 +36,12 @@ integer_param("cpuid_mask_ext_ecx", opt_cpuid_mask_ext_ecx);
 unsigned int opt_cpuid_mask_ext_edx = ~0u;
 integer_param("cpuid_mask_ext_edx", opt_cpuid_mask_ext_edx);
 
+unsigned int __initdata expected_levelling_cap;
+unsigned int __read_mostly levelling_caps;
+
+DEFINE_PER_CPU(struct cpuidmasks, cpuidmasks);
+struct cpuidmasks __read_mostly cpuidmask_defaults;
+
 const struct cpu_dev *__read_mostly cpu_devs[X86_VENDOR_NUM] = {};
 
 unsigned int paddr_bits __read_mostly = 36;
diff --git a/xen/include/asm-x86/cpufeature.h b/xen/include/asm-x86/cpufeature.h
index f228fa2..8ac6b56 100644
--- a/xen/include/asm-x86/cpufeature.h
+++ b/xen/include/asm-x86/cpufeature.h
@@ -95,6 +95,7 @@
 #define cpu_has_xsavec          boot_cpu_has(X86_FEATURE_XSAVEC)
 #define cpu_has_xgetbv1         boot_cpu_has(X86_FEATURE_XGETBV1)
 #define cpu_has_xsaves          boot_cpu_has(X86_FEATURE_XSAVES)
+#define cpu_has_hypervisor      boot_cpu_has(X86_FEATURE_HYPERVISOR)
 
 enum _cache_type {
     CACHE_TYPE_NULL = 0,
diff --git a/xen/include/asm-x86/processor.h b/xen/include/asm-x86/processor.h
index 271340e..09e82d8 100644
--- a/xen/include/asm-x86/processor.h
+++ b/xen/include/asm-x86/processor.h
@@ -574,6 +574,34 @@ void microcode_set_module(unsigned int);
 int microcode_update(XEN_GUEST_HANDLE_PARAM(const_void), unsigned long len);
 int microcode_resume_cpu(unsigned int cpu);
 
+#define LCAP_faulting (1U << 0)
+#define LCAP_1cd      (3U << 1)
+#define LCAP_e1cd     (3U << 3)
+#define LCAP_Da1      (1U << 5)
+#define LCAP_6c       (1U << 6)
+#define LCAP_7ab0     (3U << 7)
+
+/*
+ * Expected levelling capabilities (given cpuid vendor/family information),
+ * and levelling capabilities actually available (given MSR probing).
+ */
+extern unsigned int expected_levelling_cap, levelling_caps;
+
+struct cpuidmasks
+{
+    uint64_t _1cd;
+    uint64_t e1cd;
+    uint64_t Da1;
+    uint64_t _6c;
+    uint64_t _7ab0;
+};
+
+/* Per CPU shadows of masking MSR values, for lazy context switching. */
+DECLARE_PER_CPU(struct cpuidmasks, cpuidmasks);
+
+/* Default masking MSR values, calculated at boot. */
+extern struct cpuidmasks cpuidmask_defaults;
+
 enum get_cpu_vendor {
     gcv_host_early,
     gcv_host_late,
-- 
2.1.4
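
(A sketch of how these pieces are intended to fit together, not part of the
patch itself: the per-CPU shadow and the boot-time defaults exist so that the
per-vendor context-switch hooks introduced later in the series can skip
redundant WRMSRs.  The MSR index and function name below are placeholders for
illustration only.)

    /* Illustrative only: lazy update of one masking MSR via its shadow. */
    #define MSR_EXAMPLE_CPUID_MASK 0xc0000000 /* placeholder, not a real index */

    static void example_set_1cd_mask(uint64_t val)
    {
        uint64_t *shadow = &this_cpu(cpuidmasks)._1cd;

        /* Only touch the (slow) MSR if the required value has changed. */
        if ( *shadow != val )
        {
            wrmsrl(MSR_EXAMPLE_CPUID_MASK, val);
            *shadow = val;
        }
    }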