From: Pu Wen <puwen@hygon.cn>
To: xen-devel@lists.xenproject.org
Cc: "Pu Wen" <puwen@hygon.cn>,
	"Roger Pau Monné" <roger.pau@citrix.com>,
	"Wei Liu" <wei.liu2@citrix.com>,
	"Jan Beulich" <jbeulich@suse.com>,
	"Andrew Cooper" <andrew.cooper3@citrix.com>
Subject: [PATCH v2 01/14] x86/cpu: Create Hygon Dhyana architecture support file
Date: Thu, 21 Feb 2019 17:48:53 +0800
Message-ID: <026111a83e9d83902bc2acee243858a21ecec304.1550647742.git.puwen@hygon.cn>
In-Reply-To: <cover.1550647742.git.puwen@hygon.cn>

Add x86 architecture support for a new processor: Hygon Dhyana Family
18h. Carve the initialization code that Dhyana needs out of amd.c into
a separate file, hygon.c, dropping the parts Dhyana does not use and
making the Hygon initialization path clearer.

To identify Hygon Dhyana CPUs, add a new vendor type, X86_VENDOR_HYGON.
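
For reference, vendor recognition keys off the CPUID leaf 0 vendor
string, which is "HygonGenuine" on Dhyana. A minimal sketch of that
kind of match (illustrative only, not this patch's actual code, which
matches the string against cpu_devs[]->c_ident; c here stands for a
struct cpuinfo_x86 pointer):

	uint32_t eax, ebx, ecx, edx;
	char vendor[13];

	cpuid(0, &eax, &ebx, &ecx, &edx);
	memcpy(&vendor[0], &ebx, 4);	/* "Hygo" */
	memcpy(&vendor[4], &edx, 4);	/* "nGen" */
	memcpy(&vendor[8], &ecx, 4);	/* "uine" */
	vendor[12] = '\0';

	if (!strcmp(vendor, "HygonGenuine"))
		c->x86_vendor = X86_VENDOR_HYGON;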

As opt_cpuid_mask_l7s0_eax and opt_cpuid_mask_l7s0_ebx are used by both
AMD and Hygon, move them to common.c.
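
These remain ordinary Xen command line options. For example, leaf 7
sub-leaf 0 features could be levelled with something like (values
purely illustrative):

	cpuid_mask_l7s0_eax=0xffffffff cpuid_mask_l7s0_ebx=0xfffffdff

which, where the masking MSR is available, would hide EBX bit 9 (ERMS)
from guests.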

Signed-off-by: Pu Wen <puwen@hygon.cn>
---
 xen/arch/x86/cpu/Makefile         |   1 +
 xen/arch/x86/cpu/amd.c            |   5 -
 xen/arch/x86/cpu/common.c         |   6 +
 xen/arch/x86/cpu/cpu.h            |   2 +
 xen/arch/x86/cpu/hygon.c          | 248 ++++++++++++++++++++++++++++++++++++++
 xen/include/asm-x86/x86-vendors.h |   3 +-
 6 files changed, 259 insertions(+), 6 deletions(-)
 create mode 100644 xen/arch/x86/cpu/hygon.c

diff --git a/xen/arch/x86/cpu/Makefile b/xen/arch/x86/cpu/Makefile
index 34a01ca..1db7d88 100644
--- a/xen/arch/x86/cpu/Makefile
+++ b/xen/arch/x86/cpu/Makefile
@@ -8,4 +8,5 @@ obj-y += intel.o
 obj-y += intel_cacheinfo.o
 obj-y += mwait-idle.o
 obj-y += shanghai.o
+obj-y += hygon.o
 obj-y += vpmu.o vpmu_amd.o vpmu_intel.o
diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
index c790416..4c595cf 100644
--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -32,11 +32,6 @@
 static char __initdata opt_famrev[14];
 string_param("cpuid_mask_cpu", opt_famrev);
 
-static unsigned int __initdata opt_cpuid_mask_l7s0_eax = ~0u;
-integer_param("cpuid_mask_l7s0_eax", opt_cpuid_mask_l7s0_eax);
-static unsigned int __initdata opt_cpuid_mask_l7s0_ebx = ~0u;
-integer_param("cpuid_mask_l7s0_ebx", opt_cpuid_mask_l7s0_ebx);
-
 static unsigned int __initdata opt_cpuid_mask_thermal_ecx = ~0u;
 integer_param("cpuid_mask_thermal_ecx", opt_cpuid_mask_thermal_ecx);
 
diff --git a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c
index de6c5c9..5bab845 100644
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -36,6 +36,11 @@ integer_param("cpuid_mask_ext_ecx", opt_cpuid_mask_ext_ecx);
 unsigned int opt_cpuid_mask_ext_edx = ~0u;
 integer_param("cpuid_mask_ext_edx", opt_cpuid_mask_ext_edx);
 
+unsigned int opt_cpuid_mask_l7s0_eax = ~0u;
+integer_param("cpuid_mask_l7s0_eax", opt_cpuid_mask_l7s0_eax);
+unsigned int opt_cpuid_mask_l7s0_ebx = ~0u;
+integer_param("cpuid_mask_l7s0_ebx", opt_cpuid_mask_l7s0_ebx);
+
 unsigned int __initdata expected_levelling_cap;
 unsigned int __read_mostly levelling_caps;
 
@@ -704,6 +709,7 @@ void __init early_cpu_init(void)
 {
 	intel_cpu_init();
 	amd_init_cpu();
+	hygon_init_cpu();
 	centaur_init_cpu();
 	shanghai_init_cpu();
 	early_cpu_detect();
diff --git a/xen/arch/x86/cpu/cpu.h b/xen/arch/x86/cpu/cpu.h
index 2fcb931..9ea53e5 100644
--- a/xen/arch/x86/cpu/cpu.h
+++ b/xen/arch/x86/cpu/cpu.h
@@ -13,11 +13,13 @@ extern bool_t opt_arat;
 extern unsigned int opt_cpuid_mask_ecx, opt_cpuid_mask_edx;
 extern unsigned int opt_cpuid_mask_xsave_eax;
 extern unsigned int opt_cpuid_mask_ext_ecx, opt_cpuid_mask_ext_edx;
+extern unsigned int opt_cpuid_mask_l7s0_eax, opt_cpuid_mask_l7s0_ebx;
 
 extern int get_model_name(struct cpuinfo_x86 *c);
 extern void display_cacheinfo(struct cpuinfo_x86 *c);
 
 int intel_cpu_init(void);
 int amd_init_cpu(void);
+int hygon_init_cpu(void);
 int centaur_init_cpu(void);
 int shanghai_init_cpu(void);
diff --git a/xen/arch/x86/cpu/hygon.c b/xen/arch/x86/cpu/hygon.c
new file mode 100644
index 0000000..3e79441
--- /dev/null
+++ b/xen/arch/x86/cpu/hygon.c
@@ -0,0 +1,248 @@
+#include <xen/init.h>
+#include <asm/processor.h>
+#include <asm/hvm/support.h>
+#include <asm/spec_ctrl.h>
+
+#include "cpu.h"
+
+/*
+ * Sets caps in expected_levelling_cap, probes for the specified mask MSR, and
+ * sets caps in levelling_caps if it is found.  Returns the default value.
+ */
+static uint64_t __init _probe_mask_msr(unsigned int msr, uint64_t caps)
+{
+	uint64_t value;
+
+	expected_levelling_cap |= caps;
+
+	if ((rdmsr_safe(msr, value) == 0) && (wrmsr_safe(msr, value) == 0))
+		levelling_caps |= caps;
+
+	return value;
+}
+
+/* Probe for the existence of the expected masking MSRs. */
+static void __init noinline probe_masking_msrs(void)
+{
+	const struct cpuinfo_x86 *c = &boot_cpu_data;
+
+	/* Work out which masking MSRs we should have. */
+	cpuidmask_defaults._1cd =
+		_probe_mask_msr(MSR_K8_FEATURE_MASK, LCAP_1cd);
+	cpuidmask_defaults.e1cd =
+		_probe_mask_msr(MSR_K8_EXT_FEATURE_MASK, LCAP_e1cd);
+	if (c->cpuid_level >= 7)
+		cpuidmask_defaults._7ab0 =
+			_probe_mask_msr(MSR_AMD_L7S0_FEATURE_MASK, LCAP_7ab0);
+}
+
+/*
+ * Context switch CPUID masking state to the next domain.  Only called if
+ * CPUID Faulting isn't available, but masking MSRs have been detected.  A
+ * parameter of NULL is used to context switch to the default host state (by
+ * the CPU bringup code, crash path, etc).
+ */
+static void hygon_ctxt_switch_masking(const struct vcpu *next)
+{
+	struct cpuidmasks *these_masks = &this_cpu(cpuidmasks);
+	const struct domain *nextd = next ? next->domain : NULL;
+	const struct cpuidmasks *masks =
+		(nextd && is_pv_domain(nextd) && nextd->arch.pv.cpuidmasks)
+		? nextd->arch.pv.cpuidmasks : &cpuidmask_defaults;
+
+	if ((levelling_caps & LCAP_1cd) == LCAP_1cd) {
+		uint64_t val = masks->_1cd;
+
+		/*
+		 * OSXSAVE defaults to 1, which causes fast-forwarding of
+		 * Xen's real setting.  Clobber it if disabled by the guest
+		 * kernel.
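+		 * (ECX is stored in the high 32 bits of the 1cd mask,
+		 * hence the shift by 32 below.)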
+		 */
+		if (next && is_pv_vcpu(next) && !is_idle_vcpu(next) &&
+		    !(next->arch.pv.ctrlreg[4] & X86_CR4_OSXSAVE))
+			val &= ~((uint64_t)cpufeat_mask(X86_FEATURE_OSXSAVE) << 32);
+
+		if (unlikely(these_masks->_1cd != val)) {
+			wrmsrl(MSR_K8_FEATURE_MASK, val);
+			these_masks->_1cd = val;
+		}
+	}
+
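+/* Lazily write the remaining masking MSRs only when their value changes. */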
+#define LAZY(cap, msr, field)						\
+	({								\
+		if (unlikely(these_masks->field != masks->field) &&	\
+		    ((levelling_caps & cap) == cap)) {		\
+			wrmsrl(msr, masks->field);			\
+			these_masks->field = masks->field;		\
+		}							\
+	})
+
+	LAZY(LCAP_e1cd, MSR_K8_EXT_FEATURE_MASK,   e1cd);
+	LAZY(LCAP_7ab0, MSR_AMD_L7S0_FEATURE_MASK, _7ab0);
+#undef LAZY
+}
+
+/*
+ * Mask the features and extended features returned by CPUID.  Parameters are
+ * set from the boot line via user-defined masks.
+ */
+static void __init noinline hygon_init_levelling(void)
+{
+	probe_masking_msrs();
+
+	if ((levelling_caps & LCAP_1cd) == LCAP_1cd) {
+		uint32_t ecx, edx, tmp;
+
+		cpuid(0x00000001, &tmp, &tmp, &ecx, &edx);
+
+		if (~(opt_cpuid_mask_ecx & opt_cpuid_mask_edx)) {
+			ecx &= opt_cpuid_mask_ecx;
+			edx &= opt_cpuid_mask_edx;
+		}
+
+		/* Fast-forward bits - Must be set. */
+		if (ecx & cpufeat_mask(X86_FEATURE_XSAVE))
+			ecx |= cpufeat_mask(X86_FEATURE_OSXSAVE);
+		edx |= cpufeat_mask(X86_FEATURE_APIC);
+
+		/* Allow the HYPERVISOR bit to be set via guest policy. */
+		ecx |= cpufeat_mask(X86_FEATURE_HYPERVISOR);
+
+		cpuidmask_defaults._1cd = ((uint64_t)ecx << 32) | edx;
+	}
+
+	if ((levelling_caps & LCAP_e1cd) == LCAP_e1cd) {
+		uint32_t ecx, edx, tmp;
+
+		cpuid(0x80000001, &tmp, &tmp, &ecx, &edx);
+
+		if (~(opt_cpuid_mask_ext_ecx & opt_cpuid_mask_ext_edx)) {
+			ecx &= opt_cpuid_mask_ext_ecx;
+			edx &= opt_cpuid_mask_ext_edx;
+		}
+
+		/* Fast-forward bits - Must be set. */
+		edx |= cpufeat_mask(X86_FEATURE_APIC);
+
+		cpuidmask_defaults.e1cd = ((uint64_t)ecx << 32) | edx;
+	}
+
+	if ((levelling_caps & LCAP_7ab0) == LCAP_7ab0) {
+		uint32_t eax, ebx, tmp;
+
+		cpuid(0x00000007, &eax, &ebx, &tmp, &tmp);
+
+		if (~(opt_cpuid_mask_l7s0_eax & opt_cpuid_mask_l7s0_ebx)) {
+			eax &= opt_cpuid_mask_l7s0_eax;
+			ebx &= opt_cpuid_mask_l7s0_ebx;
+		}
+
+		cpuidmask_defaults._7ab0 &= ((uint64_t)eax << 32) | ebx;
+	}
+
+	if (opt_cpu_info) {
+		printk(XENLOG_INFO "Levelling caps: %#x\n", levelling_caps);
+		printk(XENLOG_INFO
+		       "MSR defaults: 1d 0x%08x, 1c 0x%08x, e1d 0x%08x, "
+		       "e1c 0x%08x, 7a0 0x%08x, 7b0 0x%08x\n",
+		       (uint32_t)cpuidmask_defaults._1cd,
+		       (uint32_t)(cpuidmask_defaults._1cd >> 32),
+		       (uint32_t)cpuidmask_defaults.e1cd,
+		       (uint32_t)(cpuidmask_defaults.e1cd >> 32),
+		       (uint32_t)(cpuidmask_defaults._7ab0 >> 32),
+		       (uint32_t)cpuidmask_defaults._7ab0);
+	}
+
+	if (levelling_caps)
+		ctxt_switch_masking = hygon_ctxt_switch_masking;
+}
+
+static void hygon_get_topology(struct cpuinfo_x86 *c)
+{
+	u32 ebx;
+
+	if (c->x86_max_cores <= 1)
+		return;
+
+	/* Convert local APIC ID into the socket ID */
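+	/* (Fn8000_0008 ECX[15:12] = core-ID width within the APIC ID.) */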
+	c->phys_proc_id >>= (cpuid_ecx(0x80000008) >> 12) & 0xf;
+
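+	/* Fn8000_001E EBX: [7:0] core ID, [15:8] threads per core - 1. */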
+	ebx = cpuid_ebx(0x8000001e);
+	c->x86_num_siblings = ((ebx >> 8) & 0x3) + 1;
+	c->x86_max_cores /= c->x86_num_siblings;
+	c->cpu_core_id = ebx & 0xff;
+
+	if (opt_cpu_info)
+		printk("CPU %d(%d) -> Processor %d, Core %d\n",
+		       smp_processor_id(), c->x86_max_cores,
+		       c->phys_proc_id, c->cpu_core_id);
+}
+
+static void early_init_hygon(struct cpuinfo_x86 *c)
+{
+	if (c == &boot_cpu_data)
+		hygon_init_levelling();
+
+	ctxt_switch_levelling(NULL);
+}
+
+static void init_hygon(struct cpuinfo_x86 *c)
+{
+	u32 l, h;
+	unsigned long long value;
+
+	/* Attempt to set lfence to be Dispatch Serialising. */
+	if (rdmsr_safe(MSR_AMD64_DE_CFG, value))
+		/* Unable to read.  Assume the safer default. */
+		__clear_bit(X86_FEATURE_LFENCE_DISPATCH, c->x86_capability);
+	else if (value & AMD64_DE_CFG_LFENCE_SERIALISE)
+		/* Already dispatch serialising. */
+		__set_bit(X86_FEATURE_LFENCE_DISPATCH, c->x86_capability);
+
+	/*
+	 * If the user has explicitly chosen to disable Memory Disambiguation
+	 * to mitigate Speculative Store Bypass, poke the appropriate MSR.
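+	 * For these cores the relevant control is LS_CFG bit 10, set below.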
+	 */
+	if (opt_ssbd && !rdmsr_safe(MSR_AMD64_LS_CFG, value)) {
+		value |= 1ull << 10;
+		wrmsr_safe(MSR_AMD64_LS_CFG, value);
+	}
+
+	display_cacheinfo(c);
+
+	if (cpu_has(c, X86_FEATURE_ITSC)) {
+		__set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
+		__set_bit(X86_FEATURE_NONSTOP_TSC, c->x86_capability);
+		__set_bit(X86_FEATURE_TSC_RELIABLE, c->x86_capability);
+	}
+
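+	/* Fn8000_0008 ECX[7:0]: logical processors per package, minus 1. */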
+	c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
+
+	hygon_get_topology(c);
+
+	/* Hygon CPUs do not support SYSENTER outside of legacy mode. */
+	__clear_bit(X86_FEATURE_SEP, c->x86_capability);
+
+	/* Hygon processors have APIC timer running in deep C states. */
+	if (opt_arat)
+		__set_bit(X86_FEATURE_ARAT, c->x86_capability);
+
+	if (cpu_has(c, X86_FEATURE_EFRO)) {
+		rdmsr(MSR_K7_HWCR, l, h);
+		l |= (1 << 27); /* Enable read-only APERF/MPERF bit */
+		wrmsr(MSR_K7_HWCR, l, h);
+	}
+}
+
+static const struct cpu_dev hygon_cpu_dev = {
+	.c_vendor	= "Hygon",
+	.c_ident	= { "HygonGenuine" },
+	.c_early_init	= early_init_hygon,
+	.c_init		= init_hygon,
+};
+
+int __init hygon_init_cpu(void)
+{
+	cpu_devs[X86_VENDOR_HYGON] = &hygon_cpu_dev;
+	return 0;
+}
diff --git a/xen/include/asm-x86/x86-vendors.h b/xen/include/asm-x86/x86-vendors.h
index 38a81c3..fa1cbb4 100644
--- a/xen/include/asm-x86/x86-vendors.h
+++ b/xen/include/asm-x86/x86-vendors.h
@@ -9,6 +9,7 @@
 #define X86_VENDOR_AMD 2
 #define X86_VENDOR_CENTAUR 3
 #define X86_VENDOR_SHANGHAI 4
-#define X86_VENDOR_NUM 5
+#define X86_VENDOR_HYGON 5
+#define X86_VENDOR_NUM 6
 
 #endif	/* __XEN_X86_VENDORS_H__ */
-- 
2.7.4


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Thread overview: 51+ messages
2019-02-21  9:48 [PATCH v2 00/14] Add support for Hygon Dhyana Family 18h processor Pu Wen
2019-02-21  9:48 ` Pu Wen [this message]
2019-03-14 17:10   ` [PATCH v2 01/14] x86/cpu: Create Hygon Dhyana architecture support file Jan Beulich
2019-03-15 10:17     ` Pu Wen
2019-03-15 11:17       ` Jan Beulich
2019-03-16  9:57         ` Pu Wen
2019-03-18  8:54           ` Jan Beulich
2019-03-19 12:33             ` Pu Wen
2019-03-19 13:02               ` Jan Beulich
2019-03-19 13:34                 ` Pu Wen
2019-02-21  9:48 ` [PATCH v2 02/14] x86/cpu/mtrr: Add Hygon Dhyana support to get TOP_MEM2 Pu Wen
2019-03-15 12:39   ` Jan Beulich
2019-03-16 10:06     ` Pu Wen
2019-03-18  8:57       ` Jan Beulich
2019-03-18 15:51         ` Pu Wen
2019-03-18 16:51           ` Jan Beulich
2019-02-21  9:50 ` [PATCH v2 03/14] x86/cpu/vpmu: Add Hygon Dhyana and AMD Zen support for vPMU Pu Wen
2019-03-15 12:41   ` Jan Beulich
2019-03-16 10:11     ` Pu Wen
2019-03-18  8:59       ` Jan Beulich
2019-03-19 11:32         ` Pu Wen
2019-03-19 12:57           ` Jan Beulich
2019-03-19 13:47             ` Pu Wen
2019-03-19 13:57               ` Jan Beulich
2019-03-19 15:22                 ` Pu Wen
2019-02-21  9:51 ` [PATCH v2 04/14] x86/cpu/mce: Add Hygon Dhyana support to the MCA infrastructure Pu Wen
2019-03-15 12:47   ` Jan Beulich
2019-02-21  9:51 ` [PATCH v2 05/14] x86/spec_ctrl: Add Hygon Dhyana to the respective mitigation machinery Pu Wen
2019-03-15 12:47   ` Jan Beulich
2019-02-21  9:51 ` [PATCH v2 06/14] x86/apic: Add Hygon Dhyana support Pu Wen
2019-03-15 12:48   ` Jan Beulich
2019-02-21  9:52 ` [PATCH v2 07/14] x86/acpi: " Pu Wen
2019-03-15 12:49   ` Jan Beulich
2019-02-21  9:52 ` [PATCH v2 08/14] x86/iommu: " Pu Wen
2019-03-15 13:18   ` Jan Beulich
2019-02-21  9:52 ` [PATCH v2 09/14] x86/pv: Add Hygon Dhyana support to emulate MSRs access Pu Wen
2019-03-15 13:23   ` Jan Beulich
2019-02-21  9:53 ` [PATCH v2 10/14] x86/domain: Add Hygon Dhyana support Pu Wen
2019-03-15 13:24   ` Jan Beulich
2019-02-21  9:53 ` [PATCH v2 11/14] x86/domctl: " Pu Wen
2019-03-15 13:28   ` Jan Beulich
2019-03-16 10:14     ` Pu Wen
2019-02-21  9:53 ` [PATCH v2 12/14] x86/traps: " Pu Wen
2019-02-21  9:54 ` [PATCH v2 13/14] x86/cpuid: " Pu Wen
2019-03-15 13:29   ` Jan Beulich
2019-02-21  9:54 ` [PATCH v2 14/14] tools/libxc: " Pu Wen
2019-02-21 16:37 ` [PATCH v2 00/14] Add support for Hygon Dhyana Family 18h processor Wei Liu
2019-02-22  2:30   ` Pu Wen
2019-02-22 13:21     ` Wei Liu
2019-03-15 13:41 ` Jan Beulich
2019-03-16 10:40   ` Pu Wen
