* [PATCH 1/2] x86/speculation/l1tf: Increase l1tf memory limit for Nehalem+
@ 2018-08-24 17:03 Andi Kleen
  2018-08-24 17:03 ` [PATCH 2/2] x86/spectre: Add missing family 6 check to microcode check Andi Kleen
  2018-08-27  8:34 ` [tip:x86/urgent] x86/speculation/l1tf: Increase l1tf memory limit for Nehalem+ tip-bot for Andi Kleen
  0 siblings, 2 replies; 4+ messages in thread
From: Andi Kleen @ 2018-08-24 17:03 UTC
  To: x86; +Cc: linux-kernel, Andi Kleen, Michael Hocko, vbabka

From: Andi Kleen <ak@linux.intel.com>

On Nehalem and newer core CPUs the CPU cache internally uses 44 bits of
physical address space. The L1TF workaround is limited by this internal
cache address width, and needs to have one bit free there for the
mitigation to work.
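
The mitigation works by storing the PFN of not-present (swapped or
PROT_NONE) PTEs with its address bits inverted, so that a speculative
load through such a PTE targets unpopulated physical address space.
A simplified sketch of the idea, not the kernel's actual macros, and
assuming the 44bit internal cache width:

#define CACHE_BITS	44
#define PAGE_SHIFT	12
/* PFN bits the cache actually looks at */
#define PFN_MASK	((1ULL << (CACHE_BITS - PAGE_SHIFT)) - 1)

static unsigned long long l1tf_invert_pfn(unsigned long long pfn)
{
	/*
	 * With all installed RAM below 1ULL << (CACHE_BITS - 1), the
	 * inverted value always lands in the unpopulated top half of
	 * the cache's address range, which is why one free address
	 * bit is required.
	 */
	return ~pfn & PFN_MASK;
}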

Older client systems report only a 36bit physical address space, so the
kernel warns that L1TF is not mitigated for a 36bit phys/32GB system
with some memory holes.

But since these CPUs actually have the larger internal cache width, this
warning is bogus: it would only really be needed if the system had more
than 2^43 bytes (8TB) of memory.
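
For illustration, a standalone user-space sketch, not part of the patch,
of the arithmetic behind the warning threshold, assuming 4K pages
(PAGE_SHIFT = 12):

#include <stdio.h>

#define PAGE_SHIFT 12

/* same formula as l1tf_pfn_limit() below */
static unsigned long long pfn_limit(unsigned bits)
{
	return 1ULL << (bits - 1 - PAGE_SHIFT);
}

int main(void)
{
	unsigned bits[] = { 36, 44 };

	for (int i = 0; i < 2; i++) {
		unsigned long long half_pa = pfn_limit(bits[i]) << PAGE_SHIFT;

		/* prints 32 GB for 36 bits, 8192 GB for 44 bits */
		printf("%u bits -> warn above %llu GB\n",
		       bits[i], half_pa >> 30);
	}
	return 0;
}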

Add a new internal x86_cache_bits field. Normally it is the same as the
phys bits field reported by CPUID, but for Nehalem and newer it is
forced to be at least 44 bits.

Then change the L1TF memory size warning to use the new
cache_bits field to avoid bogus warnings.

Also remove a comment about the memory size that is now known to be
bogus.

Cc: Michael Hocko <mhocko@suse.com>
Cc: vbabka@suse.cz
Reported-by: xxxxxx xxxxxx <xxxxxx@xxxxxx.xxx>
Reported-by: Christopher Snowhill <kode54@gmail.com>
Signed-off-by: Andi Kleen <ak@linux.intel.com>
---
 arch/x86/include/asm/processor.h |  4 +++-
 arch/x86/kernel/cpu/bugs.c       | 41 ++++++++++++++++++++++++++++----
 arch/x86/kernel/cpu/common.c     |  1 +
 3 files changed, 40 insertions(+), 6 deletions(-)

diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index c24297268ebc..d53c54b842da 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -132,6 +132,8 @@ struct cpuinfo_x86 {
 	/* Index into per_cpu list: */
 	u16			cpu_index;
 	u32			microcode;
+	/* Address space bits used by the cache internally */
+	u8			x86_cache_bits;
 	unsigned		initialized : 1;
 } __randomize_layout;
 
@@ -183,7 +185,7 @@ extern void cpu_detect(struct cpuinfo_x86 *c);
 
 static inline unsigned long long l1tf_pfn_limit(void)
 {
-	return BIT_ULL(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT);
+	return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
 }
 
 extern void early_cpu_init(void);
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 4c2313d0b9ca..e35d2c89af11 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -668,6 +668,40 @@ EXPORT_SYMBOL_GPL(l1tf_mitigation);
 enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
 EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
 
+/*
+ * These CPUs all support 44bits internally in the cache but report a smaller
+ * number of bits for phys bits. For the L1TF max memory size warning
+ * we want to use the internal cache width to not give unnecessary
+ * warnings.
+ */
+static void override_cache_bits(struct cpuinfo_x86 *c)
+{
+	if (c->x86 != 6)
+		return;
+	switch (c->x86_model) {
+	case INTEL_FAM6_NEHALEM:
+	case INTEL_FAM6_WESTMERE:
+	case INTEL_FAM6_SANDYBRIDGE:
+	case INTEL_FAM6_IVYBRIDGE:
+	case INTEL_FAM6_HASWELL_CORE:
+	case INTEL_FAM6_HASWELL_ULT:
+	case INTEL_FAM6_HASWELL_GT3E:
+	case INTEL_FAM6_BROADWELL_CORE:
+	case INTEL_FAM6_BROADWELL_GT3E:
+	case INTEL_FAM6_SKYLAKE_MOBILE:
+	case INTEL_FAM6_SKYLAKE_DESKTOP:
+	case INTEL_FAM6_KABYLAKE_MOBILE:
+	case INTEL_FAM6_KABYLAKE_DESKTOP:
+		/*
+		 * No need to add new model numbers, as this is only
+		 * used by workarounds for older CPUs.
+		 */
+		if (c->x86_cache_bits < 44)
+			c->x86_cache_bits = 44;
+		break;
+	}
+}
+
 static void __init l1tf_select_mitigation(void)
 {
 	u64 half_pa;
@@ -675,6 +709,8 @@ static void __init l1tf_select_mitigation(void)
 	if (!boot_cpu_has_bug(X86_BUG_L1TF))
 		return;
 
+	override_cache_bits(&boot_cpu_data);
+
 	switch (l1tf_mitigation) {
 	case L1TF_MITIGATION_OFF:
 	case L1TF_MITIGATION_FLUSH_NOWARN:
@@ -694,11 +730,6 @@ static void __init l1tf_select_mitigation(void)
 	return;
 #endif
 
-	/*
-	 * This is extremely unlikely to happen because almost all
-	 * systems have far more MAX_PA/2 than RAM can be fit into
-	 * DIMM slots.
-	 */
 	half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
 	if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
 		pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 84dee5ab745a..44c4ef3d989b 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -919,6 +919,7 @@ void get_cpu_address_sizes(struct cpuinfo_x86 *c)
 	else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
 		c->x86_phys_bits = 36;
 #endif
+	c->x86_cache_bits = c->x86_phys_bits;
 }
 
 static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
-- 
2.17.1



* [PATCH 2/2] x86/spectre: Add missing family 6 check to microcode check
  2018-08-24 17:03 [PATCH 1/2] x86/speculation/l1tf: Increase l1tf memory limit for Nehalem+ Andi Kleen
@ 2018-08-24 17:03 ` Andi Kleen
  2018-08-27  8:34   ` [tip:x86/urgent] " tip-bot for Andi Kleen
  2018-08-27  8:34 ` [tip:x86/urgent] x86/speculation/l1tf: Increase l1tf memory limit for Nehalem+ tip-bot for Andi Kleen
  1 sibling, 1 reply; 4+ messages in thread
From: Andi Kleen @ 2018-08-24 17:03 UTC
  To: x86; +Cc: linux-kernel, Andi Kleen

From: Andi Kleen <ak@linux.intel.com>

The check for Spectre microcodes does not check for family 6, only the
model numbers. Add a family 6 check to avoid ambiguity with other
families.
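
For illustration only, a minimal user-space sketch, with hypothetical
table entries rather than the kernel's spectre_bad_microcodes list,
of why the family gate matters when entries are keyed only by model
and stepping:

#include <stdbool.h>
#include <stddef.h>

struct sku { unsigned char model, stepping; };

/* hypothetical family 6 blacklist entries */
static const struct sku bad[] = {
	{ 0x3c, 0x03 },
	{ 0x4e, 0x03 },
};

static bool is_bad(unsigned char family, unsigned char model,
		   unsigned char stepping)
{
	size_t i;

	/* without this gate, a part from another family that happens to
	 * share a model/stepping pair would match a family 6 entry */
	if (family != 6)
		return false;

	for (i = 0; i < sizeof(bad) / sizeof(bad[0]); i++)
		if (bad[i].model == model && bad[i].stepping == stepping)
			return true;
	return false;
}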

Signed-off-by: Andi Kleen <ak@linux.intel.com>
---
 arch/x86/kernel/cpu/intel.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 401e8c133108..fc3c07fe7df5 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -150,6 +150,9 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
 	if (cpu_has(c, X86_FEATURE_HYPERVISOR))
 		return false;
 
+	if (c->x86 != 6)
+		return false;
+
 	for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
 		if (c->x86_model == spectre_bad_microcodes[i].model &&
 		    c->x86_stepping == spectre_bad_microcodes[i].stepping)
-- 
2.17.1



* [tip:x86/urgent] x86/spectre: Add missing family 6 check to microcode check
  2018-08-24 17:03 ` [PATCH 2/2] x86/spectre: Add missing family 6 check to microcode check Andi Kleen
@ 2018-08-27  8:34   ` tip-bot for Andi Kleen
  0 siblings, 0 replies; 4+ messages in thread
From: tip-bot for Andi Kleen @ 2018-08-27  8:34 UTC
  To: linux-tip-commits; +Cc: mingo, tglx, hpa, linux-kernel, ak

Commit-ID:  1ab534e85c93945f7862378d8c8adcf408205b19
Gitweb:     https://git.kernel.org/tip/1ab534e85c93945f7862378d8c8adcf408205b19
Author:     Andi Kleen <ak@linux.intel.com>
AuthorDate: Fri, 24 Aug 2018 10:03:51 -0700
Committer:  Thomas Gleixner <tglx@linutronix.de>
CommitDate: Mon, 27 Aug 2018 10:29:14 +0200

x86/spectre: Add missing family 6 check to microcode check

The check for Spectre microcodes does not check for family 6, only the
model numbers.

Add a family 6 check to avoid ambiguity with other families.

Fixes: a5b296636453 ("x86/cpufeature: Blacklist SPEC_CTRL/PRED_CMD on early Spectre v2 microcodes")
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: x86@kernel.org
Cc: linux-kernel@vger.kernel.org
Cc: stable@vger.kernel.org
Link: https://lkml.kernel.org/r/20180824170351.34874-2-andi@firstfloor.org

---
 arch/x86/kernel/cpu/intel.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 401e8c133108..fc3c07fe7df5 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -150,6 +150,9 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
 	if (cpu_has(c, X86_FEATURE_HYPERVISOR))
 		return false;
 
+	if (c->x86 != 6)
+		return false;
+
 	for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
 		if (c->x86_model == spectre_bad_microcodes[i].model &&
 		    c->x86_stepping == spectre_bad_microcodes[i].stepping)


* [tip:x86/urgent] x86/speculation/l1tf: Increase l1tf memory limit for Nehalem+
  2018-08-24 17:03 [PATCH 1/2] x86/speculation/l1tf: Increase l1tf memory limit for Nehalem+ Andi Kleen
  2018-08-24 17:03 ` [PATCH 2/2] x86/spectre: Add missing family 6 check to microcode check Andi Kleen
@ 2018-08-27  8:34 ` tip-bot for Andi Kleen
  1 sibling, 0 replies; 4+ messages in thread
From: tip-bot for Andi Kleen @ 2018-08-27  8:34 UTC
  To: linux-tip-commits
  Cc: linux-kernel, xxxxxx, ak, kode54, mhocko, hpa, mingo, tglx

Commit-ID:  cc51e5428ea54f575d49cfcede1d4cb3a72b4ec4
Gitweb:     https://git.kernel.org/tip/cc51e5428ea54f575d49cfcede1d4cb3a72b4ec4
Author:     Andi Kleen <ak@linux.intel.com>
AuthorDate: Fri, 24 Aug 2018 10:03:50 -0700
Committer:  Thomas Gleixner <tglx@linutronix.de>
CommitDate: Mon, 27 Aug 2018 10:29:14 +0200

x86/speculation/l1tf: Increase l1tf memory limit for Nehalem+

On Nehalem and newer core CPUs the CPU cache internally uses 44 bits
physical address space. The L1TF workaround is limited by this internal
cache address width, and needs to have one bit free there for the
mitigation to work.

Older client systems report only 36bit physical address space so the range
check decides that L1TF is not mitigated for a 36bit phys/32GB system with
some memory holes.

But since these actually have the larger internal cache width this warning
is bogus because it would only really be needed if the system had more than
43bits of memory.

Add a new internal x86_cache_bits field. Normally it is the same as the
physical bits field reported by CPUID, but for Nehalem and newer force it to
be at least 44bits.

Change the L1TF memory size warning to use the new cache_bits field to
avoid bogus warnings and remove the bogus comment about memory size.

Fixes: 17dbca119312 ("x86/speculation/l1tf: Add sysfs reporting for l1tf")
Reported-by: xxxxxx xxxxxx <xxxxxx@xxxxxx.xxx>
Reported-by: Christopher Snowhill <kode54@gmail.com>
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: x86@kernel.org
Cc: linux-kernel@vger.kernel.org
Cc: Michael Hocko <mhocko@suse.com>
Cc: vbabka@suse.cz
Cc: stable@vger.kernel.org
Link: https://lkml.kernel.org/r/20180824170351.34874-1-andi@firstfloor.org

---
 arch/x86/include/asm/processor.h |  4 +++-
 arch/x86/kernel/cpu/bugs.c       | 46 +++++++++++++++++++++++++++++++++++-----
 arch/x86/kernel/cpu/common.c     |  1 +
 3 files changed, 45 insertions(+), 6 deletions(-)

diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index c24297268ebc..d53c54b842da 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -132,6 +132,8 @@ struct cpuinfo_x86 {
 	/* Index into per_cpu list: */
 	u16			cpu_index;
 	u32			microcode;
+	/* Address space bits used by the cache internally */
+	u8			x86_cache_bits;
 	unsigned		initialized : 1;
 } __randomize_layout;
 
@@ -183,7 +185,7 @@ extern void cpu_detect(struct cpuinfo_x86 *c);
 
 static inline unsigned long long l1tf_pfn_limit(void)
 {
-	return BIT_ULL(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT);
+	return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
 }
 
 extern void early_cpu_init(void);
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 4c2313d0b9ca..40bdaea97fe7 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -668,6 +668,45 @@ EXPORT_SYMBOL_GPL(l1tf_mitigation);
 enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
 EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
 
+/*
+ * These CPUs all support 44bits physical address space internally in the
+ * cache but CPUID can report a smaller number of physical address bits.
+ *
+ * The L1TF mitigation uses the top most address bit for the inversion of
+ * non present PTEs. When the installed memory reaches into the top most
+ * address bit due to memory holes, which has been observed on machines
+ * which report 36bits physical address bits and have 32G RAM installed,
+ * then the mitigation range check in l1tf_select_mitigation() triggers.
+ * This is a false positive because the mitigation is still possible due to
+ * the fact that the cache uses 44bit internally. Use the cache bits
+ * instead of the reported physical bits and adjust them on the affected
+ * machines to 44bit if the reported bits are less than 44.
+ */
+static void override_cache_bits(struct cpuinfo_x86 *c)
+{
+	if (c->x86 != 6)
+		return;
+
+	switch (c->x86_model) {
+	case INTEL_FAM6_NEHALEM:
+	case INTEL_FAM6_WESTMERE:
+	case INTEL_FAM6_SANDYBRIDGE:
+	case INTEL_FAM6_IVYBRIDGE:
+	case INTEL_FAM6_HASWELL_CORE:
+	case INTEL_FAM6_HASWELL_ULT:
+	case INTEL_FAM6_HASWELL_GT3E:
+	case INTEL_FAM6_BROADWELL_CORE:
+	case INTEL_FAM6_BROADWELL_GT3E:
+	case INTEL_FAM6_SKYLAKE_MOBILE:
+	case INTEL_FAM6_SKYLAKE_DESKTOP:
+	case INTEL_FAM6_KABYLAKE_MOBILE:
+	case INTEL_FAM6_KABYLAKE_DESKTOP:
+		if (c->x86_cache_bits < 44)
+			c->x86_cache_bits = 44;
+		break;
+	}
+}
+
 static void __init l1tf_select_mitigation(void)
 {
 	u64 half_pa;
@@ -675,6 +714,8 @@ static void __init l1tf_select_mitigation(void)
 	if (!boot_cpu_has_bug(X86_BUG_L1TF))
 		return;
 
+	override_cache_bits(&boot_cpu_data);
+
 	switch (l1tf_mitigation) {
 	case L1TF_MITIGATION_OFF:
 	case L1TF_MITIGATION_FLUSH_NOWARN:
@@ -694,11 +735,6 @@ static void __init l1tf_select_mitigation(void)
 	return;
 #endif
 
-	/*
-	 * This is extremely unlikely to happen because almost all
-	 * systems have far more MAX_PA/2 than RAM can be fit into
-	 * DIMM slots.
-	 */
 	half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
 	if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
 		pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 84dee5ab745a..44c4ef3d989b 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -919,6 +919,7 @@ void get_cpu_address_sizes(struct cpuinfo_x86 *c)
 	else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
 		c->x86_phys_bits = 36;
 #endif
+	c->x86_cache_bits = c->x86_phys_bits;
 }
 
 static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)

