From: Dave Hansen <dave.hansen@linux.intel.com>
To: linux-kernel@vger.kernel.org
Cc: kirill.shutemov@linux.intel.com, pbonzini@redhat.com, tglx@linutronix.de, x86@kernel.org, bp@alien8.de, Dave Hansen <dave.hansen@linux.intel.com>
Subject: [RFC][PATCH 24/34] x86/cpu: Establish 'min_cache_bits' configuration
Date: Thu, 22 Feb 2024 10:39:58 -0800 [thread overview]
Message-ID: <20240222183958.F5A0812A@davehans-spike.ostc.intel.com> (raw)
In-Reply-To: <20240222183926.517AFCD2@davehans-spike.ostc.intel.com>
From: Dave Hansen <dave.hansen@linux.intel.com>
Continue moving toward a setup where code never tweaks 'boot_cpu_data'
directly. Instead, code must record its intent in 'x86_addr_config', and
later code uses that information to establish the system-wide
configuration.

The L1TF mitigation code wants to tweak 'x86_cache_bits'. Let it do
this, but move the code out of bugs.c so that it can easily be called
earlier in boot.
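
In other words (a minimal sketch of the intended flow; the field and
variable names match the diff below, but cpu_wants_44_cache_bits() is a
hypothetical helper and the bodies are simplified for illustration):

	/* Vendor BSP init records intent, touching only the config: */
	static void bsp_init_intel(struct cpuinfo_x86 *c)
	{
		if (cpu_wants_44_cache_bits(c))	/* hypothetical helper */
			bsp_addr_config.min_cache_bits = 44;
	}

	/* Common code later applies the recorded floor exactly once: */
	c->x86_cache_bits = x86_config.phys_bits;
	if (c->x86_cache_bits < bsp_addr_config.min_cache_bits)
		c->x86_cache_bits = bsp_addr_config.min_cache_bits;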
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
---
b/arch/x86/include/asm/processor.h | 6 +++++
b/arch/x86/kernel/cpu/bugs.c | 41 -------------------------------------
b/arch/x86/kernel/cpu/common.c | 2 +
b/arch/x86/kernel/cpu/intel.c | 40 ++++++++++++++++++++++++++++++++++++
4 files changed, 48 insertions(+), 41 deletions(-)
diff -puN arch/x86/include/asm/processor.h~bsp-min_cache_bits arch/x86/include/asm/processor.h
--- a/arch/x86/include/asm/processor.h~bsp-min_cache_bits 2024-02-22 10:09:00.220915031 -0800
+++ b/arch/x86/include/asm/processor.h 2024-02-22 10:09:00.224915188 -0800
@@ -177,6 +177,12 @@ struct x86_addr_config {
* will take place at a more coarse granularity.
*/
u8 cache_align_mult;
+
+ /*
+ * Specify a floor for the number of bits that the CPU
+ * caches comprehend. Used only for L1TF mitigation.
+ */
+ u8 min_cache_bits;
};
/*
diff -puN arch/x86/kernel/cpu/bugs.c~bsp-min_cache_bits arch/x86/kernel/cpu/bugs.c
--- a/arch/x86/kernel/cpu/bugs.c~bsp-min_cache_bits 2024-02-22 10:09:00.220915031 -0800
+++ b/arch/x86/kernel/cpu/bugs.c 2024-02-22 10:09:00.224915188 -0800
@@ -2237,45 +2237,6 @@ EXPORT_SYMBOL_GPL(l1tf_mitigation);
enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
-/*
- * These CPUs all support 44bits physical address space internally in the
- * cache but CPUID can report a smaller number of physical address bits.
- *
- * The L1TF mitigation uses the top most address bit for the inversion of
- * non present PTEs. When the installed memory reaches into the top most
- * address bit due to memory holes, which has been observed on machines
- * which report 36bits physical address bits and have 32G RAM installed,
- * then the mitigation range check in l1tf_select_mitigation() triggers.
- * This is a false positive because the mitigation is still possible due to
- * the fact that the cache uses 44bit internally. Use the cache bits
- * instead of the reported physical bits and adjust them on the affected
- * machines to 44bit if the reported bits are less than 44.
- */
-static void override_cache_bits(struct cpuinfo_x86 *c)
-{
- if (c->x86 != 6)
- return;
-
- switch (c->x86_model) {
- case INTEL_FAM6_NEHALEM:
- case INTEL_FAM6_WESTMERE:
- case INTEL_FAM6_SANDYBRIDGE:
- case INTEL_FAM6_IVYBRIDGE:
- case INTEL_FAM6_HASWELL:
- case INTEL_FAM6_HASWELL_L:
- case INTEL_FAM6_HASWELL_G:
- case INTEL_FAM6_BROADWELL:
- case INTEL_FAM6_BROADWELL_G:
- case INTEL_FAM6_SKYLAKE_L:
- case INTEL_FAM6_SKYLAKE:
- case INTEL_FAM6_KABYLAKE_L:
- case INTEL_FAM6_KABYLAKE:
- if (c->x86_cache_bits < 44)
- c->x86_cache_bits = 44;
- break;
- }
-}
-
static void __init l1tf_select_mitigation(void)
{
u64 half_pa;
@@ -2288,8 +2249,6 @@ static void __init l1tf_select_mitigatio
else if (cpu_mitigations_auto_nosmt())
l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
- override_cache_bits(&boot_cpu_data);
-
switch (l1tf_mitigation) {
case L1TF_MITIGATION_OFF:
case L1TF_MITIGATION_FLUSH_NOWARN:
diff -puN arch/x86/kernel/cpu/common.c~bsp-min_cache_bits arch/x86/kernel/cpu/common.c
--- a/arch/x86/kernel/cpu/common.c~bsp-min_cache_bits 2024-02-22 10:09:00.220915031 -0800
+++ b/arch/x86/kernel/cpu/common.c 2024-02-22 10:09:00.228915345 -0800
@@ -1139,6 +1139,8 @@ void get_cpu_address_sizes(struct cpuinf
x86_config.clflush_size = detect_clflush_size(c);
c->x86_cache_bits = x86_config.phys_bits;
+ if (c->x86_cache_bits < bsp_addr_config.min_cache_bits)
+ c->x86_cache_bits = bsp_addr_config.min_cache_bits;
x86_config.cache_alignment = x86_clflush_size();
if (bsp_addr_config.cache_align_mult)
diff -puN arch/x86/kernel/cpu/intel.c~bsp-min_cache_bits arch/x86/kernel/cpu/intel.c
--- a/arch/x86/kernel/cpu/intel.c~bsp-min_cache_bits 2024-02-22 10:09:00.224915188 -0800
+++ b/arch/x86/kernel/cpu/intel.c 2024-02-22 10:09:00.228915345 -0800
@@ -395,6 +395,44 @@ detect_keyid_bits:
return keyid_bits;
}
+/*
+ * These CPUs all support 44bits physical address space internally in the
+ * cache but CPUID can report a smaller number of physical address bits.
+ *
+ * The L1TF mitigation uses the top most address bit for the inversion of
+ * non present PTEs. When the installed memory reaches into the top most
+ * address bit due to memory holes, which has been observed on machines
+ * which report 36bits physical address bits and have 32G RAM installed,
+ * then the mitigation range check in l1tf_select_mitigation() triggers.
+ * This is a false positive because the mitigation is still possible due to
+ * the fact that the cache uses 44bit internally. Use the cache bits
+ * instead of the reported physical bits and adjust them on the affected
+ * machines to 44bit if the reported bits are less than 44.
+ */
+static void set_min_cache_bits(struct cpuinfo_x86 *c)
+{
+ if (c->x86 != 6)
+ return;
+
+ switch (c->x86_model) {
+ case INTEL_FAM6_NEHALEM:
+ case INTEL_FAM6_WESTMERE:
+ case INTEL_FAM6_SANDYBRIDGE:
+ case INTEL_FAM6_IVYBRIDGE:
+ case INTEL_FAM6_HASWELL:
+ case INTEL_FAM6_HASWELL_L:
+ case INTEL_FAM6_HASWELL_G:
+ case INTEL_FAM6_BROADWELL:
+ case INTEL_FAM6_BROADWELL_G:
+ case INTEL_FAM6_SKYLAKE_L:
+ case INTEL_FAM6_SKYLAKE:
+ case INTEL_FAM6_KABYLAKE_L:
+ case INTEL_FAM6_KABYLAKE:
+ bsp_addr_config.min_cache_bits = 44;
+ break;
+ }
+}
+
static void bsp_init_intel(struct cpuinfo_x86 *c)
{
int keyid_bits = 0;
@@ -418,6 +456,8 @@ static void bsp_init_intel(struct cpuinf
/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
if (c->x86 == 15)
bsp_addr_config.cache_align_mult = 2;
+
+ set_min_cache_bits(c);
}
#ifdef CONFIG_X86_32
_
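
For reference, a simplified sketch of the l1tf_select_mitigation() range
check that this floor keeps honest (illustrative arithmetic only, not
the verbatim kernel code; the real check walks the e820 map rather than
comparing against max_pfn):

	/*
	 * The mitigation works as long as all RAM sits below half of
	 * the cache-addressable physical space.
	 */
	u64 half_pa = 1ULL << (boot_cpu_data.x86_cache_bits - 1);

	if ((u64)max_pfn << PAGE_SHIFT > half_pa)
		pr_warn("L1TF: system has more than MAX_PA/2 memory\n");

	/*
	 * With 36 reported bits: half_pa = 2^35 = 32GB, so a 32GB
	 * machine with memory holes trips the check.  With the 44-bit
	 * floor: half_pa = 2^43 = 8TB, and the false positive is gone.
	 */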