From: "tip-bot2 for Balbir Singh" <tip-bot2@linutronix.de>
To: linux-tip-commits@vger.kernel.org
Cc: Thomas Gleixner <tglx@linutronix.de>,
	Balbir Singh <sblbir@amazon.com>, x86 <x86@kernel.org>,
	LKML <linux-kernel@vger.kernel.org>
Subject: [tip: x86/mm] x86/kvm: Refactor L1D flushing
Date: Fri, 22 May 2020 09:32:55 -0000
Message-ID: <159013997582.17951.6885445528498270174.tip-bot2@tip-bot2>
In-Reply-To: <20200510014803.12190-5-sblbir@amazon.com>

The following commit has been merged into the x86/mm branch of tip:

Commit-ID:     3f768f0032dbc0657ed7e48f4735a3c4e49e25d7
Gitweb:        https://git.kernel.org/tip/3f768f0032dbc0657ed7e48f4735a3c4e49e25d7
Author:        Balbir Singh <sblbir@amazon.com>
AuthorDate:    Sun, 10 May 2020 11:48:01 +10:00
Committer:     Thomas Gleixner <tglx@linutronix.de>
CommitterDate: Wed, 13 May 2020 18:12:20 +02:00

x86/kvm: Refactor L1D flushing

Move more L1D flush related code out of KVM/VMX into builtin code so it
can be reused for L1D flushing outside of KVM:

 - Move the initialization to l1d_flush_init_once() and remove the
   deallocation of the L1D flush pages.

   This avoids adding complex refcounting of users (VMX or tasks which
   opt into an L1D flush on context switch) for the price of a few pages
   potentially wasted when no users are left.

 - Unify the flush invocations as arch_l1d_flush(), which attempts the
   hardware flush and falls back to the software implementation, with
   the option of prepopulating the TLB entries first (see the
   caller-side sketch below).
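
For illustration, a minimal caller-side sketch of the resulting pattern,
modeled on the vmx.c hunks below. The example_* function names are
hypothetical; l1d_flush_init_once(), arch_l1d_flush() and
L1D_FLUSH_POPULATE_TLB are the interfaces introduced by this patch:

	#include <asm/cacheflush.h>

	/*
	 * One-time setup: allocates the shared flush pages unless the
	 * CPU has X86_FEATURE_FLUSH_L1D (the hardware flush needs no
	 * pages). Returns 0 on success, -ENOTSUPP on non-Intel CPUs
	 * and -ENOMEM if the page allocation fails.
	 */
	static int example_setup(void)
	{
		return l1d_flush_init_once();
	}

	static void example_flush(void)
	{
		/*
		 * Try the MSR_IA32_FLUSH_CMD based hardware flush first;
		 * on failure, fall back to the software loop, optionally
		 * prepopulating the TLB entries for the flush pages.
		 */
		arch_l1d_flush(L1D_FLUSH_POPULATE_TLB);
	}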

[ tglx: Massage changelog and add a paranoid check of the flush pages
  	pointer ]

Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Balbir Singh <sblbir@amazon.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/20200510014803.12190-5-sblbir@amazon.com

---
 arch/x86/include/asm/cacheflush.h | 12 ++---
 arch/x86/kernel/l1d_flush.c       | 68 ++++++++++++++++++++++--------
 arch/x86/kvm/vmx/vmx.c            | 20 +--------
 3 files changed, 60 insertions(+), 40 deletions(-)

diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index 21cc3b2..851d8f1 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -7,11 +7,13 @@
 #include <asm/special_insns.h>
 
 #define L1D_CACHE_ORDER 4
+
+enum l1d_flush_options {
+	L1D_FLUSH_POPULATE_TLB = 0x1,
+};
+
 void clflush_cache_range(void *addr, unsigned int size);
-void l1d_flush_populate_tlb(void *l1d_flush_pages);
-void *l1d_flush_alloc_pages(void);
-void l1d_flush_cleanup_pages(void *l1d_flush_pages);
-void l1d_flush_sw(void *l1d_flush_pages);
-int l1d_flush_hw(void);
+int l1d_flush_init_once(void);
+void arch_l1d_flush(enum l1d_flush_options options);
 
 #endif /* _ASM_X86_CACHEFLUSH_H */
diff --git a/arch/x86/kernel/l1d_flush.c b/arch/x86/kernel/l1d_flush.c
index 32119ee..4662f90 100644
--- a/arch/x86/kernel/l1d_flush.c
+++ b/arch/x86/kernel/l1d_flush.c
@@ -4,10 +4,10 @@
 
 #include <asm/cacheflush.h>
 
-void *l1d_flush_alloc_pages(void)
+static void *l1d_flush_alloc_pages(void)
 {
 	struct page *page;
-	void *l1d_flush_pages = NULL;
+	void *flush_pages = NULL;
 	int i;
 
 	/*
@@ -17,7 +17,7 @@ void *l1d_flush_alloc_pages(void)
 	page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
 	if (!page)
 		return NULL;
-	l1d_flush_pages = page_address(page);
+	flush_pages = page_address(page);
 
 	/*
 	 * Initialize each page with a different pattern in
@@ -25,20 +25,13 @@ void *l1d_flush_alloc_pages(void)
 	 * virtualization case.
 	 */
 	for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) {
-		memset(l1d_flush_pages + i * PAGE_SIZE, i + 1,
+		memset(flush_pages + i * PAGE_SIZE, i + 1,
 				PAGE_SIZE);
 	}
-	return l1d_flush_pages;
+	return flush_pages;
 }
-EXPORT_SYMBOL_GPL(l1d_flush_alloc_pages);
 
-void l1d_flush_cleanup_pages(void *l1d_flush_pages)
-{
-	free_pages((unsigned long)l1d_flush_pages, L1D_CACHE_ORDER);
-}
-EXPORT_SYMBOL_GPL(l1d_flush_cleanup_pages);
-
-void l1d_flush_populate_tlb(void *l1d_flush_pages)
+static void l1d_flush_populate_tlb(void *l1d_flush_pages)
 {
 	int size = PAGE_SIZE << L1D_CACHE_ORDER;
 
@@ -56,9 +49,8 @@ void l1d_flush_populate_tlb(void *l1d_flush_pages)
 		    [size] "r" (size)
 		: "eax", "ebx", "ecx", "edx");
 }
-EXPORT_SYMBOL_GPL(l1d_flush_populate_tlb);
 
-int l1d_flush_hw(void)
+static int l1d_flush_hw(void)
 {
 	if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) {
 		wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
@@ -66,9 +58,8 @@ int l1d_flush_hw(void)
 	}
 	return -ENOTSUPP;
 }
-EXPORT_SYMBOL_GPL(l1d_flush_hw);
 
-void l1d_flush_sw(void *l1d_flush_pages)
+static void l1d_flush_sw(void *l1d_flush_pages)
 {
 	int size = PAGE_SIZE << L1D_CACHE_ORDER;
 
@@ -85,4 +76,45 @@ void l1d_flush_sw(void *l1d_flush_pages)
 			[size] "r" (size)
 			: "eax", "ecx");
 }
-EXPORT_SYMBOL_GPL(l1d_flush_sw);
+
+static void *l1d_flush_pages;
+static DEFINE_MUTEX(l1d_flush_mutex);
+
+/*
+ * Initialize and setup L1D flush once, each caller will reuse the
+ * l1d_flush_pages for flushing, no per CPU allocations or NUMA aware
+ * allocations at the moment.
+ */
+int l1d_flush_init_once(void)
+{
+	int ret = 0;
+
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+		return -ENOTSUPP;
+
+	if (static_cpu_has(X86_FEATURE_FLUSH_L1D) || l1d_flush_pages)
+		return ret;
+
+	mutex_lock(&l1d_flush_mutex);
+	if (!l1d_flush_pages)
+		l1d_flush_pages = l1d_flush_alloc_pages();
+	ret = l1d_flush_pages ? 0 : -ENOMEM;
+	mutex_unlock(&l1d_flush_mutex);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(l1d_flush_init_once);
+
+void arch_l1d_flush(enum l1d_flush_options options)
+{
+	if (!l1d_flush_hw())
+		return;
+
+	if (WARN_ON_ONCE(!l1d_flush_pages))
+		return;
+
+	if (options & L1D_FLUSH_POPULATE_TLB)
+		l1d_flush_populate_tlb(l1d_flush_pages);
+
+	l1d_flush_sw(l1d_flush_pages);
+}
+EXPORT_SYMBOL_GPL(arch_l1d_flush);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 786d161..d489234 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -203,8 +203,6 @@ static const struct {
 	[VMENTER_L1D_FLUSH_NOT_REQUIRED] = {"not required", false},
 };
 
-static void *vmx_l1d_flush_pages;
-
 static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
 {
 	if (!boot_cpu_has_bug(X86_BUG_L1TF)) {
@@ -247,12 +245,9 @@ static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
 		l1tf = VMENTER_L1D_FLUSH_ALWAYS;
 	}
 
-	if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages &&
-	    !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
-		vmx_l1d_flush_pages = l1d_flush_alloc_pages();
-		if (!vmx_l1d_flush_pages)
+	if (l1tf != VMENTER_L1D_FLUSH_NEVER)
+		if (l1d_flush_init_once())
 			return -ENOMEM;
-	}
 
 	l1tf_vmx_mitigation = l1tf;
 
@@ -6010,12 +6005,7 @@ static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
 	}
 
 	vcpu->stat.l1d_flush++;
-
-	if (!l1d_flush_hw())
-		return;
-
-	l1d_flush_populate_tlb(vmx_l1d_flush_pages);
-	l1d_flush_sw(vmx_l1d_flush_pages);
+	arch_l1d_flush(L1D_FLUSH_POPULATE_TLB);
 }
 
 static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
@@ -7983,10 +7973,6 @@ static struct kvm_x86_init_ops vmx_init_ops __initdata = {
 
 static void vmx_cleanup_l1d_flush(void)
 {
-	if (vmx_l1d_flush_pages) {
-		l1d_flush_cleanup_pages(vmx_l1d_flush_pages);
-		vmx_l1d_flush_pages = NULL;
-	}
 	/* Restore state so sysfs ignores VMX */
 	l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
 }


Thread overview: 26+ messages
2020-05-10  1:47 [PATCH v6 0/6] Optionally flush L1D on context switch Balbir Singh
2020-05-10  1:47 ` [PATCH v6 1/6] arch/x86/kvm: Refactor l1d flush lifecycle management Balbir Singh
2020-05-13 13:35   ` Thomas Gleixner
2020-05-14  8:23     ` Singh, Balbir
2020-05-13 13:53   ` Thomas Gleixner
2020-05-14  8:25     ` Singh, Balbir
2020-05-22  9:32   ` [tip: x86/mm] x86/kvm: Refactor L1D flush page management tip-bot2 for Balbir Singh
2020-05-10  1:47 ` [PATCH v6 2/6] arch/x86/kvm: Refactor tlbflush and l1d flush Balbir Singh
2020-05-22  9:32   ` [tip: x86/mm] x86/kvm: Refactor L1D flush operations tip-bot2 for Balbir Singh
2020-05-10  1:48 ` [PATCH v6 3/6] arch/x86/mm: Refactor cond_ibpb() to support other use cases Balbir Singh
2020-05-13 14:16   ` Thomas Gleixner
2020-05-22  9:32   ` [tip: x86/mm] x86/mm: " tip-bot2 for Balbir Singh
2020-09-16 13:11   ` [tip: x86/pti] " tip-bot2 for Balbir Singh
2020-05-10  1:48 ` [PATCH v6 4/6] arch/x86/kvm: Refactor L1D flushing Balbir Singh
2020-05-22  9:32   ` tip-bot2 for Balbir Singh [this message]
2020-05-10  1:48 ` [PATCH v6 5/6] Optionally flush L1D on context switch Balbir Singh
2020-05-13 15:04   ` Thomas Gleixner
2020-05-14  8:23     ` Singh, Balbir
2020-05-13 15:27   ` Thomas Gleixner
2020-05-14 21:28     ` Singh, Balbir
2020-05-13 16:16   ` Thomas Gleixner
2020-05-14  7:43     ` Singh, Balbir
2020-05-14 11:33       ` Thomas Gleixner
2020-05-10  1:48 ` [PATCH v6 6/6] Documentation: Add L1D flushing Documentation Balbir Singh
2020-05-13 13:33   ` Thomas Gleixner
2020-05-14  1:12     ` Singh, Balbir
