From: Nadav Amit <namit@vmware.com>
To: Peter Zijlstra <peterz@infradead.org>, Andy Lutomirski <luto@kernel.org>
Cc: linux-kernel@vger.kernel.org, Ingo Molnar <mingo@redhat.com>,
	Borislav Petkov <bp@alien8.de>,
	x86@kernel.org, Thomas Gleixner <tglx@linutronix.de>,
	Dave Hansen <dave.hansen@linux.intel.com>,
	Nadav Amit <namit@vmware.com>
Subject: [PATCH 3/9] x86/mm/tlb: Refactor common code into flush_tlb_on_cpus()
Date: Wed, 12 Jun 2019 23:48:07 -0700	[thread overview]
Message-ID: <20190613064813.8102-4-namit@vmware.com> (raw)
In-Reply-To: <20190613064813.8102-1-namit@vmware.com>

arch_tlbbatch_flush() and flush_tlb_mm_range() have effectively the same
code. Extract the common code into a new function, flush_tlb_on_cpus().

There is one functional change, which should not affect correctness:
flush_tlb_mm_range() used to compare loaded_mm with the given mm to decide
whether a local flush is needed. Instead, the common code now checks
mm_cpumask(), which should give the same result. Performance should not be
affected either, since this cpumask should not change frequently enough to
introduce cache contention.
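
For illustration only (this sketch is not part of the diff below, and
flush_local() is just shorthand for the IRQ-disabled flush_tlb_func_local()
call), the local-flush condition effectively changes from:

	/* old check in flush_tlb_mm_range(): this CPU has the mm loaded */
	if (mm == this_cpu_read(cpu_tlbstate.loaded_mm))
		flush_local();

to:

	/* new check in flush_tlb_on_cpus(): this CPU is set in the mm's cpumask */
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		flush_local();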

Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Nadav Amit <namit@vmware.com>
---
 arch/x86/mm/tlb.c | 62 ++++++++++++++++++++++++++---------------------
 1 file changed, 34 insertions(+), 28 deletions(-)

diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 91f6db92554c..c34bcf03f06f 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -734,7 +734,11 @@ static inline struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm,
 			unsigned int stride_shift, bool freed_tables,
 			u64 new_tlb_gen)
 {
-	struct flush_tlb_info *info = this_cpu_ptr(&flush_tlb_info);
+	struct flush_tlb_info *info;
+
+	preempt_disable();
+
+	info = this_cpu_ptr(&flush_tlb_info);
 
 #ifdef CONFIG_DEBUG_VM
 	/*
@@ -762,6 +766,23 @@ static inline void put_flush_tlb_info(void)
 	barrier();
 	this_cpu_dec(flush_tlb_info_idx);
 #endif
+	preempt_enable();
+}
+
+static void flush_tlb_on_cpus(const cpumask_t *cpumask,
+			      const struct flush_tlb_info *info)
+{
+	int this_cpu = smp_processor_id();
+
+	if (cpumask_test_cpu(this_cpu, cpumask)) {
+		lockdep_assert_irqs_enabled();
+		local_irq_disable();
+		flush_tlb_func_local(info, TLB_LOCAL_MM_SHOOTDOWN);
+		local_irq_enable();
+	}
+
+	if (cpumask_any_but(cpumask, this_cpu) < nr_cpu_ids)
+		flush_tlb_others(cpumask, info);
 }
 
 void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
@@ -770,9 +791,6 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 {
 	struct flush_tlb_info *info;
 	u64 new_tlb_gen;
-	int cpu;
-
-	cpu = get_cpu();
 
 	/* Should we flush just the requested range? */
 	if ((end == TLB_FLUSH_ALL) ||
@@ -787,18 +805,18 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 	info = get_flush_tlb_info(mm, start, end, stride_shift, freed_tables,
 				  new_tlb_gen);
 
-	if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
-		lockdep_assert_irqs_enabled();
-		local_irq_disable();
-		flush_tlb_func_local(info, TLB_LOCAL_MM_SHOOTDOWN);
-		local_irq_enable();
-	}
+	/*
+	 * Assert that mm_cpumask() corresponds with the loaded mm. We got one
+	 * exception: for init_mm we do not need to flush anything, and the
+	 * cpumask does not correspond with loaded_mm.
+	 */
+	VM_WARN_ON_ONCE(cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)) !=
+			(mm == this_cpu_read(cpu_tlbstate.loaded_mm)) &&
+			mm != &init_mm);
 
-	if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
-		flush_tlb_others(mm_cpumask(mm), info);
+	flush_tlb_on_cpus(mm_cpumask(mm), info);
 
 	put_flush_tlb_info();
-	put_cpu();
 }
 
 
@@ -833,13 +851,11 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	} else {
 		struct flush_tlb_info *info;
 
-		preempt_disable();
 		info = get_flush_tlb_info(NULL, start, end, 0, false, 0);
 
 		on_each_cpu(do_kernel_range_flush, info, 1);
 
 		put_flush_tlb_info();
-		preempt_enable();
 	}
 }
 
@@ -857,21 +873,11 @@ static const struct flush_tlb_info full_flush_tlb_info = {
 
 void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 {
-	int cpu = get_cpu();
-
-	if (cpumask_test_cpu(cpu, &batch->cpumask)) {
-		lockdep_assert_irqs_enabled();
-		local_irq_disable();
-		flush_tlb_func_local(&full_flush_tlb_info, TLB_LOCAL_SHOOTDOWN);
-		local_irq_enable();
-	}
-
-	if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids)
-		flush_tlb_others(&batch->cpumask, &full_flush_tlb_info);
+	preempt_disable();
+	flush_tlb_on_cpus(&batch->cpumask, &full_flush_tlb_info);
+	preempt_enable();
 
 	cpumask_clear(&batch->cpumask);
-
-	put_cpu();
 }
 
 static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
-- 
2.20.1


Thread overview: 61+ messages
2019-06-13  6:48 [PATCH 0/9] x86: Concurrent TLB flushes and other improvements Nadav Amit
2019-06-13  6:48 ` [PATCH 1/9] smp: Remove smp_call_function() and on_each_cpu() return values Nadav Amit
2019-06-23 12:32   ` [tip:smp/hotplug] " tip-bot for Nadav Amit
2019-06-13  6:48 ` [PATCH 2/9] smp: Run functions concurrently in smp_call_function_many() Nadav Amit
2019-06-13  6:48 ` Nadav Amit [this message]
2019-06-25 21:07   ` [PATCH 3/9] x86/mm/tlb: Refactor common code into flush_tlb_on_cpus() Dave Hansen
2019-06-26  1:57     ` Nadav Amit
2019-06-13  6:48 ` [PATCH 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently Nadav Amit
2019-06-25 21:29   ` Dave Hansen
2019-06-26  2:35     ` Nadav Amit
2019-06-26  3:00       ` Dave Hansen
2019-06-26  3:32         ` Nadav Amit
2019-06-26  3:36   ` Andy Lutomirski
2019-06-26  3:48     ` Nadav Amit
2019-06-26  3:51       ` Andy Lutomirski
2019-06-13  6:48 ` [PATCH 5/9] x86/mm/tlb: Optimize local TLB flushes Nadav Amit
2019-06-25 21:36   ` Dave Hansen
2019-06-26 16:33     ` Andy Lutomirski
2019-06-26 16:39       ` Nadav Amit
2019-06-26 16:50         ` Andy Lutomirski
2019-06-13  6:48 ` [PATCH 6/9] KVM: x86: Provide paravirtualized flush_tlb_multi() Nadav Amit
2019-06-25 21:40   ` Dave Hansen
2019-06-26  2:39     ` Nadav Amit
2019-06-26  3:35       ` Andy Lutomirski
2019-06-26  3:41         ` Nadav Amit
2019-06-26  3:56           ` Andy Lutomirski
2019-06-26  6:30             ` Nadav Amit
2019-06-26 16:37               ` Andy Lutomirski
2019-06-26 17:41                 ` Vitaly Kuznetsov
2019-06-26 18:21                   ` Andy Lutomirski
2019-06-13  6:48 ` [PATCH 7/9] smp: Do not mark call_function_data as shared Nadav Amit
2019-06-23 12:31   ` [tip:smp/hotplug] " tip-bot for Nadav Amit
2019-06-13  6:48 ` [PATCH 8/9] x86/tlb: Privatize cpu_tlbstate Nadav Amit
2019-06-14 15:58   ` Sean Christopherson
2019-06-17 17:10     ` Nadav Amit
2019-06-25 21:52   ` Dave Hansen
2019-06-26  1:22     ` Nadav Amit
2019-06-26  3:57     ` Andy Lutomirski
2019-06-13  6:48 ` [PATCH 9/9] x86/apic: Use non-atomic operations when possible Nadav Amit
2019-06-23 12:16   ` [tip:x86/apic] " tip-bot for Nadav Amit
2019-06-25 21:58   ` [PATCH 9/9] " Dave Hansen
2019-06-25 22:03     ` Thomas Gleixner
2019-06-23 12:37 ` [PATCH 0/9] x86: Concurrent TLB flushes and other improvements Thomas Gleixner
2019-06-25 22:02 ` Dave Hansen
2019-06-26  1:34   ` Nadav Amit
