linux-arch.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH] mm/mmu_gather: send tlb_remove_table_smp_sync IPI only to MM CPUs
@ 2023-03-12  8:09 Yair Podemsky
  2023-03-12 20:25 ` Andrew Morton
  2023-03-20  8:49 ` Peter Zijlstra
  0 siblings, 2 replies; 6+ messages in thread
From: Yair Podemsky @ 2023-03-12  8:09 UTC (permalink / raw)
  To: will, aneesh.kumar, akpm, npiggin, peterz, arnd, linux-arch,
	linux-mm, linux-kernel, mtosatti, ppandit, alougovs
  Cc: ypodemsk, David Hildenbrand

Currently the tlb_remove_table_smp_sync IPI is sent to all CPUs
indiscriminately, which causes unnecessary work and delays, notably in
real-time use-cases and on isolated cpus. This patch limits the IPI so
that it is only sent to cpus that reference the affected mm and are
currently in kernel space.

Signed-off-by: Yair Podemsky <ypodemsk@redhat.com>
Suggested-by: David Hildenbrand <david@redhat.com>
---
 include/asm-generic/tlb.h |  4 ++--
 mm/khugepaged.c           |  4 ++--
 mm/mmu_gather.c           | 20 +++++++++++++++-----
 3 files changed, 19 insertions(+), 9 deletions(-)

diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index b46617207c93..0b6ba17cc8d3 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -222,7 +222,7 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
 #define tlb_needs_table_invalidate() (true)
 #endif
 
-void tlb_remove_table_sync_one(void);
+void tlb_remove_table_sync_one(struct mm_struct *mm);
 
 #else
 
@@ -230,7 +230,7 @@ void tlb_remove_table_sync_one(void);
 #error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE
 #endif
 
-static inline void tlb_remove_table_sync_one(void) { }
+static inline void tlb_remove_table_sync_one(struct mm_struct *mm) { }
 
 #endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */
 
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 5cb401aa2b9d..86a82c0ac41f 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1051,7 +1051,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
 	_pmd = pmdp_collapse_flush(vma, address, pmd);
 	spin_unlock(pmd_ptl);
 	mmu_notifier_invalidate_range_end(&range);
-	tlb_remove_table_sync_one();
+	tlb_remove_table_sync_one(mm);
 
 	spin_lock(pte_ptl);
 	result =  __collapse_huge_page_isolate(vma, address, pte, cc,
@@ -1408,7 +1408,7 @@ static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *v
 				addr + HPAGE_PMD_SIZE);
 	mmu_notifier_invalidate_range_start(&range);
 	pmd = pmdp_collapse_flush(vma, addr, pmdp);
-	tlb_remove_table_sync_one();
+	tlb_remove_table_sync_one(mm);
 	mmu_notifier_invalidate_range_end(&range);
 	mm_dec_nr_ptes(mm);
 	page_table_check_pte_clear_range(mm, addr, pmd);
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index 2b93cf6ac9ae..3b267600a5e9 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -9,6 +9,7 @@
 #include <linux/smp.h>
 #include <linux/swap.h>
 #include <linux/rmap.h>
+#include <linux/context_tracking_state.h>
 
 #include <asm/pgalloc.h>
 #include <asm/tlb.h>
@@ -191,7 +192,15 @@ static void tlb_remove_table_smp_sync(void *arg)
 	/* Simply deliver the interrupt */
 }
 
-void tlb_remove_table_sync_one(void)
+static bool cpu_in_kernel(int cpu, void *info)
+{
+	struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
+	int statue = atomic_read(&ct->state);
+	//will return true only for cpu's in kernel space
+	return !(statue & CT_STATE_MASK);
+}
+
+void tlb_remove_table_sync_one(struct mm_struct *mm)
 {
 	/*
 	 * This isn't an RCU grace period and hence the page-tables cannot be
@@ -200,7 +209,8 @@ void tlb_remove_table_sync_one(void)
 	 * It is however sufficient for software page-table walkers that rely on
 	 * IRQ disabling.
 	 */
-	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
+	on_each_cpu_cond_mask(cpu_in_kernel, tlb_remove_table_smp_sync,
+			NULL, true, mm_cpumask(mm));
 }
 
 static void tlb_remove_table_rcu(struct rcu_head *head)
@@ -237,9 +247,9 @@ static inline void tlb_table_invalidate(struct mmu_gather *tlb)
 	}
 }
 
-static void tlb_remove_table_one(void *table)
+static void tlb_remove_table_one(struct mm_struct *mm, void *table)
 {
-	tlb_remove_table_sync_one();
+	tlb_remove_table_sync_one(mm);
 	__tlb_remove_table(table);
 }
 
@@ -262,7 +272,7 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
 		*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
 		if (*batch == NULL) {
 			tlb_table_invalidate(tlb);
-			tlb_remove_table_one(table);
+			tlb_remove_table_one(tlb->mm, table);
 			return;
 		}
 		(*batch)->nr = 0;
-- 
2.31.1


^ permalink raw reply related	[flat|nested] 6+ messages in thread

* Re: [PATCH] mm/mmu_gather: send tlb_remove_table_smp_sync IPI only to MM CPUs
  2023-03-12  8:09 [PATCH] mm/mmu_gather: send tlb_remove_table_smp_sync IPI only to MM CPUs Yair Podemsky
@ 2023-03-12 20:25 ` Andrew Morton
  2023-03-20  8:49 ` Peter Zijlstra
  1 sibling, 0 replies; 6+ messages in thread
From: Andrew Morton @ 2023-03-12 20:25 UTC (permalink / raw)
  To: Yair Podemsky
  Cc: will, aneesh.kumar, npiggin, peterz, arnd, linux-arch, linux-mm,
	linux-kernel, mtosatti, ppandit, alougovs, David Hildenbrand

On Sun, 12 Mar 2023 10:09:45 +0200 Yair Podemsky <ypodemsk@redhat.com> wrote:

> Currently the tlb_remove_table_smp_sync IPI is sent to all CPUs
> indiscriminately, this causes unnecessary work and delays notable in
> real-time use-cases and isolated cpus, this patch will limit this IPI to
> only be sent to cpus referencing the effected mm and are currently in
> kernel space.
> 
> ...
>
> --- a/mm/mmu_gather.c
> +++ b/mm/mmu_gather.c
> @@ -191,7 +192,15 @@ static void tlb_remove_table_smp_sync(void *arg)
>  	/* Simply deliver the interrupt */
>  }
>  
> -void tlb_remove_table_sync_one(void)
> +static bool cpu_in_kernel(int cpu, void *info)
> +{
> +	struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
> +	int statue = atomic_read(&ct->state);

Strange identifier.  Should be "state"?

> +	//will return true only for cpu's in kernel space

Please use /* */ style comments

And use "cpus" rather than "cpu's" - plural, not possessive.

> +	return !(statue & CT_STATE_MASK);

Using

	return state & CT_STATE_MASK == CONTEXT_KERNEL;

would more clearly express the intent.

> +}

And...  surely this function is racy.  ct->state can change value one
nanosecond after cpu_in_kernel() reads it, so cpu_in_kernel()'s return
value is now wrong?

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH] mm/mmu_gather: send tlb_remove_table_smp_sync IPI only to MM CPUs
  2023-03-12  8:09 [PATCH] mm/mmu_gather: send tlb_remove_table_smp_sync IPI only to MM CPUs Yair Podemsky
  2023-03-12 20:25 ` Andrew Morton
@ 2023-03-20  8:49 ` Peter Zijlstra
  2023-03-22 14:11   ` ypodemsk
  1 sibling, 1 reply; 6+ messages in thread
From: Peter Zijlstra @ 2023-03-20  8:49 UTC (permalink / raw)
  To: Yair Podemsky
  Cc: will, aneesh.kumar, akpm, npiggin, arnd, linux-arch, linux-mm,
	linux-kernel, mtosatti, ppandit, alougovs, David Hildenbrand

On Sun, Mar 12, 2023 at 10:09:45AM +0200, Yair Podemsky wrote:
> Currently the tlb_remove_table_smp_sync IPI is sent to all CPUs
> indiscriminately, this causes unnecessary work and delays notable in
> real-time use-cases and isolated cpus, this patch will limit this IPI to
> only be sent to cpus referencing the effected mm and are currently in
> kernel space.

Did you validate that all architectures for which this is relevant
actually set bits in mm_cpumask() ?

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH] mm/mmu_gather: send tlb_remove_table_smp_sync IPI only to MM CPUs
  2023-03-20  8:49 ` Peter Zijlstra
@ 2023-03-22 14:11   ` ypodemsk
  2023-03-24 15:13     ` Will Deacon
  0 siblings, 1 reply; 6+ messages in thread
From: ypodemsk @ 2023-03-22 14:11 UTC (permalink / raw)
  To: Peter Zijlstra
  Cc: will, aneesh.kumar, akpm, npiggin, arnd, linux-arch, linux-mm,
	linux-kernel, mtosatti, ppandit, alougovs, David Hildenbrand

On Mon, 2023-03-20 at 09:49 +0100, Peter Zijlstra wrote:
> On Sun, Mar 12, 2023 at 10:09:45AM +0200, Yair Podemsky wrote:
> > Currently the tlb_remove_table_smp_sync IPI is sent to all CPUs
> > indiscriminately, this causes unnecessary work and delays notable
> > in
> > real-time use-cases and isolated cpus, this patch will limit this
> > IPI to
> > only be sent to cpus referencing the effected mm and are currently
> > in
> > kernel space.
> 
> Did you validate that all architectures for which this is relevant
> actually set bits in mm_cpumask() ?
> 
Hi Peter,
Thank you for bringing this to my attention.
I reviewed the architectures using MMU_GATHER_RCU_TABLE_FREE:
arm, powerpc, s390, sparc and x86 set the bit when switching a process
in.
For arm64, the set/clear bit calls were removed in 38d96287504a
("arm64: mm: kill mm_cpumask usage").
The reason given was that mm_cpumask was not used.
Given that we now have a use for it, I will add a patch to revert that
commit.
Thanks
Yair


^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH] mm/mmu_gather: send tlb_remove_table_smp_sync IPI only to MM CPUs
  2023-03-22 14:11   ` ypodemsk
@ 2023-03-24 15:13     ` Will Deacon
  2023-03-26  7:40       ` ypodemsk
  0 siblings, 1 reply; 6+ messages in thread
From: Will Deacon @ 2023-03-24 15:13 UTC (permalink / raw)
  To: ypodemsk
  Cc: Peter Zijlstra, aneesh.kumar, akpm, npiggin, arnd, linux-arch,
	linux-mm, linux-kernel, mtosatti, ppandit, alougovs,
	David Hildenbrand

On Wed, Mar 22, 2023 at 04:11:44PM +0200, ypodemsk@redhat.com wrote:
> On Mon, 2023-03-20 at 09:49 +0100, Peter Zijlstra wrote:
> > On Sun, Mar 12, 2023 at 10:09:45AM +0200, Yair Podemsky wrote:
> > > Currently the tlb_remove_table_smp_sync IPI is sent to all CPUs
> > > indiscriminately, this causes unnecessary work and delays notable
> > > in
> > > real-time use-cases and isolated cpus, this patch will limit this
> > > IPI to
> > > only be sent to cpus referencing the effected mm and are currently
> > > in
> > > kernel space.
> > 
> > Did you validate that all architectures for which this is relevant
> > actually set bits in mm_cpumask() ?
> > 
> Hi Peter,
> Thank you for bringing this to my attention.
> I reviewed the architectures using the MMU_GATHER_RCU_TABLE_FREE:
> arm, powerpc, s390, sparc and x86 set the bit when switching process
> in.
> for arm64 removed set/clear bit in 38d96287504a ("arm64: mm: kill
> mm_cpumask usage")
> The reason given was that mm_cpumask was not used.
> Given that we now have a use for it, I will add a patch to revert.

Maintaining the mask is also not free, so I'm not keen on adding it back
unless there's a net win.

Will

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH] mm/mmu_gather: send tlb_remove_table_smp_sync IPI only to MM CPUs
  2023-03-24 15:13     ` Will Deacon
@ 2023-03-26  7:40       ` ypodemsk
  0 siblings, 0 replies; 6+ messages in thread
From: ypodemsk @ 2023-03-26  7:40 UTC (permalink / raw)
  To: Will Deacon
  Cc: Peter Zijlstra, aneesh.kumar, akpm, npiggin, arnd, linux-arch,
	linux-mm, linux-kernel, mtosatti, ppandit, alougovs,
	David Hildenbrand

On Fri, 2023-03-24 at 15:13 +0000, Will Deacon wrote:
> On Wed, Mar 22, 2023 at 04:11:44PM +0200, ypodemsk@redhat.com wrote:
> > On Mon, 2023-03-20 at 09:49 +0100, Peter Zijlstra wrote:
> > > On Sun, Mar 12, 2023 at 10:09:45AM +0200, Yair Podemsky wrote:
> > > > Currently the tlb_remove_table_smp_sync IPI is sent to all CPUs
> > > > indiscriminately, this causes unnecessary work and delays
> > > > notable
> > > > in
> > > > real-time use-cases and isolated cpus, this patch will limit
> > > > this
> > > > IPI to
> > > > only be sent to cpus referencing the effected mm and are
> > > > currently
> > > > in
> > > > kernel space.
> > > 
> > > Did you validate that all architectures for which this is
> > > relevant
> > > actually set bits in mm_cpumask() ?
> > > 
> > Hi Peter,
> > Thank you for bringing this to my attention.
> > I reviewed the architectures using the MMU_GATHER_RCU_TABLE_FREE:
> > arm, powerpc, s390, sparc and x86 set the bit when switching
> > process
> > in.
> > for arm64 removed set/clear bit in 38d96287504a ("arm64: mm: kill
> > mm_cpumask usage")
> > The reason given was that mm_cpumask was not used.
> > Given that we now have a use for it, I will add a patch to revert.
> 
> Maintaining the mask is also not free, so I'm not keen on adding it
> back
> unless there's a net win.
> 
> Will
> 
How about adding a Kconfig option to mark which architectures set/use
mm_cpumask?
This would allow us to use mm_cpumask on architectures that support
it, and fall back to cpu_online_mask on those that don't.
It would also make it clear, going forward, which architectures set the
bit.
Yair


^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2023-03-26  7:41 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-03-12  8:09 [PATCH] mm/mmu_gather: send tlb_remove_table_smp_sync IPI only to MM CPUs Yair Podemsky
2023-03-12 20:25 ` Andrew Morton
2023-03-20  8:49 ` Peter Zijlstra
2023-03-22 14:11   ` ypodemsk
2023-03-24 15:13     ` Will Deacon
2023-03-26  7:40       ` ypodemsk

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).