From: "Uladzislau Rezki (Sony)" <urezki@gmail.com>
To: linux-mm@kvack.org, Andrew Morton <akpm@linux-foundation.org>
Cc: LKML <linux-kernel@vger.kernel.org>, Baoquan He <bhe@redhat.com>,
	Lorenzo Stoakes <lstoakes@gmail.com>,
	Christoph Hellwig <hch@infradead.org>,
	Matthew Wilcox <willy@infradead.org>,
	"Liam R . Howlett" <Liam.Howlett@oracle.com>,
	Dave Chinner <david@fromorbit.com>,
	"Paul E . McKenney" <paulmck@kernel.org>,
	Joel Fernandes <joel@joelfernandes.org>,
	Uladzislau Rezki <urezki@gmail.com>,
	Oleksiy Avramchenko <oleksiy.avramchenko@sony.com>
Subject: [PATCH 7/9] mm: vmalloc: Insert lazy-VA per-cpu zone
Date: Mon, 22 May 2023 13:08:47 +0200
Message-ID: <20230522110849.2921-8-urezki@gmail.com>
In-Reply-To: <20230522110849.2921-1-urezki@gmail.com>

Similar to busy VAs, lazy ones are now stored in per-CPU zones as
well. A freed address is mapped to the zone it belongs to and stays
there until it is purged.
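
To illustrate the conversion, below is a minimal sketch of the zone
lookup. The addr_to_cvz() name and the per-CPU cpu_vmap_zone variable
are the ones used by this series; the fixed stride is an assumption
made only for illustration, the real stride comes from cvz_size:

	/*
	 * Simplified stand-in for the zone lookup of this series. The
	 * real code derives the stride from cvz_size (see patch 9/9);
	 * a fixed 1 MiB stride (CVZ_SHIFT, hypothetical) is assumed.
	 */
	#define CVZ_SHIFT	20

	static struct cpu_vmap_zone *
	addr_to_cvz_sketch(unsigned long addr)
	{
		/* Hash the address to one of the per-CPU zones. */
		unsigned int i = (addr >> CVZ_SHIFT) % num_possible_cpus();

		return per_cpu_ptr(&cpu_vmap_zone, i);
	}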

Such an approach does not require any global locking primitive;
instead, access scales with the number of CPUs.
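
The effect on the free path can be seen directly in the
free_vmap_area_noflush() hunk below; condensed:

	/* Before: every lazy free serialized on one global lock. */
	spin_lock(&purge_vmap_area_lock);
	merge_or_add_vmap_area(va,
		&purge_vmap_area_root, &purge_vmap_area_list);
	spin_unlock(&purge_vmap_area_lock);

	/*
	 * After: only the owning zone's LAZY lock is taken, so frees
	 * that map to different zones proceed without contention.
	 */
	fbl_lock(z, LAZY);
	merge_or_add_vmap_area(va, &fbl_root(z, LAZY), &fbl_head(z, LAZY));
	fbl_unlock(z, LAZY);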

This patch therefore removes the global purge lock, the global purge
tree and the global purge list.

Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
---
 mm/vmalloc.c | 127 ++++++++++++++++++++++++++++-----------------------
 1 file changed, 71 insertions(+), 56 deletions(-)

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index dd83deb5ef4f..fe993c0561dd 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -734,10 +734,6 @@ static DEFINE_SPINLOCK(free_vmap_area_lock);
 LIST_HEAD(vmap_area_list);
 static bool vmap_initialized __read_mostly;
 
-static struct rb_root purge_vmap_area_root = RB_ROOT;
-static LIST_HEAD(purge_vmap_area_list);
-static DEFINE_SPINLOCK(purge_vmap_area_lock);
-
 /*
  * This kmem_cache is used for vmap_area objects. Instead of
  * allocating from slab we reuse an object from this cache to
@@ -1792,39 +1788,17 @@ static DEFINE_MUTEX(vmap_purge_lock);
 /* for per-CPU blocks */
 static void purge_fragmented_blocks_allcpus(void);
 
-/*
- * Purges all lazily-freed vmap areas.
- */
-static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
+static unsigned long
+purge_cpu_vmap_zone(struct cpu_vmap_zone *z)
 {
-	unsigned long resched_threshold;
-	unsigned int num_purged_areas = 0;
-	struct list_head local_purge_list;
+	unsigned long num_purged_areas = 0;
 	struct vmap_area *va, *n_va;
 
-	lockdep_assert_held(&vmap_purge_lock);
-
-	spin_lock(&purge_vmap_area_lock);
-	purge_vmap_area_root = RB_ROOT;
-	list_replace_init(&purge_vmap_area_list, &local_purge_list);
-	spin_unlock(&purge_vmap_area_lock);
-
-	if (unlikely(list_empty(&local_purge_list)))
+	if (list_empty(&z->purge_list))
 		goto out;
 
-	start = min(start,
-		list_first_entry(&local_purge_list,
-			struct vmap_area, list)->va_start);
-
-	end = max(end,
-		list_last_entry(&local_purge_list,
-			struct vmap_area, list)->va_end);
-
-	flush_tlb_kernel_range(start, end);
-	resched_threshold = lazy_max_pages() << 1;
-
 	spin_lock(&free_vmap_area_lock);
-	list_for_each_entry_safe(va, n_va, &local_purge_list, list) {
+	list_for_each_entry_safe(va, n_va, &z->purge_list, list) {
 		unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
 		unsigned long orig_start = va->va_start;
 		unsigned long orig_end = va->va_end;
@@ -1846,13 +1820,57 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
 
 		atomic_long_sub(nr, &vmap_lazy_nr);
 		num_purged_areas++;
-
-		if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
-			cond_resched_lock(&free_vmap_area_lock);
 	}
 	spin_unlock(&free_vmap_area_lock);
 
 out:
+	return num_purged_areas;
+}
+
+/*
+ * Purges all lazily-freed vmap areas.
+ */
+static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
+{
+	unsigned long num_purged_areas = 0;
+	struct cpu_vmap_zone *z;
+	int need_purge = 0;
+	int i;
+
+	lockdep_assert_held(&vmap_purge_lock);
+
+	for_each_possible_cpu(i) {
+		z = per_cpu_ptr(&cpu_vmap_zone, i);
+		INIT_LIST_HEAD(&z->purge_list);
+
+		if (RB_EMPTY_ROOT(&fbl_root(z, LAZY)))
+			continue;
+
+		fbl_lock(z, LAZY);
+		WRITE_ONCE(fbl(z, LAZY, root.rb_node), NULL);
+		list_replace_init(&fbl_head(z, LAZY), &z->purge_list);
+		fbl_unlock(z, LAZY);
+
+		start = min(start,
+			list_first_entry(&z->purge_list,
+				struct vmap_area, list)->va_start);
+
+		end = max(end,
+			list_last_entry(&z->purge_list,
+				struct vmap_area, list)->va_end);
+
+		need_purge++;
+	}
+
+	if (need_purge) {
+		flush_tlb_kernel_range(start, end);
+
+		for_each_possible_cpu(i) {
+			z = per_cpu_ptr(&cpu_vmap_zone, i);
+			num_purged_areas += purge_cpu_vmap_zone(z);
+		}
+	}
+
 	trace_purge_vmap_area_lazy(start, end, num_purged_areas);
 	return num_purged_areas > 0;
 }
@@ -1870,16 +1888,9 @@ static void purge_vmap_area_lazy(void)
 
 static void drain_vmap_area_work(struct work_struct *work)
 {
-	unsigned long nr_lazy;
-
-	do {
-		mutex_lock(&vmap_purge_lock);
-		__purge_vmap_area_lazy(ULONG_MAX, 0);
-		mutex_unlock(&vmap_purge_lock);
-
-		/* Recheck if further work is required. */
-		nr_lazy = atomic_long_read(&vmap_lazy_nr);
-	} while (nr_lazy > lazy_max_pages());
+	mutex_lock(&vmap_purge_lock);
+	__purge_vmap_area_lazy(ULONG_MAX, 0);
+	mutex_unlock(&vmap_purge_lock);
 }
 
 /*
@@ -1889,6 +1900,7 @@ static void drain_vmap_area_work(struct work_struct *work)
  */
 static void free_vmap_area_noflush(struct vmap_area *va)
 {
+	struct cpu_vmap_zone *z = addr_to_cvz(va->va_start);
 	unsigned long nr_lazy_max = lazy_max_pages();
 	unsigned long va_start = va->va_start;
 	unsigned long nr_lazy;
@@ -1902,10 +1914,9 @@ static void free_vmap_area_noflush(struct vmap_area *va)
 	/*
 	 * Merge or place it to the purge tree/list.
 	 */
-	spin_lock(&purge_vmap_area_lock);
-	merge_or_add_vmap_area(va,
-		&purge_vmap_area_root, &purge_vmap_area_list);
-	spin_unlock(&purge_vmap_area_lock);
+	fbl_lock(z, LAZY);
+	merge_or_add_vmap_area(va, &fbl_root(z, LAZY), &fbl_head(z, LAZY));
+	fbl_unlock(z, LAZY);
 
 	trace_free_vmap_area_noflush(va_start, nr_lazy, nr_lazy_max);
 
@@ -4199,17 +4210,21 @@ static void show_numa_info(struct seq_file *m, struct vm_struct *v)
 
 static void show_purge_info(struct seq_file *m)
 {
+	struct cpu_vmap_zone *z;
 	struct vmap_area *va;
+	int i;
 
-	mutex_lock(&vmap_purge_lock);
-	spin_lock(&purge_vmap_area_lock);
-	list_for_each_entry(va, &purge_vmap_area_list, list) {
-		seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
-			(void *)va->va_start, (void *)va->va_end,
-			va->va_end - va->va_start);
+	for_each_possible_cpu(i) {
+		z = per_cpu_ptr(&cpu_vmap_zone, i);
+
+		fbl_lock(z, LAZY);
+		list_for_each_entry(va, &fbl_head(z, LAZY), list) {
+			seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
+				(void *)va->va_start, (void *)va->va_end,
+				va->va_end - va->va_start);
+		}
+		fbl_unlock(z, LAZY);
 	}
-	spin_unlock(&purge_vmap_area_lock);
-	mutex_unlock(&vmap_purge_lock);
 }
 
 static int s_show(struct seq_file *m, void *p)
-- 
2.30.2


Thread overview: 40+ messages
2023-05-22 11:08 [PATCH 0/9] Mitigate a vmap lock contention Uladzislau Rezki (Sony)
2023-05-22 11:08 ` [PATCH 1/9] mm: vmalloc: Add va_alloc() helper Uladzislau Rezki (Sony)
2023-05-23  6:05   ` Christoph Hellwig
2023-05-23  9:57     ` Uladzislau Rezki
2023-05-27 19:55   ` Lorenzo Stoakes
2023-05-22 11:08 ` [PATCH 2/9] mm: vmalloc: Rename adjust_va_to_fit_type() function Uladzislau Rezki (Sony)
2023-05-23  6:06   ` Christoph Hellwig
2023-05-23 10:01     ` Uladzislau Rezki
2023-05-23 17:24   ` Liam R. Howlett
2023-05-24 11:51     ` Uladzislau Rezki
2023-05-27 21:50   ` Lorenzo Stoakes
2023-05-29 20:37     ` Uladzislau Rezki
2023-05-22 11:08 ` [PATCH 3/9] mm: vmalloc: Move vmap_init_free_space() down in vmalloc.c Uladzislau Rezki (Sony)
2023-05-23  6:06   ` Christoph Hellwig
2023-05-27 21:52   ` Lorenzo Stoakes
2023-05-22 11:08 ` [PATCH 4/9] mm: vmalloc: Add a per-CPU-zone infrastructure Uladzislau Rezki (Sony)
2023-05-23  6:08   ` Christoph Hellwig
2023-05-23 14:53     ` Uladzislau Rezki
2023-05-23 15:13       ` Christoph Hellwig
2023-05-23 15:32         ` Uladzislau Rezki
2023-05-22 11:08 ` [PATCH 5/9] mm: vmalloc: Insert busy-VA per-cpu zone Uladzislau Rezki (Sony)
2023-05-23  6:12   ` Christoph Hellwig
2023-05-23 15:00     ` Uladzislau Rezki
2023-05-22 11:08 ` [PATCH 6/9] mm: vmalloc: Support multiple zones in vmallocinfo Uladzislau Rezki (Sony)
2023-05-22 11:08 ` Uladzislau Rezki (Sony) [this message]
2023-05-22 11:08 ` [PATCH 8/9] mm: vmalloc: Offload free_vmap_area_lock global lock Uladzislau Rezki (Sony)
2023-06-05  0:43   ` Baoquan He
2023-06-06  9:01     ` Uladzislau Rezki
2023-06-06 12:11       ` Baoquan He
2023-06-07  6:58         ` Uladzislau Rezki
2023-05-22 11:08 ` [PATCH 9/9] mm: vmalloc: Scale and activate cvz_size Uladzislau Rezki (Sony)
2023-05-23 11:59 ` [PATCH 0/9] Mitigate a vmap lock contention Hyeonggon Yoo
2023-05-23 15:12   ` Uladzislau Rezki
2023-05-23 18:04     ` Hyeonggon Yoo
2023-05-23 21:43       ` Dave Chinner
2023-05-24  1:30         ` Hyeonggon Yoo
2023-05-24  9:50       ` Uladzislau Rezki
2023-05-24 21:56         ` Dave Chinner
2023-05-25  7:59           ` Christoph Hellwig
2023-05-25 10:20           ` Uladzislau Rezki
