From: "Uladzislau Rezki (Sony)" <urezki@gmail.com>
To: linux-mm@kvack.org, Andrew Morton <akpm@linux-foundation.org>
Cc: LKML <linux-kernel@vger.kernel.org>, Baoquan He <bhe@redhat.com>,
	Lorenzo Stoakes <lstoakes@gmail.com>,
	Christoph Hellwig <hch@infradead.org>,
	Matthew Wilcox <willy@infradead.org>,
	"Liam R . Howlett" <Liam.Howlett@oracle.com>,
	Dave Chinner <david@fromorbit.com>,
	"Paul E . McKenney" <paulmck@kernel.org>,
	Joel Fernandes <joel@joelfernandes.org>,
	Uladzislau Rezki <urezki@gmail.com>,
	Oleksiy Avramchenko <oleksiy.avramchenko@sony.com>
Subject: [PATCH v2 7/9] mm: vmalloc: Support multiple nodes in vread_iter
Date: Tue, 29 Aug 2023 10:11:40 +0200
Message-ID: <20230829081142.3619-8-urezki@gmail.com>
In-Reply-To: <20230829081142.3619-1-urezki@gmail.com>

Extend vread_iter() so that it can perform a sequential read
of VAs that are spread among multiple nodes. This way a data
read over /dev/kmem correctly reflects the vmalloc memory
layout.
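
Below is a condensed sketch (not part of this patch, for illustration
only) of the locking pattern the diff introduces: the new helper
find_vmap_area_exceed_addr_lock() returns the node whose busy tree
holds the lowest VA with addr < va_end and keeps that node's busy.lock
held, so the read loop consumes the VA, remembers va_end, drops the
lock and repeats the lookup from there:

	unsigned long next = (unsigned long) addr;
	struct vmap_area *va;
	struct vmap_node *vn;

	while ((vn = find_vmap_area_exceed_addr_lock(next, &va))) {
		/* copy or zero the part of the request covered by "va" */

		next = va->va_end;		/* resume past this VA */
		spin_unlock(&vn->busy.lock);	/* helper returned "vn" locked */
	}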

Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
---
 mm/vmalloc.c | 67 +++++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 53 insertions(+), 14 deletions(-)

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 4fd4915c532d..968144c16237 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -870,7 +870,7 @@ unsigned long vmalloc_nr_pages(void)
 
 /* Look up the first VA which satisfies addr < va_end, NULL if none. */
 static struct vmap_area *
-find_vmap_area_exceed_addr(unsigned long addr, struct rb_root *root)
+__find_vmap_area_exceed_addr(unsigned long addr, struct rb_root *root)
 {
 	struct vmap_area *va = NULL;
 	struct rb_node *n = root->rb_node;
@@ -894,6 +894,41 @@ find_vmap_area_exceed_addr(unsigned long addr, struct rb_root *root)
 	return va;
 }
 
+/*
+ * Returns the node where the first VA that satisfies addr < va_end resides.
+ * On success, the node is locked. The caller is responsible for unlocking it
+ * once the VA no longer needs to be accessed.
+ *
+ * Returns NULL if nothing is found.
+ */
+static struct vmap_node *
+find_vmap_area_exceed_addr_lock(unsigned long addr, struct vmap_area **va)
+{
+	struct vmap_node *vn, *va_node = NULL;
+	struct vmap_area *va_lowest;
+	int i;
+
+	for (i = 0; i < nr_nodes; i++) {
+		vn = &nodes[i];
+
+		spin_lock(&vn->busy.lock);
+		va_lowest = __find_vmap_area_exceed_addr(addr, &vn->busy.root);
+		if (va_lowest) {
+			if (!va_node || va_lowest->va_start < (*va)->va_start) {
+				if (va_node)
+					spin_unlock(&va_node->busy.lock);
+
+				*va = va_lowest;
+				va_node = vn;
+				continue;
+			}
+		}
+		spin_unlock(&vn->busy.lock);
+	}
+
+	return va_node;
+}
+
 static struct vmap_area *__find_vmap_area(unsigned long addr, struct rb_root *root)
 {
 	struct rb_node *n = root->rb_node;
@@ -4048,6 +4083,7 @@ long vread_iter(struct iov_iter *iter, const char *addr, size_t count)
 	struct vm_struct *vm;
 	char *vaddr;
 	size_t n, size, flags, remains;
+	unsigned long next;
 
 	addr = kasan_reset_tag(addr);
 
@@ -4057,19 +4093,15 @@ long vread_iter(struct iov_iter *iter, const char *addr, size_t count)
 
 	remains = count;
 
-	/* Hooked to node_0 so far. */
-	vn = addr_to_node(0);
-	spin_lock(&vn->busy.lock);
-
-	va = find_vmap_area_exceed_addr((unsigned long)addr, &vn->busy.root);
-	if (!va)
+	vn = find_vmap_area_exceed_addr_lock((unsigned long) addr, &va);
+	if (!vn)
 		goto finished_zero;
 
 	/* no intersects with alive vmap_area */
 	if ((unsigned long)addr + remains <= va->va_start)
 		goto finished_zero;
 
-	list_for_each_entry_from(va, &vn->busy.head, list) {
+	do {
 		size_t copied;
 
 		if (remains == 0)
@@ -4084,10 +4116,10 @@ long vread_iter(struct iov_iter *iter, const char *addr, size_t count)
 		WARN_ON(flags == VMAP_BLOCK);
 
 		if (!vm && !flags)
-			continue;
+			goto next_va;
 
 		if (vm && (vm->flags & VM_UNINITIALIZED))
-			continue;
+			goto next_va;
 
 		/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
 		smp_rmb();
@@ -4096,7 +4128,7 @@ long vread_iter(struct iov_iter *iter, const char *addr, size_t count)
 		size = vm ? get_vm_area_size(vm) : va_size(va);
 
 		if (addr >= vaddr + size)
-			continue;
+			goto next_va;
 
 		if (addr < vaddr) {
 			size_t to_zero = min_t(size_t, vaddr - addr, remains);
@@ -4125,15 +4157,22 @@ long vread_iter(struct iov_iter *iter, const char *addr, size_t count)
 
 		if (copied != n)
 			goto finished;
-	}
+
+	next_va:
+		next = va->va_end;
+		spin_unlock(&vn->busy.lock);
+	} while ((vn = find_vmap_area_exceed_addr_lock(next, &va)));
 
 finished_zero:
-	spin_unlock(&vn->busy.lock);
+	if (vn)
+		spin_unlock(&vn->busy.lock);
+
 	/* zero-fill memory holes */
 	return count - remains + zero_iter(iter, remains);
 finished:
 	/* Nothing remains, or We couldn't copy/zero everything. */
-	spin_unlock(&vn->busy.lock);
+	if (vn)
+		spin_unlock(&vn->busy.lock);
 
 	return count - remains;
 }
-- 
2.30.2

