All of lore.kernel.org
 help / color / mirror / Atom feed
From: Suren Baghdasaryan <surenb@google.com>
To: akpm@linux-foundation.org
Cc: michel@lespinasse.org, jglisse@google.com, mhocko@suse.com,
	vbabka@suse.cz, hannes@cmpxchg.org, mgorman@techsingularity.net,
	dave@stgolabs.net, willy@infradead.org, liam.howlett@oracle.com,
	peterz@infradead.org, ldufour@linux.ibm.com, paulmck@kernel.org,
	mingo@redhat.com, will@kernel.org, luto@kernel.org,
	songliubraving@fb.com, peterx@redhat.com, david@redhat.com,
	dhowells@redhat.com, hughd@google.com, bigeasy@linutronix.de,
	kent.overstreet@linux.dev, punit.agrawal@bytedance.com,
	lstoakes@gmail.com, peterjung1337@gmail.com, rientjes@google.com,
	chriscli@google.com, axelrasmussen@google.com, joelaf@google.com,
	minchan@google.com, rppt@kernel.org, jannh@google.com,
	shakeelb@google.com, tatashin@google.com, edumazet@google.com,
	gthelen@google.com, gurua@google.com, arjunroy@google.com,
	soheil@google.com, leewalsh@google.com, posk@google.com,
	michalechner92@googlemail.com, linux-mm@kvack.org,
	linux-arm-kernel@lists.infradead.org,
	linuxppc-dev@lists.ozlabs.org, x86@kernel.org,
	linux-kernel@vger.kernel.org, kernel-team@android.com,
	Liam Howlett <Liam.Howlett@oracle.com>,
	Suren Baghdasaryan <surenb@google.com>
Subject: [PATCH v4 03/33] maple_tree: Fix freeing of nodes in rcu mode
Date: Mon, 27 Feb 2023 09:36:02 -0800	[thread overview]
Message-ID: <20230227173632.3292573-4-surenb@google.com> (raw)
In-Reply-To: <20230227173632.3292573-1-surenb@google.com>

From: Liam Howlett <Liam.Howlett@oracle.com>

The walk to destroy the nodes was not always setting the node type and
would result in a destroy method potentially using the values as nodes.
Avoid this by setting the correct node types.  This is necessary for the
RCU mode of the maple tree.

Fixes: 54a611b60590 ("Maple Tree: add new data structure")
Signed-off-by: Liam Howlett <Liam.Howlett@oracle.com>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
---
 lib/maple_tree.c | 73 ++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 62 insertions(+), 11 deletions(-)

diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 089cd76ec379..44d6ce30b28e 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -902,6 +902,44 @@ static inline void ma_set_meta(struct maple_node *mn, enum maple_type mt,
 	meta->end = end;
 }
 
+/*
+ * mas_clear_meta() - clear the metadata information of a node, if it exists
+ * @mas: The maple state
+ * @mn: The maple node
+ * @mt: The maple node type
+ *
+ * Clears the metadata end and gap fields, if the node type carries metadata.
+ */
+static inline void mas_clear_meta(struct ma_state *mas, struct maple_node *mn,
+				  enum maple_type mt)
+{
+	struct maple_metadata *meta;
+	unsigned long *pivots;
+	void __rcu **slots;
+	void *next;
+
+	switch (mt) {
+	case maple_range_64:
+		pivots = mn->mr64.pivot;
+		if (unlikely(pivots[MAPLE_RANGE64_SLOTS - 2])) {
+			slots = mn->mr64.slot;
+			next = mas_slot_locked(mas, slots,
+					       MAPLE_RANGE64_SLOTS - 1);
+			if (unlikely((mte_to_node(next) && mte_node_type(next))))
+				return; /* The last slot is a node, no metadata */
+		}
+		fallthrough;
+	case maple_arange_64:
+		meta = ma_meta(mn, mt);
+		break;
+	default:
+		return;
+	}
+
+	meta->gap = 0;
+	meta->end = 0;
+}
+
 /*
  * ma_meta_end() - Get the data end of a node from the metadata
  * @mn: The maple node
@@ -5455,20 +5493,22 @@ static inline int mas_rev_alloc(struct ma_state *mas, unsigned long min,
  * mas_dead_leaves() - Mark all leaves of a node as dead.
  * @mas: The maple state
  * @slots: Pointer to the slot array
+ * @mt: The maple node type
  *
  * Must hold the write lock.
  *
  * Return: The number of leaves marked as dead.
  */
 static inline
-unsigned char mas_dead_leaves(struct ma_state *mas, void __rcu **slots)
+unsigned char mas_dead_leaves(struct ma_state *mas, void __rcu **slots,
+			      enum maple_type mt)
 {
 	struct maple_node *node;
 	enum maple_type type;
 	void *entry;
 	int offset;
 
-	for (offset = 0; offset < mt_slot_count(mas->node); offset++) {
+	for (offset = 0; offset < mt_slots[mt]; offset++) {
 		entry = mas_slot_locked(mas, slots, offset);
 		type = mte_node_type(entry);
 		node = mte_to_node(entry);
@@ -5487,14 +5527,13 @@ unsigned char mas_dead_leaves(struct ma_state *mas, void __rcu **slots)
 
 static void __rcu **mas_dead_walk(struct ma_state *mas, unsigned char offset)
 {
-	struct maple_node *node, *next;
+	struct maple_node *next;
 	void __rcu **slots = NULL;
 
 	next = mas_mn(mas);
 	do {
-		mas->node = ma_enode_ptr(next);
-		node = mas_mn(mas);
-		slots = ma_slots(node, node->type);
+		mas->node = mt_mk_node(next, next->type);
+		slots = ma_slots(next, next->type);
 		next = mas_slot_locked(mas, slots, offset);
 		offset = 0;
 	} while (!ma_is_leaf(next->type));
@@ -5558,11 +5597,14 @@ static inline void __rcu **mas_destroy_descend(struct ma_state *mas,
 		node = mas_mn(mas);
 		slots = ma_slots(node, mte_node_type(mas->node));
 		next = mas_slot_locked(mas, slots, 0);
-		if ((mte_dead_node(next)))
+		if ((mte_dead_node(next))) {
+			mte_to_node(next)->type = mte_node_type(next);
 			next = mas_slot_locked(mas, slots, 1);
+		}
 
 		mte_set_node_dead(mas->node);
 		node->type = mte_node_type(mas->node);
+		mas_clear_meta(mas, node, node->type);
 		node->piv_parent = prev;
 		node->parent_slot = offset;
 		offset = 0;
@@ -5582,13 +5624,18 @@ static void mt_destroy_walk(struct maple_enode *enode, unsigned char ma_flags,
 
 	MA_STATE(mas, &mt, 0, 0);
 
-	if (mte_is_leaf(enode))
+	mas.node = enode;
+	if (mte_is_leaf(enode)) {
+		node->type = mte_node_type(enode);
 		goto free_leaf;
+	}
 
+	ma_flags &= ~MT_FLAGS_LOCK_MASK;
 	mt_init_flags(&mt, ma_flags);
 	mas_lock(&mas);
 
-	mas.node = start = enode;
+	mte_to_node(enode)->ma_flags = ma_flags;
+	start = enode;
 	slots = mas_destroy_descend(&mas, start, 0);
 	node = mas_mn(&mas);
 	do {
@@ -5596,7 +5643,8 @@ static void mt_destroy_walk(struct maple_enode *enode, unsigned char ma_flags,
 		unsigned char offset;
 		struct maple_enode *parent, *tmp;
 
-		node->slot_len = mas_dead_leaves(&mas, slots);
+		node->type = mte_node_type(mas.node);
+		node->slot_len = mas_dead_leaves(&mas, slots, node->type);
 		if (free)
 			mt_free_bulk(node->slot_len, slots);
 		offset = node->parent_slot + 1;
@@ -5620,7 +5668,8 @@ static void mt_destroy_walk(struct maple_enode *enode, unsigned char ma_flags,
 	} while (start != mas.node);
 
 	node = mas_mn(&mas);
-	node->slot_len = mas_dead_leaves(&mas, slots);
+	node->type = mte_node_type(mas.node);
+	node->slot_len = mas_dead_leaves(&mas, slots, node->type);
 	if (free)
 		mt_free_bulk(node->slot_len, slots);
 
@@ -5630,6 +5679,8 @@ static void mt_destroy_walk(struct maple_enode *enode, unsigned char ma_flags,
 free_leaf:
 	if (free)
 		mt_free_rcu(&node->rcu);
+	else
+		mas_clear_meta(&mas, node, node->type);
 }
 
 /*
-- 
2.39.2.722.g9855ee24e9-goog


WARNING: multiple messages have this Message-ID (diff)
From: Suren Baghdasaryan <surenb@google.com>
To: akpm@linux-foundation.org
Cc: michel@lespinasse.org, joelaf@google.com, songliubraving@fb.com,
	mhocko@suse.com, leewalsh@google.com, david@redhat.com,
	peterz@infradead.org, bigeasy@linutronix.de, peterx@redhat.com,
	dhowells@redhat.com, linux-mm@kvack.org, edumazet@google.com,
	jglisse@google.com, punit.agrawal@bytedance.com, will@kernel.org,
	arjunroy@google.com, chriscli@google.com, dave@stgolabs.net,
	minchan@google.com, x86@kernel.org, hughd@google.com,
	willy@infradead.org, gurua@google.com, mingo@redhat.com,
	linux-arm-kernel@lists.infradead.org, rientjes@google.com,
	axelrasmussen@google.com, kernel-team@android.com,
	michalechner92@googlemail.com, soheil@google.com,
	paulmck@kernel.org, jannh@google.com,
	Liam Howlett <Liam.Howlett@oracle.com>,
	shakeelb@google.com, luto@kernel.org, gthelen@google.com,
	ldufour@linux.ibm.com, Suren Baghdasaryan <surenb@google.com>,
	vbabka@suse.cz, posk@google.com, lstoakes@gmail.com,
	peterjung1337@gmail.com, linuxppc-dev@lists.ozlabs.org,
	kent.overstreet@linux.dev, linux-kernel@vger.kernel.org,
	hannes@cmpxchg.org, tatashin@google.com,
	mgorman@techsingularity.net, rppt@kernel.org
Subject: [PATCH v4 03/33] maple_tree: Fix freeing of nodes in rcu mode
Date: Mon, 27 Feb 2023 09:36:02 -0800	[thread overview]
Message-ID: <20230227173632.3292573-4-surenb@google.com> (raw)
In-Reply-To: <20230227173632.3292573-1-surenb@google.com>

From: Liam Howlett <Liam.Howlett@oracle.com>

The walk to destroy the nodes was not always setting the node type and
would result in a destroy method potentially using the values as nodes.
Avoid this by setting the correct node types.  This is necessary for the
RCU mode of the maple tree.

Fixes: 54a611b60590 ("Maple Tree: add new data structure")
Signed-off-by: Liam Howlett <Liam.Howlett@oracle.com>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
---
 lib/maple_tree.c | 73 ++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 62 insertions(+), 11 deletions(-)

diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 089cd76ec379..44d6ce30b28e 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -902,6 +902,44 @@ static inline void ma_set_meta(struct maple_node *mn, enum maple_type mt,
 	meta->end = end;
 }
 
+/*
+ * mas_clear_meta() - clear the metadata information of a node, if it exists
+ * @mas: The maple state
+ * @mn: The maple node
+ * @mt: The maple node type
+ *
+ * Clears the metadata end and gap fields, if the node type carries metadata.
+ */
+static inline void mas_clear_meta(struct ma_state *mas, struct maple_node *mn,
+				  enum maple_type mt)
+{
+	struct maple_metadata *meta;
+	unsigned long *pivots;
+	void __rcu **slots;
+	void *next;
+
+	switch (mt) {
+	case maple_range_64:
+		pivots = mn->mr64.pivot;
+		if (unlikely(pivots[MAPLE_RANGE64_SLOTS - 2])) {
+			slots = mn->mr64.slot;
+			next = mas_slot_locked(mas, slots,
+					       MAPLE_RANGE64_SLOTS - 1);
+			if (unlikely((mte_to_node(next) && mte_node_type(next))))
+				return; /* The last slot is a node, no metadata */
+		}
+		fallthrough;
+	case maple_arange_64:
+		meta = ma_meta(mn, mt);
+		break;
+	default:
+		return;
+	}
+
+	meta->gap = 0;
+	meta->end = 0;
+}
+
 /*
  * ma_meta_end() - Get the data end of a node from the metadata
  * @mn: The maple node
@@ -5455,20 +5493,22 @@ static inline int mas_rev_alloc(struct ma_state *mas, unsigned long min,
  * mas_dead_leaves() - Mark all leaves of a node as dead.
  * @mas: The maple state
  * @slots: Pointer to the slot array
+ * @mt: The maple node type
  *
  * Must hold the write lock.
  *
  * Return: The number of leaves marked as dead.
  */
 static inline
-unsigned char mas_dead_leaves(struct ma_state *mas, void __rcu **slots)
+unsigned char mas_dead_leaves(struct ma_state *mas, void __rcu **slots,
+			      enum maple_type mt)
 {
 	struct maple_node *node;
 	enum maple_type type;
 	void *entry;
 	int offset;
 
-	for (offset = 0; offset < mt_slot_count(mas->node); offset++) {
+	for (offset = 0; offset < mt_slots[mt]; offset++) {
 		entry = mas_slot_locked(mas, slots, offset);
 		type = mte_node_type(entry);
 		node = mte_to_node(entry);
@@ -5487,14 +5527,13 @@ unsigned char mas_dead_leaves(struct ma_state *mas, void __rcu **slots)
 
 static void __rcu **mas_dead_walk(struct ma_state *mas, unsigned char offset)
 {
-	struct maple_node *node, *next;
+	struct maple_node *next;
 	void __rcu **slots = NULL;
 
 	next = mas_mn(mas);
 	do {
-		mas->node = ma_enode_ptr(next);
-		node = mas_mn(mas);
-		slots = ma_slots(node, node->type);
+		mas->node = mt_mk_node(next, next->type);
+		slots = ma_slots(next, next->type);
 		next = mas_slot_locked(mas, slots, offset);
 		offset = 0;
 	} while (!ma_is_leaf(next->type));
@@ -5558,11 +5597,14 @@ static inline void __rcu **mas_destroy_descend(struct ma_state *mas,
 		node = mas_mn(mas);
 		slots = ma_slots(node, mte_node_type(mas->node));
 		next = mas_slot_locked(mas, slots, 0);
-		if ((mte_dead_node(next)))
+		if ((mte_dead_node(next))) {
+			mte_to_node(next)->type = mte_node_type(next);
 			next = mas_slot_locked(mas, slots, 1);
+		}
 
 		mte_set_node_dead(mas->node);
 		node->type = mte_node_type(mas->node);
+		mas_clear_meta(mas, node, node->type);
 		node->piv_parent = prev;
 		node->parent_slot = offset;
 		offset = 0;
@@ -5582,13 +5624,18 @@ static void mt_destroy_walk(struct maple_enode *enode, unsigned char ma_flags,
 
 	MA_STATE(mas, &mt, 0, 0);
 
-	if (mte_is_leaf(enode))
+	mas.node = enode;
+	if (mte_is_leaf(enode)) {
+		node->type = mte_node_type(enode);
 		goto free_leaf;
+	}
 
+	ma_flags &= ~MT_FLAGS_LOCK_MASK;
 	mt_init_flags(&mt, ma_flags);
 	mas_lock(&mas);
 
-	mas.node = start = enode;
+	mte_to_node(enode)->ma_flags = ma_flags;
+	start = enode;
 	slots = mas_destroy_descend(&mas, start, 0);
 	node = mas_mn(&mas);
 	do {
@@ -5596,7 +5643,8 @@ static void mt_destroy_walk(struct maple_enode *enode, unsigned char ma_flags,
 		unsigned char offset;
 		struct maple_enode *parent, *tmp;
 
-		node->slot_len = mas_dead_leaves(&mas, slots);
+		node->type = mte_node_type(mas.node);
+		node->slot_len = mas_dead_leaves(&mas, slots, node->type);
 		if (free)
 			mt_free_bulk(node->slot_len, slots);
 		offset = node->parent_slot + 1;
@@ -5620,7 +5668,8 @@ static void mt_destroy_walk(struct maple_enode *enode, unsigned char ma_flags,
 	} while (start != mas.node);
 
 	node = mas_mn(&mas);
-	node->slot_len = mas_dead_leaves(&mas, slots);
+	node->type = mte_node_type(mas.node);
+	node->slot_len = mas_dead_leaves(&mas, slots, node->type);
 	if (free)
 		mt_free_bulk(node->slot_len, slots);
 
@@ -5630,6 +5679,8 @@ static void mt_destroy_walk(struct maple_enode *enode, unsigned char ma_flags,
 free_leaf:
 	if (free)
 		mt_free_rcu(&node->rcu);
+	else
+		mas_clear_meta(&mas, node, node->type);
 }
 
 /*
-- 
2.39.2.722.g9855ee24e9-goog


  parent reply	other threads:[~2023-02-27 17:36 UTC|newest]

Thread overview: 134+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-02-27 17:35 [PATCH v4 00/33] Per-VMA locks Suren Baghdasaryan
2023-02-27 17:35 ` Suren Baghdasaryan
2023-02-27 17:36 ` [PATCH v4 01/33] maple_tree: Be more cautious about dead nodes Suren Baghdasaryan
2023-02-27 17:36   ` Suren Baghdasaryan
2023-02-27 17:36 ` [PATCH v4 02/33] maple_tree: Detect dead nodes in mas_start() Suren Baghdasaryan
2023-02-27 17:36   ` Suren Baghdasaryan
2023-02-27 17:36 ` Suren Baghdasaryan [this message]
2023-02-27 17:36   ` [PATCH v4 03/33] maple_tree: Fix freeing of nodes in rcu mode Suren Baghdasaryan
2023-02-27 17:36 ` [PATCH v4 04/33] maple_tree: remove extra smp_wmb() from mas_dead_leaves() Suren Baghdasaryan
2023-02-27 17:36   ` Suren Baghdasaryan
2023-02-27 17:36 ` [PATCH v4 05/33] maple_tree: Fix write memory barrier of nodes once dead for RCU mode Suren Baghdasaryan
2023-02-27 17:36   ` Suren Baghdasaryan
2023-02-27 17:36 ` [PATCH v4 06/33] maple_tree: Add smp_rmb() to dead node detection Suren Baghdasaryan
2023-02-27 17:36   ` Suren Baghdasaryan
2023-02-27 17:36 ` [PATCH v4 07/33] maple_tree: Add RCU lock checking to rcu callback functions Suren Baghdasaryan
2023-02-27 17:36   ` Suren Baghdasaryan
2023-02-27 17:36 ` [PATCH v4 08/33] mm: Enable maple tree RCU mode by default Suren Baghdasaryan
2023-02-27 17:36   ` Suren Baghdasaryan
2023-02-27 17:36 ` [PATCH v4 09/33] mm: introduce CONFIG_PER_VMA_LOCK Suren Baghdasaryan
2023-02-27 17:36   ` Suren Baghdasaryan
2023-02-27 17:36 ` [PATCH v4 10/33] mm: rcu safe VMA freeing Suren Baghdasaryan
2023-02-27 17:36   ` Suren Baghdasaryan
2023-02-27 17:36 ` [PATCH v4 11/33] mm: move mmap_lock assert function definitions Suren Baghdasaryan
2023-02-27 17:36   ` Suren Baghdasaryan
2023-02-27 17:36 ` [PATCH v4 12/33] mm: add per-VMA lock and helper functions to control it Suren Baghdasaryan
2023-02-27 17:36   ` Suren Baghdasaryan
2023-02-27 17:36 ` [PATCH v4 13/33] mm: mark VMA as being written when changing vm_flags Suren Baghdasaryan
2023-02-27 17:36   ` Suren Baghdasaryan
2023-02-27 17:36 ` [PATCH v4 14/33] mm/mmap: move vma_prepare before vma_adjust_trans_huge Suren Baghdasaryan
2023-02-27 17:36   ` Suren Baghdasaryan
2023-02-27 17:36 ` [PATCH v4 15/33] mm/khugepaged: write-lock VMA while collapsing a huge page Suren Baghdasaryan
2023-02-27 17:36   ` Suren Baghdasaryan
2023-02-27 17:36 ` [PATCH v4 16/33] mm/mmap: write-lock VMAs in vma_prepare before modifying them Suren Baghdasaryan
2023-02-27 17:36   ` Suren Baghdasaryan
2023-02-27 17:36 ` [PATCH v4 17/33] mm/mremap: write-lock VMA while remapping it to a new address range Suren Baghdasaryan
2023-02-27 17:36   ` Suren Baghdasaryan
2023-03-01  7:01   ` Hyeonggon Yoo
2023-03-01  7:01     ` Hyeonggon Yoo
2023-02-27 17:36 ` [PATCH v4 18/33] mm: write-lock VMAs before removing them from VMA tree Suren Baghdasaryan
2023-02-27 17:36   ` Suren Baghdasaryan
2023-03-01  7:43   ` Hyeonggon Yoo
2023-03-01  7:43     ` Hyeonggon Yoo
2023-03-01  7:56     ` Hyeonggon Yoo
2023-03-01  7:56       ` Hyeonggon Yoo
2023-03-01 18:34       ` Suren Baghdasaryan
2023-03-01 18:34         ` Suren Baghdasaryan
2023-03-01 18:42         ` Suren Baghdasaryan
2023-03-01 18:42           ` Suren Baghdasaryan
2023-03-02  0:53           ` Hyeonggon Yoo
2023-03-02  0:53             ` Hyeonggon Yoo
2023-03-02  2:21             ` Suren Baghdasaryan
2023-03-02  2:21               ` Suren Baghdasaryan
2023-03-01 19:07         ` Suren Baghdasaryan
2023-03-01 19:07           ` Suren Baghdasaryan
2023-02-27 17:36 ` [PATCH v4 19/33] mm: conditionally write-lock VMA in free_pgtables Suren Baghdasaryan
2023-02-27 17:36   ` Suren Baghdasaryan
2023-02-27 17:36 ` [PATCH v4 20/33] kernel/fork: assert no VMA readers during its destruction Suren Baghdasaryan
2023-02-27 17:36   ` Suren Baghdasaryan
2023-02-27 17:36 ` [PATCH v4 21/33] mm/mmap: prevent pagefault handler from racing with mmu_notifier registration Suren Baghdasaryan
2023-02-27 17:36   ` Suren Baghdasaryan
2023-02-27 17:36 ` [PATCH v4 22/33] mm: introduce vma detached flag Suren Baghdasaryan
2023-02-27 17:36   ` Suren Baghdasaryan
2023-02-27 17:36 ` [PATCH v4 23/33] mm: introduce lock_vma_under_rcu to be used from arch-specific code Suren Baghdasaryan
2023-02-27 17:36   ` Suren Baghdasaryan
2023-02-27 17:36 ` [PATCH v4 24/33] mm: fall back to mmap_lock if vma->anon_vma is not yet set Suren Baghdasaryan
2023-02-27 17:36   ` Suren Baghdasaryan
2023-03-01  9:54   ` Hyeonggon Yoo
2023-03-01  9:54     ` Hyeonggon Yoo
2023-02-27 17:36 ` [PATCH v4 25/33] mm: add FAULT_FLAG_VMA_LOCK flag Suren Baghdasaryan
2023-02-27 17:36   ` Suren Baghdasaryan
2023-02-27 17:36 ` [PATCH v4 26/33] mm: prevent do_swap_page from handling page faults under VMA lock Suren Baghdasaryan
2023-02-27 17:36   ` Suren Baghdasaryan
2023-02-27 17:36 ` [PATCH v4 27/33] mm: prevent userfaults to be handled under per-vma lock Suren Baghdasaryan
2023-02-27 17:36   ` Suren Baghdasaryan
2023-02-27 17:36 ` [PATCH v4 28/33] mm: introduce per-VMA lock statistics Suren Baghdasaryan
2023-02-27 17:36   ` Suren Baghdasaryan
2023-02-27 17:36 ` [PATCH v4 29/33] x86/mm: try VMA lock-based page fault handling first Suren Baghdasaryan
2023-02-27 17:36   ` Suren Baghdasaryan
2023-06-29 14:40   ` Jiri Slaby
2023-06-29 14:40     ` Jiri Slaby
2023-06-29 15:30     ` Suren Baghdasaryan
2023-06-29 15:30       ` Suren Baghdasaryan
2023-06-30  6:35       ` Jiri Slaby
2023-06-30  6:35         ` Jiri Slaby
2023-06-30  8:28         ` Jiri Slaby
2023-06-30  8:28           ` Jiri Slaby
2023-06-30  8:43           ` Jiri Slaby
2023-06-30  8:43             ` Jiri Slaby
2023-06-30 17:40             ` Suren Baghdasaryan
2023-06-30 17:40               ` Suren Baghdasaryan
2023-07-03 10:47               ` Jiri Slaby
2023-07-03 10:47                 ` Jiri Slaby
2023-07-03 13:52                 ` Holger Hoffstätte
2023-07-03 14:45                   ` Suren Baghdasaryan
2023-07-03 15:24                     ` Suren Baghdasaryan
2023-07-03 18:28                       ` Suren Baghdasaryan
2023-07-05 22:15                   ` Suren Baghdasaryan
2023-07-05 22:37                     ` Holger Hoffstätte
2023-07-05 22:55                       ` Suren Baghdasaryan
2023-07-06 14:27                         ` Holger Hoffstätte
2023-07-06 16:11                           ` Suren Baghdasaryan
2023-07-07  2:23                             ` Suren Baghdasaryan
2023-07-07  4:40                               ` Suren Baghdasaryan
2023-07-11  6:20                     ` Jiri Slaby
2023-06-29 17:06     ` Linux regression tracking #adding (Thorsten Leemhuis)
2023-06-29 17:06       ` Linux regression tracking #adding (Thorsten Leemhuis)
2023-07-10 10:45       ` Linux regression tracking #update (Thorsten Leemhuis)
2023-07-03  9:58     ` Linux regression tracking (Thorsten Leemhuis)
2023-07-03  9:58       ` Linux regression tracking (Thorsten Leemhuis)
2023-02-27 17:36 ` [PATCH v4 30/33] arm64/mm: " Suren Baghdasaryan
2023-02-27 17:36   ` Suren Baghdasaryan
2023-02-27 17:36 ` [PATCH v4 31/33] powerc/mm: " Suren Baghdasaryan
2023-02-27 17:36   ` Suren Baghdasaryan
2023-03-06 15:42   ` [PATCH] powerpc/mm: fix mmap_lock bad unlock Laurent Dufour
2023-03-06 15:42     ` Laurent Dufour
2023-03-06 20:25   ` [PATCH v4 31/33] powerc/mm: try VMA lock-based page fault handling first Suren Baghdasaryan
2023-03-06 20:25     ` Suren Baghdasaryan
2023-03-06 20:25     ` Suren Baghdasaryan
2023-02-27 17:36 ` [PATCH v4 32/33] mm/mmap: free vm_area_struct without call_rcu in exit_mmap Suren Baghdasaryan
2023-02-27 17:36   ` Suren Baghdasaryan
2023-02-27 17:36 ` [PATCH v4 33/33] mm: separate vma->lock from vm_area_struct Suren Baghdasaryan
2023-02-27 17:36   ` Suren Baghdasaryan
2023-07-11 10:35 ` [PATCH v4 00/33] Per-VMA locks Leon Romanovsky
2023-07-11 10:35   ` Leon Romanovsky
2023-07-11 10:39   ` Vlastimil Babka
2023-07-11 10:39     ` Vlastimil Babka
2023-07-11 11:01     ` Leon Romanovsky
2023-07-11 11:01       ` Leon Romanovsky
2023-07-11 11:09       ` Leon Romanovsky
2023-07-11 11:09         ` Leon Romanovsky
2023-07-11 16:35         ` Suren Baghdasaryan
2023-07-11 16:35           ` Suren Baghdasaryan
2023-07-11 17:14           ` Leon Romanovsky
2023-07-11 17:14             ` Leon Romanovsky

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20230227173632.3292573-4-surenb@google.com \
    --to=surenb@google.com \
    --cc=akpm@linux-foundation.org \
    --cc=arjunroy@google.com \
    --cc=axelrasmussen@google.com \
    --cc=bigeasy@linutronix.de \
    --cc=chriscli@google.com \
    --cc=dave@stgolabs.net \
    --cc=david@redhat.com \
    --cc=dhowells@redhat.com \
    --cc=edumazet@google.com \
    --cc=gthelen@google.com \
    --cc=gurua@google.com \
    --cc=hannes@cmpxchg.org \
    --cc=hughd@google.com \
    --cc=jannh@google.com \
    --cc=jglisse@google.com \
    --cc=joelaf@google.com \
    --cc=kent.overstreet@linux.dev \
    --cc=kernel-team@android.com \
    --cc=ldufour@linux.ibm.com \
    --cc=leewalsh@google.com \
    --cc=liam.howlett@oracle.com \
    --cc=linux-arm-kernel@lists.infradead.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=linuxppc-dev@lists.ozlabs.org \
    --cc=lstoakes@gmail.com \
    --cc=luto@kernel.org \
    --cc=mgorman@techsingularity.net \
    --cc=mhocko@suse.com \
    --cc=michalechner92@googlemail.com \
    --cc=michel@lespinasse.org \
    --cc=minchan@google.com \
    --cc=mingo@redhat.com \
    --cc=paulmck@kernel.org \
    --cc=peterjung1337@gmail.com \
    --cc=peterx@redhat.com \
    --cc=peterz@infradead.org \
    --cc=posk@google.com \
    --cc=punit.agrawal@bytedance.com \
    --cc=rientjes@google.com \
    --cc=rppt@kernel.org \
    --cc=shakeelb@google.com \
    --cc=soheil@google.com \
    --cc=songliubraving@fb.com \
    --cc=tatashin@google.com \
    --cc=vbabka@suse.cz \
    --cc=will@kernel.org \
    --cc=willy@infradead.org \
    --cc=x86@kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.