From: Suren Baghdasaryan <surenb@google.com>
To: akpm@linux-foundation.org
Cc: michel@lespinasse.org, jglisse@google.com, mhocko@suse.com,
	vbabka@suse.cz, hannes@cmpxchg.org, mgorman@techsingularity.net,
	dave@stgolabs.net, willy@infradead.org, liam.howlett@oracle.com,
	peterz@infradead.org, ldufour@linux.ibm.com,
	laurent.dufour@fr.ibm.com, paulmck@kernel.org, luto@kernel.org,
	songliubraving@fb.com, peterx@redhat.com, david@redhat.com,
	dhowells@redhat.com, hughd@google.com, bigeasy@linutronix.de,
	kent.overstreet@linux.dev, punit.agrawal@bytedance.com,
	lstoakes@gmail.com, peterjung1337@gmail.com, rientjes@google.com,
	axelrasmussen@google.com, joelaf@google.com, minchan@google.com,
	jannh@google.com, shakeelb@google.com, tatashin@google.com,
	edumazet@google.com, gthelen@google.com, gurua@google.com,
	arjunroy@google.com, soheil@google.com, hughlynch@google.com,
	leewalsh@google.com, posk@google.com, linux-mm@kvack.org,
	linux-arm-kernel@lists.infradead.org,
	linuxppc-dev@lists.ozlabs.org, x86@kernel.org,
	linux-kernel@vger.kernel.org, kernel-team@android.com,
	surenb@google.com, Liam Howlett <Liam.Howlett@oracle.com>
Subject: [PATCH 01/41] maple_tree: Be more cautious about dead nodes
Date: Mon,  9 Jan 2023 12:52:56 -0800	[thread overview]
Message-ID: <20230109205336.3665937-2-surenb@google.com> (raw)
In-Reply-To: <20230109205336.3665937-1-surenb@google.com>

From: Liam Howlett <Liam.Howlett@oracle.com>

ma_pivots() and ma_data_end() may be called with a dead node.  Ensure
that the node isn't dead before using the returned values.

This is necessary for RCU mode of the maple tree.

Fixes: 54a611b60590 ("Maple Tree: add new data structure")
Signed-off-by: Liam Howlett <Liam.Howlett@oracle.com>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
---
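An illustrative sketch of the pattern this patch applies (not code taken
verbatim from the diff below; the helper names are the real ones touched
here, the surrounding variables are placeholders): under RCU a node may
be freed and reused while it is being read, so a reader loads what it
needs and only then re-validates the node, retrying or bailing out if it
died.

	slots = ma_slots(node, type);
	pivots = ma_pivots(node, type);	/* may be NULL if the node is dead */
	end = ma_data_end(node, type, pivots, mas->max);
	if (unlikely(ma_dead_node(node))) {
		mas_rewalk(mas, index);	/* node died under us, restart the walk */
		goto retry;
	}
	/* slots, pivots and end are now safe to use */

Checking after the reads is what makes this work: ma_data_end() now
tolerates a NULL pivot array, and the ma_dead_node() re-check catches a
node that was freed while the values were being loaded.
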
 lib/maple_tree.c | 53 +++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 43 insertions(+), 10 deletions(-)

diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 26e2045d3cda..ff9f04e0150d 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -540,6 +540,7 @@ static inline bool ma_dead_node(const struct maple_node *node)
 
 	return (parent == node);
 }
+
 /*
  * mte_dead_node() - check if the @enode is dead.
  * @enode: The encoded maple node
@@ -621,6 +622,8 @@ static inline unsigned int mas_alloc_req(const struct ma_state *mas)
  * @node - the maple node
  * @type - the node type
  *
+ * In the event of a dead node, this array may be %NULL
+ *
  * Return: A pointer to the maple node pivots
  */
 static inline unsigned long *ma_pivots(struct maple_node *node,
@@ -1091,8 +1094,11 @@ static int mas_ascend(struct ma_state *mas)
 		a_type = mas_parent_enum(mas, p_enode);
 		a_node = mte_parent(p_enode);
 		a_slot = mte_parent_slot(p_enode);
-		pivots = ma_pivots(a_node, a_type);
 		a_enode = mt_mk_node(a_node, a_type);
+		pivots = ma_pivots(a_node, a_type);
+
+		if (unlikely(ma_dead_node(a_node)))
+			return 1;
 
 		if (!set_min && a_slot) {
 			set_min = true;
@@ -1398,6 +1404,9 @@ static inline unsigned char ma_data_end(struct maple_node *node,
 {
 	unsigned char offset;
 
+	if (!pivots)
+		return 0;
+
 	if (type == maple_arange_64)
 		return ma_meta_end(node, type);
 
@@ -1433,6 +1442,9 @@ static inline unsigned char mas_data_end(struct ma_state *mas)
 		return ma_meta_end(node, type);
 
 	pivots = ma_pivots(node, type);
+	if (unlikely(ma_dead_node(node)))
+		return 0;
+
 	offset = mt_pivots[type] - 1;
 	if (likely(!pivots[offset]))
 		return ma_meta_end(node, type);
@@ -4504,6 +4516,9 @@ static inline int mas_prev_node(struct ma_state *mas, unsigned long min)
 	node = mas_mn(mas);
 	slots = ma_slots(node, mt);
 	pivots = ma_pivots(node, mt);
+	if (unlikely(ma_dead_node(node)))
+		return 1;
+
 	mas->max = pivots[offset];
 	if (offset)
 		mas->min = pivots[offset - 1] + 1;
@@ -4525,6 +4540,9 @@ static inline int mas_prev_node(struct ma_state *mas, unsigned long min)
 		slots = ma_slots(node, mt);
 		pivots = ma_pivots(node, mt);
 		offset = ma_data_end(node, mt, pivots, mas->max);
+		if (unlikely(ma_dead_node(node)))
+			return 1;
+
 		if (offset)
 			mas->min = pivots[offset - 1] + 1;
 
@@ -4573,6 +4591,7 @@ static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
 	struct maple_enode *enode;
 	int level = 0;
 	unsigned char offset;
+	unsigned char node_end;
 	enum maple_type mt;
 	void __rcu **slots;
 
@@ -4596,7 +4615,11 @@ static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
 		node = mas_mn(mas);
 		mt = mte_node_type(mas->node);
 		pivots = ma_pivots(node, mt);
-	} while (unlikely(offset == ma_data_end(node, mt, pivots, mas->max)));
+		node_end = ma_data_end(node, mt, pivots, mas->max);
+		if (unlikely(ma_dead_node(node)))
+			return 1;
+
+	} while (unlikely(offset == node_end));
 
 	slots = ma_slots(node, mt);
 	pivot = mas_safe_pivot(mas, pivots, ++offset, mt);
@@ -4612,6 +4635,9 @@ static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
 		mt = mte_node_type(mas->node);
 		slots = ma_slots(node, mt);
 		pivots = ma_pivots(node, mt);
+		if (unlikely(ma_dead_node(node)))
+			return 1;
+
 		offset = 0;
 		pivot = pivots[0];
 	}
@@ -4658,16 +4684,18 @@ static inline void *mas_next_nentry(struct ma_state *mas,
 		return NULL;
 	}
 
-	pivots = ma_pivots(node, type);
 	slots = ma_slots(node, type);
-	mas->index = mas_safe_min(mas, pivots, mas->offset);
-	if (ma_dead_node(node))
+	pivots = ma_pivots(node, type);
+	count = ma_data_end(node, type, pivots, mas->max);
+	if (unlikely(ma_dead_node(node)))
 		return NULL;
 
+	mas->index = mas_safe_min(mas, pivots, mas->offset);
+	if (unlikely(ma_dead_node(node)))
+		return NULL;
 	if (mas->index > max)
 		return NULL;
 
-	count = ma_data_end(node, type, pivots, mas->max);
 	if (mas->offset > count)
 		return NULL;
 
@@ -4815,6 +4843,11 @@ static inline void *mas_prev_nentry(struct ma_state *mas, unsigned long limit,
 
 	slots = ma_slots(mn, mt);
 	pivots = ma_pivots(mn, mt);
+	if (unlikely(ma_dead_node(mn))) {
+		mas_rewalk(mas, index);
+		goto retry;
+	}
+
 	if (offset == mt_pivots[mt])
 		pivot = mas->max;
 	else
@@ -6613,11 +6646,11 @@ static inline void *mas_first_entry(struct ma_state *mas, struct maple_node *mn,
 	while (likely(!ma_is_leaf(mt))) {
 		MT_BUG_ON(mas->tree, mte_dead_node(mas->node));
 		slots = ma_slots(mn, mt);
-		pivots = ma_pivots(mn, mt);
-		max = pivots[0];
 		entry = mas_slot(mas, slots, 0);
+		pivots = ma_pivots(mn, mt);
 		if (unlikely(ma_dead_node(mn)))
 			return NULL;
+		max = pivots[0];
 		mas->node = entry;
 		mn = mas_mn(mas);
 		mt = mte_node_type(mas->node);
@@ -6637,13 +6670,13 @@ static inline void *mas_first_entry(struct ma_state *mas, struct maple_node *mn,
 	if (likely(entry))
 		return entry;
 
-	pivots = ma_pivots(mn, mt);
-	mas->index = pivots[0] + 1;
 	mas->offset = 1;
 	entry = mas_slot(mas, slots, 1);
+	pivots = ma_pivots(mn, mt);
 	if (unlikely(ma_dead_node(mn)))
 		return NULL;
 
+	mas->index = pivots[0] + 1;
 	if (mas->index > limit)
 		goto none;
 
-- 
2.39.0


