From: Suren Baghdasaryan <surenb@google.com>
To: akpm@linux-foundation.org
Cc: michel@lespinasse.org, jglisse@google.com, mhocko@suse.com,
vbabka@suse.cz, hannes@cmpxchg.org, mgorman@techsingularity.net,
dave@stgolabs.net, willy@infradead.org, liam.howlett@oracle.com,
peterz@infradead.org, ldufour@linux.ibm.com, paulmck@kernel.org,
mingo@redhat.com, will@kernel.org, luto@kernel.org,
songliubraving@fb.com, peterx@redhat.com, david@redhat.com,
dhowells@redhat.com, hughd@google.com, bigeasy@linutronix.de,
kent.overstreet@linux.dev, punit.agrawal@bytedance.com,
lstoakes@gmail.com, peterjung1337@gmail.com, rientjes@google.com,
chriscli@google.com, axelrasmussen@google.com, joelaf@google.com,
minchan@google.com, rppt@kernel.org, jannh@google.com,
shakeelb@google.com, tatashin@google.com, edumazet@google.com,
gthelen@google.com, gurua@google.com, arjunroy@google.com,
soheil@google.com, leewalsh@google.com, posk@google.com,
michalechner92@googlemail.com, linux-mm@kvack.org,
linux-arm-kernel@lists.infradead.org,
linuxppc-dev@lists.ozlabs.org, x86@kernel.org,
linux-kernel@vger.kernel.org, kernel-team@android.com,
Liam Howlett <Liam.Howlett@oracle.com>,
Suren Baghdasaryan <surenb@google.com>
Subject: [PATCH v3 01/35] maple_tree: Be more cautious about dead nodes
Date: Wed, 15 Feb 2023 21:17:16 -0800
Message-ID: <20230216051750.3125598-2-surenb@google.com>
In-Reply-To: <20230216051750.3125598-1-surenb@google.com>
From: Liam Howlett <Liam.Howlett@oracle.com>
ma_pivots() and ma_data_end() may be called with a dead node.  Ensure
that the node isn't dead before using the returned values.
This is necessary for RCU mode of the maple tree.
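
To illustrate the pattern the hunks below apply, here is a minimal sketch
(not part of the diff; the helper name example_data_end() is made up for
illustration, while ma_pivots(), ma_data_end() and ma_dead_node() are the
existing helpers in lib/maple_tree.c): read the metadata first, then
re-check that the node is still alive before trusting anything derived
from it.

  /*
   * Illustrative sketch of the lockless read pattern enforced below.
   * Under RCU a concurrent writer may mark the node dead at any time,
   * so values read from the node are only trusted once ma_dead_node()
   * confirms it is still alive; otherwise the caller restarts its walk.
   */
  static inline unsigned char example_data_end(struct maple_node *node,
                                               enum maple_type type,
                                               unsigned long max)
  {
          unsigned long *pivots;
          unsigned char end;

          pivots = ma_pivots(node, type);  /* may be NULL if @node died */
          end = ma_data_end(node, type, pivots, max);

          if (unlikely(ma_dead_node(node)))
                  return 0;                /* stale data, restart the walk */

          return end;
  }
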
Fixes: 54a611b60590 ("Maple Tree: add new data structure")
Signed-off-by: Liam Howlett <Liam.Howlett@oracle.com>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
---
lib/maple_tree.c | 52 +++++++++++++++++++++++++++++++++++++++---------
1 file changed, 43 insertions(+), 9 deletions(-)
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 646297cae5d1..cc356b8369ad 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -544,6 +544,7 @@ static inline bool ma_dead_node(const struct maple_node *node)
return (parent == node);
}
+
/*
* mte_dead_node() - check if the @enode is dead.
* @enode: The encoded maple node
@@ -625,6 +626,8 @@ static inline unsigned int mas_alloc_req(const struct ma_state *mas)
* @node - the maple node
* @type - the node type
*
+ * In the event of a dead node, this array may be %NULL
+ *
* Return: A pointer to the maple node pivots
*/
static inline unsigned long *ma_pivots(struct maple_node *node,
@@ -1096,8 +1099,11 @@ static int mas_ascend(struct ma_state *mas)
a_type = mas_parent_enum(mas, p_enode);
a_node = mte_parent(p_enode);
a_slot = mte_parent_slot(p_enode);
- pivots = ma_pivots(a_node, a_type);
a_enode = mt_mk_node(a_node, a_type);
+ pivots = ma_pivots(a_node, a_type);
+
+ if (unlikely(ma_dead_node(a_node)))
+ return 1;
if (!set_min && a_slot) {
set_min = true;
@@ -1401,6 +1407,9 @@ static inline unsigned char ma_data_end(struct maple_node *node,
{
unsigned char offset;
+ if (!pivots)
+ return 0;
+
if (type == maple_arange_64)
return ma_meta_end(node, type);
@@ -1436,6 +1445,9 @@ static inline unsigned char mas_data_end(struct ma_state *mas)
return ma_meta_end(node, type);
pivots = ma_pivots(node, type);
+ if (unlikely(ma_dead_node(node)))
+ return 0;
+
offset = mt_pivots[type] - 1;
if (likely(!pivots[offset]))
return ma_meta_end(node, type);
@@ -4505,6 +4517,9 @@ static inline int mas_prev_node(struct ma_state *mas, unsigned long min)
node = mas_mn(mas);
slots = ma_slots(node, mt);
pivots = ma_pivots(node, mt);
+ if (unlikely(ma_dead_node(node)))
+ return 1;
+
mas->max = pivots[offset];
if (offset)
mas->min = pivots[offset - 1] + 1;
@@ -4526,6 +4541,9 @@ static inline int mas_prev_node(struct ma_state *mas, unsigned long min)
slots = ma_slots(node, mt);
pivots = ma_pivots(node, mt);
offset = ma_data_end(node, mt, pivots, mas->max);
+ if (unlikely(ma_dead_node(node)))
+ return 1;
+
if (offset)
mas->min = pivots[offset - 1] + 1;
@@ -4574,6 +4592,7 @@ static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
struct maple_enode *enode;
int level = 0;
unsigned char offset;
+ unsigned char node_end;
enum maple_type mt;
void __rcu **slots;
@@ -4597,7 +4616,11 @@ static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
node = mas_mn(mas);
mt = mte_node_type(mas->node);
pivots = ma_pivots(node, mt);
- } while (unlikely(offset == ma_data_end(node, mt, pivots, mas->max)));
+ node_end = ma_data_end(node, mt, pivots, mas->max);
+ if (unlikely(ma_dead_node(node)))
+ return 1;
+
+ } while (unlikely(offset == node_end));
slots = ma_slots(node, mt);
pivot = mas_safe_pivot(mas, pivots, ++offset, mt);
@@ -4613,6 +4636,9 @@ static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
mt = mte_node_type(mas->node);
slots = ma_slots(node, mt);
pivots = ma_pivots(node, mt);
+ if (unlikely(ma_dead_node(node)))
+ return 1;
+
offset = 0;
pivot = pivots[0];
}
@@ -4659,11 +4685,14 @@ static inline void *mas_next_nentry(struct ma_state *mas,
return NULL;
}
- pivots = ma_pivots(node, type);
slots = ma_slots(node, type);
- mas->index = mas_safe_min(mas, pivots, mas->offset);
+ pivots = ma_pivots(node, type);
count = ma_data_end(node, type, pivots, mas->max);
- if (ma_dead_node(node))
+ if (unlikely(ma_dead_node(node)))
+ return NULL;
+
+ mas->index = mas_safe_min(mas, pivots, mas->offset);
+ if (unlikely(ma_dead_node(node)))
return NULL;
if (mas->index > max)
@@ -4817,6 +4846,11 @@ static inline void *mas_prev_nentry(struct ma_state *mas, unsigned long limit,
slots = ma_slots(mn, mt);
pivots = ma_pivots(mn, mt);
+ if (unlikely(ma_dead_node(mn))) {
+ mas_rewalk(mas, index);
+ goto retry;
+ }
+
if (offset == mt_pivots[mt])
pivot = mas->max;
else
@@ -6631,11 +6665,11 @@ static inline void *mas_first_entry(struct ma_state *mas, struct maple_node *mn,
while (likely(!ma_is_leaf(mt))) {
MT_BUG_ON(mas->tree, mte_dead_node(mas->node));
slots = ma_slots(mn, mt);
- pivots = ma_pivots(mn, mt);
- max = pivots[0];
entry = mas_slot(mas, slots, 0);
+ pivots = ma_pivots(mn, mt);
if (unlikely(ma_dead_node(mn)))
return NULL;
+ max = pivots[0];
mas->node = entry;
mn = mas_mn(mas);
mt = mte_node_type(mas->node);
@@ -6655,13 +6689,13 @@ static inline void *mas_first_entry(struct ma_state *mas, struct maple_node *mn,
if (likely(entry))
return entry;
- pivots = ma_pivots(mn, mt);
- mas->index = pivots[0] + 1;
mas->offset = 1;
entry = mas_slot(mas, slots, 1);
+ pivots = ma_pivots(mn, mt);
if (unlikely(ma_dead_node(mn)))
return NULL;
+ mas->index = pivots[0] + 1;
if (mas->index > limit)
goto none;
--
2.39.1