From: Laurent Dufour <ldufour@linux.ibm.com>
To: akpm@linux-foundation.org, mhocko@kernel.org, peterz@infradead.org, kirill@shutemov.name, ak@linux.intel.com, dave@stgolabs.net, jack@suse.cz, Matthew Wilcox <willy@infradead.org>, aneesh.kumar@linux.ibm.com, benh@kernel.crashing.org, mpe@ellerman.id.au, paulus@samba.org, Thomas Gleixner <tglx@linutronix.de>, Ingo Molnar <mingo@redhat.com>, hpa@zytor.com, Will Deacon <will.deacon@arm.com>, Sergey Senozhatsky <sergey.senozhatsky@gmail.com>, sergey.senozhatsky.work@gmail.com, Andrea Arcangeli <aarcange@redhat.com>, Alexei Starovoitov <alexei.starovoitov@gmail.com>, kemi.wang@intel.com, Daniel Jordan <daniel.m.jordan@oracle.com>, David Rientjes <rientjes@google.com>, Jerome Glisse <jglisse@redhat.com>, Ganesh Mahendran <opensource.ganesh@gmail.com>, Minchan Kim <minchan@kernel.org>, Punit Agrawal <punitagrawal@gmail.com>, vinayak menon <vinayakm.list@gmail.com>, Yang Shi <yang.shi@linux.alibaba.com>, zhong jiang <zhongjiang@huawei.com>, Haiyan Song <haiyanx.song@intel.com>, Balbir Singh <bsingharora@gmail.com>, sj38.park@gmail.com, Michel Lespinasse <walken@google.com>, Mike Rapoport <rppt@linux.ibm.com>
Cc: linux-kernel@vger.kernel.org, linux-mm@kvack.org, haren@linux.vnet.ibm.com, npiggin@gmail.com, paulmck@linux.vnet.ibm.com, Tim Chen <tim.c.chen@linux.intel.com>, linuxppc-dev@lists.ozlabs.org, x86@kernel.org
Subject: [PATCH v12 19/31] mm: protect the RB tree with a sequence lock
Date: Tue, 16 Apr 2019 15:45:10 +0200
Message-ID: <20190416134522.17540-20-ldufour@linux.ibm.com>
In-Reply-To: <20190416134522.17540-1-ldufour@linux.ibm.com>

Introduce a per-mm_struct seqlock, the mm_seq field, to protect changes
made to the MM RB tree. This allows the RB tree to be walked without
grabbing the mmap_sem; once the walk is done, the sequence counter is
checked to verify that it remained stable during the walk.

The mm seqlock is held while inserting and removing entries in the MM RB
tree. Later in this series, it will be checked when looking for a VMA
without holding the mmap_sem.
This is based on the initial work from Peter Zijlstra:
https://lore.kernel.org/linux-mm/20100104182813.479668508@chello.nl/

Signed-off-by: Laurent Dufour <ldufour@linux.ibm.com>
---
 include/linux/mm_types.h |  3 +++
 kernel/fork.c            |  3 +++
 mm/init-mm.c             |  3 +++
 mm/mmap.c                | 48 +++++++++++++++++++++++++++++++---------
 4 files changed, 46 insertions(+), 11 deletions(-)

diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index e78f72eb2576..24b3f8ce9e42 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -358,6 +358,9 @@ struct mm_struct {
 	struct {
 		struct vm_area_struct *mmap;	/* list of VMAs */
 		struct rb_root mm_rb;
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+		seqlock_t mm_seq;
+#endif
 		u64 vmacache_seqnum;		/* per-thread vmacache */
 #ifdef CONFIG_MMU
 		unsigned long (*get_unmapped_area) (struct file *filp,
diff --git a/kernel/fork.c b/kernel/fork.c
index 2992d2c95256..3a1739197ebc 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1008,6 +1008,9 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
 	mm->mmap = NULL;
 	mm->mm_rb = RB_ROOT;
 	mm->vmacache_seqnum = 0;
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+	seqlock_init(&mm->mm_seq);
+#endif
 	atomic_set(&mm->mm_users, 1);
 	atomic_set(&mm->mm_count, 1);
 	init_rwsem(&mm->mmap_sem);
diff --git a/mm/init-mm.c b/mm/init-mm.c
index a787a319211e..69346b883a4e 100644
--- a/mm/init-mm.c
+++ b/mm/init-mm.c
@@ -27,6 +27,9 @@
  */
 struct mm_struct init_mm = {
 	.mm_rb		= RB_ROOT,
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+	.mm_seq		= __SEQLOCK_UNLOCKED(init_mm.mm_seq),
+#endif
 	.pgd		= swapper_pg_dir,
 	.mm_users	= ATOMIC_INIT(2),
 	.mm_count	= ATOMIC_INIT(1),
diff --git a/mm/mmap.c b/mm/mmap.c
index 13460b38b0fb..f7f6027a7dff 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -170,6 +170,24 @@ void unlink_file_vma(struct vm_area_struct *vma)
 	}
 }

+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+static inline void mm_write_seqlock(struct mm_struct *mm)
+{
+	write_seqlock(&mm->mm_seq);
+}
+static inline void mm_write_sequnlock(struct mm_struct *mm)
+{
+	write_sequnlock(&mm->mm_seq);
+}
+#else
+static inline void mm_write_seqlock(struct mm_struct *mm)
+{
+}
+static inline void mm_write_sequnlock(struct mm_struct *mm)
+{
+}
+#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */
+
 /*
  * Close a vm structure and free it, returning the next.
  */
@@ -445,26 +463,32 @@ static void vma_gap_update(struct vm_area_struct *vma)
 }

 static inline void vma_rb_insert(struct vm_area_struct *vma,
-				 struct rb_root *root)
+				 struct mm_struct *mm)
 {
+	struct rb_root *root = &mm->mm_rb;
+
 	/* All rb_subtree_gap values must be consistent prior to insertion */
 	validate_mm_rb(root, NULL);

 	rb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
 }

-static void __vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root)
+static void __vma_rb_erase(struct vm_area_struct *vma, struct mm_struct *mm)
 {
+	struct rb_root *root = &mm->mm_rb;
+
 	/*
 	 * Note rb_erase_augmented is a fairly large inline function,
 	 * so make sure we instantiate it only once with our desired
 	 * augmented rbtree callbacks.
 	 */
+	mm_write_seqlock(mm);
 	rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
+	mm_write_sequnlock(mm);	/* wmb */
 }

 static __always_inline void vma_rb_erase_ignore(struct vm_area_struct *vma,
-						struct rb_root *root,
+						struct mm_struct *mm,
 						struct vm_area_struct *ignore)
 {
 	/*
@@ -472,21 +496,21 @@ static __always_inline void vma_rb_erase_ignore(struct vm_area_struct *vma,
 	 * with the possible exception of the "next" vma being erased if
 	 * next->vm_start was reduced.
 	 */
-	validate_mm_rb(root, ignore);
+	validate_mm_rb(&mm->mm_rb, ignore);

-	__vma_rb_erase(vma, root);
+	__vma_rb_erase(vma, mm);
 }

 static __always_inline void vma_rb_erase(struct vm_area_struct *vma,
-					 struct rb_root *root)
+					 struct mm_struct *mm)
 {
 	/*
 	 * All rb_subtree_gap values must be consistent prior to erase,
 	 * with the possible exception of the vma being erased.
 	 */
-	validate_mm_rb(root, vma);
+	validate_mm_rb(&mm->mm_rb, vma);

-	__vma_rb_erase(vma, root);
+	__vma_rb_erase(vma, mm);
 }

 /*
@@ -601,10 +625,12 @@ void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * immediately update the gap to the correct value. Finally we
 	 * rebalance the rbtree after all augmented values have been set.
 	 */
+	mm_write_seqlock(mm);
 	rb_link_node(&vma->vm_rb, rb_parent, rb_link);
 	vma->rb_subtree_gap = 0;
 	vma_gap_update(vma);
-	vma_rb_insert(vma, &mm->mm_rb);
+	vma_rb_insert(vma, mm);
+	mm_write_sequnlock(mm);
 }

 static void __vma_link_file(struct vm_area_struct *vma)
@@ -680,7 +706,7 @@ static __always_inline void __vma_unlink_common(struct mm_struct *mm,
 {
 	struct vm_area_struct *next;

-	vma_rb_erase_ignore(vma, &mm->mm_rb, ignore);
+	vma_rb_erase_ignore(vma, mm, ignore);
 	next = vma->vm_next;
 	if (has_prev)
 		prev->vm_next = next;
@@ -2674,7 +2700,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
 	insertion_point = (prev ? &prev->vm_next : &mm->mmap);
 	vma->vm_prev = NULL;
 	do {
-		vma_rb_erase(vma, &mm->mm_rb);
+		vma_rb_erase(vma, mm);
 		mm->map_count--;
 		tail_vma = vma;
 		vma = vma->vm_next;
-- 
2.21.0
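
For context, the read side of this seqlock only appears later in the series
(patch 21 introduces find_vma_rcu()). The sketch below is purely illustrative
and not part of this patch: the helper name find_vma_speculative() is made up
for the example, it reuses the lockless lookup shape of find_vma(), and it
glosses over the RCU/reference-count protection the VMA itself would need.

/*
 * Illustrative sketch only -- not from this patch. It assumes the mm_seq
 * field added above and ignores VMA lifetime protection (RCU/refcount),
 * which later patches in the series provide.
 */
static struct vm_area_struct *find_vma_speculative(struct mm_struct *mm,
						   unsigned long addr)
{
	struct vm_area_struct *vma;
	struct rb_node *node;
	unsigned int seq;

	do {
		/* Snapshot the sequence count before walking the tree. */
		seq = read_seqbegin(&mm->mm_seq);

		vma = NULL;
		node = mm->mm_rb.rb_node;
		while (node) {
			struct vm_area_struct *tmp;

			tmp = rb_entry(node, struct vm_area_struct, vm_rb);
			if (tmp->vm_end > addr) {
				vma = tmp;
				if (tmp->vm_start <= addr)
					break;
				node = node->rb_left;
			} else {
				node = node->rb_right;
			}
		}
		/*
		 * If a writer modified the RB tree while we were walking
		 * it, read_seqretry() returns true and the walk is redone.
		 */
	} while (read_seqretry(&mm->mm_seq, seq));

	return vma;
}

The design point this illustrates is that writers take write_seqlock() around
every tree modification (insert and erase in the patch above), so a lockless
walker only has to compare the sequence count before and after its traversal
to know whether the walk raced with a modification.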