From: Nicholas Piggin <npiggin@gmail.com> To: linux-arch@vger.kernel.org Cc: Nicholas Piggin <npiggin@gmail.com>, x86@kernel.org, Mathieu Desnoyers <mathieu.desnoyers@efficios.com>, Arnd Bergmann <arnd@arndb.de>, Peter Zijlstra <peterz@infradead.org>, linux-kernel@vger.kernel.org, linuxppc-dev@lists.ozlabs.org, linux-mm@kvack.org, Anton Blanchard <anton@ozlabs.org> Subject: [RFC PATCH 6/7] lazy tlb: allow lazy tlb mm switching to be configurable Date: Fri, 10 Jul 2020 11:56:45 +1000 [thread overview] Message-ID: <20200710015646.2020871-7-npiggin@gmail.com> (raw) In-Reply-To: <20200710015646.2020871-1-npiggin@gmail.com> NOMMU systems could easily go without this and save a bit of code and the mm refcounting, because their mm switch is a no-op. I haven't flipped them over because haven't audited all arch code to convert over to using the _lazy_tlb refcounting. Signed-off-by: Nicholas Piggin <npiggin@gmail.com> --- arch/Kconfig | 7 +++++ include/linux/sched/mm.h | 12 ++++++--- kernel/sched/core.c | 55 +++++++++++++++++++++++++++------------- kernel/sched/sched.h | 4 ++- 4 files changed, 55 insertions(+), 23 deletions(-) diff --git a/arch/Kconfig b/arch/Kconfig index 8cc35dc556c7..2daf8fe6146a 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -411,6 +411,13 @@ config MMU_GATHER_NO_GATHER bool depends on MMU_GATHER_TABLE_FREE +# Would like to make this depend on MMU, because there is little use for lazy mm switching +# with NOMMU, but have to audit NOMMU architecture code first. +config MMU_LAZY_TLB + def_bool y + help + Enable "lazy TLB" mmu context switching for kernel threads. 
+ config ARCH_HAVE_NMI_SAFE_CMPXCHG bool diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index 110d4ad21de6..2c2b20e2ccc7 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -53,18 +53,22 @@ void mmdrop(struct mm_struct *mm); /* Helpers for lazy TLB mm refcounting */ static inline void mmgrab_lazy_tlb(struct mm_struct *mm) { - mmgrab(mm); + if (IS_ENABLED(CONFIG_MMU_LAZY_TLB)) + mmgrab(mm); } static inline void mmdrop_lazy_tlb(struct mm_struct *mm) { - mmdrop(mm); + if (IS_ENABLED(CONFIG_MMU_LAZY_TLB)) + mmdrop(mm); } static inline void mmdrop_lazy_tlb_smp_mb(struct mm_struct *mm) { - /* This depends on mmdrop providing a full smp_mb() */ - mmdrop(mm); + if (IS_ENABLED(CONFIG_MMU_LAZY_TLB)) + mmdrop(mm); /* This depends on mmdrop providing a full smp_mb() */ + else + smp_mb(); } /* diff --git a/kernel/sched/core.c b/kernel/sched/core.c index d19f2f517f6c..14b4fae6f6e3 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3253,7 +3253,7 @@ static struct rq *finish_task_switch(struct task_struct *prev) __releases(rq->lock) { struct rq *rq = this_rq(); - struct mm_struct *mm = rq->prev_mm; + struct mm_struct *mm = NULL; long prev_state; /* @@ -3272,7 +3272,10 @@ static struct rq *finish_task_switch(struct task_struct *prev) current->comm, current->pid, preempt_count())) preempt_count_set(FORK_PREEMPT_COUNT); - rq->prev_mm = NULL; +#ifdef CONFIG_MMU_LAZY_TLB + mm = rq->prev_lazy_mm; + rq->prev_lazy_mm = NULL; +#endif /* * A task struct has one reference for the use as "current". @@ -3393,22 +3396,11 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev) calculate_sigpending(); } -/* - * context_switch - switch to the new MM and the new thread's register state. 
- */ -static __always_inline struct rq * -context_switch(struct rq *rq, struct task_struct *prev, - struct task_struct *next, struct rq_flags *rf) +static __always_inline void +context_switch_mm(struct rq *rq, struct task_struct *prev, + struct task_struct *next) { - prepare_task_switch(rq, prev, next); - - /* - * For paravirt, this is coupled with an exit in switch_to to - * combine the page table reload and the switch backend into - * one hypercall. - */ - arch_start_context_switch(prev); - +#ifdef CONFIG_MMU_LAZY_TLB /* * kernel -> kernel lazy + transfer active * user -> kernel lazy + mmgrab_lazy_tlb() active @@ -3440,10 +3432,37 @@ context_switch(struct rq *rq, struct task_struct *prev, exit_lazy_tlb(prev->active_mm, next); /* will mmdrop_lazy_tlb() in finish_task_switch(). */ - rq->prev_mm = prev->active_mm; + rq->prev_lazy_mm = prev->active_mm; prev->active_mm = NULL; } } +#else + if (!next->mm) + next->active_mm = &init_mm; + membarrier_switch_mm(rq, prev->active_mm, next->active_mm); + switch_mm_irqs_off(prev->active_mm, next->active_mm, next); + if (!prev->mm) + prev->active_mm = NULL; +#endif +} + +/* + * context_switch - switch to the new MM and the new thread's register state. + */ +static __always_inline struct rq * +context_switch(struct rq *rq, struct task_struct *prev, + struct task_struct *next, struct rq_flags *rf) +{ + prepare_task_switch(rq, prev, next); + + /* + * For paravirt, this is coupled with an exit in switch_to to + * combine the page table reload and the switch backend into + * one hypercall. 
+ */ + arch_start_context_switch(prev); + + context_switch_mm(rq, prev, next); rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 877fb08eb1b0..b196dd885d33 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -929,7 +929,9 @@ struct rq { struct task_struct *idle; struct task_struct *stop; unsigned long next_balance; - struct mm_struct *prev_mm; +#ifdef CONFIG_MMU_LAZY_TLB + struct mm_struct *prev_lazy_mm; +#endif unsigned int clock_update_flags; u64 clock; -- 2.23.0
WARNING: multiple messages have this Message-ID (diff)
From: Nicholas Piggin <npiggin@gmail.com> To: linux-arch@vger.kernel.org Cc: Arnd Bergmann <arnd@arndb.de>, Peter Zijlstra <peterz@infradead.org>, x86@kernel.org, linux-kernel@vger.kernel.org, Nicholas Piggin <npiggin@gmail.com>, linux-mm@kvack.org, Mathieu Desnoyers <mathieu.desnoyers@efficios.com>, linuxppc-dev@lists.ozlabs.org Subject: [RFC PATCH 6/7] lazy tlb: allow lazy tlb mm switching to be configurable Date: Fri, 10 Jul 2020 11:56:45 +1000 [thread overview] Message-ID: <20200710015646.2020871-7-npiggin@gmail.com> (raw) In-Reply-To: <20200710015646.2020871-1-npiggin@gmail.com> NOMMU systems could easily go without this and save a bit of code and the mm refcounting, because their mm switch is a no-op. I haven't flipped them over because haven't audited all arch code to convert over to using the _lazy_tlb refcounting. Signed-off-by: Nicholas Piggin <npiggin@gmail.com> --- arch/Kconfig | 7 +++++ include/linux/sched/mm.h | 12 ++++++--- kernel/sched/core.c | 55 +++++++++++++++++++++++++++------------- kernel/sched/sched.h | 4 ++- 4 files changed, 55 insertions(+), 23 deletions(-) diff --git a/arch/Kconfig b/arch/Kconfig index 8cc35dc556c7..2daf8fe6146a 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -411,6 +411,13 @@ config MMU_GATHER_NO_GATHER bool depends on MMU_GATHER_TABLE_FREE +# Would like to make this depend on MMU, because there is little use for lazy mm switching +# with NOMMU, but have to audit NOMMU architecture code first. +config MMU_LAZY_TLB + def_bool y + help + Enable "lazy TLB" mmu context switching for kernel threads. 
+ config ARCH_HAVE_NMI_SAFE_CMPXCHG bool diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index 110d4ad21de6..2c2b20e2ccc7 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -53,18 +53,22 @@ void mmdrop(struct mm_struct *mm); /* Helpers for lazy TLB mm refcounting */ static inline void mmgrab_lazy_tlb(struct mm_struct *mm) { - mmgrab(mm); + if (IS_ENABLED(CONFIG_MMU_LAZY_TLB)) + mmgrab(mm); } static inline void mmdrop_lazy_tlb(struct mm_struct *mm) { - mmdrop(mm); + if (IS_ENABLED(CONFIG_MMU_LAZY_TLB)) + mmdrop(mm); } static inline void mmdrop_lazy_tlb_smp_mb(struct mm_struct *mm) { - /* This depends on mmdrop providing a full smp_mb() */ - mmdrop(mm); + if (IS_ENABLED(CONFIG_MMU_LAZY_TLB)) + mmdrop(mm); /* This depends on mmdrop providing a full smp_mb() */ + else + smp_mb(); } /* diff --git a/kernel/sched/core.c b/kernel/sched/core.c index d19f2f517f6c..14b4fae6f6e3 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3253,7 +3253,7 @@ static struct rq *finish_task_switch(struct task_struct *prev) __releases(rq->lock) { struct rq *rq = this_rq(); - struct mm_struct *mm = rq->prev_mm; + struct mm_struct *mm = NULL; long prev_state; /* @@ -3272,7 +3272,10 @@ static struct rq *finish_task_switch(struct task_struct *prev) current->comm, current->pid, preempt_count())) preempt_count_set(FORK_PREEMPT_COUNT); - rq->prev_mm = NULL; +#ifdef CONFIG_MMU_LAZY_TLB + mm = rq->prev_lazy_mm; + rq->prev_lazy_mm = NULL; +#endif /* * A task struct has one reference for the use as "current". @@ -3393,22 +3396,11 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev) calculate_sigpending(); } -/* - * context_switch - switch to the new MM and the new thread's register state. 
- */ -static __always_inline struct rq * -context_switch(struct rq *rq, struct task_struct *prev, - struct task_struct *next, struct rq_flags *rf) +static __always_inline void +context_switch_mm(struct rq *rq, struct task_struct *prev, + struct task_struct *next) { - prepare_task_switch(rq, prev, next); - - /* - * For paravirt, this is coupled with an exit in switch_to to - * combine the page table reload and the switch backend into - * one hypercall. - */ - arch_start_context_switch(prev); - +#ifdef CONFIG_MMU_LAZY_TLB /* * kernel -> kernel lazy + transfer active * user -> kernel lazy + mmgrab_lazy_tlb() active @@ -3440,10 +3432,37 @@ context_switch(struct rq *rq, struct task_struct *prev, exit_lazy_tlb(prev->active_mm, next); /* will mmdrop_lazy_tlb() in finish_task_switch(). */ - rq->prev_mm = prev->active_mm; + rq->prev_lazy_mm = prev->active_mm; prev->active_mm = NULL; } } +#else + if (!next->mm) + next->active_mm = &init_mm; + membarrier_switch_mm(rq, prev->active_mm, next->active_mm); + switch_mm_irqs_off(prev->active_mm, next->active_mm, next); + if (!prev->mm) + prev->active_mm = NULL; +#endif +} + +/* + * context_switch - switch to the new MM and the new thread's register state. + */ +static __always_inline struct rq * +context_switch(struct rq *rq, struct task_struct *prev, + struct task_struct *next, struct rq_flags *rf) +{ + prepare_task_switch(rq, prev, next); + + /* + * For paravirt, this is coupled with an exit in switch_to to + * combine the page table reload and the switch backend into + * one hypercall. 
+ */ + arch_start_context_switch(prev); + + context_switch_mm(rq, prev, next); rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 877fb08eb1b0..b196dd885d33 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -929,7 +929,9 @@ struct rq { struct task_struct *idle; struct task_struct *stop; unsigned long next_balance; - struct mm_struct *prev_mm; +#ifdef CONFIG_MMU_LAZY_TLB + struct mm_struct *prev_lazy_mm; +#endif unsigned int clock_update_flags; u64 clock; -- 2.23.0
next prev parent reply other threads:[~2020-07-10 1:57 UTC|newest] Thread overview: 136+ messages / expand[flat|nested] mbox.gz Atom feed top 2020-07-10 1:56 [RFC PATCH 0/7] mmu context cleanup, lazy tlb cleanup, Nicholas Piggin 2020-07-10 1:56 ` Nicholas Piggin 2020-07-10 1:56 ` [RFC PATCH 1/7] asm-generic: add generic MMU versions of mmu context functions Nicholas Piggin 2020-07-10 1:56 ` Nicholas Piggin 2020-07-10 1:56 ` [RFC PATCH 2/7] arch: use asm-generic mmu context for no-op implementations Nicholas Piggin 2020-07-10 1:56 ` Nicholas Piggin 2020-07-10 1:56 ` [RFC PATCH 3/7] mm: introduce exit_lazy_tlb Nicholas Piggin 2020-07-10 1:56 ` Nicholas Piggin 2020-07-10 1:56 ` [RFC PATCH 4/7] x86: use exit_lazy_tlb rather than membarrier_mm_sync_core_before_usermode Nicholas Piggin 2020-07-10 1:56 ` Nicholas Piggin 2020-07-10 9:42 ` Peter Zijlstra 2020-07-10 9:42 ` Peter Zijlstra 2020-07-10 14:02 ` Mathieu Desnoyers 2020-07-10 14:02 ` Mathieu Desnoyers 2020-07-10 14:02 ` Mathieu Desnoyers 2020-07-10 17:04 ` Andy Lutomirski 2020-07-10 17:04 ` Andy Lutomirski 2020-07-10 17:04 ` Andy Lutomirski 2020-07-13 4:45 ` Nicholas Piggin 2020-07-13 4:45 ` Nicholas Piggin 2020-07-13 13:47 ` Nicholas Piggin 2020-07-13 13:47 ` Nicholas Piggin 2020-07-13 14:13 ` Mathieu Desnoyers 2020-07-13 14:13 ` Mathieu Desnoyers 2020-07-13 14:13 ` Mathieu Desnoyers 2020-07-13 15:48 ` Andy Lutomirski 2020-07-13 15:48 ` Andy Lutomirski 2020-07-13 15:48 ` Andy Lutomirski 2020-07-13 16:37 ` Nicholas Piggin 2020-07-13 16:37 ` Nicholas Piggin 2020-07-16 4:15 ` Nicholas Piggin 2020-07-16 4:15 ` Nicholas Piggin 2020-07-16 4:42 ` Nicholas Piggin 2020-07-16 4:42 ` Nicholas Piggin 2020-07-16 15:46 ` Mathieu Desnoyers 2020-07-16 15:46 ` Mathieu Desnoyers 2020-07-16 15:46 ` Mathieu Desnoyers 2020-07-16 16:03 ` Mathieu Desnoyers 2020-07-16 16:03 ` Mathieu Desnoyers 2020-07-16 16:03 ` Mathieu Desnoyers 2020-07-16 18:58 ` Mathieu Desnoyers 2020-07-16 18:58 ` Mathieu Desnoyers 2020-07-16 18:58 ` Mathieu Desnoyers 
2020-07-16 21:24 ` Alan Stern 2020-07-16 21:24 ` Alan Stern 2020-07-17 13:39 ` Mathieu Desnoyers 2020-07-17 13:39 ` Mathieu Desnoyers 2020-07-17 13:39 ` Mathieu Desnoyers 2020-07-17 14:51 ` Alan Stern 2020-07-17 14:51 ` Alan Stern 2020-07-17 15:39 ` Mathieu Desnoyers 2020-07-17 15:39 ` Mathieu Desnoyers 2020-07-17 15:39 ` Mathieu Desnoyers 2020-07-17 16:11 ` Alan Stern 2020-07-17 16:11 ` Alan Stern 2020-07-17 16:22 ` Mathieu Desnoyers 2020-07-17 16:22 ` Mathieu Desnoyers 2020-07-17 16:22 ` Mathieu Desnoyers 2020-07-17 17:44 ` Alan Stern 2020-07-17 17:44 ` Alan Stern 2020-07-17 17:52 ` Mathieu Desnoyers 2020-07-17 17:52 ` Mathieu Desnoyers 2020-07-17 17:52 ` Mathieu Desnoyers 2020-07-17 0:00 ` Nicholas Piggin 2020-07-17 0:00 ` Nicholas Piggin 2020-07-16 5:18 ` Andy Lutomirski 2020-07-16 5:18 ` Andy Lutomirski 2020-07-16 6:06 ` Nicholas Piggin 2020-07-16 6:06 ` Nicholas Piggin 2020-07-16 8:50 ` Peter Zijlstra 2020-07-16 8:50 ` Peter Zijlstra 2020-07-16 10:03 ` Nicholas Piggin 2020-07-16 10:03 ` Nicholas Piggin 2020-07-16 11:00 ` peterz 2020-07-16 11:00 ` peterz 2020-07-16 15:34 ` Mathieu Desnoyers 2020-07-16 15:34 ` Mathieu Desnoyers 2020-07-16 15:34 ` Mathieu Desnoyers 2020-07-16 23:26 ` Nicholas Piggin 2020-07-16 23:26 ` Nicholas Piggin 2020-07-17 13:42 ` Mathieu Desnoyers 2020-07-17 13:42 ` Mathieu Desnoyers 2020-07-17 13:42 ` Mathieu Desnoyers 2020-07-20 3:03 ` Nicholas Piggin 2020-07-20 3:03 ` Nicholas Piggin 2020-07-20 16:46 ` Mathieu Desnoyers 2020-07-20 16:46 ` Mathieu Desnoyers 2020-07-20 16:46 ` Mathieu Desnoyers 2020-07-21 10:04 ` Nicholas Piggin 2020-07-21 10:04 ` Nicholas Piggin 2020-07-21 13:11 ` Mathieu Desnoyers 2020-07-21 13:11 ` Mathieu Desnoyers 2020-07-21 13:11 ` Mathieu Desnoyers 2020-07-21 14:30 ` Nicholas Piggin 2020-07-21 14:30 ` Nicholas Piggin 2020-07-21 15:06 ` peterz 2020-07-21 15:06 ` peterz 2020-07-21 15:15 ` Mathieu Desnoyers 2020-07-21 15:15 ` Mathieu Desnoyers 2020-07-21 15:15 ` Mathieu Desnoyers 2020-07-21 15:19 ` Peter Zijlstra 
2020-07-21 15:19 ` Peter Zijlstra 2020-07-21 15:22 ` Mathieu Desnoyers 2020-07-21 15:22 ` Mathieu Desnoyers 2020-07-21 15:22 ` Mathieu Desnoyers 2020-07-10 1:56 ` [RFC PATCH 5/7] lazy tlb: introduce lazy mm refcount helper functions Nicholas Piggin 2020-07-10 1:56 ` Nicholas Piggin 2020-07-10 9:48 ` Peter Zijlstra 2020-07-10 9:48 ` Peter Zijlstra 2020-07-10 1:56 ` Nicholas Piggin [this message] 2020-07-10 1:56 ` [RFC PATCH 6/7] lazy tlb: allow lazy tlb mm switching to be configurable Nicholas Piggin 2020-07-10 1:56 ` [RFC PATCH 7/7] lazy tlb: shoot lazies, a non-refcounting lazy tlb option Nicholas Piggin 2020-07-10 1:56 ` Nicholas Piggin 2020-07-10 9:35 ` Peter Zijlstra 2020-07-10 9:35 ` Peter Zijlstra 2020-07-13 4:58 ` Nicholas Piggin 2020-07-13 4:58 ` Nicholas Piggin 2020-07-13 15:59 ` Andy Lutomirski 2020-07-13 15:59 ` Andy Lutomirski 2020-07-13 15:59 ` Andy Lutomirski 2020-07-13 16:48 ` Nicholas Piggin 2020-07-13 16:48 ` Nicholas Piggin 2020-07-13 18:18 ` Andy Lutomirski 2020-07-13 18:18 ` Andy Lutomirski 2020-07-14 5:04 ` Nicholas Piggin 2020-07-14 5:04 ` Nicholas Piggin 2020-07-14 6:31 ` Nicholas Piggin 2020-07-14 6:31 ` Nicholas Piggin 2020-07-14 12:46 ` Andy Lutomirski 2020-07-14 12:46 ` Andy Lutomirski 2020-07-14 13:23 ` Peter Zijlstra 2020-07-14 13:23 ` Peter Zijlstra 2020-07-16 2:26 ` Nicholas Piggin 2020-07-16 2:26 ` Nicholas Piggin 2020-07-16 2:35 ` Nicholas Piggin 2020-07-16 2:35 ` Nicholas Piggin
Reply instructions: You may reply publicly to this message via plain-text email using any one of the following methods: * Save the following mbox file, import it into your mail client, and reply-to-all from there: mbox Avoid top-posting and favor interleaved quoting: https://en.wikipedia.org/wiki/Posting_style#Interleaved_style * Reply using the --to, --cc, and --in-reply-to switches of git-send-email(1): git send-email \ --in-reply-to=20200710015646.2020871-7-npiggin@gmail.com \ --to=npiggin@gmail.com \ --cc=anton@ozlabs.org \ --cc=arnd@arndb.de \ --cc=linux-arch@vger.kernel.org \ --cc=linux-kernel@vger.kernel.org \ --cc=linux-mm@kvack.org \ --cc=linuxppc-dev@lists.ozlabs.org \ --cc=mathieu.desnoyers@efficios.com \ --cc=peterz@infradead.org \ --cc=x86@kernel.org \ /path/to/YOUR_REPLY https://kernel.org/pub/software/scm/git/docs/git-send-email.html * If your mail client supports setting the In-Reply-To header via mailto: links, try the mailto: link. Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes; see mirroring instructions on how to clone and mirror all data and code used by this external index.