From: Peter Zijlstra
Subject: Re: TLB and PTE coherency during munmap
Date: Wed, 29 May 2013 19:51:25 +0200
Message-ID: <20130529175125.GJ12193@twins.programming.kicks-ass.net>
References: <51A45861.1010008@gmail.com> <20130529122728.GA27176@twins.programming.kicks-ass.net> <51A5F7A7.5020604@synopsys.com>
In-Reply-To: <51A5F7A7.5020604@synopsys.com>
MIME-Version: 1.0
Content-Type: text/plain; charset=us-ascii
Content-Disposition: inline
To: Vineet Gupta
Cc: Max Filippov, KAMEZAWA Hiroyuki, linux-arch@vger.kernel.org, linux-mm@kvack.org, Ralf Baechle, Chris Zankel, Marc Gauthier, linux-xtensa@linux-xtensa.org, Hugh Dickins

What about something like this?

---
 include/asm-generic/tlb.h | 11 ++++++++++-
 mm/memory.c               | 17 ++++++++++++++++-
 2 files changed, 26 insertions(+), 2 deletions(-)

diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index b1b1fa6..651b1cf 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -116,6 +116,7 @@ struct mmu_gather {
 
 static inline int tlb_fast_mode(struct mmu_gather *tlb)
 {
+#ifndef CONFIG_PREEMPT
 #ifdef CONFIG_SMP
 	return tlb->fast_mode;
 #else
@@ -124,7 +125,15 @@ static inline int tlb_fast_mode(struct mmu_gather *tlb)
 	 * and page free order so much..
 	 */
 	return 1;
-#endif
+#endif /* CONFIG_SMP */
+#else /* CONFIG_PREEMPT */
+	/*
+	 * Since mmu_gather is preemptible, preemptible kernels are like SMP
+	 * kernels, we must batch to make sure we invalidate TLBs before we
+	 * free the pages.
+	 */
+	return 0;
+#endif /* CONFIG_PREEMPT */
 }
 
 void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm);
diff --git a/mm/memory.c b/mm/memory.c
index 6dc1882..e915af2 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -384,6 +384,21 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
 
 #endif /* CONFIG_HAVE_RCU_TABLE_FREE */
 
+static inline void cond_resched_tlb(struct mmu_gather *tlb)
+{
+#ifndef CONFIG_PREEMPT
+	/*
+	 * For full preempt kernels we must do regular batching like
+	 * SMP, see tlb_fast_mode(). For !PREEMPT we can 'cheat' and
+	 * do a flush before our voluntary 'yield'.
+	 */
+	if (need_resched()) {
+		tlb_flush_mmu(tlb);
+		cond_resched();
+	}
+#endif
+}
+
 /*
  * If a p?d_bad entry is found while walking page tables, report
  * the error, before resetting entry to p?d_none. Usually (but
@@ -1264,7 +1279,7 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
 			goto next;
 		next = zap_pte_range(tlb, vma, pmd, addr, next, details);
 next:
-		cond_resched();
+		cond_resched_tlb(tlb);
 	} while (pmd++, addr = next, addr != end);
 
 	return addr;
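
For anyone reading along: the point of forcing batching here is the ordering the
comment spells out, namely that a stale translation must be invalidated before the
page it maps goes back to the allocator. The fragment below is only an illustration
of that ordering, written as standalone userspace C with made-up names
(unmap_page(), flush_translations(), flush_and_free()); it is not kernel code and
not part of the patch.

	#include <stdio.h>

	#define BATCH_SIZE 4

	static void *batch[BATCH_SIZE];
	static int nr_pending;

	/* Stand-in for the architecture's TLB invalidate. */
	static void flush_translations(void)
	{
		printf("flush TLB\n");
	}

	/*
	 * Flush first, free second: nothing can reach a page through a
	 * stale translation once it is back on the free lists.
	 */
	static void flush_and_free(void)
	{
		flush_translations();
		while (nr_pending)
			printf("free page %p\n", batch[--nr_pending]);
	}

	/*
	 * Instead of freeing a page the moment its mapping is torn down
	 * (what tlb_fast_mode() == 1 allows), defer it into the batch.
	 */
	static void unmap_page(void *page)
	{
		batch[nr_pending++] = page;
		if (nr_pending == BATCH_SIZE)
			flush_and_free();
	}

	int main(void)
	{
		char pages[6];
		int i;

		for (i = 0; i < 6; i++)
			unmap_page(&pages[i]);
		flush_and_free();	/* final flush at the end of the 'zap' */
		return 0;
	}

cond_resched_tlb() exists for the same reason: on a !PREEMPT kernel the only place
the zap can lose the CPU is the voluntary cond_resched(), so flushing right before
it closes the same window.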