From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: <linux-kernel-owner@vger.kernel.org>
Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand
	id S1752769AbdBFNbo (ORCPT );
	Mon, 6 Feb 2017 08:31:44 -0500
Received: from mail-wj0-f195.google.com ([209.85.210.195]:32776 "EHLO
	mail-wj0-f195.google.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org
	with ESMTP id S1752684AbdBFNbi (ORCPT );
	Mon, 6 Feb 2017 08:31:38 -0500
From: Ingo Molnar <mingo@kernel.org>
To: linux-kernel@vger.kernel.org
Cc: Andrew Morton <akpm@linux-foundation.org>,
	Linus Torvalds <torvalds@linux-foundation.org>,
	Mike Galbraith <efault@gmx.de>,
	Oleg Nesterov <oleg@redhat.com>,
	Peter Zijlstra <peterz@infradead.org>,
	Thomas Gleixner <tglx@linutronix.de>
Subject: [PATCH 76/89] sched/headers, mm: Move 'struct tlbflush_unmap_batch' from <linux/sched.h> to <linux/mm_types_task.h>
Date: Mon, 6 Feb 2017 14:29:19 +0100
Message-Id: <1486387772-18837-77-git-send-email-mingo@kernel.org>
X-Mailer: git-send-email 2.7.4
In-Reply-To: <1486387772-18837-1-git-send-email-mingo@kernel.org>
References: <1486387772-18837-1-git-send-email-mingo@kernel.org>
Sender: linux-kernel-owner@vger.kernel.org
List-ID: <linux-kernel.vger.kernel.org>
X-Mailing-List: linux-kernel@vger.kernel.org

Unclutter <linux/sched.h> some more.

Also move the CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH condition inside the
structure body definition, to remove a pair of #ifdefs from sched.h.

Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 include/linux/mm_types_task.h | 22 ++++++++++++++++++++++
 include/linux/sched.h         | 21 ---------------------
 2 files changed, 22 insertions(+), 21 deletions(-)

diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
index 9526d8b9fe0e..136dfdf63ba1 100644
--- a/include/linux/mm_types_task.h
+++ b/include/linux/mm_types_task.h
@@ -10,6 +10,7 @@
 #include <linux/types.h>
 #include <linux/threads.h>
 #include <linux/atomic.h>
+#include <linux/cpumask.h>
 
 #include <asm/page.h>
 
@@ -62,4 +63,25 @@ struct page_frag {
 #endif
 };
 
+/* Track pages that require TLB flushes */
+struct tlbflush_unmap_batch {
+#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+	/*
+	 * Each bit set is a CPU that potentially has a TLB entry for one of
+	 * the PFNs being flushed. See set_tlb_ubc_flush_pending().
+	 */
+	struct cpumask cpumask;
+
+	/* True if any bit in cpumask is set */
+	bool flush_required;
+
+	/*
+	 * If true then the PTE was dirty when unmapped. The entry must be
+	 * flushed before IO is initiated or a stale TLB entry potentially
+	 * allows an update without redirtying the page.
+	 */
+	bool writable;
+#endif
+};
+
 #endif /* _LINUX_MM_TYPES_TASK_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 08428ca98671..e5bcf966e792 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -462,25 +462,6 @@ enum perf_event_task_context {
 	perf_nr_task_contexts,
 };
 
-/* Track pages that require TLB flushes */
-struct tlbflush_unmap_batch {
-	/*
-	 * Each bit set is a CPU that potentially has a TLB entry for one of
-	 * the PFNs being flushed. See set_tlb_ubc_flush_pending().
-	 */
-	struct cpumask cpumask;
-
-	/* True if any bit in cpumask is set */
-	bool flush_required;
-
-	/*
-	 * If true then the PTE was dirty when unmapped. The entry must be
-	 * flushed before IO is initiated or a stale TLB entry potentially
-	 * allows an update without redirtying the page.
-	 */
-	bool writable;
-};
-
 struct task_struct {
 #ifdef CONFIG_THREAD_INFO_IN_TASK
 	/*
@@ -873,9 +854,7 @@ struct task_struct {
 	unsigned long numa_pages_migrated;
 #endif /* CONFIG_NUMA_BALANCING */
 
-#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
 	struct tlbflush_unmap_batch tlb_ubc;
-#endif
 
 	struct rcu_head rcu;
-- 
2.7.4
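
The #ifdef movement described in the changelog is a general kernel pattern worth
spelling out: with the config condition inside the structure body, the type
itself always exists, so 'struct task_struct' can embed the member
unconditionally and each usage site loses its #ifdef pair; when the option is
off the structure is simply empty (zero size, a GNU C extension the kernel
relies on). Below is a minimal standalone sketch of the pattern;
CONFIG_FEATURE_X, struct feature_state and struct task_like are hypothetical
names chosen for illustration, not taken from this patch:

	#include <stdbool.h>
	#include <stdio.h>

	/* #define CONFIG_FEATURE_X */	/* hypothetical option; toggle it to compare sizes */

	/* The condition lives inside the body: the type always exists, its members may not. */
	struct feature_state {
	#ifdef CONFIG_FEATURE_X
		unsigned long	pending_mask;
		bool		flush_required;
	#endif
	};

	/* The embedding structure needs no #ifdef around the member declaration. */
	struct task_like {
		int			pid;
		struct feature_state	fstate;
	};

	int main(void)
	{
		/* With the option off, the empty struct contributes nothing (sizeof == 0 under GNU C). */
		printf("sizeof(struct feature_state) = %zu\n", sizeof(struct feature_state));
		printf("sizeof(struct task_like)     = %zu\n", sizeof(struct task_like));
		return 0;
	}

The trade-off is that code touching the members still needs its own #ifdef (or
empty inline helpers); only the declaration and embedding sites are
uncluttered, which is exactly what the sched.h hunks above achieve.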