From mboxrd@z Thu Jan 1 00:00:00 1970
Return-Path:
Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand
	id S1752417AbdBFNae (ORCPT );
	Mon, 6 Feb 2017 08:30:34 -0500
Received: from mail-wm0-f68.google.com ([74.125.82.68]:33610 "EHLO
	mail-wm0-f68.google.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org
	with ESMTP id S1752330AbdBFNaY (ORCPT );
	Mon, 6 Feb 2017 08:30:24 -0500
From: Ingo Molnar
To: linux-kernel@vger.kernel.org
Cc: Andrew Morton, Linus Torvalds, Mike Galbraith, Oleg Nesterov,
	Peter Zijlstra, Thomas Gleixner
Subject: [PATCH 25/89] sched/headers, cgroups: Remove the threadgroup_change_*() wrappery
Date: Mon, 6 Feb 2017 14:28:28 +0100
Message-Id: <1486387772-18837-26-git-send-email-mingo@kernel.org>
X-Mailer: git-send-email 2.7.4
In-Reply-To: <1486387772-18837-1-git-send-email-mingo@kernel.org>
References: <1486387772-18837-1-git-send-email-mingo@kernel.org>
Sender: linux-kernel-owner@vger.kernel.org
List-ID:
X-Mailing-List: linux-kernel@vger.kernel.org

threadgroup_change_begin()/end() is a pointless wrapper around
cgroup_threadgroup_change_begin()/end(); the only thing it adds is a
might_sleep() debug check, which is only needed in the !CONFIG_CGROUPS
case.

Remove the wrappers and move that might_sleep() into the !CONFIG_CGROUPS
stub of cgroup_threadgroup_change_begin() (in the CONFIG_CGROUPS case the
down_read() already has its own might_sleep() check).

This debloats <linux/sched.h> a bit and simplifies the API.

Update all call sites.

No change in functionality.

Cc: Peter Zijlstra
Cc: Mike Galbraith
Cc: Thomas Gleixner
Cc: Linus Torvalds
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar
---
 fs/exec.c                   |  6 +++---
 include/linux/cgroup-defs.h | 13 ++++++++-----
 include/linux/sched.h       | 28 ----------------------------
 kernel/cgroup_pids.c        |  2 +-
 kernel/fork.c               |  6 +++---
 kernel/signal.c             |  6 +++---
 6 files changed, 18 insertions(+), 43 deletions(-)

diff --git a/fs/exec.c b/fs/exec.c
index b8ed0c542692..2a24d7d435ba 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1091,7 +1091,7 @@ static int de_thread(struct task_struct *tsk)
 		struct task_struct *leader = tsk->group_leader;
 
 		for (;;) {
-			threadgroup_change_begin(tsk);
+			cgroup_threadgroup_change_begin(tsk);
 			write_lock_irq(&tasklist_lock);
 			/*
 			 * Do this under tasklist_lock to ensure that
@@ -1102,7 +1102,7 @@ static int de_thread(struct task_struct *tsk)
 				break;
 			__set_current_state(TASK_KILLABLE);
 			write_unlock_irq(&tasklist_lock);
-			threadgroup_change_end(tsk);
+			cgroup_threadgroup_change_end(tsk);
 			schedule();
 			if (unlikely(__fatal_signal_pending(tsk)))
 				goto killed;
@@ -1160,7 +1160,7 @@ static int de_thread(struct task_struct *tsk)
 		if (unlikely(leader->ptrace))
 			__wake_up_parent(leader, leader->parent);
 		write_unlock_irq(&tasklist_lock);
-		threadgroup_change_end(tsk);
+		cgroup_threadgroup_change_end(tsk);
 
 		release_task(leader);
 	}
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 861b4677fc5b..c5eac7631377 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -528,8 +528,8 @@ extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
  * cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups
  * @tsk: target task
  *
- * Called from threadgroup_change_begin() and allows cgroup operations to
- * synchronize against threadgroup changes using a percpu_rw_semaphore.
+ * Allows cgroup operations to synchronize against threadgroup changes
+ * using a percpu_rw_semaphore.
  */
 static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
 {
@@ -540,8 +540,7 @@ static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
  * cgroup_threadgroup_change_end - threadgroup exclusion for cgroups
  * @tsk: target task
  *
- * Called from threadgroup_change_end(). Counterpart of
- * cgroup_threadcgroup_change_begin().
+ * Counterpart of cgroup_threadcgroup_change_begin().
  */
 static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
 {
@@ -552,7 +551,11 @@ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
 
 #define CGROUP_SUBSYS_COUNT 0
 
-static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) {}
+static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
+{
+	might_sleep();
+}
+
 static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {}
 
 #endif /* CONFIG_CGROUPS */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index c36df08ac7bf..00039e0e63a4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1933,34 +1933,6 @@ static inline void task_unlock(struct task_struct *p)
 	spin_unlock(&p->alloc_lock);
 }
 
-/**
- * threadgroup_change_begin - mark the beginning of changes to a threadgroup
- * @tsk: task causing the changes
- *
- * All operations which modify a threadgroup - a new thread joining the
- * group, death of a member thread (the assertion of PF_EXITING) and
- * exec(2) dethreading the process and replacing the leader - are wrapped
- * by threadgroup_change_{begin|end}(). This is to provide a place which
- * subsystems needing threadgroup stability can hook into for
- * synchronization.
- */
-static inline void threadgroup_change_begin(struct task_struct *tsk)
-{
-	might_sleep();
-	cgroup_threadgroup_change_begin(tsk);
-}
-
-/**
- * threadgroup_change_end - mark the end of changes to a threadgroup
- * @tsk: task causing the changes
- *
- * See threadgroup_change_begin().
- */
-static inline void threadgroup_change_end(struct task_struct *tsk)
-{
-	cgroup_threadgroup_change_end(tsk);
-}
-
 #ifdef CONFIG_THREAD_INFO_IN_TASK
 static inline struct thread_info *task_thread_info(struct task_struct *task)
 {
diff --git a/kernel/cgroup_pids.c b/kernel/cgroup_pids.c
index 2bd673783f1a..e756dae49300 100644
--- a/kernel/cgroup_pids.c
+++ b/kernel/cgroup_pids.c
@@ -214,7 +214,7 @@ static void pids_cancel_attach(struct cgroup_taskset *tset)
 
 /*
  * task_css_check(true) in pids_can_fork() and pids_cancel_fork() relies
- * on threadgroup_change_begin() held by the copy_process().
+ * on cgroup_threadgroup_change_begin() held by the copy_process().
  */
 static int pids_can_fork(struct task_struct *task)
 {
diff --git a/kernel/fork.c b/kernel/fork.c
index 1c6916090277..9c577368b30d 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1749,7 +1749,7 @@ static __latent_entropy struct task_struct *copy_process(
 	INIT_LIST_HEAD(&p->thread_group);
 	p->task_works = NULL;
 
-	threadgroup_change_begin(current);
+	cgroup_threadgroup_change_begin(current);
 	/*
 	 * Ensure that the cgroup subsystem policies allow the new process to be
 	 * forked. It should be noted the the new process's css_set can be changed
@@ -1839,7 +1839,7 @@ static __latent_entropy struct task_struct *copy_process(
 
 	proc_fork_connector(p);
 	cgroup_post_fork(p);
-	threadgroup_change_end(current);
+	cgroup_threadgroup_change_end(current);
 	perf_event_fork(p);
 
 	trace_task_newtask(p, clone_flags);
@@ -1850,7 +1850,7 @@ static __latent_entropy struct task_struct *copy_process(
 bad_fork_cancel_cgroup:
 	cgroup_cancel_fork(p);
 bad_fork_free_pid:
-	threadgroup_change_end(current);
+	cgroup_threadgroup_change_end(current);
 	if (pid != &init_struct_pid)
 		free_pid(pid);
 bad_fork_cleanup_thread:
diff --git a/kernel/signal.c b/kernel/signal.c
index 08bba8751c0e..26d7975cac66 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2396,11 +2396,11 @@ void exit_signals(struct task_struct *tsk)
 	 * @tsk is about to have PF_EXITING set - lock out users which
 	 * expect stable threadgroup.
 	 */
-	threadgroup_change_begin(tsk);
+	cgroup_threadgroup_change_begin(tsk);
 
 	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
 		tsk->flags |= PF_EXITING;
-		threadgroup_change_end(tsk);
+		cgroup_threadgroup_change_end(tsk);
 		return;
 	}
 
@@ -2411,7 +2411,7 @@ void exit_signals(struct task_struct *tsk)
 	 */
 	tsk->flags |= PF_EXITING;
 
-	threadgroup_change_end(tsk);
+	cgroup_threadgroup_change_end(tsk);
 
 	if (!signal_pending(tsk))
 		goto out;
-- 
2.7.4
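For readers following the series, here is a minimal, illustrative sketch (not
part of the patch) of what survives the wrapper removal. The CONFIG_CGROUPS
bodies below mirror the include/linux/cgroup-defs.h implementation of this
era, assuming it is the usual percpu_down_read()/percpu_up_read() pair on
cgroup_threadgroup_rwsem (this is what the "down_read() already has a
might_sleep() check" remark above refers to), and do_threadgroup_update() is
a made-up caller showing the pattern now used directly in fs/exec.c,
kernel/fork.c and kernel/signal.c:

/*
 * Illustrative sketch only. In the real tree the two helpers live in
 * include/linux/cgroup-defs.h and are picked up via <linux/sched.h>;
 * do_threadgroup_update() is a hypothetical call site.
 */
#ifdef CONFIG_CGROUPS
extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;

static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
{
	/* percpu_down_read() can sleep and carries its own might_sleep(). */
	percpu_down_read(&cgroup_threadgroup_rwsem);
}

static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
{
	percpu_up_read(&cgroup_threadgroup_rwsem);
}
#else	/* !CONFIG_CGROUPS */

/* The stub keeps the might_sleep() debug check the removed wrapper had. */
static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
{
	might_sleep();
}

static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {}

#endif	/* CONFIG_CGROUPS */

/*
 * Hypothetical call site: threadgroup-modifying paths (fork, exec
 * de-threading, exit) call the cgroup_* helpers directly instead of
 * the removed threadgroup_change_*() wrappers.
 */
static void do_threadgroup_update(struct task_struct *tsk)
{
	cgroup_threadgroup_change_begin(tsk);
	/* ... modify the threadgroup under threadgroup exclusion ... */
	cgroup_threadgroup_change_end(tsk);
}

The locking protocol itself is unchanged: callers take cgroup_threadgroup_rwsem
for read around threadgroup-modifying operations while cgroup core takes it for
write, exactly as before; only the extra layer of indirection is gone.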