From: Mike Galbraith <efault@gmx.de>
To: Sebastian Andrzej Siewior <bigeasy@linutronix.de>,
Thomas Gleixner <tglx@linutronix.de>
Cc: LKML <linux-kernel@vger.kernel.org>,
linux-rt-users <linux-rt-users@vger.kernel.org>,
Steven Rostedt <rostedt@goodmis.org>
Subject: [patch-rt] cpuset: Convert callback_lock to raw_spinlock_t
Date: Sun, 08 Jan 2017 09:32:25 +0100
Message-ID: <1483864345.5676.3.camel@gmx.de>
In-Reply-To: <20161223163213.szj43nv7rnelkbty@linutronix.de>

The two commits below add up to a cpuset might_sleep() splat for RT:
8447a0fee974 cpuset: convert callback_mutex to a spinlock
344736f29b35 cpuset: simplify cpuset_node_allowed API
BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:995
in_atomic(): 0, irqs_disabled(): 1, pid: 11718, name: cset
CPU: 135 PID: 11718 Comm: cset Tainted: G E 4.10.0-rt1-rt #4
Hardware name: Intel Corporation BRICKLAND/BRICKLAND, BIOS BRHSXSD1.86B.0056.R01.1409242327 09/24/2014
Call Trace:
? dump_stack+0x5c/0x81
? ___might_sleep+0xf4/0x170
? rt_spin_lock+0x1c/0x50
? __cpuset_node_allowed+0x66/0xc0
? ___slab_alloc+0x390/0x570
? anon_vma_fork+0x8f/0x140
? copy_page_range+0x6cf/0xb00
? anon_vma_fork+0x8f/0x140
? __slab_alloc.isra.74+0x5a/0x81
? anon_vma_fork+0x8f/0x140
? kmem_cache_alloc+0x1b5/0x1f0
? anon_vma_fork+0x8f/0x140
? copy_process.part.35+0x1670/0x1ee0
? _do_fork+0xdd/0x3f0
? _do_fork+0xdd/0x3f0
? do_syscall_64+0x61/0x170
? entry_SYSCALL64_slow_path+0x25/0x25

The latter ensured that a NUMA box WILL take callback_lock in atomic
context by removing the allocator and reclaim path __GFP_HARDWALL
usage which previously prevented such contexts from taking callback_mutex.
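
For illustration only (not part of the patch): on PREEMPT_RT, spinlock_t
is substituted with a sleeping rtmutex-based lock, whereas raw_spinlock_t
retains the vanilla spinning semantics, so only the latter may be taken
with interrupts disabled. A minimal sketch, assuming an RT-patched kernel:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(sleeping_lock);    /* rtmutex on RT: may sleep */
	static DEFINE_RAW_SPINLOCK(atomic_lock);  /* spins on all configs */

	static void rt_lock_sketch(void)
	{
		unsigned long flags;

		local_irq_save(flags);
		/*
		 * spin_lock(&sleeping_lock) here would trigger exactly the
		 * might_sleep() splat above: sleeping locks must not be
		 * taken with interrupts disabled on RT.
		 */
		raw_spin_lock(&atomic_lock);    /* fine: never sleeps */
		raw_spin_unlock(&atomic_lock);
		local_irq_restore(flags);
	}
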
One option would be to reinstate __GFP_HARDWALL protections for RT;
however, as the 8447a0fee974 changelog states:

    The callback_mutex is only used to synchronize reads/updates of cpusets'
    flags and cpu/node masks. These operations should always proceed fast so
    there's no reason why we can't use a spinlock instead of the mutex.

The critical sections are short, so take the cheaper option and convert
callback_lock to a raw_spinlock_t, which remains a true spinning lock on RT.
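
The conversion itself is mechanical: every spin_lock*() operation on
callback_lock becomes the corresponding raw_spin_lock*() operation, as in
this condensed sketch of the pattern repeated throughout the patch below:

	#include <linux/spinlock.h>

	static DEFINE_RAW_SPINLOCK(callback_lock);

	static void example_reader(void)
	{
		unsigned long flags;

		/* Never sleeps, hence legal from irqs-disabled context. */
		raw_spin_lock_irqsave(&callback_lock, flags);
		/* read/update cpuset flags and cpu/node masks */
		raw_spin_unlock_irqrestore(&callback_lock, flags);
	}
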
Signed-off-by: Mike Galbraith <efault@gmx.de>
Cc: stable-rt@vger.kernel.org
---
kernel/cpuset.c | 66 ++++++++++++++++++++++++++++----------------------------
1 file changed, 33 insertions(+), 33 deletions(-)
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -284,7 +284,7 @@ static struct cpuset top_cpuset = {
*/
static DEFINE_MUTEX(cpuset_mutex);
-static DEFINE_SPINLOCK(callback_lock);
+static DEFINE_RAW_SPINLOCK(callback_lock);
static struct workqueue_struct *cpuset_migrate_mm_wq;
@@ -907,9 +907,9 @@ static void update_cpumasks_hier(struct
continue;
rcu_read_unlock();
- spin_lock_irq(&callback_lock);
+ raw_spin_lock_irq(&callback_lock);
cpumask_copy(cp->effective_cpus, new_cpus);
- spin_unlock_irq(&callback_lock);
+ raw_spin_unlock_irq(&callback_lock);
WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
@@ -974,9 +974,9 @@ static int update_cpumask(struct cpuset
if (retval < 0)
return retval;
- spin_lock_irq(&callback_lock);
+ raw_spin_lock_irq(&callback_lock);
cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
- spin_unlock_irq(&callback_lock);
+ raw_spin_unlock_irq(&callback_lock);
/* use trialcs->cpus_allowed as a temp variable */
update_cpumasks_hier(cs, trialcs->cpus_allowed);
@@ -1176,9 +1176,9 @@ static void update_nodemasks_hier(struct
continue;
rcu_read_unlock();
- spin_lock_irq(&callback_lock);
+ raw_spin_lock_irq(&callback_lock);
cp->effective_mems = *new_mems;
- spin_unlock_irq(&callback_lock);
+ raw_spin_unlock_irq(&callback_lock);
WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
!nodes_equal(cp->mems_allowed, cp->effective_mems));
@@ -1246,9 +1246,9 @@ static int update_nodemask(struct cpuset
if (retval < 0)
goto done;
- spin_lock_irq(&callback_lock);
+ raw_spin_lock_irq(&callback_lock);
cs->mems_allowed = trialcs->mems_allowed;
- spin_unlock_irq(&callback_lock);
+ raw_spin_unlock_irq(&callback_lock);
/* use trialcs->mems_allowed as a temp variable */
update_nodemasks_hier(cs, &trialcs->mems_allowed);
@@ -1339,9 +1339,9 @@ static int update_flag(cpuset_flagbits_t
spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
|| (is_spread_page(cs) != is_spread_page(trialcs)));
- spin_lock_irq(&callback_lock);
+ raw_spin_lock_irq(&callback_lock);
cs->flags = trialcs->flags;
- spin_unlock_irq(&callback_lock);
+ raw_spin_unlock_irq(&callback_lock);
if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
rebuild_sched_domains_locked();
@@ -1756,7 +1756,7 @@ static int cpuset_common_seq_show(struct
cpuset_filetype_t type = seq_cft(sf)->private;
int ret = 0;
- spin_lock_irq(&callback_lock);
+ raw_spin_lock_irq(&callback_lock);
switch (type) {
case FILE_CPULIST:
@@ -1775,7 +1775,7 @@ static int cpuset_common_seq_show(struct
ret = -EINVAL;
}
- spin_unlock_irq(&callback_lock);
+ raw_spin_unlock_irq(&callback_lock);
return ret;
}
@@ -1989,12 +1989,12 @@ static int cpuset_css_online(struct cgro
cpuset_inc();
- spin_lock_irq(&callback_lock);
+ raw_spin_lock_irq(&callback_lock);
if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
cpumask_copy(cs->effective_cpus, parent->effective_cpus);
cs->effective_mems = parent->effective_mems;
}
- spin_unlock_irq(&callback_lock);
+ raw_spin_unlock_irq(&callback_lock);
if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
goto out_unlock;
@@ -2021,12 +2021,12 @@ static int cpuset_css_online(struct cgro
}
rcu_read_unlock();
- spin_lock_irq(&callback_lock);
+ raw_spin_lock_irq(&callback_lock);
cs->mems_allowed = parent->mems_allowed;
cs->effective_mems = parent->mems_allowed;
cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
- spin_unlock_irq(&callback_lock);
+ raw_spin_unlock_irq(&callback_lock);
out_unlock:
mutex_unlock(&cpuset_mutex);
return 0;
@@ -2065,7 +2065,7 @@ static void cpuset_css_free(struct cgrou
static void cpuset_bind(struct cgroup_subsys_state *root_css)
{
mutex_lock(&cpuset_mutex);
- spin_lock_irq(&callback_lock);
+ raw_spin_lock_irq(&callback_lock);
if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
@@ -2076,7 +2076,7 @@ static void cpuset_bind(struct cgroup_su
top_cpuset.mems_allowed = top_cpuset.effective_mems;
}
- spin_unlock_irq(&callback_lock);
+ raw_spin_unlock_irq(&callback_lock);
mutex_unlock(&cpuset_mutex);
}
@@ -2177,12 +2177,12 @@ hotplug_update_tasks_legacy(struct cpuse
{
bool is_empty;
- spin_lock_irq(&callback_lock);
+ raw_spin_lock_irq(&callback_lock);
cpumask_copy(cs->cpus_allowed, new_cpus);
cpumask_copy(cs->effective_cpus, new_cpus);
cs->mems_allowed = *new_mems;
cs->effective_mems = *new_mems;
- spin_unlock_irq(&callback_lock);
+ raw_spin_unlock_irq(&callback_lock);
/*
* Don't call update_tasks_cpumask() if the cpuset becomes empty,
@@ -2219,10 +2219,10 @@ hotplug_update_tasks(struct cpuset *cs,
if (nodes_empty(*new_mems))
*new_mems = parent_cs(cs)->effective_mems;
- spin_lock_irq(&callback_lock);
+ raw_spin_lock_irq(&callback_lock);
cpumask_copy(cs->effective_cpus, new_cpus);
cs->effective_mems = *new_mems;
- spin_unlock_irq(&callback_lock);
+ raw_spin_unlock_irq(&callback_lock);
if (cpus_updated)
update_tasks_cpumask(cs);
@@ -2308,21 +2308,21 @@ static void cpuset_hotplug_workfn(struct
/* synchronize cpus_allowed to cpu_active_mask */
if (cpus_updated) {
- spin_lock_irq(&callback_lock);
+ raw_spin_lock_irq(&callback_lock);
if (!on_dfl)
cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
- spin_unlock_irq(&callback_lock);
+ raw_spin_unlock_irq(&callback_lock);
/* we don't mess with cpumasks of tasks in top_cpuset */
}
/* synchronize mems_allowed to N_MEMORY */
if (mems_updated) {
- spin_lock_irq(&callback_lock);
+ raw_spin_lock_irq(&callback_lock);
if (!on_dfl)
top_cpuset.mems_allowed = new_mems;
top_cpuset.effective_mems = new_mems;
- spin_unlock_irq(&callback_lock);
+ raw_spin_unlock_irq(&callback_lock);
update_tasks_nodemask(&top_cpuset);
}
@@ -2420,11 +2420,11 @@ void cpuset_cpus_allowed(struct task_str
{
unsigned long flags;
- spin_lock_irqsave(&callback_lock, flags);
+ raw_spin_lock_irqsave(&callback_lock, flags);
rcu_read_lock();
guarantee_online_cpus(task_cs(tsk), pmask);
rcu_read_unlock();
- spin_unlock_irqrestore(&callback_lock, flags);
+ raw_spin_unlock_irqrestore(&callback_lock, flags);
}
void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
@@ -2472,11 +2472,11 @@ nodemask_t cpuset_mems_allowed(struct ta
nodemask_t mask;
unsigned long flags;
- spin_lock_irqsave(&callback_lock, flags);
+ raw_spin_lock_irqsave(&callback_lock, flags);
rcu_read_lock();
guarantee_online_mems(task_cs(tsk), &mask);
rcu_read_unlock();
- spin_unlock_irqrestore(&callback_lock, flags);
+ raw_spin_unlock_irqrestore(&callback_lock, flags);
return mask;
}
@@ -2568,14 +2568,14 @@ bool __cpuset_node_allowed(int node, gfp
return true;
/* Not hardwall and node outside mems_allowed: scan up cpusets */
- spin_lock_irqsave(&callback_lock, flags);
+ raw_spin_lock_irqsave(&callback_lock, flags);
rcu_read_lock();
cs = nearest_hardwall_ancestor(task_cs(current));
allowed = node_isset(node, cs->mems_allowed);
rcu_read_unlock();
- spin_unlock_irqrestore(&callback_lock, flags);
+ raw_spin_unlock_irqrestore(&callback_lock, flags);
return allowed;
}
Thread overview: 21+ messages
2016-12-23 16:32 [ANNOUNCE] v4.9-rt1 Sebastian Andrzej Siewior
2016-12-26 6:54 ` [patch-rt] kvm: Convert pvclock_gtod_sync_lock to raw_spinlock_t Mike Galbraith
2017-01-20 16:44 ` Sebastian Andrzej Siewior
2017-01-20 17:32 ` Mike Galbraith
2016-12-26 7:00 ` [rfc patch-rt] posix_cpu_timers: Kill hotplug cpu notifier Mike Galbraith
2017-01-20 16:46 ` Sebastian Andrzej Siewior
2017-01-20 17:29 ` Mike Galbraith
2017-01-20 17:34 ` Sebastian Andrzej Siewior
2017-01-20 17:56 ` Mike Galbraith
2016-12-31 8:20 ` [patch-rt] softirq: Move ksoftirqd_running() under !CONFIG_PREEMPT_RT_FULL Mike Galbraith
2017-01-20 17:21 ` Sebastian Andrzej Siewior
2017-01-06 8:13 ` [rfc patch-rt] radix-tree: Partially disable memcg accounting in radix_tree_node_alloc() Mike Galbraith
2017-01-06 8:28 ` Mike Galbraith
2017-01-06 8:55 ` Michal Hocko
2017-01-06 10:52 ` Mike Galbraith
2017-01-06 12:20 ` Mike Galbraith
2017-01-06 12:44 ` Mike Galbraith
2017-01-25 15:06 ` Sebastian Andrzej Siewior
2017-01-26 3:42 ` Mike Galbraith
2017-01-08 8:32 ` Mike Galbraith [this message]
2017-01-25 15:45 ` [patch-rt] cpuset: Convert callback_lock to raw_spinlock_t Sebastian Andrzej Siewior