From: "Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com>
To: tglx@linutronix.de, peterz@infradead.org, tj@kernel.org,
oleg@redhat.com, paulmck@linux.vnet.ibm.com,
rusty@rustcorp.com.au, mingo@kernel.org,
akpm@linux-foundation.org, namhyung@kernel.org,
walken@google.com, vincent.guittot@linaro.org,
laijs@cn.fujitsu.com
Cc: linux-arch@vger.kernel.org, Alex Shi <alex.shi@intel.com>,
nikunj@linux.vnet.ibm.com, zhong@linux.vnet.ibm.com,
linux-pm@vger.kernel.org, fweisbec@gmail.com,
Rusty Russell <rusty@rustcorp.com.au>,
linux-kernel@vger.kernel.org, rostedt@goodmis.org,
xiaoguangrong@linux.vnet.ibm.com, sbw@mit.edu,
Joonsoo Kim <js1304@gmail.com>,
wangyun@linux.vnet.ibm.com, srivatsa.bhat@linux.vnet.ibm.com,
netdev@vger.kernel.org, Tejun Heo <tj@kernel.org>,
Andrew Morton <akpm@linux-foundation.org>,
KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>,
linuxppc-dev@lists.ozlabs.org
Subject: [PATCH 06/45] CPU hotplug: Sprinkle debugging checks to catch locking bugs
Date: Sun, 23 Jun 2013 19:09:15 +0530 [thread overview]
Message-ID: <20130623133909.19094.18155.stgit@srivatsabhat.in.ibm.com> (raw)
In-Reply-To: <20130623133642.19094.16038.stgit@srivatsabhat.in.ibm.com>
Now that we have a debug infrastructure in place to detect cases where
get/put_online_cpus_atomic() should have been used, add these checks at the
right spots to help catch places where we missed converting to the new
APIs.
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Alex Shi <alex.shi@intel.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Joonsoo Kim <js1304@gmail.com>
Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
---
include/linux/cpumask.h | 47 +++++++++++++++++++++++++++++++++++++++++++++--
lib/cpumask.c | 8 ++++++++
2 files changed, 53 insertions(+), 2 deletions(-)
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 9197ca4..06d2c36 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -169,6 +169,7 @@ static inline unsigned int cpumask_any_but(const struct cpumask *mask,
*/
static inline unsigned int cpumask_first(const struct cpumask *srcp)
{
+ check_hotplug_safe_cpumask(srcp);
return find_first_bit(cpumask_bits(srcp), nr_cpumask_bits);
}
@@ -184,6 +185,8 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
/* -1 is a legal arg here. */
if (n != -1)
cpumask_check(n);
+
+ check_hotplug_safe_cpumask(srcp);
return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
}
@@ -199,6 +202,8 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
/* -1 is a legal arg here. */
if (n != -1)
cpumask_check(n);
+
+ check_hotplug_safe_cpumask(srcp);
return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
}
@@ -288,8 +293,15 @@ static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
*
* No static inline type checking - see Subtlety (1) above.
*/
-#define cpumask_test_cpu(cpu, cpumask) \
- test_bit(cpumask_check(cpu), cpumask_bits((cpumask)))
+#define cpumask_test_cpu(cpu, cpumask) \
+({ \
+ int __ret; \
+ \
+ check_hotplug_safe_cpu(cpu, cpumask); \
+ __ret = test_bit(cpumask_check(cpu), \
+ cpumask_bits((cpumask))); \
+ __ret; \
+})
/**
* cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask
@@ -349,6 +361,9 @@ static inline int cpumask_and(struct cpumask *dstp,
const struct cpumask *src1p,
const struct cpumask *src2p)
{
+ check_hotplug_safe_cpumask(src1p);
+ check_hotplug_safe_cpumask(src2p);
+
return bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p),
cpumask_bits(src2p), nr_cpumask_bits);
}
@@ -362,6 +377,9 @@ static inline int cpumask_and(struct cpumask *dstp,
static inline void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
const struct cpumask *src2p)
{
+ check_hotplug_safe_cpumask(src1p);
+ check_hotplug_safe_cpumask(src2p);
+
bitmap_or(cpumask_bits(dstp), cpumask_bits(src1p),
cpumask_bits(src2p), nr_cpumask_bits);
}
@@ -376,6 +394,9 @@ static inline void cpumask_xor(struct cpumask *dstp,
const struct cpumask *src1p,
const struct cpumask *src2p)
{
+ check_hotplug_safe_cpumask(src1p);
+ check_hotplug_safe_cpumask(src2p);
+
bitmap_xor(cpumask_bits(dstp), cpumask_bits(src1p),
cpumask_bits(src2p), nr_cpumask_bits);
}
@@ -392,6 +413,9 @@ static inline int cpumask_andnot(struct cpumask *dstp,
const struct cpumask *src1p,
const struct cpumask *src2p)
{
+ check_hotplug_safe_cpumask(src1p);
+ check_hotplug_safe_cpumask(src2p);
+
return bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p),
cpumask_bits(src2p), nr_cpumask_bits);
}
@@ -404,6 +428,8 @@ static inline int cpumask_andnot(struct cpumask *dstp,
static inline void cpumask_complement(struct cpumask *dstp,
const struct cpumask *srcp)
{
+ check_hotplug_safe_cpumask(srcp);
+
bitmap_complement(cpumask_bits(dstp), cpumask_bits(srcp),
nr_cpumask_bits);
}
@@ -416,6 +442,9 @@ static inline void cpumask_complement(struct cpumask *dstp,
static inline bool cpumask_equal(const struct cpumask *src1p,
const struct cpumask *src2p)
{
+ check_hotplug_safe_cpumask(src1p);
+ check_hotplug_safe_cpumask(src2p);
+
return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p),
nr_cpumask_bits);
}
@@ -428,6 +457,10 @@ static inline bool cpumask_equal(const struct cpumask *src1p,
static inline bool cpumask_intersects(const struct cpumask *src1p,
const struct cpumask *src2p)
{
+
+ check_hotplug_safe_cpumask(src1p);
+ check_hotplug_safe_cpumask(src2p);
+
return bitmap_intersects(cpumask_bits(src1p), cpumask_bits(src2p),
nr_cpumask_bits);
}
@@ -442,6 +475,9 @@ static inline bool cpumask_intersects(const struct cpumask *src1p,
static inline int cpumask_subset(const struct cpumask *src1p,
const struct cpumask *src2p)
{
+ check_hotplug_safe_cpumask(src1p);
+ check_hotplug_safe_cpumask(src2p);
+
return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p),
nr_cpumask_bits);
}
@@ -470,6 +506,12 @@ static inline bool cpumask_full(const struct cpumask *srcp)
*/
static inline unsigned int cpumask_weight(const struct cpumask *srcp)
{
+ /*
+ * Often, we just want to have a rough estimate of the number of
+ * online CPUs, without going to the trouble of synchronizing with
+ * CPU hotplug. So don't invoke check_hotplug_safe_cpumask() here.
+ */
+
return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits);
}
@@ -507,6 +549,7 @@ static inline void cpumask_shift_left(struct cpumask *dstp,
static inline void cpumask_copy(struct cpumask *dstp,
const struct cpumask *srcp)
{
+ check_hotplug_safe_cpumask(srcp);
bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), nr_cpumask_bits);
}
diff --git a/lib/cpumask.c b/lib/cpumask.c
index d327b87..481df57 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -7,12 +7,14 @@
int __first_cpu(const cpumask_t *srcp)
{
+ check_hotplug_safe_cpumask(srcp);
return min_t(int, NR_CPUS, find_first_bit(srcp->bits, NR_CPUS));
}
EXPORT_SYMBOL(__first_cpu);
int __next_cpu(int n, const cpumask_t *srcp)
{
+ check_hotplug_safe_cpumask(srcp);
return min_t(int, NR_CPUS, find_next_bit(srcp->bits, NR_CPUS, n+1));
}
EXPORT_SYMBOL(__next_cpu);
@@ -20,6 +22,7 @@ EXPORT_SYMBOL(__next_cpu);
#if NR_CPUS > 64
int __next_cpu_nr(int n, const cpumask_t *srcp)
{
+ check_hotplug_safe_cpumask(srcp);
return min_t(int, nr_cpu_ids,
find_next_bit(srcp->bits, nr_cpu_ids, n+1));
}
@@ -37,6 +40,9 @@ EXPORT_SYMBOL(__next_cpu_nr);
int cpumask_next_and(int n, const struct cpumask *src1p,
const struct cpumask *src2p)
{
+ check_hotplug_safe_cpumask(src1p);
+ check_hotplug_safe_cpumask(src2p);
+
while ((n = cpumask_next(n, src1p)) < nr_cpu_ids)
if (cpumask_test_cpu(n, src2p))
break;
@@ -57,6 +63,8 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
unsigned int i;
cpumask_check(cpu);
+ check_hotplug_safe_cpumask(mask);
+
for_each_cpu(i, mask)
if (i != cpu)
break;
next prev parent reply other threads:[~2013-06-23 13:42 UTC|newest]
Thread overview: 67+ messages / expand[flat|nested] mbox.gz Atom feed top
2013-06-23 13:37 [PATCH 00/45] CPU hotplug: stop_machine()-free CPU hotplug, part 1 Srivatsa S. Bhat
2013-06-23 13:38 ` [PATCH 01/45] CPU hotplug: Provide APIs to prevent CPU offline from atomic context Srivatsa S. Bhat
2013-06-24 22:49 ` Steven Rostedt
2013-06-23 13:38 ` [PATCH 02/45] CPU hotplug: Clarify the usage of different synchronization APIs Srivatsa S. Bhat
2013-06-23 13:38 ` [PATCH 03/45] Documentation, CPU hotplug: Recommend usage of get/put_online_cpus_atomic() Srivatsa S. Bhat
2013-06-23 13:38 ` [PATCH 04/45] CPU hotplug: Add infrastructure to check lacking hotplug synchronization Srivatsa S. Bhat
2013-06-24 23:26 ` Steven Rostedt
2013-06-25 18:49 ` Srivatsa S. Bhat
2013-06-23 13:39 ` [PATCH 05/45] CPU hotplug: Protect set_cpu_online() to avoid false-positives Srivatsa S. Bhat
2013-06-23 13:39 ` Srivatsa S. Bhat [this message]
2013-06-23 13:39 ` [PATCH 07/45] CPU hotplug: Expose the new debug config option Srivatsa S. Bhat
2013-06-23 15:08 ` Sergei Shtylyov
2013-06-23 18:58 ` Srivatsa S. Bhat
2013-06-23 13:39 ` [PATCH 08/45] CPU hotplug: Convert preprocessor macros to static inline functions Srivatsa S. Bhat
2013-06-23 13:40 ` [PATCH 09/45] smp: Use get/put_online_cpus_atomic() to prevent CPU offline Srivatsa S. Bhat
2013-06-23 13:40 ` [PATCH 10/45] sched/core: " Srivatsa S. Bhat
2013-06-23 13:40 ` [PATCH 11/45] migration: Use raw_spin_lock/unlock since interrupts are already disabled Srivatsa S. Bhat
2013-06-23 13:40 ` [PATCH 12/45] sched/fair: Use get/put_online_cpus_atomic() to prevent CPU offline Srivatsa S. Bhat
2013-06-23 13:40 ` [PATCH 13/45] timer: " Srivatsa S. Bhat
2013-06-23 13:41 ` [PATCH 14/45] sched/rt: " Srivatsa S. Bhat
2013-06-23 13:41 ` [PATCH 15/45] rcu: " Srivatsa S. Bhat
2013-06-23 13:41 ` [PATCH 16/45] tick-broadcast: " Srivatsa S. Bhat
2013-06-23 13:41 ` [PATCH 17/45] time/clocksource: " Srivatsa S. Bhat
2013-06-23 13:42 ` [PATCH 18/45] softirq: " Srivatsa S. Bhat
2013-06-23 13:42 ` [PATCH 19/45] irq: " Srivatsa S. Bhat
2013-06-23 13:42 ` [PATCH 20/45] net: " Srivatsa S. Bhat
2013-06-23 13:42 ` [PATCH 21/45] block: " Srivatsa S. Bhat
2013-06-23 13:42 ` [PATCH 22/45] percpu_counter: " Srivatsa S. Bhat
2013-06-24 17:55 ` Tejun Heo
2013-06-24 18:06 ` Tejun Heo
2013-06-24 18:09 ` Srivatsa S. Bhat
2013-06-23 13:43 ` [PATCH 23/45] infiniband: ehca: " Srivatsa S. Bhat
2013-06-23 13:43 ` [PATCH 24/45] [SCSI] fcoe: " Srivatsa S. Bhat
2013-06-23 13:43 ` [PATCH 25/45] staging/octeon: " Srivatsa S. Bhat
2013-06-23 18:17 ` Greg Kroah-Hartman
2013-06-23 18:55 ` Srivatsa S. Bhat
2013-06-23 19:17 ` Joe Perches
2013-06-24 17:25 ` Srivatsa S. Bhat
2013-06-24 18:17 ` David Daney
2013-06-23 13:43 ` [PATCH 26/45] x86: " Srivatsa S. Bhat
2013-06-23 13:43 ` [PATCH 27/45] perf/x86: " Srivatsa S. Bhat
2013-06-23 13:44 ` [PATCH 28/45] KVM: " Srivatsa S. Bhat
2013-06-23 13:44 ` [PATCH 29/45] kvm/vmx: " Srivatsa S. Bhat
2013-06-23 13:44 ` [PATCH 30/45] x86/xen: " Srivatsa S. Bhat
2013-06-23 13:45 ` [PATCH 31/45] alpha/smp: " Srivatsa S. Bhat
2013-06-23 17:50 ` Matt Turner
2013-06-23 18:56 ` Srivatsa S. Bhat
2013-06-23 13:45 ` [PATCH 32/45] blackfin/smp: " Srivatsa S. Bhat
2013-06-23 13:45 ` [PATCH 33/45] cris/smp: " Srivatsa S. Bhat
2013-06-24 6:41 ` Jesper Nilsson
2013-06-23 13:45 ` [PATCH 34/45] hexagon/smp: " Srivatsa S. Bhat
2013-06-23 13:45 ` [PATCH 35/45] ia64: irq, perfmon: " Srivatsa S. Bhat
2013-06-23 13:46 ` [PATCH 36/45] ia64: smp, tlb: " Srivatsa S. Bhat
2013-06-23 13:46 ` [PATCH 37/45] m32r: " Srivatsa S. Bhat
2013-06-23 13:46 ` [PATCH 38/45] MIPS: " Srivatsa S. Bhat
2013-06-23 13:46 ` [PATCH 39/45] mn10300: " Srivatsa S. Bhat
2013-06-23 13:47 ` [PATCH 40/45] powerpc, irq: Use GFP_ATOMIC allocations in atomic context Srivatsa S. Bhat
2013-06-25 2:08 ` Michael Ellerman
2013-06-25 2:13 ` Benjamin Herrenschmidt
2013-06-25 2:58 ` Michael Ellerman
2013-06-25 3:13 ` Benjamin Herrenschmidt
2013-06-25 19:20 ` Srivatsa S. Bhat
2013-06-23 13:47 ` [PATCH 41/45] powerpc: Use get/put_online_cpus_atomic() to prevent CPU offline Srivatsa S. Bhat
2013-06-23 13:47 ` [PATCH 42/45] powerpc: Use get/put_online_cpus_atomic() to avoid false-positive warning Srivatsa S. Bhat
2013-06-23 13:47 ` [PATCH 43/45] sh: Use get/put_online_cpus_atomic() to prevent CPU offline Srivatsa S. Bhat
2013-06-23 13:48 ` [PATCH 44/45] sparc: " Srivatsa S. Bhat
2013-06-23 13:48 ` [PATCH 45/45] tile: " Srivatsa S. Bhat
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20130623133909.19094.18155.stgit@srivatsabhat.in.ibm.com \
--to=srivatsa.bhat@linux.vnet.ibm.com \
--cc=akpm@linux-foundation.org \
--cc=alex.shi@intel.com \
--cc=fweisbec@gmail.com \
--cc=js1304@gmail.com \
--cc=kosaki.motohiro@jp.fujitsu.com \
--cc=laijs@cn.fujitsu.com \
--cc=linux-arch@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-pm@vger.kernel.org \
--cc=linuxppc-dev@lists.ozlabs.org \
--cc=mingo@kernel.org \
--cc=namhyung@kernel.org \
--cc=netdev@vger.kernel.org \
--cc=nikunj@linux.vnet.ibm.com \
--cc=oleg@redhat.com \
--cc=paulmck@linux.vnet.ibm.com \
--cc=peterz@infradead.org \
--cc=rostedt@goodmis.org \
--cc=rusty@rustcorp.com.au \
--cc=sbw@mit.edu \
--cc=tglx@linutronix.de \
--cc=tj@kernel.org \
--cc=vincent.guittot@linaro.org \
--cc=walken@google.com \
--cc=wangyun@linux.vnet.ibm.com \
--cc=xiaoguangrong@linux.vnet.ibm.com \
--cc=zhong@linux.vnet.ibm.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).