linuxppc-dev.lists.ozlabs.org archive mirror
 help / color / mirror / Atom feed
From: "Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com>
To: tglx@linutronix.de, peterz@infradead.org, tj@kernel.org,
	oleg@redhat.com, paulmck@linux.vnet.ibm.com,
	rusty@rustcorp.com.au, mingo@kernel.org,
	akpm@linux-foundation.org, namhyung@kernel.org,
	walken@google.com, vincent.guittot@linaro.org,
	laijs@cn.fujitsu.com
Cc: linux-arch@vger.kernel.org, Fenghua Yu <fenghua.yu@intel.com>,
	Tony Luck <tony.luck@intel.com>,
	linux-ia64@vger.kernel.org, nikunj@linux.vnet.ibm.com,
	zhong@linux.vnet.ibm.com, linux-pm@vger.kernel.org,
	fweisbec@gmail.com, linux-kernel@vger.kernel.org,
	rostedt@goodmis.org, xiaoguangrong@linux.vnet.ibm.com,
	sbw@mit.edu, wangyun@linux.vnet.ibm.com,
	srivatsa.bhat@linux.vnet.ibm.com, netdev@vger.kernel.org,
	Andrew Morton <akpm@linux-foundation.org>,
	linuxppc-dev@lists.ozlabs.org,
	Thomas Gleixner <tglx@linutronix.de>,
	"Eric W. Biederman" <ebiederm@xmission.com>
Subject: [PATCH 35/45] ia64: irq, perfmon: Use get/put_online_cpus_atomic() to prevent CPU offline
Date: Sun, 23 Jun 2013 19:15:59 +0530	[thread overview]
Message-ID: <20130623134555.19094.5855.stgit@srivatsabhat.in.ibm.com> (raw)
In-Reply-To: <20130623133642.19094.16038.stgit@srivatsabhat.in.ibm.com>

Once stop_machine() is gone from the CPU offline path, we won't be able
to depend on disabling preemption to prevent CPUs from going offline
from under us.

Use the get/put_online_cpus_atomic() APIs to prevent CPUs from going
offline while these operations are invoked from atomic context.

Cc: Tony Luck <tony.luck@intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-ia64@vger.kernel.org
Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
---

 arch/ia64/kernel/irq_ia64.c |   15 +++++++++++++++
 arch/ia64/kernel/perfmon.c  |    8 +++++++-
 2 files changed, 22 insertions(+), 1 deletion(-)

diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 1034884..f58b162 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -25,6 +25,7 @@
 #include <linux/ptrace.h>
 #include <linux/signal.h>
 #include <linux/smp.h>
+#include <linux/cpu.h>
 #include <linux/threads.h>
 #include <linux/bitops.h>
 #include <linux/irq.h>
@@ -160,9 +161,11 @@ int bind_irq_vector(int irq, int vector, cpumask_t domain)
 	unsigned long flags;
 	int ret;
 
+	get_online_cpus_atomic();
 	spin_lock_irqsave(&vector_lock, flags);
 	ret = __bind_irq_vector(irq, vector, domain);
 	spin_unlock_irqrestore(&vector_lock, flags);
+	put_online_cpus_atomic();
 	return ret;
 }
 
@@ -190,9 +193,11 @@ static void clear_irq_vector(int irq)
 {
 	unsigned long flags;
 
+	get_online_cpus_atomic();
 	spin_lock_irqsave(&vector_lock, flags);
 	__clear_irq_vector(irq);
 	spin_unlock_irqrestore(&vector_lock, flags);
+	put_online_cpus_atomic();
 }
 
 int
@@ -204,6 +209,7 @@ ia64_native_assign_irq_vector (int irq)
 
 	vector = -ENOSPC;
 
+	get_online_cpus_atomic();
 	spin_lock_irqsave(&vector_lock, flags);
 	for_each_online_cpu(cpu) {
 		domain = vector_allocation_domain(cpu);
@@ -218,6 +224,7 @@ ia64_native_assign_irq_vector (int irq)
 	BUG_ON(__bind_irq_vector(irq, vector, domain));
  out:
 	spin_unlock_irqrestore(&vector_lock, flags);
+	put_online_cpus_atomic();
 	return vector;
 }
 
@@ -302,9 +309,11 @@ int irq_prepare_move(int irq, int cpu)
 	unsigned long flags;
 	int ret;
 
+	get_online_cpus_atomic();
 	spin_lock_irqsave(&vector_lock, flags);
 	ret = __irq_prepare_move(irq, cpu);
 	spin_unlock_irqrestore(&vector_lock, flags);
+	put_online_cpus_atomic();
 	return ret;
 }
 
@@ -320,11 +329,13 @@ void irq_complete_move(unsigned irq)
 	if (unlikely(cpu_isset(smp_processor_id(), cfg->old_domain)))
 		return;
 
+	get_online_cpus_atomic();
 	cpumask_and(&cleanup_mask, &cfg->old_domain, cpu_online_mask);
 	cfg->move_cleanup_count = cpus_weight(cleanup_mask);
 	for_each_cpu_mask(i, cleanup_mask)
 		platform_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
 	cfg->move_in_progress = 0;
+	put_online_cpus_atomic();
 }
 
 static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
@@ -393,10 +404,12 @@ void destroy_and_reserve_irq(unsigned int irq)
 
 	dynamic_irq_cleanup(irq);
 
+	get_online_cpus_atomic();
 	spin_lock_irqsave(&vector_lock, flags);
 	__clear_irq_vector(irq);
 	irq_status[irq] = IRQ_RSVD;
 	spin_unlock_irqrestore(&vector_lock, flags);
+	put_online_cpus_atomic();
 }
 
 /*
@@ -409,6 +422,7 @@ int create_irq(void)
 	cpumask_t domain = CPU_MASK_NONE;
 
 	irq = vector = -ENOSPC;
+	get_online_cpus_atomic();
 	spin_lock_irqsave(&vector_lock, flags);
 	for_each_online_cpu(cpu) {
 		domain = vector_allocation_domain(cpu);
@@ -424,6 +438,7 @@ int create_irq(void)
 	BUG_ON(__bind_irq_vector(irq, vector, domain));
  out:
 	spin_unlock_irqrestore(&vector_lock, flags);
+	put_online_cpus_atomic();
 	if (irq >= 0)
 		dynamic_irq_init(irq);
 	return irq;
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 9ea25fc..16c8303 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -6476,9 +6476,12 @@ pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
 	/* do the easy test first */
 	if (pfm_alt_intr_handler) return -EBUSY;
 
+	get_online_cpus_atomic();
+
 	/* one at a time in the install or remove, just fail the others */
 	if (!spin_trylock(&pfm_alt_install_check)) {
-		return -EBUSY;
+		ret = -EBUSY;
+		goto out;
 	}
 
 	/* reserve our session */
@@ -6498,6 +6501,7 @@ pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
 	pfm_alt_intr_handler = hdl;
 
 	spin_unlock(&pfm_alt_install_check);
+	put_online_cpus_atomic();
 
 	return 0;
 
@@ -6510,6 +6514,8 @@ cleanup_reserve:
 	}
 
 	spin_unlock(&pfm_alt_install_check);
+out:
+	put_online_cpus_atomic();
 
 	return ret;
 }

  parent reply	other threads:[~2013-06-23 13:49 UTC|newest]

Thread overview: 67+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2013-06-23 13:37 [PATCH 00/45] CPU hotplug: stop_machine()-free CPU hotplug, part 1 Srivatsa S. Bhat
2013-06-23 13:38 ` [PATCH 01/45] CPU hotplug: Provide APIs to prevent CPU offline from atomic context Srivatsa S. Bhat
2013-06-24 22:49   ` Steven Rostedt
2013-06-23 13:38 ` [PATCH 02/45] CPU hotplug: Clarify the usage of different synchronization APIs Srivatsa S. Bhat
2013-06-23 13:38 ` [PATCH 03/45] Documentation, CPU hotplug: Recommend usage of get/put_online_cpus_atomic() Srivatsa S. Bhat
2013-06-23 13:38 ` [PATCH 04/45] CPU hotplug: Add infrastructure to check lacking hotplug synchronization Srivatsa S. Bhat
2013-06-24 23:26   ` Steven Rostedt
2013-06-25 18:49     ` Srivatsa S. Bhat
2013-06-23 13:39 ` [PATCH 05/45] CPU hotplug: Protect set_cpu_online() to avoid false-positives Srivatsa S. Bhat
2013-06-23 13:39 ` [PATCH 06/45] CPU hotplug: Sprinkle debugging checks to catch locking bugs Srivatsa S. Bhat
2013-06-23 13:39 ` [PATCH 07/45] CPU hotplug: Expose the new debug config option Srivatsa S. Bhat
2013-06-23 15:08   ` Sergei Shtylyov
2013-06-23 18:58     ` Srivatsa S. Bhat
2013-06-23 13:39 ` [PATCH 08/45] CPU hotplug: Convert preprocessor macros to static inline functions Srivatsa S. Bhat
2013-06-23 13:40 ` [PATCH 09/45] smp: Use get/put_online_cpus_atomic() to prevent CPU offline Srivatsa S. Bhat
2013-06-23 13:40 ` [PATCH 10/45] sched/core: " Srivatsa S. Bhat
2013-06-23 13:40 ` [PATCH 11/45] migration: Use raw_spin_lock/unlock since interrupts are already disabled Srivatsa S. Bhat
2013-06-23 13:40 ` [PATCH 12/45] sched/fair: Use get/put_online_cpus_atomic() to prevent CPU offline Srivatsa S. Bhat
2013-06-23 13:40 ` [PATCH 13/45] timer: " Srivatsa S. Bhat
2013-06-23 13:41 ` [PATCH 14/45] sched/rt: " Srivatsa S. Bhat
2013-06-23 13:41 ` [PATCH 15/45] rcu: " Srivatsa S. Bhat
2013-06-23 13:41 ` [PATCH 16/45] tick-broadcast: " Srivatsa S. Bhat
2013-06-23 13:41 ` [PATCH 17/45] time/clocksource: " Srivatsa S. Bhat
2013-06-23 13:42 ` [PATCH 18/45] softirq: " Srivatsa S. Bhat
2013-06-23 13:42 ` [PATCH 19/45] irq: " Srivatsa S. Bhat
2013-06-23 13:42 ` [PATCH 20/45] net: " Srivatsa S. Bhat
2013-06-23 13:42 ` [PATCH 21/45] block: " Srivatsa S. Bhat
2013-06-23 13:42 ` [PATCH 22/45] percpu_counter: " Srivatsa S. Bhat
2013-06-24 17:55   ` Tejun Heo
2013-06-24 18:06     ` Tejun Heo
2013-06-24 18:09       ` Srivatsa S. Bhat
2013-06-23 13:43 ` [PATCH 23/45] infiniband: ehca: " Srivatsa S. Bhat
2013-06-23 13:43 ` [PATCH 24/45] [SCSI] fcoe: " Srivatsa S. Bhat
2013-06-23 13:43 ` [PATCH 25/45] staging/octeon: " Srivatsa S. Bhat
2013-06-23 18:17   ` Greg Kroah-Hartman
2013-06-23 18:55     ` Srivatsa S. Bhat
2013-06-23 19:17       ` Joe Perches
2013-06-24 17:25         ` Srivatsa S. Bhat
2013-06-24 18:17       ` David Daney
2013-06-23 13:43 ` [PATCH 26/45] x86: " Srivatsa S. Bhat
2013-06-23 13:43 ` [PATCH 27/45] perf/x86: " Srivatsa S. Bhat
2013-06-23 13:44 ` [PATCH 28/45] KVM: " Srivatsa S. Bhat
2013-06-23 13:44 ` [PATCH 29/45] kvm/vmx: " Srivatsa S. Bhat
2013-06-23 13:44 ` [PATCH 30/45] x86/xen: " Srivatsa S. Bhat
2013-06-23 13:45 ` [PATCH 31/45] alpha/smp: " Srivatsa S. Bhat
2013-06-23 17:50   ` Matt Turner
2013-06-23 18:56     ` Srivatsa S. Bhat
2013-06-23 13:45 ` [PATCH 32/45] blackfin/smp: " Srivatsa S. Bhat
2013-06-23 13:45 ` [PATCH 33/45] cris/smp: " Srivatsa S. Bhat
2013-06-24  6:41   ` Jesper Nilsson
2013-06-23 13:45 ` [PATCH 34/45] hexagon/smp: " Srivatsa S. Bhat
2013-06-23 13:45 ` Srivatsa S. Bhat [this message]
2013-06-23 13:46 ` [PATCH 36/45] ia64: smp, tlb: " Srivatsa S. Bhat
2013-06-23 13:46 ` [PATCH 37/45] m32r: " Srivatsa S. Bhat
2013-06-23 13:46 ` [PATCH 38/45] MIPS: " Srivatsa S. Bhat
2013-06-23 13:46 ` [PATCH 39/45] mn10300: " Srivatsa S. Bhat
2013-06-23 13:47 ` [PATCH 40/45] powerpc, irq: Use GFP_ATOMIC allocations in atomic context Srivatsa S. Bhat
2013-06-25  2:08   ` Michael Ellerman
2013-06-25  2:13     ` Benjamin Herrenschmidt
2013-06-25  2:58       ` Michael Ellerman
2013-06-25  3:13         ` Benjamin Herrenschmidt
2013-06-25 19:20           ` Srivatsa S. Bhat
2013-06-23 13:47 ` [PATCH 41/45] powerpc: Use get/put_online_cpus_atomic() to prevent CPU offline Srivatsa S. Bhat
2013-06-23 13:47 ` [PATCH 42/45] powerpc: Use get/put_online_cpus_atomic() to avoid false-positive warning Srivatsa S. Bhat
2013-06-23 13:47 ` [PATCH 43/45] sh: Use get/put_online_cpus_atomic() to prevent CPU offline Srivatsa S. Bhat
2013-06-23 13:48 ` [PATCH 44/45] sparc: " Srivatsa S. Bhat
2013-06-23 13:48 ` [PATCH 45/45] tile: " Srivatsa S. Bhat

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20130623134555.19094.5855.stgit@srivatsabhat.in.ibm.com \
    --to=srivatsa.bhat@linux.vnet.ibm.com \
    --cc=akpm@linux-foundation.org \
    --cc=ebiederm@xmission.com \
    --cc=fenghua.yu@intel.com \
    --cc=fweisbec@gmail.com \
    --cc=laijs@cn.fujitsu.com \
    --cc=linux-arch@vger.kernel.org \
    --cc=linux-ia64@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-pm@vger.kernel.org \
    --cc=linuxppc-dev@lists.ozlabs.org \
    --cc=mingo@kernel.org \
    --cc=namhyung@kernel.org \
    --cc=netdev@vger.kernel.org \
    --cc=nikunj@linux.vnet.ibm.com \
    --cc=oleg@redhat.com \
    --cc=paulmck@linux.vnet.ibm.com \
    --cc=peterz@infradead.org \
    --cc=rostedt@goodmis.org \
    --cc=rusty@rustcorp.com.au \
    --cc=sbw@mit.edu \
    --cc=tglx@linutronix.de \
    --cc=tj@kernel.org \
    --cc=tony.luck@intel.com \
    --cc=vincent.guittot@linaro.org \
    --cc=walken@google.com \
    --cc=wangyun@linux.vnet.ibm.com \
    --cc=xiaoguangrong@linux.vnet.ibm.com \
    --cc=zhong@linux.vnet.ibm.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).