All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 0/7] RFC: __smp_call_function_single improvements
@ 2013-10-24 15:19 Christoph Hellwig
  2013-10-24 15:19 ` [PATCH 1/7] Revert: "softirq: Add support for triggering softirq work on softirqs" Christoph Hellwig
                   ` (6 more replies)
  0 siblings, 7 replies; 13+ messages in thread
From: Christoph Hellwig @ 2013-10-24 15:19 UTC (permalink / raw)
  To: Jens Axboe; +Cc: Andrew Morton, linux-kernel

This series is on top of the blk-mq series Jens just sent out and changes
the generic IPI mechanism to be directly usable for block completions.

Besides making __smp_call_function_single always available, it
changes it to use a lockless list that avoids the overhead of disabling
irqs and taking a lock every time it is called.

^ permalink raw reply	[flat|nested] 13+ messages in thread

* [PATCH 1/7] Revert: "softirq: Add support for triggering softirq work on softirqs"
  2013-10-24 15:19 [PATCH 0/7] RFC: __smp_call_function_single improvements Christoph Hellwig
@ 2013-10-24 15:19 ` Christoph Hellwig
  2013-10-24 15:19 ` [PATCH 2/7] kernel: remove CONFIG_USE_GENERIC_SMP_HELPERS Christoph Hellwig
                   ` (5 subsequent siblings)
  6 siblings, 0 replies; 13+ messages in thread
From: Christoph Hellwig @ 2013-10-24 15:19 UTC (permalink / raw)
  To: Jens Axboe; +Cc: Andrew Morton, linux-kernel

[-- Attachment #1: 0001-Revert-softirq-Add-support-for-triggering-softirq-wo.patch --]
[-- Type: text/plain, Size: 6514 bytes --]

This commit was incomplete in that code to remove items from the per-cpu
lists was missing and never acquired a user in the 5 years it has been
in the tree.  We're going to implement what it seems to try to achieve
in a simpler way, and this code is in the way of doing so.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 include/linux/interrupt.h |   22 --------
 kernel/softirq.c          |  131 ---------------------------------------------
 2 files changed, 153 deletions(-)

diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 5e865b5..8c0d4ba 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -11,8 +11,6 @@
 #include <linux/irqnr.h>
 #include <linux/hardirq.h>
 #include <linux/irqflags.h>
-#include <linux/smp.h>
-#include <linux/percpu.h>
 #include <linux/hrtimer.h>
 #include <linux/kref.h>
 #include <linux/workqueue.h>
@@ -381,15 +379,6 @@ extern void __raise_softirq_irqoff(unsigned int nr);
 extern void raise_softirq_irqoff(unsigned int nr);
 extern void raise_softirq(unsigned int nr);
 
-/* This is the worklist that queues up per-cpu softirq work.
- *
- * send_remote_sendirq() adds work to these lists, and
- * the softirq handler itself dequeues from them.  The queues
- * are protected by disabling local cpu interrupts and they must
- * only be accessed by the local cpu that they are for.
- */
-DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
-
 DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
 
 static inline struct task_struct *this_cpu_ksoftirqd(void)
@@ -397,17 +386,6 @@ static inline struct task_struct *this_cpu_ksoftirqd(void)
 	return this_cpu_read(ksoftirqd);
 }
 
-/* Try to send a softirq to a remote cpu.  If this cannot be done, the
- * work will be queued to the local cpu.
- */
-extern void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq);
-
-/* Like send_remote_softirq(), but the caller must disable local cpu interrupts
- * and compute the current cpu, passed in as 'this_cpu'.
- */
-extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
-				  int this_cpu, int softirq);
-
 /* Tasklets --- multithreaded analogue of BHs.
 
    Main feature differing them of generic softirqs: tasklet
diff --git a/kernel/softirq.c b/kernel/softirq.c
index d7d498d..c587b7f 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -6,8 +6,6 @@
  *	Distribute under GPLv2.
  *
  *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
- *
- *	Remote softirq infrastructure is by Jens Axboe.
  */
 
 #include <linux/export.h>
@@ -618,146 +616,17 @@ void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
 }
 EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
 
-/*
- * Remote softirq bits
- */
-
-DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
-EXPORT_PER_CPU_SYMBOL(softirq_work_list);
-
-static void __local_trigger(struct call_single_data *cp, int softirq)
-{
-	struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);
-
-	list_add_tail(&cp->list, head);
-
-	/* Trigger the softirq only if the list was previously empty.  */
-	if (head->next == &cp->list)
-		raise_softirq_irqoff(softirq);
-}
-
-#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
-static void remote_softirq_receive(void *data)
-{
-	struct call_single_data *cp = data;
-	unsigned long flags;
-	int softirq;
-
-	softirq = *(int *)cp->info;
-	local_irq_save(flags);
-	__local_trigger(cp, softirq);
-	local_irq_restore(flags);
-}
-
-static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
-{
-	if (cpu_online(cpu)) {
-		cp->func = remote_softirq_receive;
-		cp->info = &softirq;
-		cp->flags = 0;
-
-		__smp_call_function_single(cpu, cp, 0);
-		return 0;
-	}
-	return 1;
-}
-#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
-static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
-{
-	return 1;
-}
-#endif
-
-/**
- * __send_remote_softirq - try to schedule softirq work on a remote cpu
- * @cp: private SMP call function data area
- * @cpu: the remote cpu
- * @this_cpu: the currently executing cpu
- * @softirq: the softirq for the work
- *
- * Attempt to schedule softirq work on a remote cpu.  If this cannot be
- * done, the work is instead queued up on the local cpu.
- *
- * Interrupts must be disabled.
- */
-void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
-{
-	if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
-		__local_trigger(cp, softirq);
-}
-EXPORT_SYMBOL(__send_remote_softirq);
-
-/**
- * send_remote_softirq - try to schedule softirq work on a remote cpu
- * @cp: private SMP call function data area
- * @cpu: the remote cpu
- * @softirq: the softirq for the work
- *
- * Like __send_remote_softirq except that disabling interrupts and
- * computing the current cpu is done for the caller.
- */
-void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
-{
-	unsigned long flags;
-	int this_cpu;
-
-	local_irq_save(flags);
-	this_cpu = smp_processor_id();
-	__send_remote_softirq(cp, cpu, this_cpu, softirq);
-	local_irq_restore(flags);
-}
-EXPORT_SYMBOL(send_remote_softirq);
-
-static int remote_softirq_cpu_notify(struct notifier_block *self,
-					       unsigned long action, void *hcpu)
-{
-	/*
-	 * If a CPU goes away, splice its entries to the current CPU
-	 * and trigger a run of the softirq
-	 */
-	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
-		int cpu = (unsigned long) hcpu;
-		int i;
-
-		local_irq_disable();
-		for (i = 0; i < NR_SOFTIRQS; i++) {
-			struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
-			struct list_head *local_head;
-
-			if (list_empty(head))
-				continue;
-
-			local_head = &__get_cpu_var(softirq_work_list[i]);
-			list_splice_init(head, local_head);
-			raise_softirq_irqoff(i);
-		}
-		local_irq_enable();
-	}
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block remote_softirq_cpu_notifier = {
-	.notifier_call	= remote_softirq_cpu_notify,
-};
-
 void __init softirq_init(void)
 {
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
-		int i;
-
 		per_cpu(tasklet_vec, cpu).tail =
 			&per_cpu(tasklet_vec, cpu).head;
 		per_cpu(tasklet_hi_vec, cpu).tail =
 			&per_cpu(tasklet_hi_vec, cpu).head;
-		for (i = 0; i < NR_SOFTIRQS; i++)
-			INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
 	}
 
-	register_hotcpu_notifier(&remote_softirq_cpu_notifier);
-
 	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
 	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
 }
-- 
1.7.10.4



^ permalink raw reply related	[flat|nested] 13+ messages in thread

* [PATCH 2/7] kernel: remove CONFIG_USE_GENERIC_SMP_HELPERS
  2013-10-24 15:19 [PATCH 0/7] RFC: __smp_call_function_single improvements Christoph Hellwig
  2013-10-24 15:19 ` [PATCH 1/7] Revert: "softirq: Add support for triggering softirq work on softirqs" Christoph Hellwig
@ 2013-10-24 15:19 ` Christoph Hellwig
  2013-10-24 15:19 ` [PATCH 3/7] kernel: provide a __smp_call_function_single stub for !CONFIG_SMP Christoph Hellwig
                   ` (4 subsequent siblings)
  6 siblings, 0 replies; 13+ messages in thread
From: Christoph Hellwig @ 2013-10-24 15:19 UTC (permalink / raw)
  To: Jens Axboe; +Cc: Andrew Morton, linux-kernel

[-- Attachment #1: 0002-kernel-remove-CONFIG_USE_GENERIC_SMP_HELPERS.patch --]
[-- Type: text/plain, Size: 11983 bytes --]

We've switched over every architecture that supports SMP to it, so remove
the now-useless config variable.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 arch/Kconfig          |    3 ---
 arch/alpha/Kconfig    |    1 -
 arch/arc/Kconfig      |    1 -
 arch/arm/Kconfig      |    1 -
 arch/arm64/Kconfig    |    1 -
 arch/blackfin/Kconfig |    1 -
 arch/hexagon/Kconfig  |    1 -
 arch/ia64/Kconfig     |    1 -
 arch/m32r/Kconfig     |    1 -
 arch/metag/Kconfig    |    1 -
 arch/mips/Kconfig     |    1 -
 arch/mn10300/Kconfig  |    1 -
 arch/parisc/Kconfig   |    1 -
 arch/powerpc/Kconfig  |    1 -
 arch/s390/Kconfig     |    1 -
 arch/sh/Kconfig       |    1 -
 arch/sparc/Kconfig    |    1 -
 arch/tile/Kconfig     |    1 -
 arch/x86/Kconfig      |    1 -
 block/blk-mq.c        |    4 ++--
 block/blk-softirq.c   |    4 ++--
 block/blk-sysfs.c     |    2 +-
 include/linux/smp.h   |    4 ----
 kernel/Kconfig.hz     |    2 +-
 kernel/smp.c          |    2 --
 25 files changed, 6 insertions(+), 33 deletions(-)

diff --git a/arch/Kconfig b/arch/Kconfig
index af2cc6e..63c53c6 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -207,9 +207,6 @@ config HAVE_DMA_ATTRS
 config HAVE_DMA_CONTIGUOUS
 	bool
 
-config USE_GENERIC_SMP_HELPERS
-	bool
-
 config GENERIC_SMP_IDLE_THREAD
        bool
 
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 35a300d..8d2a483 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -522,7 +522,6 @@ config ARCH_MAY_HAVE_PC_FDC
 config SMP
 	bool "Symmetric multi-processing support"
 	depends on ALPHA_SABLE || ALPHA_LYNX || ALPHA_RAWHIDE || ALPHA_DP264 || ALPHA_WILDFIRE || ALPHA_TITAN || ALPHA_GENERIC || ALPHA_SHARK || ALPHA_MARVEL
-	select USE_GENERIC_SMP_HELPERS
 	---help---
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, like most personal computers, say N. If
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 91dbb27..9c2ddbf 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -119,7 +119,6 @@ config ARC_PLAT_NEEDS_CPU_TO_DMA
 config SMP
 	bool "Symmetric Multi-Processing (Incomplete)"
 	default n
-	select USE_GENERIC_SMP_HELPERS
 	help
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, like most personal computers, say N. If
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 1ad6fb6..a671b58 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1467,7 +1467,6 @@ config SMP
 	depends on GENERIC_CLOCKEVENTS
 	depends on HAVE_SMP
 	depends on MMU || ARM_MPU
-	select USE_GENERIC_SMP_HELPERS
 	help
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, like most personal computers, say N. If
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index c044548..7020613 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -140,7 +140,6 @@ config ARM64_64K_PAGES
 
 config SMP
 	bool "Symmetric Multi-Processing"
-	select USE_GENERIC_SMP_HELPERS
 	help
 	  This enables support for systems with more than one CPU.  If
 	  you say N here, the kernel will run on single and
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index f78c9a2..e06b5d8 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -34,7 +34,6 @@ config BLACKFIN
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select GENERIC_ATOMIC64
 	select GENERIC_IRQ_PROBE
-	select USE_GENERIC_SMP_HELPERS if SMP
 	select HAVE_NMI_WATCHDOG if NMI_WATCHDOG
 	select GENERIC_SMP_IDLE_THREAD
 	select ARCH_USES_GETTIMEOFFSET if !GENERIC_CLOCKEVENTS
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index 99041b0..09df260 100644
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -4,7 +4,6 @@ comment "Linux Kernel Configuration for Hexagon"
 config HEXAGON
 	def_bool y
 	select HAVE_OPROFILE
-	select USE_GENERIC_SMP_HELPERS if SMP
 	# Other pending projects/to-do items.
 	# select HAVE_REGS_AND_STACK_ACCESS_API
 	# select HAVE_HW_BREAKPOINT if PERF_EVENTS
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 7740ab1..dfe85e9 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -343,7 +343,6 @@ config FORCE_MAX_ZONEORDER
 
 config SMP
 	bool "Symmetric multi-processing support"
-	select USE_GENERIC_SMP_HELPERS
 	help
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, say N.  If you have a system with more
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index 75661fb..09ef94a 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -275,7 +275,6 @@ source "kernel/Kconfig.preempt"
 
 config SMP
 	bool "Symmetric multi-processing support"
-	select USE_GENERIC_SMP_HELPERS
 	---help---
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, like most personal computers, say N. If
diff --git a/arch/metag/Kconfig b/arch/metag/Kconfig
index 36368eb..e56abd2 100644
--- a/arch/metag/Kconfig
+++ b/arch/metag/Kconfig
@@ -111,7 +111,6 @@ config METAG_META21
 config SMP
 	bool "Symmetric multi-processing support"
 	depends on METAG_META21 && METAG_META21_MMU
-	select USE_GENERIC_SMP_HELPERS
 	help
 	  This enables support for systems with more than one thread running
 	  Linux. If you have a system with only one thread running Linux,
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index f75ab4a..f745dc3 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2140,7 +2140,6 @@ source "mm/Kconfig"
 config SMP
 	bool "Multi-Processing support"
 	depends on SYS_SUPPORTS_SMP
-	select USE_GENERIC_SMP_HELPERS
 	help
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, like most personal computers, say N. If
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index 6aaa160..8bde923 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -181,7 +181,6 @@ endmenu
 config SMP
 	bool "Symmetric multi-processing support"
 	default y
-	select USE_GENERIC_SMP_HELPERS
 	depends on MN10300_PROC_MN2WS0038 || MN10300_PROC_MN2WS0050
 	---help---
 	  This enables support for systems with more than one CPU. If you have
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index ad2ce8d..0c08dc8 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -226,7 +226,6 @@ endchoice
 
 config SMP
 	bool "Symmetric multi-processing support"
-	select USE_GENERIC_SMP_HELPERS
 	---help---
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, like most personal computers, say N. If
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 38f3b7e..2bc9c49 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -106,7 +106,6 @@ config PPC
 	select HAVE_MEMBLOCK_NODE_MAP
 	select HAVE_DMA_ATTRS
 	select HAVE_DMA_API_DEBUG
-	select USE_GENERIC_SMP_HELPERS if SMP
 	select HAVE_OPROFILE
 	select HAVE_DEBUG_KMEMLEAK
 	select GENERIC_ATOMIC64 if PPC32
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 7143793..8fed3f5 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -140,7 +140,6 @@ config S390
 	select OLD_SIGACTION
 	select OLD_SIGSUSPEND3
 	select SYSCTL_EXCEPTION_TRACE
-	select USE_GENERIC_SMP_HELPERS if SMP
 	select VIRT_CPU_ACCOUNTING
 	select VIRT_TO_BUS
 
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 224f4bc..e78561b 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -711,7 +711,6 @@ config CC_STACKPROTECTOR
 config SMP
 	bool "Symmetric multi-processing support"
 	depends on SYS_SUPPORTS_SMP
-	select USE_GENERIC_SMP_HELPERS
 	---help---
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, like most personal computers, say N. If
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 78c4fdb..8591b20 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -28,7 +28,6 @@ config SPARC
 	select HAVE_ARCH_JUMP_LABEL
 	select GENERIC_IRQ_SHOW
 	select ARCH_WANT_IPC_PARSE_VERSION
-	select USE_GENERIC_SMP_HELPERS if SMP
 	select GENERIC_PCI_IOMAP
 	select HAVE_NMI_WATCHDOG if SPARC64
 	select HAVE_BPF_JIT
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index d45a2c4..b3692ce 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -8,7 +8,6 @@ config TILE
 	select HAVE_KVM if !TILEGX
 	select GENERIC_FIND_FIRST_BIT
 	select SYSCTL_EXCEPTION_TRACE
-	select USE_GENERIC_SMP_HELPERS
 	select CC_OPTIMIZE_FOR_SIZE
 	select HAVE_DEBUG_KMEMLEAK
 	select GENERIC_IRQ_PROBE
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 145d703..f137df2 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -90,7 +90,6 @@ config X86
 	select GENERIC_IRQ_SHOW
 	select GENERIC_CLOCKEVENTS_MIN_ADJUST
 	select IRQ_FORCED_THREADING
-	select USE_GENERIC_SMP_HELPERS if SMP
 	select HAVE_BPF_JIT if X86_64
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE
 	select CLKEVT_I8253
diff --git a/block/blk-mq.c b/block/blk-mq.c
index f21ec96..d43a7e8 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -318,7 +318,7 @@ void __blk_mq_end_io(struct request *rq, int error)
 		blk_mq_complete_request(rq, error);
 }
 
-#if defined(CONFIG_SMP) && defined(CONFIG_USE_GENERIC_SMP_HELPERS)
+#if defined(CONFIG_SMP)
 
 /*
  * Called with interrupts disabled.
@@ -360,7 +360,7 @@ static int ipi_remote_cpu(struct blk_mq_ctx *ctx, const int cpu,
 
 	return true;
 }
-#else /* CONFIG_SMP && CONFIG_USE_GENERIC_SMP_HELPERS */
+#else /* CONFIG_SMP */
 static int ipi_remote_cpu(struct blk_mq_ctx *ctx, const int cpu,
 			  struct request *rq, const int error)
 {
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index ec9e606..ed07d79 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -36,7 +36,7 @@ static void blk_done_softirq(struct softirq_action *h)
 	}
 }
 
-#if defined(CONFIG_SMP) && defined(CONFIG_USE_GENERIC_SMP_HELPERS)
+#ifdef CONFIG_SMP
 static void trigger_softirq(void *data)
 {
 	struct request *rq = data;
@@ -71,7 +71,7 @@ static int raise_blk_irq(int cpu, struct request *rq)
 
 	return 1;
 }
-#else /* CONFIG_SMP && CONFIG_USE_GENERIC_SMP_HELPERS */
+#else /* CONFIG_SMP */
 static int raise_blk_irq(int cpu, struct request *rq)
 {
 	return 1;
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 4f8c4d9..9777952 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -288,7 +288,7 @@ static ssize_t
 queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
 {
 	ssize_t ret = -EINVAL;
-#if defined(CONFIG_USE_GENERIC_SMP_HELPERS)
+#ifdef CONFIG_SMP
 	unsigned long val;
 
 	ret = queue_var_store(&val, page, count);
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 731f523..7885151 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -106,14 +106,10 @@ void kick_all_cpus_sync(void);
 /*
  * Generic and arch helpers
  */
-#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
 void __init call_function_init(void);
 void generic_smp_call_function_single_interrupt(void);
 #define generic_smp_call_function_interrupt \
 	generic_smp_call_function_single_interrupt
-#else
-static inline void call_function_init(void) { }
-#endif
 
 /*
  * Mark the boot cpu "online" so that it can call console drivers in
diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz
index 94fabd5..2a202a8 100644
--- a/kernel/Kconfig.hz
+++ b/kernel/Kconfig.hz
@@ -55,4 +55,4 @@ config HZ
 	default 1000 if HZ_1000
 
 config SCHED_HRTICK
-	def_bool HIGH_RES_TIMERS && (!SMP || USE_GENERIC_SMP_HELPERS)
+	def_bool HIGH_RES_TIMERS
diff --git a/kernel/smp.c b/kernel/smp.c
index bc400e3..2b83645 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -15,7 +15,6 @@
 
 #include "smpboot.h"
 
-#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
 enum {
 	CSD_FLAG_LOCK		= 0x01,
 	CSD_FLAG_WAIT		= 0x02,
@@ -463,7 +462,6 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
 	return 0;
 }
 EXPORT_SYMBOL(smp_call_function);
-#endif /* USE_GENERIC_SMP_HELPERS */
 
 /* Setup configured maximum number of CPUs to activate */
 unsigned int setup_max_cpus = NR_CPUS;
-- 
1.7.10.4



^ permalink raw reply related	[flat|nested] 13+ messages in thread

* [PATCH 3/7] kernel: provide a __smp_call_function_single stub for !CONFIG_SMP
  2013-10-24 15:19 [PATCH 0/7] RFC: __smp_call_function_single improvements Christoph Hellwig
  2013-10-24 15:19 ` [PATCH 1/7] Revert: "softirq: Add support for triggering softirq work on softirqs" Christoph Hellwig
  2013-10-24 15:19 ` [PATCH 2/7] kernel: remove CONFIG_USE_GENERIC_SMP_HELPERS Christoph Hellwig
@ 2013-10-24 15:19 ` Christoph Hellwig
  2013-10-24 15:19 ` [PATCH 4/7] kernel: fix generic_exec_single indication Christoph Hellwig
                   ` (3 subsequent siblings)
  6 siblings, 0 replies; 13+ messages in thread
From: Christoph Hellwig @ 2013-10-24 15:19 UTC (permalink / raw)
  To: Jens Axboe; +Cc: Andrew Morton, linux-kernel

[-- Attachment #1: 0003-kernel-provide-a-__smp_call_function_single-stub-for.patch --]
[-- Type: text/plain, Size: 708 bytes --]

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 kernel/up.c |   11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/kernel/up.c b/kernel/up.c
index 630d72b..509403e 100644
--- a/kernel/up.c
+++ b/kernel/up.c
@@ -22,6 +22,17 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 }
 EXPORT_SYMBOL(smp_call_function_single);
 
+void __smp_call_function_single(int cpu, struct call_single_data *csd,
+				int wait)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	csd->func(csd->info);
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__smp_call_function_single);
+
 int on_each_cpu(smp_call_func_t func, void *info, int wait)
 {
 	unsigned long flags;
-- 
1.7.10.4



^ permalink raw reply related	[flat|nested] 13+ messages in thread

* [PATCH 4/7] kernel: fix generic_exec_single indication
  2013-10-24 15:19 [PATCH 0/7] RFC: __smp_call_function_single improvements Christoph Hellwig
                   ` (2 preceding siblings ...)
  2013-10-24 15:19 ` [PATCH 3/7] kernel: provide a __smp_call_function_single stub for !CONFIG_SMP Christoph Hellwig
@ 2013-10-24 15:19 ` Christoph Hellwig
  2013-10-24 15:19 ` [PATCH 5/7] llists: move llist_reverse_order from raid5 to llist.c Christoph Hellwig
                   ` (2 subsequent siblings)
  6 siblings, 0 replies; 13+ messages in thread
From: Christoph Hellwig @ 2013-10-24 15:19 UTC (permalink / raw)
  To: Jens Axboe; +Cc: Andrew Morton, linux-kernel

[-- Attachment #1: 0004-kernel-fix-generic_exec_single-indication.patch --]
[-- Type: text/plain, Size: 681 bytes --]

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 kernel/smp.c |    3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/kernel/smp.c b/kernel/smp.c
index 2b83645..53644e6 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -139,8 +139,7 @@ static void csd_unlock(struct call_single_data *csd)
  * for execution on the given CPU. data must already have
  * ->func, ->info, and ->flags set.
  */
-static
-void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
+static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
 {
 	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
 	unsigned long flags;
-- 
1.7.10.4



^ permalink raw reply related	[flat|nested] 13+ messages in thread

* [PATCH 5/7] llists: move llist_reverse_order from raid5 to llist.c
  2013-10-24 15:19 [PATCH 0/7] RFC: __smp_call_function_single improvements Christoph Hellwig
                   ` (3 preceding siblings ...)
  2013-10-24 15:19 ` [PATCH 4/7] kernel: fix generic_exec_single indication Christoph Hellwig
@ 2013-10-24 15:19 ` Christoph Hellwig
  2013-10-29 18:58   ` Jan Kara
  2013-10-24 15:19 ` [PATCH 6/7] kernel: use lockless list for smp_call_function_single Christoph Hellwig
  2013-10-24 15:19 ` [PATCH 7/7] blk-mq: use __smp_call_function_single directly Christoph Hellwig
  6 siblings, 1 reply; 13+ messages in thread
From: Christoph Hellwig @ 2013-10-24 15:19 UTC (permalink / raw)
  To: Jens Axboe; +Cc: Andrew Morton, linux-kernel

[-- Attachment #1: 0005-llists-move-llist_reverse_order-from-raid5-to-llist..patch --]
[-- Type: text/plain, Size: 2070 bytes --]

Make this useful helper available for other users.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/md/raid5.c    |   14 --------------
 include/linux/llist.h |    2 ++
 lib/llist.c           |   22 ++++++++++++++++++++++
 3 files changed, 24 insertions(+), 14 deletions(-)

diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 7ff4f25..046f1a9 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -293,20 +293,6 @@ static void __release_stripe(struct r5conf *conf, struct stripe_head *sh)
 		do_release_stripe(conf, sh);
 }
 
-static struct llist_node *llist_reverse_order(struct llist_node *head)
-{
-	struct llist_node *new_head = NULL;
-
-	while (head) {
-		struct llist_node *tmp = head;
-		head = head->next;
-		tmp->next = new_head;
-		new_head = tmp;
-	}
-
-	return new_head;
-}
-
 /* should hold conf->device_lock already */
 static int release_stripe_list(struct r5conf *conf)
 {
diff --git a/include/linux/llist.h b/include/linux/llist.h
index 8828a78..fbf10a0 100644
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -195,4 +195,6 @@ static inline struct llist_node *llist_del_all(struct llist_head *head)
 
 extern struct llist_node *llist_del_first(struct llist_head *head);
 
+struct llist_node *llist_reverse_order(struct llist_node *head);
+
 #endif /* LLIST_H */
diff --git a/lib/llist.c b/lib/llist.c
index 4a70d12..ef48b87 100644
--- a/lib/llist.c
+++ b/lib/llist.c
@@ -81,3 +81,25 @@ struct llist_node *llist_del_first(struct llist_head *head)
 	return entry;
 }
 EXPORT_SYMBOL_GPL(llist_del_first);
+
+/**
+ * llist_reverse_order - reverse order of a llist chain
+ * @head:	first item of the list to be reversed
+ *
+ * Reverse the order of a chain of llist entries and return the
+ * new first entry.
+ */
+struct llist_node *llist_reverse_order(struct llist_node *head)
+{
+	struct llist_node *new_head = NULL;
+
+	while (head) {
+		struct llist_node *tmp = head;
+		head = head->next;
+		tmp->next = new_head;
+		new_head = tmp;
+	}
+
+	return new_head;
+}
+EXPORT_SYMBOL_GPL(llist_reverse_order);
-- 
1.7.10.4



^ permalink raw reply related	[flat|nested] 13+ messages in thread

* [PATCH 6/7] kernel: use lockless list for smp_call_function_single
  2013-10-24 15:19 [PATCH 0/7] RFC: __smp_call_function_single improvements Christoph Hellwig
                   ` (4 preceding siblings ...)
  2013-10-24 15:19 ` [PATCH 5/7] llists: move llist_reverse_order from raid5 to llist.c Christoph Hellwig
@ 2013-10-24 15:19 ` Christoph Hellwig
  2013-10-29 19:25   ` Jan Kara
  2013-10-24 15:19 ` [PATCH 7/7] blk-mq: use __smp_call_function_single directly Christoph Hellwig
  6 siblings, 1 reply; 13+ messages in thread
From: Christoph Hellwig @ 2013-10-24 15:19 UTC (permalink / raw)
  To: Jens Axboe; +Cc: Andrew Morton, linux-kernel

[-- Attachment #1: 0006-kernel-use-lockless-list-for-smp_call_function_singl.patch --]
[-- Type: text/plain, Size: 4814 bytes --]

Make smp_call_function_single and friends more efficient by using
a lockless list.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 include/linux/blkdev.h |    5 +----
 include/linux/smp.h    |    6 +++++-
 kernel/smp.c           |   51 ++++++++++++------------------------------------
 3 files changed, 19 insertions(+), 43 deletions(-)

diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index f26ec20f..287bf7c 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -95,10 +95,7 @@ enum rq_cmd_type_bits {
  * as well!
  */
 struct request {
-	union {
-		struct list_head queuelist;
-		struct llist_node ll_list;
-	};
+	struct list_head queuelist;
 	union {
 		struct call_single_data csd;
 		struct work_struct mq_flush_data;
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 7885151..10755dd 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -11,12 +11,16 @@
 #include <linux/list.h>
 #include <linux/cpumask.h>
 #include <linux/init.h>
+#include <linux/llist.h>
 
 extern void cpu_idle(void);
 
 typedef void (*smp_call_func_t)(void *info);
 struct call_single_data {
-	struct list_head list;
+	union {
+		struct list_head list;
+		struct llist_node llist;
+	};
 	smp_call_func_t func;
 	void *info;
 	u16 flags;
diff --git a/kernel/smp.c b/kernel/smp.c
index 53644e6..a735c66 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -28,12 +28,7 @@ struct call_function_data {
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
 
-struct call_single_queue {
-	struct list_head	list;
-	raw_spinlock_t		lock;
-};
-
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_queue, call_single_queue);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
 
 static int
 hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
@@ -85,12 +80,8 @@ void __init call_function_init(void)
 	void *cpu = (void *)(long)smp_processor_id();
 	int i;
 
-	for_each_possible_cpu(i) {
-		struct call_single_queue *q = &per_cpu(call_single_queue, i);
-
-		raw_spin_lock_init(&q->lock);
-		INIT_LIST_HEAD(&q->list);
-	}
+	for_each_possible_cpu(i)
+		init_llist_head(&per_cpu(call_single_queue, i));
 
 	hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
 	register_cpu_notifier(&hotplug_cfd_notifier);
@@ -141,18 +132,9 @@ static void csd_unlock(struct call_single_data *csd)
  */
 static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
 {
-	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
-	unsigned long flags;
-	int ipi;
-
 	if (wait)
 		csd->flags |= CSD_FLAG_WAIT;
 
-	raw_spin_lock_irqsave(&dst->lock, flags);
-	ipi = list_empty(&dst->list);
-	list_add_tail(&csd->list, &dst->list);
-	raw_spin_unlock_irqrestore(&dst->lock, flags);
-
 	/*
 	 * The list addition should be visible before sending the IPI
 	 * handler locks the list to pull the entry off it because of
@@ -164,7 +146,7 @@ static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
 	 * locking and barrier primitives. Generic code isn't really
 	 * equipped to do the right thing...
 	 */
-	if (ipi)
+	if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
 		arch_send_call_function_single_ipi(cpu);
 
 	if (wait)
@@ -177,27 +159,26 @@ static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
  */
 void generic_smp_call_function_single_interrupt(void)
 {
-	struct call_single_queue *q = &__get_cpu_var(call_single_queue);
-	LIST_HEAD(list);
+	struct llist_node *entry, *next;
 
 	/*
 	 * Shouldn't receive this interrupt on a cpu that is not yet online.
 	 */
 	WARN_ON_ONCE(!cpu_online(smp_processor_id()));
 
-	raw_spin_lock(&q->lock);
-	list_replace_init(&q->list, &list);
-	raw_spin_unlock(&q->lock);
+	entry = llist_del_all(&__get_cpu_var(call_single_queue));
+	entry = llist_reverse_order(entry);
 
-	while (!list_empty(&list)) {
+	while (entry) {
 		struct call_single_data *csd;
 
-		csd = list_entry(list.next, struct call_single_data, list);
-		list_del(&csd->list);
+		next = entry->next;
 
+		csd = llist_entry(entry, struct call_single_data, llist);
 		csd->func(csd->info);
-
 		csd_unlock(csd);
+
+		entry = next;
 	}
 }
 
@@ -410,17 +391,11 @@ void smp_call_function_many(const struct cpumask *mask,
 
 	for_each_cpu(cpu, cfd->cpumask) {
 		struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);
-		struct call_single_queue *dst =
-					&per_cpu(call_single_queue, cpu);
-		unsigned long flags;
 
 		csd_lock(csd);
 		csd->func = func;
 		csd->info = info;
-
-		raw_spin_lock_irqsave(&dst->lock, flags);
-		list_add_tail(&csd->list, &dst->list);
-		raw_spin_unlock_irqrestore(&dst->lock, flags);
+		llist_add(&csd->llist, &per_cpu(call_single_queue, cpu));
 	}
 
 	/* Send a message to all CPUs in the map */
-- 
1.7.10.4



^ permalink raw reply related	[flat|nested] 13+ messages in thread

* [PATCH 7/7] blk-mq: use __smp_call_function_single directly
  2013-10-24 15:19 [PATCH 0/7] RFC: __smp_call_function_single improvements Christoph Hellwig
                   ` (5 preceding siblings ...)
  2013-10-24 15:19 ` [PATCH 6/7] kernel: use lockless list for smp_call_function_single Christoph Hellwig
@ 2013-10-24 15:19 ` Christoph Hellwig
  6 siblings, 0 replies; 13+ messages in thread
From: Christoph Hellwig @ 2013-10-24 15:19 UTC (permalink / raw)
  To: Jens Axboe; +Cc: Andrew Morton, linux-kernel

[-- Attachment #1: 0007-blk-mq-use-__smp_call_function_single-directly.patch --]
[-- Type: text/plain, Size: 4938 bytes --]

Now that __smp_call_function_single is available for all builds and
uses llists to queue up items without taking a lock or disabling
interrupts, there is no need to wrap it in the block code.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-mq-cpu.c |   31 ------------------------
 block/blk-mq.c     |   68 +++++++++-------------------------------------------
 block/blk-mq.h     |    1 -
 3 files changed, 11 insertions(+), 89 deletions(-)

diff --git a/block/blk-mq-cpu.c b/block/blk-mq-cpu.c
index f8ea39d..4f0c352 100644
--- a/block/blk-mq-cpu.c
+++ b/block/blk-mq-cpu.c
@@ -28,32 +28,6 @@ static int __cpuinit blk_mq_main_cpu_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
-static void __cpuinit blk_mq_cpu_notify(void *data, unsigned long action,
-					unsigned int cpu)
-{
-	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
-		/*
-		 * If the CPU goes away, ensure that we run any pending
-		 * completions.
-		 */
-		struct llist_node *node;
-		struct request *rq;
-
-		local_irq_disable();
-
-		node = llist_del_all(&per_cpu(ipi_lists, cpu));
-		while (node) {
-			struct llist_node *next = node->next;
-
-			rq = llist_entry(node, struct request, ll_list);
-			__blk_mq_end_io(rq, rq->errors);
-			node = next;
-		}
-
-		local_irq_enable();
-	}
-}
-
 static struct notifier_block __cpuinitdata blk_mq_main_cpu_notifier = {
 	.notifier_call	= blk_mq_main_cpu_notify,
 };
@@ -82,12 +56,7 @@ void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
 	notifier->data = data;
 }
 
-static struct blk_mq_cpu_notifier __cpuinitdata cpu_notifier = {
-	.notify = blk_mq_cpu_notify,
-};
-
 void __init blk_mq_cpu_init(void)
 {
 	register_hotcpu_notifier(&blk_mq_main_cpu_notifier);
-	blk_mq_register_cpu_notifier(&cpu_notifier);
 }
diff --git a/block/blk-mq.c b/block/blk-mq.c
index d43a7e8..0b2b487 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -27,8 +27,6 @@ static LIST_HEAD(all_q_list);
 
 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
 
-DEFINE_PER_CPU(struct llist_head, ipi_lists);
-
 static struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
 					   unsigned int cpu)
 {
@@ -318,55 +316,12 @@ void __blk_mq_end_io(struct request *rq, int error)
 		blk_mq_complete_request(rq, error);
 }
 
-#if defined(CONFIG_SMP)
-
-/*
- * Called with interrupts disabled.
- */
-static void ipi_end_io(void *data)
+static void blk_mq_end_io_remote(void *data)
 {
-	struct llist_head *list = &per_cpu(ipi_lists, smp_processor_id());
-	struct llist_node *entry, *next;
-	struct request *rq;
-
-	entry = llist_del_all(list);
-
-	while (entry) {
-		next = entry->next;
-		rq = llist_entry(entry, struct request, ll_list);
-		__blk_mq_end_io(rq, rq->errors);
-		entry = next;
-	}
-}
-
-static int ipi_remote_cpu(struct blk_mq_ctx *ctx, const int cpu,
-			  struct request *rq, const int error)
-{
-	struct call_single_data *data = &rq->csd;
-
-	rq->errors = error;
-	rq->ll_list.next = NULL;
-
-	/*
-	 * If the list is non-empty, an existing IPI must already
-	 * be "in flight". If that is the case, we need not schedule
-	 * a new one.
-	 */
-	if (llist_add(&rq->ll_list, &per_cpu(ipi_lists, ctx->cpu))) {
-		data->func = ipi_end_io;
-		data->flags = 0;
-		__smp_call_function_single(ctx->cpu, data, 0);
-	}
+	struct request *rq = data;
 
-	return true;
+	__blk_mq_end_io(rq, rq->errors);
 }
-#else /* CONFIG_SMP */
-static int ipi_remote_cpu(struct blk_mq_ctx *ctx, const int cpu,
-			  struct request *rq, const int error)
-{
-	return false;
-}
-#endif
 
 /*
  * End IO on this request on a multiqueue enabled driver. We'll either do
@@ -382,11 +337,15 @@ void blk_mq_end_io(struct request *rq, int error)
 		return __blk_mq_end_io(rq, error);
 
 	cpu = get_cpu();
-
-	if (cpu == ctx->cpu || !cpu_online(ctx->cpu) ||
-	    !ipi_remote_cpu(ctx, cpu, rq, error))
+	if (cpu != ctx->cpu && cpu_online(ctx->cpu)) {
+		rq->errors = error;
+		rq->csd.func = blk_mq_end_io_remote;
+		rq->csd.info = rq;
+		rq->csd.flags = 0;
+		__smp_call_function_single(ctx->cpu, &rq->csd, 0);
+	} else {
 		__blk_mq_end_io(rq, error);
-
+	}
 	put_cpu();
 }
 EXPORT_SYMBOL(blk_mq_end_io);
@@ -1465,11 +1424,6 @@ static int __cpuinit blk_mq_queue_reinit_notify(struct notifier_block *nb,
 
 static int __init blk_mq_init(void)
 {
-	unsigned int i;
-
-	for_each_possible_cpu(i)
-		init_llist_head(&per_cpu(ipi_lists, i));
-
 	blk_mq_cpu_init();
 
 	/* Must be called after percpu_counter_hotcpu_callback() */
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 52bf1f9..5761eed 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -38,7 +38,6 @@ void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
 void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
 void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
 void blk_mq_cpu_init(void);
-DECLARE_PER_CPU(struct llist_head, ipi_lists);
 
 /*
  * CPU -> queue mappings
-- 
1.7.10.4



^ permalink raw reply related	[flat|nested] 13+ messages in thread

* Re: [PATCH 5/7] llists: move llist_reverse_order from raid5 to llist.c
  2013-10-24 15:19 ` [PATCH 5/7] llists: move llist_reverse_order from raid5 to llist.c Christoph Hellwig
@ 2013-10-29 18:58   ` Jan Kara
  0 siblings, 0 replies; 13+ messages in thread
From: Jan Kara @ 2013-10-29 18:58 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: Jens Axboe, Andrew Morton, linux-kernel

On Thu 24-10-13 08:19:26, Christoph Hellwig wrote:
> Make this useful helper available for other users.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>  drivers/md/raid5.c    |   14 --------------
>  include/linux/llist.h |    2 ++
>  lib/llist.c           |   22 ++++++++++++++++++++++
>  3 files changed, 24 insertions(+), 14 deletions(-)
> 
> diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
> index 7ff4f25..046f1a9 100644
> --- a/drivers/md/raid5.c
> +++ b/drivers/md/raid5.c
> @@ -293,20 +293,6 @@ static void __release_stripe(struct r5conf *conf, struct stripe_head *sh)
>  		do_release_stripe(conf, sh);
>  }
>  
> -static struct llist_node *llist_reverse_order(struct llist_node *head)
> -{
> -	struct llist_node *new_head = NULL;
> -
> -	while (head) {
> -		struct llist_node *tmp = head;
> -		head = head->next;
> -		tmp->next = new_head;
> -		new_head = tmp;
> -	}
> -
> -	return new_head;
> -}
> -
>  /* should hold conf->device_lock already */
>  static int release_stripe_list(struct r5conf *conf)
>  {
> diff --git a/include/linux/llist.h b/include/linux/llist.h
> index 8828a78..fbf10a0 100644
> --- a/include/linux/llist.h
> +++ b/include/linux/llist.h
> @@ -195,4 +195,6 @@ static inline struct llist_node *llist_del_all(struct llist_head *head)
>  
>  extern struct llist_node *llist_del_first(struct llist_head *head);
>  
> +struct llist_node *llist_reverse_order(struct llist_node *head);
> +
>  #endif /* LLIST_H */
> diff --git a/lib/llist.c b/lib/llist.c
> index 4a70d12..ef48b87 100644
> --- a/lib/llist.c
> +++ b/lib/llist.c
> @@ -81,3 +81,25 @@ struct llist_node *llist_del_first(struct llist_head *head)
>  	return entry;
>  }
>  EXPORT_SYMBOL_GPL(llist_del_first);
> +
> +/**
> + * llist_reverse_order - reverse order of a llist chain
> + * @head:	first item of the list to be reversed
> + *
> + * Reverse the oder of a chain of llist entries and return the
                  ^^ order

  Otherwise the patch looks fine. You can add:
Reviewed-by: Jan Kara <jack@suse.cz>

								Honza

> + * new first entry.
> + */
> +struct llist_node *llist_reverse_order(struct llist_node *head)
> +{
> +	struct llist_node *new_head = NULL;
> +
> +	while (head) {
> +		struct llist_node *tmp = head;
> +		head = head->next;
> +		tmp->next = new_head;
> +		new_head = tmp;
> +	}
> +
> +	return new_head;
> +}
> +EXPORT_SYMBOL_GPL(llist_reverse_order);
> -- 
> 1.7.10.4
> 
> 
> --
> To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html
> Please read the FAQ at  http://www.tux.org/lkml/
-- 
Jan Kara <jack@suse.cz>
SUSE Labs, CR

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH 6/7] kernel: use lockless list for smp_call_function_single
  2013-10-24 15:19 ` [PATCH 6/7] kernel: use lockless list for smp_call_function_single Christoph Hellwig
@ 2013-10-29 19:25   ` Jan Kara
  2013-10-29 19:27     ` Christoph Hellwig
  0 siblings, 1 reply; 13+ messages in thread
From: Jan Kara @ 2013-10-29 19:25 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: Jens Axboe, Andrew Morton, linux-kernel

On Thu 24-10-13 08:19:27, Christoph Hellwig wrote:
> Make smp_call_function_single and friends more efficient by using
> a lockless list.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>  include/linux/blkdev.h |    5 +----
>  include/linux/smp.h    |    6 +++++-
>  kernel/smp.c           |   51 ++++++++++++------------------------------------
>  3 files changed, 19 insertions(+), 43 deletions(-)
> 
> diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
> index f26ec20f..287bf7c 100644
> --- a/include/linux/blkdev.h
> +++ b/include/linux/blkdev.h
> @@ -95,10 +95,7 @@ enum rq_cmd_type_bits {
>   * as well!
>   */
>  struct request {
> -	union {
> -		struct list_head queuelist;
> -		struct llist_node ll_list;
> -	};
> +	struct list_head queuelist;
>  	union {
>  		struct call_single_data csd;
>  		struct work_struct mq_flush_data;
> diff --git a/include/linux/smp.h b/include/linux/smp.h
> index 7885151..10755dd 100644
> --- a/include/linux/smp.h
> +++ b/include/linux/smp.h
> @@ -11,12 +11,16 @@
>  #include <linux/list.h>
>  #include <linux/cpumask.h>
>  #include <linux/init.h>
> +#include <linux/llist.h>
>  
>  extern void cpu_idle(void);
>  
>  typedef void (*smp_call_func_t)(void *info);
>  struct call_single_data {
> -	struct list_head list;
> +	union {
> +		struct list_head list;
> +		struct llist_node llist;
> +	};
  I'm wondering: Who's still using the normal list_head? I was grepping for
a while and I couldn't find any user. Otherwise the patch looks good to me.
You can add:
  Reviewed-by: Jan Kara <jack@suse.cz>

								Honza

>  	smp_call_func_t func;
>  	void *info;
>  	u16 flags;
> diff --git a/kernel/smp.c b/kernel/smp.c
> index 53644e6..a735c66 100644
> --- a/kernel/smp.c
> +++ b/kernel/smp.c
> @@ -28,12 +28,7 @@ struct call_function_data {
>  
>  static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
>  
> -struct call_single_queue {
> -	struct list_head	list;
> -	raw_spinlock_t		lock;
> -};
> -
> -static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_queue, call_single_queue);
> +static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
>  
>  static int
>  hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
> @@ -85,12 +80,8 @@ void __init call_function_init(void)
>  	void *cpu = (void *)(long)smp_processor_id();
>  	int i;
>  
> -	for_each_possible_cpu(i) {
> -		struct call_single_queue *q = &per_cpu(call_single_queue, i);
> -
> -		raw_spin_lock_init(&q->lock);
> -		INIT_LIST_HEAD(&q->list);
> -	}
> +	for_each_possible_cpu(i)
> +		init_llist_head(&per_cpu(call_single_queue, i));
>  
>  	hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
>  	register_cpu_notifier(&hotplug_cfd_notifier);
> @@ -141,18 +132,9 @@ static void csd_unlock(struct call_single_data *csd)
>   */
>  static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
>  {
> -	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
> -	unsigned long flags;
> -	int ipi;
> -
>  	if (wait)
>  		csd->flags |= CSD_FLAG_WAIT;
>  
> -	raw_spin_lock_irqsave(&dst->lock, flags);
> -	ipi = list_empty(&dst->list);
> -	list_add_tail(&csd->list, &dst->list);
> -	raw_spin_unlock_irqrestore(&dst->lock, flags);
> -
>  	/*
>  	 * The list addition should be visible before sending the IPI
>  	 * handler locks the list to pull the entry off it because of
> @@ -164,7 +146,7 @@ static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
>  	 * locking and barrier primitives. Generic code isn't really
>  	 * equipped to do the right thing...
>  	 */
> -	if (ipi)
> +	if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
>  		arch_send_call_function_single_ipi(cpu);
>  
>  	if (wait)
> @@ -177,27 +159,26 @@ static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
>   */
>  void generic_smp_call_function_single_interrupt(void)
>  {
> -	struct call_single_queue *q = &__get_cpu_var(call_single_queue);
> -	LIST_HEAD(list);
> +	struct llist_node *entry, *next;
>  
>  	/*
>  	 * Shouldn't receive this interrupt on a cpu that is not yet online.
>  	 */
>  	WARN_ON_ONCE(!cpu_online(smp_processor_id()));
>  
> -	raw_spin_lock(&q->lock);
> -	list_replace_init(&q->list, &list);
> -	raw_spin_unlock(&q->lock);
> +	entry = llist_del_all(&__get_cpu_var(call_single_queue));
> +	entry = llist_reverse_order(entry);
>  
> -	while (!list_empty(&list)) {
> +	while (entry) {
>  		struct call_single_data *csd;
>  
> -		csd = list_entry(list.next, struct call_single_data, list);
> -		list_del(&csd->list);
> +		next = entry->next;
>  
> +		csd = llist_entry(entry, struct call_single_data, llist);
>  		csd->func(csd->info);
> -
>  		csd_unlock(csd);
> +
> +		entry = next;
>  	}
>  }
>  
> @@ -410,17 +391,11 @@ void smp_call_function_many(const struct cpumask *mask,
>  
>  	for_each_cpu(cpu, cfd->cpumask) {
>  		struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);
> -		struct call_single_queue *dst =
> -					&per_cpu(call_single_queue, cpu);
> -		unsigned long flags;
>  
>  		csd_lock(csd);
>  		csd->func = func;
>  		csd->info = info;
> -
> -		raw_spin_lock_irqsave(&dst->lock, flags);
> -		list_add_tail(&csd->list, &dst->list);
> -		raw_spin_unlock_irqrestore(&dst->lock, flags);
> +		llist_add(&csd->llist, &per_cpu(call_single_queue, cpu));
>  	}
>  
>  	/* Send a message to all CPUs in the map */
> -- 
> 1.7.10.4
> 
> 
> --
> To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html
> Please read the FAQ at  http://www.tux.org/lkml/
-- 
Jan Kara <jack@suse.cz>
SUSE Labs, CR

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH 6/7] kernel: use lockless list for smp_call_function_single
  2013-10-29 19:25   ` Jan Kara
@ 2013-10-29 19:27     ` Christoph Hellwig
  2013-10-29 21:17       ` Jan Kara
  0 siblings, 1 reply; 13+ messages in thread
From: Christoph Hellwig @ 2013-10-29 19:27 UTC (permalink / raw)
  To: Jan Kara; +Cc: Christoph Hellwig, Jens Axboe, Andrew Morton, linux-kernel

On Tue, Oct 29, 2013 at 08:25:36PM +0100, Jan Kara wrote:
>   I'm wondering: Who's still using the normal list_head? I was grepping for
> a while and I couldn't find any user. Otherwise the patch looks good to me.

The block/blk-softirq.c code is abusing it in a fairly ugly way.


^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH 6/7] kernel: use lockless list for smp_call_function_single
  2013-10-29 19:27     ` Christoph Hellwig
@ 2013-10-29 21:17       ` Jan Kara
  2013-10-29 21:25         ` Jens Axboe
  0 siblings, 1 reply; 13+ messages in thread
From: Jan Kara @ 2013-10-29 21:17 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: Jan Kara, Jens Axboe, Andrew Morton, linux-kernel

On Tue 29-10-13 12:27:20, Christoph Hellwig wrote:
> On Tue, Oct 29, 2013 at 08:25:36PM +0100, Jan Kara wrote:
> >   I'm wondering: Who's still using the normal list_head? I was grepping for
> > a while and I couldn't find any user. Otherwise the patch looks good to me.
> 
> The block/blk-softirq.c code is abusing it in a fairly ugly way.
  Ah, thanks for the pointer. Looking into the code, that could easily use
llist as well, couldn't it?

								Honza

-- 
Jan Kara <jack@suse.cz>
SUSE Labs, CR

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH 6/7] kernel: use lockless list for smp_call_function_single
  2013-10-29 21:17       ` Jan Kara
@ 2013-10-29 21:25         ` Jens Axboe
  0 siblings, 0 replies; 13+ messages in thread
From: Jens Axboe @ 2013-10-29 21:25 UTC (permalink / raw)
  To: Jan Kara, Christoph Hellwig; +Cc: Andrew Morton, linux-kernel

On 10/29/2013 03:17 PM, Jan Kara wrote:
> On Tue 29-10-13 12:27:20, Christoph Hellwig wrote:
>> On Tue, Oct 29, 2013 at 08:25:36PM +0100, Jan Kara wrote:
>>>   I'm wondering: Who's still using the normal list_head? I was grepping for
>>> a while and I couldn't find any user. Otherwise the patch looks good to me.
>>
>> The block/blk-softirq.c code is abusing it in a fairly ugly way.
>   Ah, thanks for the pointer. Looking into the code, that could easily use
> llist as well, couldn't it?

Sure, it'd be easy enough to do.

-- 
Jens Axboe


^ permalink raw reply	[flat|nested] 13+ messages in thread

end of thread, other threads:[~2013-10-29 21:25 UTC | newest]

Thread overview: 13+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2013-10-24 15:19 [PATCH 0/7] RFC: __smp_call_function_single improvements Christoph Hellwig
2013-10-24 15:19 ` [PATCH 1/7] Revert: "softirq: Add support for triggering softirq work on softirqs" Christoph Hellwig
2013-10-24 15:19 ` [PATCH 2/7] kernel: remove CONFIG_USE_GENERIC_SMP_HELPERS Christoph Hellwig
2013-10-24 15:19 ` [PATCH 3/7] kernel: provide a __smp_call_function_single stub for !CONFIG_SMP Christoph Hellwig
2013-10-24 15:19 ` [PATCH 4/7] kernel: fix generic_exec_single indication Christoph Hellwig
2013-10-24 15:19 ` [PATCH 5/7] llists: move llist_reverse_order from raid5 to llist.c Christoph Hellwig
2013-10-29 18:58   ` Jan Kara
2013-10-24 15:19 ` [PATCH 6/7] kernel: use lockless list for smp_call_function_single Christoph Hellwig
2013-10-29 19:25   ` Jan Kara
2013-10-29 19:27     ` Christoph Hellwig
2013-10-29 21:17       ` Jan Kara
2013-10-29 21:25         ` Jens Axboe
2013-10-24 15:19 ` [PATCH 7/7] blk-mq: use __smp_call_function_single directly Christoph Hellwig

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.