* [PATCH v5] irqchip: gic: Allow gic_arch_extn hooks to call into scheduler
@ 2014-08-20 19:22 ` Stephen Boyd
  0 siblings, 0 replies; 8+ messages in thread
From: Stephen Boyd @ 2014-08-20 19:22 UTC (permalink / raw)
  To: Thomas Gleixner, Jason Cooper
  Cc: linux-kernel, linux-arm-msm, linux-arm-kernel, Nicolas Pitre,
	Russell King - ARM Linux

Commit 1a6b69b6548c (ARM: gic: add CPU migration support,
2012-04-12) introduced an acquisition of the irq_controller_lock
in gic_raise_softirq(), which can lead to spinlock recursion if
the gic_arch_extn hooks call into the scheduler (via complete(),
wake_up(), etc.). This happens because gic_arch_extn hooks are
normally called with the irq_controller_lock held, and calling
into the scheduler may cause us to call smp_send_reschedule(),
which will grab the irq_controller_lock again. Here's an example
from a vendor kernel (note that the gic_arch_extn hook code here
isn't actually in mainline):

BUG: spinlock recursion on CPU#0, swapper/0/1
 lock: irq_controller_lock+0x0/0x18, .magic: dead4ead, .owner: swapper/0/1, .owner_cpu: 0
CPU: 0 PID: 1 Comm: swapper/0 Not tainted 3.14.10-00430-g3d433c4e

Call trace:
[<ffffffc000087e1c>] dump_backtrace+0x0/0x140
[<ffffffc000087f6c>] show_stack+0x10/0x1c
[<ffffffc00064732c>] dump_stack+0x74/0xc4
[<ffffffc0006446c0>] spin_dump+0x78/0x88
[<ffffffc0006446f4>] spin_bug+0x24/0x34
[<ffffffc0000d47d0>] do_raw_spin_lock+0x58/0x148
[<ffffffc00064d398>] _raw_spin_lock_irqsave+0x24/0x38
[<ffffffc0002c9d7c>] gic_raise_softirq+0x2c/0xbc
[<ffffffc00008daa4>] smp_send_reschedule+0x34/0x40
[<ffffffc0000c1e94>] try_to_wake_up+0x224/0x288
[<ffffffc0000c1f4c>] default_wake_function+0xc/0x18
[<ffffffc0000ceef0>] __wake_up_common+0x50/0x8c
[<ffffffc0000cef3c>] __wake_up_locked+0x10/0x1c
[<ffffffc0000cf734>] complete+0x3c/0x5c
[<ffffffc0002f0e78>] msm_mpm_enable_irq_exclusive+0x1b8/0x1c8
[<ffffffc0002f0f58>] __msm_mpm_enable_irq+0x4c/0x7c
[<ffffffc0002f0f94>] msm_mpm_enable_irq+0xc/0x18
[<ffffffc0002c9bb0>] gic_unmask_irq+0x40/0x7c
[<ffffffc0000de5f4>] irq_enable+0x2c/0x48
[<ffffffc0000de65c>] irq_startup+0x4c/0x74
[<ffffffc0000dd2fc>] __setup_irq+0x264/0x3f0
[<ffffffc0000dd5e0>] request_threaded_irq+0xcc/0x11c
[<ffffffc0000df254>] devm_request_threaded_irq+0x68/0xb4
[<ffffffc000471520>] msm_iommu_ctx_probe+0x124/0x2d4
[<ffffffc000337374>] platform_drv_probe+0x20/0x54
[<ffffffc00033598c>] driver_probe_device+0x158/0x340
[<ffffffc000335c20>] __driver_attach+0x60/0x90
[<ffffffc000333c9c>] bus_for_each_dev+0x6c/0x8c
[<ffffffc000335304>] driver_attach+0x1c/0x28
[<ffffffc000334f14>] bus_add_driver+0x120/0x204
[<ffffffc0003362e4>] driver_register+0xbc/0x10c
[<ffffffc000337348>] __platform_driver_register+0x5c/0x68
[<ffffffc00094c478>] msm_iommu_driver_init+0x54/0x7c
[<ffffffc0000813ec>] do_one_initcall+0xa4/0x130
[<ffffffc00091d928>] kernel_init_freeable+0x138/0x1dc
[<ffffffc000642578>] kernel_init+0xc/0xd4

We really just want to synchronize the sending of an SGI with the
update of the gic_cpu_map[], so introduce a new SGI lock that we
can use to synchronize the two code paths. As a further
optimization, we don't take any locks at all if the BL switcher
code isn't used.
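
For illustration, here is the rough shape of such a hook (a
hypothetical sketch only - the function and completion below are
made up for illustration, not mainline or vendor code):

static struct completion vendor_irq_ready;      /* hypothetical */

static void vendor_extn_irq_unmask(struct irq_data *d)
{
        /*
         * Called from gic_unmask_irq() with irq_controller_lock held.
         * complete() can wake a task, and try_to_wake_up() may call
         * smp_send_reschedule() -> gic_raise_softirq(), which takes
         * irq_controller_lock again: spinlock recursion.
         */
        complete(&vendor_irq_ready);
}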

Cc: Nicolas Pitre <nico@linaro.org>
Cc: Russell King - ARM Linux <linux@arm.linux.org.uk>
Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
---
 drivers/irqchip/irq-gic.c | 16 ++++++++++++++--
 1 file changed, 14 insertions(+), 2 deletions(-)

diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 4b959e606fe8..052762638b1c 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -80,6 +80,16 @@ static DEFINE_RAW_SPINLOCK(irq_controller_lock);
 #define NR_GIC_CPU_IF 8
 static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
 
+#ifdef CONFIG_BL_SWITCHER
+/* Synchronize switching CPU interface and sending SGIs */
+static DEFINE_RAW_SPINLOCK(gic_sgi_lock);
+#define sgi_map_lock(flags) raw_spin_lock_irqsave(&gic_sgi_lock, flags)
+#define sgi_map_unlock(flags) raw_spin_unlock_irqrestore(&gic_sgi_lock, flags)
+#else
+#define sgi_map_lock(flags) (void)(flags)
+#define sgi_map_unlock(flags) (void)(flags)
+#endif
+
 /*
  * Supported arch specific GIC irq extension.
  * Default make them NULL.
@@ -605,7 +615,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 	int cpu;
 	unsigned long flags, map = 0;
 
-	raw_spin_lock_irqsave(&irq_controller_lock, flags);
+	sgi_map_lock(flags);
 
 	/* Convert our logical CPU mask into a physical one. */
 	for_each_cpu(cpu, mask)
@@ -620,7 +630,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 	/* this always happens on GIC0 */
 	writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
 
-	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
+	sgi_map_unlock(flags);
 }
 #endif
 
@@ -690,6 +700,7 @@ void gic_migrate_target(unsigned int new_cpu_id)
 	ror_val = (cur_cpu_id - new_cpu_id) & 31;
 
 	raw_spin_lock(&irq_controller_lock);
+	raw_spin_lock(&gic_sgi_lock);
 
 	/* Update the target interface for this logical CPU */
 	gic_cpu_map[cpu] = 1 << new_cpu_id;
@@ -709,6 +720,7 @@ void gic_migrate_target(unsigned int new_cpu_id)
 		}
 	}
 
+	raw_spin_unlock(&gic_sgi_lock);
 	raw_spin_unlock(&irq_controller_lock);
 
 	/*
-- 
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
hosted by The Linux Foundation


* Re: [PATCH v5] irqchip: gic: Allow gic_arch_extn hooks to call into scheduler
  2014-08-20 19:22 ` Stephen Boyd
@ 2014-08-20 20:43   ` Nicolas Pitre
  -1 siblings, 0 replies; 8+ messages in thread
From: Nicolas Pitre @ 2014-08-20 20:43 UTC (permalink / raw)
  To: Stephen Boyd
  Cc: Thomas Gleixner, Jason Cooper, linux-kernel, linux-arm-msm,
	linux-arm-kernel, Russell King - ARM Linux

On Wed, 20 Aug 2014, Stephen Boyd wrote:

> Commit 1a6b69b6548c (ARM: gic: add CPU migration support,
> 2012-04-12) introduced an acquisition of the irq_controller_lock
> in gic_raise_softirq(), which can lead to spinlock recursion if
> the gic_arch_extn hooks call into the scheduler (via complete(),
> wake_up(), etc.). This happens because gic_arch_extn hooks are
> normally called with the irq_controller_lock held, and calling
> into the scheduler may cause us to call smp_send_reschedule(),
> which will grab the irq_controller_lock again. Here's an example
> from a vendor kernel (note that the gic_arch_extn hook code here
> isn't actually in mainline):
> 
> BUG: spinlock recursion on CPU#0, swapper/0/1

Given the actual source of the bug is contested, you probably should 
only talk about the locking optimization, which is the only undisputed
reason for this patch.

>  lock: irq_controller_lock+0x0/0x18, .magic: dead4ead, .owner: swapper/0/1, .owner_cpu: 0
> CPU: 0 PID: 1 Comm: swapper/0 Not tainted 3.14.10-00430-g3d433c4e
> 
> Call trace:
> [<ffffffc000087e1c>] dump_backtrace+0x0/0x140
> [<ffffffc000087f6c>] show_stack+0x10/0x1c
> [<ffffffc00064732c>] dump_stack+0x74/0xc4
> [<ffffffc0006446c0>] spin_dump+0x78/0x88
> [<ffffffc0006446f4>] spin_bug+0x24/0x34
> [<ffffffc0000d47d0>] do_raw_spin_lock+0x58/0x148
> [<ffffffc00064d398>] _raw_spin_lock_irqsave+0x24/0x38
> [<ffffffc0002c9d7c>] gic_raise_softirq+0x2c/0xbc
> [<ffffffc00008daa4>] smp_send_reschedule+0x34/0x40
> [<ffffffc0000c1e94>] try_to_wake_up+0x224/0x288
> [<ffffffc0000c1f4c>] default_wake_function+0xc/0x18
> [<ffffffc0000ceef0>] __wake_up_common+0x50/0x8c
> [<ffffffc0000cef3c>] __wake_up_locked+0x10/0x1c
> [<ffffffc0000cf734>] complete+0x3c/0x5c
> [<ffffffc0002f0e78>] msm_mpm_enable_irq_exclusive+0x1b8/0x1c8
> [<ffffffc0002f0f58>] __msm_mpm_enable_irq+0x4c/0x7c
> [<ffffffc0002f0f94>] msm_mpm_enable_irq+0xc/0x18
> [<ffffffc0002c9bb0>] gic_unmask_irq+0x40/0x7c
> [<ffffffc0000de5f4>] irq_enable+0x2c/0x48
> [<ffffffc0000de65c>] irq_startup+0x4c/0x74
> [<ffffffc0000dd2fc>] __setup_irq+0x264/0x3f0
> [<ffffffc0000dd5e0>] request_threaded_irq+0xcc/0x11c
> [<ffffffc0000df254>] devm_request_threaded_irq+0x68/0xb4
> [<ffffffc000471520>] msm_iommu_ctx_probe+0x124/0x2d4
> [<ffffffc000337374>] platform_drv_probe+0x20/0x54
> [<ffffffc00033598c>] driver_probe_device+0x158/0x340
> [<ffffffc000335c20>] __driver_attach+0x60/0x90
> [<ffffffc000333c9c>] bus_for_each_dev+0x6c/0x8c
> [<ffffffc000335304>] driver_attach+0x1c/0x28
> [<ffffffc000334f14>] bus_add_driver+0x120/0x204
> [<ffffffc0003362e4>] driver_register+0xbc/0x10c
> [<ffffffc000337348>] __platform_driver_register+0x5c/0x68
> [<ffffffc00094c478>] msm_iommu_driver_init+0x54/0x7c
> [<ffffffc0000813ec>] do_one_initcall+0xa4/0x130
> [<ffffffc00091d928>] kernel_init_freeable+0x138/0x1dc
> [<ffffffc000642578>] kernel_init+0xc/0xd4
> 
> We really just want to synchronize the sending of an SGI with the
> update of the gic_cpu_map[], so introduce a new SGI lock that we
> can use to synchronize the two code paths. As a further
> optimization, we don't take any locks at all if the BL switcher
> code isn't used.
> 
> Cc: Nicolas Pitre <nico@linaro.org>
> Cc: Russell King - ARM Linux <linux@arm.linux.org.uk>
> Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
> ---
>  drivers/irqchip/irq-gic.c | 16 ++++++++++++++--
>  1 file changed, 14 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
> index 4b959e606fe8..052762638b1c 100644
> --- a/drivers/irqchip/irq-gic.c
> +++ b/drivers/irqchip/irq-gic.c
> @@ -80,6 +80,16 @@ static DEFINE_RAW_SPINLOCK(irq_controller_lock);
>  #define NR_GIC_CPU_IF 8
>  static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
>  
> +#ifdef CONFIG_BL_SWITCHER
> +/* Synchronize switching CPU interface and sending SGIs */
> +static DEFINE_RAW_SPINLOCK(gic_sgi_lock);
> +#define sgi_map_lock(flags) raw_spin_lock_irqsave(&gic_sgi_lock, flags)
> +#define sgi_map_unlock(flags) raw_spin_unlock_irqrestore(&gic_sgi_lock, flags)
> +#else
> +#define sgi_map_lock(flags) (void)(flags)
> +#define sgi_map_unlock(flags) (void)(flags)
> +#endif
> +
>  /*
>   * Supported arch specific GIC irq extension.
>   * Default make them NULL.
> @@ -605,7 +615,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
>  	int cpu;
>  	unsigned long flags, map = 0;
>  
> -	raw_spin_lock_irqsave(&irq_controller_lock, flags);
> +	sgi_map_lock(flags);
>  
>  	/* Convert our logical CPU mask into a physical one. */
>  	for_each_cpu(cpu, mask)
> @@ -620,7 +630,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
>  	/* this always happens on GIC0 */
>  	writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
>  
> -	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
> +	sgi_map_unlock(flags);
>  }
>  #endif
>  
> @@ -690,6 +700,7 @@ void gic_migrate_target(unsigned int new_cpu_id)
>  	ror_val = (cur_cpu_id - new_cpu_id) & 31;
>  
>  	raw_spin_lock(&irq_controller_lock);
> +	raw_spin_lock(&gic_sgi_lock);
>  
>  	/* Update the target interface for this logical CPU */
>  	gic_cpu_map[cpu] = 1 << new_cpu_id;
> @@ -709,6 +720,7 @@ void gic_migrate_target(unsigned int new_cpu_id)
>  		}
>  	}
>  
> +	raw_spin_unlock(&gic_sgi_lock);
>  	raw_spin_unlock(&irq_controller_lock);
>  
>  	/*
> -- 
> The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
> hosted by The Linux Foundation
> 
> 


* Re: [PATCH v5] irqchip: gic: Allow gic_arch_extn hooks to call into scheduler
  2014-08-20 19:22 ` Stephen Boyd
@ 2014-08-21  9:47   ` Russell King - ARM Linux
  -1 siblings, 0 replies; 8+ messages in thread
From: Russell King - ARM Linux @ 2014-08-21  9:47 UTC (permalink / raw)
  To: Stephen Boyd
  Cc: Thomas Gleixner, Jason Cooper, linux-kernel, linux-arm-msm,
	linux-arm-kernel, Nicolas Pitre

On Wed, Aug 20, 2014 at 12:22:41PM -0700, Stephen Boyd wrote:
> @@ -605,7 +615,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
>  	int cpu;
>  	unsigned long flags, map = 0;
>  
> -	raw_spin_lock_irqsave(&irq_controller_lock, flags);
> +	sgi_map_lock(flags);
>  
>  	/* Convert our logical CPU mask into a physical one. */
>  	for_each_cpu(cpu, mask)
> @@ -620,7 +630,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
>  	/* this always happens on GIC0 */
>  	writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
>  
> -	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
> +	sgi_map_unlock(flags);

What would make more sense is a read-write lock: then
gic_raise_softirq() could run concurrently on several CPUs without
interfering with each other, yet still be safe with gic_migrate_target().

I'd then argue that we wouldn't need the ifdeffery; we might as well
keep the locking in place - its overhead is likely small (when lockdep
is disabled) compared to everything else that goes on in this path.
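
Something along these lines, say (a sketch only - Stephen's follow-up
below fleshes it out):

        static DEFINE_RWLOCK(gic_sgi_lock);

        /* SGI senders take the read side and can run in parallel */
        read_lock_irqsave(&gic_sgi_lock, flags);
        /* ... compute map and write GIC_DIST_SOFTINT ... */
        read_unlock_irqrestore(&gic_sgi_lock, flags);

        /* gic_migrate_target() takes the write side, excluding senders */
        write_lock(&gic_sgi_lock);
        gic_cpu_map[cpu] = 1 << new_cpu_id;
        write_unlock(&gic_sgi_lock);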

> @@ -690,6 +700,7 @@ void gic_migrate_target(unsigned int new_cpu_id)
>  	ror_val = (cur_cpu_id - new_cpu_id) & 31;
>  
>  	raw_spin_lock(&irq_controller_lock);
> +	raw_spin_lock(&gic_sgi_lock);
>  
>  	/* Update the target interface for this logical CPU */
>  	gic_cpu_map[cpu] = 1 << new_cpu_id;
> @@ -709,6 +720,7 @@ void gic_migrate_target(unsigned int new_cpu_id)
>  		}
>  	}
>  
> +	raw_spin_unlock(&gic_sgi_lock);
>  	raw_spin_unlock(&irq_controller_lock);

I would actually suggest we go a bit further.  Use gic_sgi_lock to only
lock gic_cpu_map[] itself, and not have it beneath any other lock.
That's an advantage because right now, lockdep learns from the above that
there's a dependency between irq_controller_lock and gic_sgi_lock.
Keeping lock dependencies to a reasonable minimum is always a good idea.

The places where gic_cpu_map[] is used are:

static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
                            bool force)
{
...
        raw_spin_lock(&irq_controller_lock);
        mask = 0xff << shift;
        bit = gic_cpu_map[cpu] << shift;
        val = readl_relaxed(reg) & ~mask;
        writel_relaxed(val | bit, reg);
        raw_spin_unlock(&irq_controller_lock);

So, we can move:

        mask = 0xff << shift;
        bit = gic_cpu_map[cpu] << shift;

out from under irq_controller_lock and put it under gic_sgi_lock.  The
"mask" bit doesn't need to be under any lock at all.

There's gic_cpu_init():

        cpu_mask = gic_get_cpumask(gic);
        gic_cpu_map[cpu] = cpu_mask;

        /*
         * Clear our mask from the other map entries in case they're
         * still undefined.
         */
        for (i = 0; i < NR_GIC_CPU_IF; i++)
                if (i != cpu)
                        gic_cpu_map[i] &= ~cpu_mask;

which had better be stable after boot - if not, this needs locking.
Remember that the above will be called on hotplug too.
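
If it ever turns out not to be stable there, the init path would need
the write side as well - something like this sketch, reusing the
rwlock idea from above:

        write_lock_irqsave(&gic_sgi_lock, flags);
        gic_cpu_map[cpu] = cpu_mask;
        for (i = 0; i < NR_GIC_CPU_IF; i++)
                if (i != cpu)
                        gic_cpu_map[i] &= ~cpu_mask;
        write_unlock_irqrestore(&gic_sgi_lock, flags);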

-- 
FTTC broadband for 0.8mile line: currently at 9.5Mbps down 400kbps up
according to speedtest.net.


* Re: [PATCH v5] irqchip: gic: Allow gic_arch_extn hooks to call into scheduler
  2014-08-21  9:47   ` Russell King - ARM Linux
@ 2014-08-22  2:06     ` Stephen Boyd
  -1 siblings, 0 replies; 8+ messages in thread
From: Stephen Boyd @ 2014-08-22  2:06 UTC (permalink / raw)
  To: Russell King - ARM Linux
  Cc: Thomas Gleixner, Jason Cooper, linux-kernel, linux-arm-msm,
	linux-arm-kernel, Nicolas Pitre

On 08/21/14 02:47, Russell King - ARM Linux wrote:
> What would make more sense is a read-write lock: then
> gic_raise_softirq() could run concurrently on several CPUs without
> interfering with each other, yet still be safe with gic_migrate_target().
>
> I'd then argue that we wouldn't need the ifdeffery; we might as well
> keep the locking in place - its overhead is likely small (when lockdep
> is disabled) compared to everything else that goes on in this path.

Ok.

>> @@ -690,6 +700,7 @@ void gic_migrate_target(unsigned int new_cpu_id)
>>  	ror_val = (cur_cpu_id - new_cpu_id) & 31;
>>  
>>  	raw_spin_lock(&irq_controller_lock);
>> +	raw_spin_lock(&gic_sgi_lock);
>>  
>>  	/* Update the target interface for this logical CPU */
>>  	gic_cpu_map[cpu] = 1 << new_cpu_id;
>> @@ -709,6 +720,7 @@ void gic_migrate_target(unsigned int new_cpu_id)
>>  		}
>>  	}
>>  
>> +	raw_spin_unlock(&gic_sgi_lock);
>>  	raw_spin_unlock(&irq_controller_lock);
> I would actually suggest we go a bit further.  Use gic_sgi_lock to only
> lock gic_cpu_map[] itself, and not have it beneath any other lock.
> That's an advantage because right now, lockdep learns from the above that
> there's a dependency between irq_controller_lock and gic_sgi_lock.
> Keeping lock dependencies to a reasonable minimum is always a good idea.
>
> The places where gic_cpu_map[] is used are:
>
> static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
>                             bool force)
> {
> ...
>         raw_spin_lock(&irq_controller_lock);
>         mask = 0xff << shift;
>         bit = gic_cpu_map[cpu] << shift;
>         val = readl_relaxed(reg) & ~mask;
>         writel_relaxed(val | bit, reg);
>         raw_spin_unlock(&irq_controller_lock);
>
> So, we can move:
>
>         mask = 0xff << shift;
>         bit = gic_cpu_map[cpu] << shift;
>
> out from under irq_controller_lock and put it under gic_sgi_lock.  The
> "mask" bit doesn't need to be under any lock at all.
>
> There's gic_cpu_init():
>
>         cpu_mask = gic_get_cpumask(gic);
>         gic_cpu_map[cpu] = cpu_mask;
>
>         /*
>          * Clear our mask from the other map entries in case they're
>          * still undefined.
>          */
>         for (i = 0; i < NR_GIC_CPU_IF; i++)
>                 if (i != cpu)
>                         gic_cpu_map[i] &= ~cpu_mask;
>
> which had better be stable after boot - if not, this needs locking.
> Remember that the above will be called on hotplug too.
>

Perhaps you'd like to send this patch? It isn't clear to me from your
description how this would work. What happens if we update the
gic_cpu_map between the time we read the map and the time we acquire
the irq_controller_lock in gic_set_affinity()? I think we would program
the affinity for the wrong CPU.
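
Concretely, the window I'm worried about is something like this
(a sketch of one possible interleaving against the patch below):

        CPU0: gic_set_affinity()              CPU1: gic_migrate_target()
        read_lock(&gic_cpu_map_lock);
        bit = gic_cpu_map[cpu] << shift;
        read_unlock(&gic_cpu_map_lock);
                                              write_lock(&gic_cpu_map_lock);
                                              gic_cpu_map[cpu] = 1 << new_cpu_id;
                                              write_unlock(&gic_cpu_map_lock);
        raw_spin_lock(&irq_controller_lock);
        val = readl_relaxed(reg) & ~mask;
        writel_relaxed(val | bit, reg);       /* stale CPU interface bit */
        raw_spin_unlock(&irq_controller_lock);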

Either way, here's the patch I think you described.

----8<-----

diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 4b959e606fe8..d159590461c7 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -39,6 +39,7 @@
 #include <linux/slab.h>
 #include <linux/irqchip/chained_irq.h>
 #include <linux/irqchip/arm-gic.h>
+#include <linux/rwlock.h>
 
 #include <asm/cputype.h>
 #include <asm/irq.h>
@@ -79,6 +80,7 @@ static DEFINE_RAW_SPINLOCK(irq_controller_lock);
  */
 #define NR_GIC_CPU_IF 8
 static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
+static DEFINE_RWLOCK(gic_cpu_map_lock);
 
 /*
  * Supported arch specific GIC irq extension.
@@ -233,9 +235,13 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 	if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
 		return -EINVAL;
 
-	raw_spin_lock(&irq_controller_lock);
 	mask = 0xff << shift;
+
+	read_lock(&gic_cpu_map_lock);
 	bit = gic_cpu_map[cpu] << shift;
+	read_unlock(&gic_cpu_map_lock);
+
+	raw_spin_lock(&irq_controller_lock);
 	val = readl_relaxed(reg) & ~mask;
 	writel_relaxed(val | bit, reg);
 	raw_spin_unlock(&irq_controller_lock);
@@ -605,7 +611,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 	int cpu;
 	unsigned long flags, map = 0;
 
-	raw_spin_lock_irqsave(&irq_controller_lock, flags);
+	read_lock_irqsave(&gic_cpu_map_lock, flags);
 
 	/* Convert our logical CPU mask into a physical one. */
 	for_each_cpu(cpu, mask)
@@ -620,7 +626,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 	/* this always happens on GIC0 */
 	writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
 
-	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
+	read_unlock_irqrestore(&gic_cpu_map_lock, flags);
 }
 #endif
 
@@ -689,11 +695,12 @@ void gic_migrate_target(unsigned int new_cpu_id)
 	cur_target_mask = 0x01010101 << cur_cpu_id;
 	ror_val = (cur_cpu_id - new_cpu_id) & 31;
 
-	raw_spin_lock(&irq_controller_lock);
-
+	write_lock(&gic_cpu_map_lock);
 	/* Update the target interface for this logical CPU */
 	gic_cpu_map[cpu] = 1 << new_cpu_id;
+	write_unlock(&gic_cpu_map_lock);
 
+	raw_spin_lock(&irq_controller_lock);
 	/*
 	 * Find all the peripheral interrupts targetting the current
 	 * CPU interface and migrate them to the new CPU interface.


-- 
Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum,
hosted by The Linux Foundation

