* [PATCH v2 1/4] jump_label: Move cpu hotplug locking
2017-08-01 8:02 [PATCH v2 0/4] Working around CPU hotplug and static keys locking Marc Zyngier
@ 2017-08-01 8:02 ` Marc Zyngier
2017-08-01 8:02 ` [PATCH v2 2/4] jump_label: Split out code under the hotplug lock Marc Zyngier
` (2 subsequent siblings)
3 siblings, 0 replies; 7+ messages in thread
From: Marc Zyngier @ 2017-08-01 8:02 UTC (permalink / raw)
To: linux-arm-kernel
As we're about to rework the locking, let's move the taking and
release of the CPU hotplug lock to locations that will make its
reworking completely obvious.
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
---
kernel/jump_label.c | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index d11c506a6ac3..f11b10091100 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -105,6 +105,7 @@ void static_key_slow_inc(struct static_key *key)
{
int v, v1;
+ cpus_read_lock();
STATIC_KEY_CHECK_USE();
/*
@@ -121,11 +122,12 @@ void static_key_slow_inc(struct static_key *key)
*/
for (v = atomic_read(&key->enabled); v > 0; v = v1) {
v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
- if (likely(v1 == v))
+ if (likely(v1 == v)) {
+ cpus_read_unlock();
return;
+ }
}
- cpus_read_lock();
jump_label_lock();
if (atomic_read(&key->enabled) == 0) {
atomic_set(&key->enabled, -1);
--
2.11.0
^ permalink raw reply related [flat|nested] 7+ messages in thread
* [PATCH v2 2/4] jump_label: Split out code under the hotplug lock
2017-08-01 8:02 [PATCH v2 0/4] Working around CPU hotplug and static keys locking Marc Zyngier
2017-08-01 8:02 ` [PATCH v2 1/4] jump_label: Move cpu hotplug locking Marc Zyngier
@ 2017-08-01 8:02 ` Marc Zyngier
2017-08-01 8:02 ` [PATCH v2 3/4] jump_label: Provide hotplug context variants Marc Zyngier
2017-08-01 8:02 ` [PATCH v2 4/4] clocksource/arm_arch_timer: Use static_branch_enable_cpuslocked() Marc Zyngier
3 siblings, 0 replies; 7+ messages in thread
From: Marc Zyngier @ 2017-08-01 8:02 UTC (permalink / raw)
To: linux-arm-kernel
In order to later introduce an "already locked" version of some
of the static key functions, let's split the code into the core stuff
(the *_cpuslocked functions) and the usual helpers, which now
take/release the hotplug lock and call into the _cpuslocked
versions.
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
---
kernel/jump_label.c | 28 +++++++++++++++++++---------
1 file changed, 19 insertions(+), 9 deletions(-)
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index f11b10091100..0ab8b35daa54 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -101,11 +101,10 @@ void static_key_disable(struct static_key *key)
}
EXPORT_SYMBOL_GPL(static_key_disable);
-void static_key_slow_inc(struct static_key *key)
+static void static_key_slow_inc_cpuslocked(struct static_key *key)
{
int v, v1;
- cpus_read_lock();
STATIC_KEY_CHECK_USE();
/*
@@ -122,10 +121,8 @@ void static_key_slow_inc(struct static_key *key)
*/
for (v = atomic_read(&key->enabled); v > 0; v = v1) {
v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
- if (likely(v1 == v)) {
- cpus_read_unlock();
+ if (likely(v1 == v))
return;
- }
}
jump_label_lock();
@@ -137,14 +134,20 @@ void static_key_slow_inc(struct static_key *key)
atomic_inc(&key->enabled);
}
jump_label_unlock();
+}
+
+void static_key_slow_inc(struct static_key *key)
+{
+ cpus_read_lock();
+ static_key_slow_inc_cpuslocked(key);
cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);
-static void __static_key_slow_dec(struct static_key *key,
- unsigned long rate_limit, struct delayed_work *work)
+static void static_key_slow_dec_cpuslocked(struct static_key *key,
+ unsigned long rate_limit,
+ struct delayed_work *work)
{
- cpus_read_lock();
/*
* The negative count check is valid even when a negative
* key->enabled is in use by static_key_slow_inc(); a
@@ -155,7 +158,6 @@ static void __static_key_slow_dec(struct static_key *key,
if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
WARN(atomic_read(&key->enabled) < 0,
"jump label: negative count!\n");
- cpus_read_unlock();
return;
}
@@ -166,6 +168,14 @@ static void __static_key_slow_dec(struct static_key *key,
jump_label_update(key);
}
jump_label_unlock();
+}
+
+static void __static_key_slow_dec(struct static_key *key,
+ unsigned long rate_limit,
+ struct delayed_work *work)
+{
+ cpus_read_lock();
+ static_key_slow_dec_cpuslocked(key, rate_limit, work);
cpus_read_unlock();
}
--
2.11.0
^ permalink raw reply related [flat|nested] 7+ messages in thread
* [PATCH v2 3/4] jump_label: Provide hotplug context variants
2017-08-01 8:02 [PATCH v2 0/4] Working around CPU hotplug and static keys locking Marc Zyngier
2017-08-01 8:02 ` [PATCH v2 1/4] jump_label: Move cpu hotplug locking Marc Zyngier
2017-08-01 8:02 ` [PATCH v2 2/4] jump_label: Split out code under the hotplug lock Marc Zyngier
@ 2017-08-01 8:02 ` Marc Zyngier
2017-08-02 14:37 ` Peter Zijlstra
2017-08-01 8:02 ` [PATCH v2 4/4] clocksource/arm_arch_timer: Use static_branch_enable_cpuslocked() Marc Zyngier
3 siblings, 1 reply; 7+ messages in thread
From: Marc Zyngier @ 2017-08-01 8:02 UTC (permalink / raw)
To: linux-arm-kernel
As using the normal static key API under the hotplug lock is
pretty much impossible, let's provide a variant of some of them
that require the hotplug lock to have already been taken.
These functions are only meant to be used in CPU hotplug callbacks.
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
---
Documentation/static-keys.txt | 15 +++++++++++++++
include/linux/jump_label.h | 11 +++++++++--
kernel/jump_label.c | 20 ++++++++++++++++++++
3 files changed, 44 insertions(+), 2 deletions(-)
diff --git a/Documentation/static-keys.txt b/Documentation/static-keys.txt
index b83dfa1c0602..6d0a181e74d2 100644
--- a/Documentation/static-keys.txt
+++ b/Documentation/static-keys.txt
@@ -149,6 +149,21 @@ static_branch_inc(), will change the branch back to true. Likewise, if the
key is initialized false, a 'static_branch_inc()', will change the branch to
true. And then a 'static_branch_dec()', will again make the branch false.
+Note that switching branches results in some locks being taken,
+particularly the CPU hotplug lock (in order to avoid races against
+CPUs being brought in the kernel whilst the kernel is getting
+patched). Calling the static key API from within a hotplug notifier is
+thus a sure deadlock recipe. In order to still allow use of the
+functionnality, the following functions are provided:
+
+ static_key_enable_cpuslocked()
+ static_key_disable_cpuslocked()
+ static_branch_enable_cpuslocked()
+ static_branch_disable_cpuslocked()
+
+These functions are *not* general purpose, and must only be used when
+you really know that you're in the above context, and no other.
+
Where an array of keys is required, it can be defined as::
DEFINE_STATIC_KEY_ARRAY_TRUE(keys, count);
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 2afd74b9d844..c5e5d2ffce58 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -163,6 +163,8 @@ extern void jump_label_apply_nops(struct module *mod);
extern int static_key_count(struct static_key *key);
extern void static_key_enable(struct static_key *key);
extern void static_key_disable(struct static_key *key);
+extern void static_key_enable_cpuslocked(struct static_key *key);
+extern void static_key_disable_cpuslocked(struct static_key *key);
/*
* We should be using ATOMIC_INIT() for initializing .enabled, but
@@ -252,6 +254,9 @@ static inline void static_key_disable(struct static_key *key)
static_key_slow_dec(key);
}
+#define static_key_enable_cpuslocked(k) static_key_enable((k))
+#define static_key_disable_cpuslocked(k) static_key_disable((k))
+
#define STATIC_KEY_INIT_TRUE { .enabled = ATOMIC_INIT(1) }
#define STATIC_KEY_INIT_FALSE { .enabled = ATOMIC_INIT(0) }
@@ -413,8 +418,10 @@ extern bool ____wrong_branch_error(void);
* Normal usage; boolean enable/disable.
*/
-#define static_branch_enable(x) static_key_enable(&(x)->key)
-#define static_branch_disable(x) static_key_disable(&(x)->key)
+#define static_branch_enable(x) static_key_enable(&(x)->key)
+#define static_branch_disable(x) static_key_disable(&(x)->key)
+#define static_branch_enable_cpuslocked(x) static_key_enable_cpuslocked(&(x)->key)
+#define static_branch_disable_cpuslocked(x) static_key_disable_cpuslocked(&(x)->key)
#endif /* __ASSEMBLY__ */
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 0ab8b35daa54..4e7fa067ca17 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -170,6 +170,26 @@ static void static_key_slow_dec_cpuslocked(struct static_key *key,
jump_label_unlock();
}
+void static_key_enable_cpuslocked(struct static_key *key)
+{
+ int count = static_key_count(key);
+
+ WARN_ON_ONCE(count < 0 || count > 1);
+
+ if (!count)
+ static_key_slow_inc_cpuslocked(key);
+}
+
+void static_key_disable_cpuslocked(struct static_key *key)
+{
+ int count = static_key_count(key);
+
+ WARN_ON_ONCE(count < 0 || count > 1);
+
+ if (count)
+ static_key_slow_dec_cpuslocked(key, 0, NULL);
+}
+
static void __static_key_slow_dec(struct static_key *key,
unsigned long rate_limit,
struct delayed_work *work)
--
2.11.0
^ permalink raw reply related [flat|nested] 7+ messages in thread
* [PATCH v2 3/4] jump_label: Provide hotplug context variants
2017-08-01 8:02 ` [PATCH v2 3/4] jump_label: Provide hotplug context variants Marc Zyngier
@ 2017-08-02 14:37 ` Peter Zijlstra
2017-08-02 15:02 ` Marc Zyngier
0 siblings, 1 reply; 7+ messages in thread
From: Peter Zijlstra @ 2017-08-02 14:37 UTC (permalink / raw)
To: linux-arm-kernel
On Tue, Aug 01, 2017 at 09:02:56AM +0100, Marc Zyngier wrote:
> As using the normal static key API under the hotplug lock is
> pretty much impossible, let's provide a variant of some of them
> that require the hotplug lock to have already been taken.
>
> These function are only meant to be used in CPU hotplug callbacks.
>
> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
> ---
> Documentation/static-keys.txt | 15 +++++++++++++++
> include/linux/jump_label.h | 11 +++++++++--
> kernel/jump_label.c | 20 ++++++++++++++++++++
> 3 files changed, 44 insertions(+), 2 deletions(-)
I stuffed them in my locking/core branch on top of Paolo's patches:
https://lkml.kernel.org/r/1501601046-35683-1-git-send-email-pbonzini at redhat.com
This patch now looks like so.
---
Subject: jump_label: Provide hotplug context variants
From: Marc Zyngier <marc.zyngier@arm.com>
Date: Tue, 1 Aug 2017 09:02:56 +0100
As using the normal static key API under the hotplug lock is
pretty much impossible, let's provide a variant of some of them
that require the hotplug lock to have already been taken.
These functions are only meant to be used in CPU hotplug callbacks.
Cc: linux-arm-kernel at lists.infradead.org
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20170801080257.5056-4-marc.zyngier at arm.com
---
Documentation/static-keys.txt | 15 +++++++++++++++
include/linux/jump_label.h | 11 +++++++++--
kernel/jump_label.c | 22 ++++++++++++++++++----
3 files changed, 42 insertions(+), 6 deletions(-)
--- a/Documentation/static-keys.txt
+++ b/Documentation/static-keys.txt
@@ -154,6 +154,21 @@ and 'static_key_count()'. In general, i
should be protected with the same mutex used around the enable/disable
or increment/decrement function.
+Note that switching branches results in some locks being taken,
+particularly the CPU hotplug lock (in order to avoid races against
+CPUs being brought in the kernel whilst the kernel is getting
+patched). Calling the static key API from within a hotplug notifier is
+thus a sure deadlock recipe. In order to still allow use of the
+functionnality, the following functions are provided:
+
+ static_key_enable_cpuslocked()
+ static_key_disable_cpuslocked()
+ static_branch_enable_cpuslocked()
+ static_branch_disable_cpuslocked()
+
+These functions are *not* general purpose, and must only be used when
+you really know that you're in the above context, and no other.
+
Where an array of keys is required, it can be defined as::
DEFINE_STATIC_KEY_ARRAY_TRUE(keys, count);
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -163,6 +163,8 @@ extern void jump_label_apply_nops(struct
extern int static_key_count(struct static_key *key);
extern void static_key_enable(struct static_key *key);
extern void static_key_disable(struct static_key *key);
+extern void static_key_enable_cpuslocked(struct static_key *key);
+extern void static_key_disable_cpuslocked(struct static_key *key);
/*
* We should be using ATOMIC_INIT() for initializing .enabled, but
@@ -254,6 +256,9 @@ static inline void static_key_disable(st
atomic_set(&key->enabled, 0);
}
+#define static_key_enable_cpuslocked(k) static_key_enable((k))
+#define static_key_disable_cpuslocked(k) static_key_disable((k))
+
#define STATIC_KEY_INIT_TRUE { .enabled = ATOMIC_INIT(1) }
#define STATIC_KEY_INIT_FALSE { .enabled = ATOMIC_INIT(0) }
@@ -415,8 +420,10 @@ extern bool ____wrong_branch_error(void)
* Normal usage; boolean enable/disable.
*/
-#define static_branch_enable(x) static_key_enable(&(x)->key)
-#define static_branch_disable(x) static_key_disable(&(x)->key)
+#define static_branch_enable(x) static_key_enable(&(x)->key)
+#define static_branch_disable(x) static_key_disable(&(x)->key)
+#define static_branch_enable_cpuslocked(x) static_key_enable_cpuslocked(&(x)->key)
+#define static_branch_disable_cpuslocked(x) static_key_disable_cpuslocked(&(x)->key)
#endif /* __ASSEMBLY__ */
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -126,15 +126,15 @@ void static_key_slow_inc(struct static_k
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);
-void static_key_enable(struct static_key *key)
+void static_key_enable_cpuslocked(struct static_key *key)
{
STATIC_KEY_CHECK_USE();
+
if (atomic_read(&key->enabled) > 0) {
WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
return;
}
- cpus_read_lock();
jump_label_lock();
if (atomic_read(&key->enabled) == 0) {
atomic_set(&key->enabled, -1);
@@ -145,23 +145,37 @@ void static_key_enable(struct static_key
atomic_set_release(&key->enabled, 1);
}
jump_label_unlock();
+}
+EXPORT_SYMBOL_GPL(static_key_enable_cpuslocked);
+
+void static_key_enable(struct static_key *key)
+{
+ cpus_read_lock();
+ static_key_enable_cpuslocked(key);
cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable);
-void static_key_disable(struct static_key *key)
+void static_key_disable_cpuslocked(struct static_key *key)
{
STATIC_KEY_CHECK_USE();
+
if (atomic_read(&key->enabled) != 1) {
WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
return;
}
- cpus_read_lock();
jump_label_lock();
if (atomic_cmpxchg(&key->enabled, 1, 0))
jump_label_update(key);
jump_label_unlock();
+}
+EXPORT_SYMBOL_GPL(static_key_disable_cpuslocked);
+
+void static_key_disable(struct static_key *key)
+{
+ cpus_read_lock();
+ static_key_disable_cpuslocked(key);
cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable);
^ permalink raw reply [flat|nested] 7+ messages in thread
* [PATCH v2 3/4] jump_label: Provide hotplug context variants
2017-08-02 14:37 ` Peter Zijlstra
@ 2017-08-02 15:02 ` Marc Zyngier
0 siblings, 0 replies; 7+ messages in thread
From: Marc Zyngier @ 2017-08-02 15:02 UTC (permalink / raw)
To: linux-arm-kernel
On 02/08/17 15:37, Peter Zijlstra wrote:
> On Tue, Aug 01, 2017 at 09:02:56AM +0100, Marc Zyngier wrote:
>> As using the normal static key API under the hotplug lock is
>> pretty much impossible, let's provide a variant of some of them
>> that require the hotplug lock to have already been taken.
>>
>> These function are only meant to be used in CPU hotplug callbacks.
>>
>> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
>> ---
>> Documentation/static-keys.txt | 15 +++++++++++++++
>> include/linux/jump_label.h | 11 +++++++++--
>> kernel/jump_label.c | 20 ++++++++++++++++++++
>> 3 files changed, 44 insertions(+), 2 deletions(-)
>
> I stuffed them in my locking/core branch on top of Paolo's patches:
>
> https://lkml.kernel.org/r/1501601046-35683-1-git-send-email-pbonzini at redhat.com
>
> This patch now looks like so.
>
> ---
> Subject: jump_label: Provide hotplug context variants
> From: Marc Zyngier <marc.zyngier@arm.com>
> Date: Tue, 1 Aug 2017 09:02:56 +0100
>
> As using the normal static key API under the hotplug lock is
> pretty much impossible, let's provide a variant of some of them
> that require the hotplug lock to have already been taken.
>
> These function are only meant to be used in CPU hotplug callbacks.
>
> Cc: linux-arm-kernel at lists.infradead.org
> Cc: Leo Yan <leo.yan@linaro.org>
> Cc: Thomas Gleixner <tglx@linutronix.de>
> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
> Link: http://lkml.kernel.org/r/20170801080257.5056-4-marc.zyngier at arm.com
> ---
> Documentation/static-keys.txt | 15 +++++++++++++++
> include/linux/jump_label.h | 11 +++++++++--
> kernel/jump_label.c | 22 ++++++++++++++++++----
> 3 files changed, 42 insertions(+), 6 deletions(-)
>
> --- a/Documentation/static-keys.txt
> +++ b/Documentation/static-keys.txt
> @@ -154,6 +154,21 @@ and 'static_key_count()'. In general, i
> should be protected with the same mutex used around the enable/disable
> or increment/decrement function.
>
> +Note that switching branches results in some locks being taken,
> +particularly the CPU hotplug lock (in order to avoid races against
> +CPUs being brought in the kernel whilst the kernel is getting
> +patched). Calling the static key API from within a hotplug notifier is
> +thus a sure deadlock recipe. In order to still allow use of the
> +functionnality, the following functions are provided:
> +
> + static_key_enable_cpuslocked()
> + static_key_disable_cpuslocked()
> + static_branch_enable_cpuslocked()
> + static_branch_disable_cpuslocked()
> +
> +These functions are *not* general purpose, and must only be used when
> +you really know that you're in the above context, and no other.
> +
> Where an array of keys is required, it can be defined as::
>
> DEFINE_STATIC_KEY_ARRAY_TRUE(keys, count);
> --- a/include/linux/jump_label.h
> +++ b/include/linux/jump_label.h
> @@ -163,6 +163,8 @@ extern void jump_label_apply_nops(struct
> extern int static_key_count(struct static_key *key);
> extern void static_key_enable(struct static_key *key);
> extern void static_key_disable(struct static_key *key);
> +extern void static_key_enable_cpuslocked(struct static_key *key);
> +extern void static_key_disable_cpuslocked(struct static_key *key);
>
> /*
> * We should be using ATOMIC_INIT() for initializing .enabled, but
> @@ -254,6 +256,9 @@ static inline void static_key_disable(st
> atomic_set(&key->enabled, 0);
> }
>
> +#define static_key_enable_cpuslocked(k) static_key_enable((k))
> +#define static_key_disable_cpuslocked(k) static_key_disable((k))
> +
> #define STATIC_KEY_INIT_TRUE { .enabled = ATOMIC_INIT(1) }
> #define STATIC_KEY_INIT_FALSE { .enabled = ATOMIC_INIT(0) }
>
> @@ -415,8 +420,10 @@ extern bool ____wrong_branch_error(void)
> * Normal usage; boolean enable/disable.
> */
>
> -#define static_branch_enable(x) static_key_enable(&(x)->key)
> -#define static_branch_disable(x) static_key_disable(&(x)->key)
> +#define static_branch_enable(x) static_key_enable(&(x)->key)
> +#define static_branch_disable(x) static_key_disable(&(x)->key)
> +#define static_branch_enable_cpuslocked(x) static_key_enable_cpuslocked(&(x)->key)
> +#define static_branch_disable_cpuslocked(x) static_key_disable_cpuslocked(&(x)->key)
>
> #endif /* __ASSEMBLY__ */
>
> --- a/kernel/jump_label.c
> +++ b/kernel/jump_label.c
> @@ -126,15 +126,15 @@ void static_key_slow_inc(struct static_k
> }
> EXPORT_SYMBOL_GPL(static_key_slow_inc);
>
> -void static_key_enable(struct static_key *key)
> +void static_key_enable_cpuslocked(struct static_key *key)
> {
> STATIC_KEY_CHECK_USE();
> +
> if (atomic_read(&key->enabled) > 0) {
> WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
> return;
> }
>
> - cpus_read_lock();
> jump_label_lock();
> if (atomic_read(&key->enabled) == 0) {
> atomic_set(&key->enabled, -1);
> @@ -145,23 +145,37 @@ void static_key_enable(struct static_key
> atomic_set_release(&key->enabled, 1);
> }
> jump_label_unlock();
> +}
> +EXPORT_SYMBOL_GPL(static_key_enable_cpuslocked);
> +
> +void static_key_enable(struct static_key *key)
> +{
> + cpus_read_lock();
> + static_key_enable_cpuslocked(key);
> cpus_read_unlock();
> }
> EXPORT_SYMBOL_GPL(static_key_enable);
>
> -void static_key_disable(struct static_key *key)
> +void static_key_disable_cpuslocked(struct static_key *key)
> {
> STATIC_KEY_CHECK_USE();
> +
> if (atomic_read(&key->enabled) != 1) {
> WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
> return;
> }
>
> - cpus_read_lock();
> jump_label_lock();
> if (atomic_cmpxchg(&key->enabled, 1, 0))
> jump_label_update(key);
> jump_label_unlock();
> +}
> +EXPORT_SYMBOL_GPL(static_key_disable_cpuslocked);
> +
> +void static_key_disable(struct static_key *key)
> +{
> + cpus_read_lock();
> + static_key_disable_cpuslocked(key);
> cpus_read_unlock();
> }
> EXPORT_SYMBOL_GPL(static_key_disable);
>
I've just tried this on one of the affected systems, and it booted just
fine. So thumbs up from me.
Thanks again,
M.
--
Jazz is not dead. It just smells funny...
^ permalink raw reply [flat|nested] 7+ messages in thread
* [PATCH v2 4/4] clocksource/arm_arch_timer: Use static_branch_enable_cpuslocked()
2017-08-01 8:02 [PATCH v2 0/4] Working around CPU hotplug and static keys locking Marc Zyngier
` (2 preceding siblings ...)
2017-08-01 8:02 ` [PATCH v2 3/4] jump_label: Provide hotplug context variants Marc Zyngier
@ 2017-08-01 8:02 ` Marc Zyngier
3 siblings, 0 replies; 7+ messages in thread
From: Marc Zyngier @ 2017-08-01 8:02 UTC (permalink / raw)
To: linux-arm-kernel
Use the new static_branch_enable_cpuslocked function to switch
the workaround static key on the CPU hotplug path.
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
---
drivers/clocksource/arm_arch_timer.c | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index aae87c4c546e..c62e71614c75 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -455,7 +455,11 @@ void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa
per_cpu(timer_unstable_counter_workaround, i) = wa;
}
- static_branch_enable(&arch_timer_read_ool_enabled);
+ /*
+ * Use the locked version, as we're called from the CPU
+ * hotplug framework. Otherwise, we end-up in deadlock-land.
+ */
+ static_branch_enable_cpuslocked(&arch_timer_read_ool_enabled);
/*
* Don't use the vdso fastpath if errata require using the
--
2.11.0
^ permalink raw reply related [flat|nested] 7+ messages in thread