From: Will Deacon <will@kernel.org>
To: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org, Will Deacon <will@kernel.org>,
Frederic Weisbecker <fweisbec@gmail.com>,
Thomas Gleixner <tglx@linutronix.de>,
Marc Zyngier <maz@kernel.org>,
Lorenzo Colitti <lorenzo@google.com>,
John Stultz <john.stultz@linaro.org>,
Stephen Boyd <sboyd@kernel.org>,
kernel-team@android.com
Subject: [PATCH 3/5] tick/broadcast: Prefer per-cpu oneshot wakeup timers to broadcast
Date: Thu, 20 May 2021 19:47:03 +0100
Message-ID: <20210520184705.10845-4-will@kernel.org>
In-Reply-To: <20210520184705.10845-1-will@kernel.org>

Some SoCs have two per-cpu timer implementations where the timer with
the higher rating stops in deep idle (i.e. suffers from
CLOCK_EVT_FEAT_C3STOP) but is otherwise preferable to the timer with the
lower rating. In such a design, we rely on a global broadcast timer and
IPIs to wake up from deep idle states.
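
As a concrete (hypothetical) illustration of such a design, the two
timers might be registered roughly as follows; the names, ratings and
callbacks here are made up for the example and are not taken from any
real driver:

	/* Hypothetical sketch, not part of this series. */
	static DEFINE_PER_CPU(struct clock_event_device, fast_evt) = {
		.name		= "fast-timer",
		.rating		= 450,
		.features	= CLOCK_EVT_FEAT_ONESHOT |
				  CLOCK_EVT_FEAT_C3STOP, /* stops in deep idle */
		.set_next_event	= fast_timer_set_next_event, /* hypothetical */
	};

	static DEFINE_PER_CPU(struct clock_event_device, slow_evt) = {
		.name		= "slow-timer",
		.rating		= 100, /* lower rating, but always-on */
		.features	= CLOCK_EVT_FEAT_ONESHOT,
		.set_next_event	= slow_timer_set_next_event, /* hypothetical */
	};

Each device's ->cpumask would be set to cpumask_of(cpu) when it is
registered on the relevant CPU.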
To avoid relying on a global broadcast timer, and to reduce the
overhead of the IPI wakeups, extend tick_install_broadcast_device()
to manage per-cpu wakeup timers separately from the broadcast device.
For now, these timers remain unused.
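
With the selection logic added below, the lower-rated always-on timer
in a design like the one sketched above would be installed as the
per-cpu oneshot wakeup device instead of being considered for the
broadcast slot. Roughly, a new device is accepted as the wakeup timer
for a CPU iff:

	/* Paraphrasing tick_set_oneshot_wakeup_device() below: */
	(newdev->features & CLOCK_EVT_FEAT_ONESHOT) &&
	!(newdev->features & (CLOCK_EVT_FEAT_DUMMY | CLOCK_EVT_FEAT_C3STOP)) &&
	cpumask_equal(newdev->cpumask, cpumask_of(cpu)) &&
	(!curdev || newdev->rating > curdev->rating)
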
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Lorenzo Colitti <lorenzo@google.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: Stephen Boyd <sboyd@kernel.org>
Signed-off-by: Will Deacon <will@kernel.org>
---
kernel/time/tick-broadcast.c | 57 +++++++++++++++++++++++++++++++++++-
kernel/time/tick-common.c | 2 +-
kernel/time/tick-internal.h | 4 +--
3 files changed, 59 insertions(+), 4 deletions(-)
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index f3f2f4ba4321..8bd8cd69c8c9 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -33,6 +33,8 @@ static int tick_broadcast_forced;
static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
#ifdef CONFIG_TICK_ONESHOT
+static DEFINE_PER_CPU(struct clock_event_device *, tick_oneshot_wakeup_device);
+
static void tick_broadcast_setup_oneshot(struct clock_event_device *bc);
static void tick_broadcast_clear_oneshot(int cpu);
static void tick_resume_broadcast_oneshot(struct clock_event_device *bc);
@@ -88,13 +90,59 @@ static bool tick_check_broadcast_device(struct clock_event_device *curdev,
return !curdev || newdev->rating > curdev->rating;
}
+#ifdef CONFIG_TICK_ONESHOT
+static struct clock_event_device *tick_get_oneshot_wakeup_device(int cpu)
+{
+ return per_cpu(tick_oneshot_wakeup_device, cpu);
+}
+
+static bool tick_set_oneshot_wakeup_device(struct clock_event_device *newdev,
+ int cpu)
+{
+ struct clock_event_device *curdev;
+
+ if (!newdev)
+ goto set_device;
+
+ if ((newdev->features & CLOCK_EVT_FEAT_DUMMY) ||
+ (newdev->features & CLOCK_EVT_FEAT_C3STOP) ||
+ !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
+ return false;
+
+ if (!cpumask_equal(newdev->cpumask, cpumask_of(cpu)))
+ return false;
+
+ curdev = tick_get_oneshot_wakeup_device(cpu);
+ if (curdev && newdev->rating <= curdev->rating)
+ return false;
+
+set_device:
+ per_cpu(tick_oneshot_wakeup_device, cpu) = newdev;
+ return true;
+}
+#else
+static struct clock_event_device *tick_get_oneshot_wakeup_device(int cpu)
+{
+ return NULL;
+}
+
+static bool tick_set_oneshot_wakeup_device(struct clock_event_device *newdev,
+ int cpu)
+{
+ return false;
+}
+#endif
+
/*
* Conditionally install/replace broadcast device
*/
-void tick_install_broadcast_device(struct clock_event_device *dev)
+void tick_install_broadcast_device(struct clock_event_device *dev, int cpu)
{
struct clock_event_device *cur = tick_broadcast_device.evtdev;
+ if (tick_set_oneshot_wakeup_device(dev, cpu))
+ return;
+
if (!tick_check_broadcast_device(cur, dev))
return;
@@ -996,6 +1044,13 @@ void hotplug_cpu__broadcast_tick_pull(int deadcpu)
*/
static void tick_broadcast_oneshot_offline(unsigned int cpu)
{
+ struct clock_event_device *dev = tick_get_oneshot_wakeup_device(cpu);
+
+ if (dev) {
+ clockevents_switch_state(dev, CLOCK_EVT_STATE_DETACHED);
+ tick_set_oneshot_wakeup_device(NULL, cpu);
+ }
+
/*
* Clear the broadcast masks for the dead cpu, but do not stop
* the broadcast device!
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index e15bc0ef1912..d663249652ef 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -373,7 +373,7 @@ void tick_check_new_device(struct clock_event_device *newdev)
/*
* Can the new device be used as a broadcast device ?
*/
- tick_install_broadcast_device(newdev);
+ tick_install_broadcast_device(newdev, cpu);
}
/**
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 7a981c9e87a4..30c89639e305 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -61,7 +61,7 @@ extern ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt);
/* Broadcasting support */
# ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
extern int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu);
-extern void tick_install_broadcast_device(struct clock_event_device *dev);
+extern void tick_install_broadcast_device(struct clock_event_device *dev, int cpu);
extern int tick_is_broadcast_device(struct clock_event_device *dev);
extern void tick_suspend_broadcast(void);
extern void tick_resume_broadcast(void);
@@ -72,7 +72,7 @@ extern int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq);
extern struct tick_device *tick_get_broadcast_device(void);
extern struct cpumask *tick_get_broadcast_mask(void);
# else /* !CONFIG_GENERIC_CLOCKEVENTS_BROADCAST: */
-static inline void tick_install_broadcast_device(struct clock_event_device *dev) { }
+static inline void tick_install_broadcast_device(struct clock_event_device *dev, int cpu) { }
static inline int tick_is_broadcast_device(struct clock_event_device *dev) { return 0; }
static inline int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu) { return 0; }
static inline void tick_do_periodic_broadcast(struct clock_event_device *d) { }
--
2.31.1.818.g46aad6cb9e-goog