From: Thomas Gleixner <tglx@linutronix.de>
To: LKML <linux-kernel@vger.kernel.org>
Cc: Ingo Molnar <mingo@kernel.org>,
	Peter Zijlstra <peterz@infradead.org>,
	"Paul E. McKenney" <paulmck@linux.vnet.ibm.com>,
	Eric Dumazet <edumazet@google.com>,
	Frederic Weisbecker <fweisbec@gmail.com>,
	Chris Mason <clm@fb.com>, Arjan van de Ven <arjan@infradead.org>,
	rt@linutronix.de, Rik van Riel <riel@redhat.com>,
	Linus Torvalds <torvalds@linux-foundation.org>,
	George Spelvin <linux@sciencehorizons.net>,
	Len Brown <lenb@kernel.org>
Subject: [patch V2 17/20] timer: Forward wheel clock whenever possible
Date: Fri, 17 Jun 2016 13:26:44 -0000
Message-ID: <20160617131004.226303099@linutronix.de>
In-Reply-To: 20160617121134.417319325@linutronix.de


The wheel clock becomes stale when a CPU goes into a long idle sleep. This has
the side effect that timers which are queued against the stale clock end up in
the outer wheel levels, which results in coarser expiry granularity.

To solve this, we keep track of the idle state of the base and forward the
wheel clock whenever possible.
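
As a rough standalone model of the forwarding rule (not the kernel code itself:
the names wheel_base/wheel_forward_clk, the plain integer clocks and the lack
of locking and jiffies wraparound handling are all simplifications of this
sketch), the idea is that the clock of an idle base is pulled forward to "now",
unless a timer is already pending before "now", in which case we can only
forward up to that expiry:

#include <stdbool.h>
#include <stdio.h>

struct wheel_base {
	unsigned long clk;		/* last point the wheel was advanced to */
	unsigned long next_expiry;	/* earliest pending timer */
	bool is_idle;
};

/*
 * Forward a stale wheel clock, but never past the first pending expiry,
 * so already armed timers keep their position in the wheel.
 */
static void wheel_forward_clk(struct wheel_base *base, unsigned long now)
{
	if (!base->is_idle || now <= base->clk)
		return;

	if (base->next_expiry > now)
		base->clk = now;		/* nothing pending before now: catch up fully */
	else
		base->clk = base->next_expiry;	/* stop at the earliest pending timer */
}

int main(void)
{
	struct wheel_base base = { .clk = 1000, .next_expiry = 5000, .is_idle = true };

	/* The CPU slept for 3000 ticks; forward before a new short timer is queued */
	wheel_forward_clk(&base, 4000);
	printf("clk forwarded to %lu\n", base.clk);	/* 4000: a short timer now lands in an inner level */
	return 0;
}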

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Chris Mason <clm@fb.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: rt@linutronix.de
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Arjan van de Ven <arjan@infradead.org>

---
 kernel/time/tick-internal.h |    1 
 kernel/time/tick-sched.c    |   13 ++++
 kernel/time/timer.c         |  123 +++++++++++++++++++++++++++++++++++---------
 3 files changed, 114 insertions(+), 23 deletions(-)

--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -164,3 +164,4 @@ static inline void timers_update_migrati
 DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
 
 extern u64 get_next_timer_interrupt(unsigned long basej, u64 basem);
+void timer_clear_idle(void);
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -700,6 +700,12 @@ static ktime_t tick_nohz_stop_sched_tick
 	delta = next_tick - basemono;
 	if (delta <= (u64)TICK_NSEC) {
 		tick.tv64 = 0;
+
+		/*
+		 * Tell the timer code that the base is not idle, i.e. undo
+		 * the effect of get_next_timer_interrupt().
+		 */
+		timer_clear_idle();
 		/*
 		 * We've not stopped the tick yet, and there's a timer in the
 		 * next period, so no point in stopping it either, bail.
@@ -809,6 +815,12 @@ static void tick_nohz_restart_sched_tick
 	tick_do_update_jiffies64(now);
 	cpu_load_update_nohz_stop();
 
+	/*
+	 * Clear the timer idle flag, so we avoid IPIs on remote queueing and
+	 * the clock forward checks in the enqueue path.
+	 */
+	timer_clear_idle();
+
 	calc_load_exit_idle();
 	touch_softlockup_watchdog_sched();
 	/*
@@ -1025,6 +1037,7 @@ void tick_nohz_idle_exit(void)
 		tick_nohz_stop_idle(ts, now);
 
 	if (ts->tick_stopped) {
+		timer_clear_idle();
 		tick_nohz_restart_sched_tick(ts, now);
 		tick_nohz_account_idle_ticks(ts);
 	}
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -192,9 +192,11 @@ struct timer_base {
 	spinlock_t		lock;
 	struct timer_list	*running_timer;
 	unsigned long		clk;
+	unsigned long		next_expiry;
 	unsigned int		cpu;
 	bool			migration_enabled;
 	bool			nohz_active;
+	bool			is_idle;
 	DECLARE_BITMAP(pending_map, WHEEL_SIZE);
 	struct hlist_head	vectors[WHEEL_SIZE];
 } ____cacheline_aligned;
@@ -514,23 +516,27 @@ static void internal_add_timer(struct ti
 	__internal_add_timer(base, timer);
 
 	/*
-	 * Check whether the other CPU is in dynticks mode and needs
-	 * to be triggered to reevaluate the timer wheel.  We are
-	 * protected against the other CPU fiddling with the timer by
-	 * holding the timer base lock. This also makes sure that a
-	 * CPU on the way to stop its tick can not evaluate the timer
-	 * wheel.
-	 *
-	 * Spare the IPI for deferrable timers on idle targets though.
-	 * The next busy ticks will take care of it. Except full dynticks
-	 * require special care against races with idle_cpu(), lets deal
-	 * with that later.
-	 */
-	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active) {
-		if (!(timer->flags & TIMER_DEFERRABLE) ||
-		    tick_nohz_full_cpu(base->cpu))
-			wake_up_nohz_cpu(base->cpu);
-	}
+	 * We might have to IPI the remote CPU if the base is idle and the
+	 * timer is not deferrable. If the other cpu is on the way to idle
+	 * then it can't set base->is_idle as we hold base lock.
+	 */
+	if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->is_idle ||
+	    (timer->flags & TIMER_DEFERRABLE))
+		return;
+
+	/* Check whether this is the new first expiring timer */
+	if (time_after_eq(timer->expires, base->next_expiry))
+		return;
+	base->next_expiry = BASE_RND_UP(timer->expires);
+
+	/*
+	 * Check whether the other CPU is in dynticks mode and needs to be
+	 * triggered to reevaluate the timer wheel.  We are protected against
+	 * the other CPU fiddling with the timer by holding the timer base
+	 * lock.
+	 */
+	if (tick_nohz_full_cpu(base->cpu))
+		wake_up_nohz_cpu(base->cpu);
 }
 
 #ifdef CONFIG_TIMER_STATS
@@ -838,10 +844,11 @@ static inline struct timer_base *get_tim
 	return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK);
 }
 
-static inline struct timer_base *get_target_base(struct timer_base *base,
-						 unsigned tflags)
+#ifdef CONFIG_NO_HZ_COMMON
+static inline struct timer_base *__get_target_base(struct timer_base *base,
+						   unsigned tflags)
 {
-#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
+#ifdef CONFIG_SMP
 	if ((tflags & TIMER_PINNED) || !base->migration_enabled)
 		return get_timer_this_cpu_base(tflags);
 	return get_timer_cpu_base(tflags, get_nohz_timer_target());
@@ -850,6 +857,43 @@ static inline struct timer_base *get_tar
 #endif
 }
 
+static inline void forward_timer_base(struct timer_base *base)
+{
+	/*
+	 * We only forward the base when it's idle and we have a delta between
+	 * base clock and jiffies.
+	 */
+	if (!base->is_idle || (long) (jiffies - base->clk) < 2 * BASE_INCR)
+		return;
+
+	/*
+	 * If the next expiry value is > jiffies, then we fast forward to
+	 * jiffies otherwise we forward to the next expiry value.
+	 */
+	if (time_after(base->next_expiry, jiffies))
+		base->clk = BASE_RND_UP(jiffies);
+	else
+		base->clk = base->next_expiry;
+}
+#else
+static inline struct timer_base *__get_target_base(struct timer_base *base,
+						   unsigned tflags)
+{
+	return get_timer_this_cpu_base(tflags);
+}
+
+static inline void forward_timer_base(struct timer_base *base) { }
+#endif
+
+static inline struct timer_base *get_target_base(struct timer_base *base,
+						 unsigned tflags)
+{
+	struct timer_base *target = __get_target_base(base, tflags);
+
+	forward_timer_base(target);
+	return target;
+}
+
 /*
  * We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means
  * that all timers which are tied to this base are locked, and the base itself
@@ -1417,16 +1461,49 @@ u64 get_next_timer_interrupt(unsigned lo
 
 	spin_lock(&base->lock);
 	nextevt = __next_timer_interrupt(base);
-	spin_unlock(&base->lock);
+	base->next_expiry = nextevt;
+	/*
+	 * We have a fresh next event. Check whether we can forward the base.
+	 */
+	if (time_after(nextevt, jiffies))
+		base->clk = BASE_RND_UP(jiffies);
+	else if (time_after(nextevt, base->clk))
+		base->clk = nextevt;
 
-	if (time_before_eq(nextevt, basej))
+	if (time_before_eq(nextevt, basej)) {
 		expires = basem;
-	else
+		base->is_idle = false;
+	} else {
 		expires = basem + (nextevt - basej) * TICK_NSEC;
+		/*
+		 * If we expect to sleep more than a tick, mark the base idle.
+		 */
+		if ((expires - basem) > TICK_NSEC)
+			base->is_idle = true;
+	}
+	spin_unlock(&base->lock);
 
 	return cmp_next_hrtimer_event(basem, expires);
 }
 
+/**
+ * timer_clear_idle - Clear the idle state of the timer base
+ *
+ * Called with interrupts disabled
+ */
+void timer_clear_idle(void)
+{
+	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
+
+	/*
+	 * We do this unlocked. The worst outcome is a remote enqueue sending
+	 * a pointless IPI, but taking the lock would just make the window for
+	 * sending the IPI a few instructions smaller for the cost of taking
+	 * the lock in the exit from idle path.
+	 */
+	base->is_idle = false;
+}
+
 static int collect_expired_timers(struct timer_base *base,
 				  struct hlist_head *heads)
 {
