From mboxrd@z Thu Jan 1 00:00:00 1970
Return-Path: 
Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand
	id S933791AbcFQN2v (ORCPT );
	Fri, 17 Jun 2016 09:28:51 -0400
Received: from www.linutronix.de ([62.245.132.108]:51050 "EHLO
	Galois.linutronix.de" rhost-flags-OK-OK-OK-OK) by vger.kernel.org
	with ESMTP id S933758AbcFQN2q (ORCPT );
	Fri, 17 Jun 2016 09:28:46 -0400
Message-Id: <20160617131004.060581803@linutronix.de>
User-Agent: quilt/0.63-1
Date: Fri, 17 Jun 2016 13:26:42 -0000
From: Thomas Gleixner 
To: LKML 
Cc: Ingo Molnar , Peter Zijlstra , "Paul E. McKenney" ,
	Eric Dumazet , Frederic Weisbecker , Chris Mason ,
	Arjan van de Ven , rt@linutronix.de, Rik van Riel ,
	Linus Torvalds , George Spelvin , Len Brown ,
	Anna-Maria Gleixner 
Subject: [patch V2 15/20] timer: Optimize collect timers for NOHZ
References: <20160617121134.417319325@linutronix.de>
MIME-Version: 1.0
Content-Type: text/plain; charset=ISO-8859-15
Content-Disposition: inline;
	filename=timer_Optimize_collect_timers_for_NOHZ.patch
X-Linutronix-Spam-Score: -1.0
X-Linutronix-Spam-Level: -
X-Linutronix-Spam-Status: No , -1.0 points, 5.0 required,
	ALL_TRUSTED=-1,SHORTCIRCUIT=-0.0001,URIBL_BLOCKED=0.001
Sender: linux-kernel-owner@vger.kernel.org
List-ID: 
X-Mailing-List: linux-kernel@vger.kernel.org

From: Anna-Maria Gleixner 

After a NOHZ idle sleep the timer wheel must be forwarded to current
jiffies. There might be expired timers, so the current code loops and
checks the expired buckets for timers. This can take quite some time for
long NOHZ idle periods.

The pending bitmask in the timer base allows us to do a quick search for
the next expiring timer, and therefore a fast forward of the base time,
which prevents pointless long lasting loops.

For a 3 second idle sleep this reduces the catchup time from ~1ms to 5us.

Signed-off-by: Anna-Maria Gleixner 
Signed-off-by: Thomas Gleixner 
Cc: Peter Zijlstra 
Cc: Frederic Weisbecker 
Cc: Chris Mason 
Cc: Eric Dumazet 
Cc: rt@linutronix.de
Cc: "Paul E. McKenney" 
Cc: Arjan van de Ven 
---
 kernel/time/timer.c |   52 ++++++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 44 insertions(+), 8 deletions(-)

--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1246,8 +1246,8 @@ static void expire_timers(struct timer_b
 	}
 }
 
-static int collect_expired_timers(struct timer_base *base,
-				  struct hlist_head *heads)
+static int __collect_expired_timers(struct timer_base *base,
+				    struct hlist_head *heads)
 {
 	unsigned long clk = base->clk >> BASE_CLK_SHIFT;
 	struct hlist_head *vec;
@@ -1273,9 +1273,9 @@ static int collect_expired_timers(struct
 
 #ifdef CONFIG_NO_HZ_COMMON
 /*
- * Find the next pending bucket of a level. Search from @offset + @clk upwards
- * and if nothing there, search from start of the level (@offset) up to
- * @offset + clk.
+ * Find the next pending bucket of a level. Search from level start (@offset)
+ * + @clk upwards and if nothing there, search from start of the level
+ * (@offset) up to @offset + clk.
  */
 static int next_pending_bucket(struct timer_base *base, unsigned offset,
 			       unsigned clk)
@@ -1292,7 +1292,8 @@ static int next_pending_bucket(struct ti
 }
 
 /*
- * Search the first expiring timer in the various clock levels.
+ * Search the first expiring timer in the various clock levels. Caller must
+ * hold base->lock.
  *
  * Note: This implementation might be suboptimal vs. timers enqueued in the
  * cascade level because we do not look at the timers to figure out when
@@ -1305,7 +1306,6 @@ static unsigned long __next_timer_interr
 	unsigned long clk, next, adj;
 	unsigned lvl, offset = 0;
 
-	spin_lock(&base->lock);
 	next = BASE_RND_UP(base->clk + NEXT_TIMER_MAX_DELTA);
 	clk = base->clk >> BASE_CLK_SHIFT;
 	for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) {
@@ -1358,7 +1358,6 @@ static unsigned long __next_timer_interr
 		clk >>= LVL_CLK_SHIFT;
 		clk += adj;
 	}
-	spin_unlock(&base->lock);
 
 	return next;
 }
@@ -1416,7 +1415,10 @@ u64 get_next_timer_interrupt(unsigned lo
 	if (cpu_is_offline(smp_processor_id()))
 		return expires;
 
+	spin_lock(&base->lock);
 	nextevt = __next_timer_interrupt(base);
+	spin_unlock(&base->lock);
+
 	if (time_before_eq(nextevt, basej))
 		expires = basem;
 	else
@@ -1424,6 +1426,40 @@ u64 get_next_timer_interrupt(unsigned lo
 
 	return cmp_next_hrtimer_event(basem, expires);
 }
+
+static int collect_expired_timers(struct timer_base *base,
+				  struct hlist_head *heads)
+{
+	/*
+	 * NOHZ optimization. After a long idle sleep we need to forward the
+	 * base to current jiffies. Avoid a loop by searching the bitfield for
+	 * the next expiring timer.
+	 */
+	if ((long)(jiffies - base->clk) > 2 * BASE_INCR) {
+		unsigned long next = __next_timer_interrupt(base);
+
+		/*
+		 * If the next timer is ahead of time forward to current
+		 * jiffies, otherwise forward to the next expiry time.
+		 */
+		if (time_after(next, jiffies)) {
+			/*
+			 * We need to round down here as the call site will
+			 * increment clock once more.
+			 */
+			base->clk = BASE_RND_DN(jiffies);
+			return 0;
+		}
+		base->clk = next;
+	}
+	return __collect_expired_timers(base, heads);
+}
+#else
+static inline int collect_expired_timers(struct timer_base *base,
+					 struct hlist_head *heads)
+{
+	return __collect_expired_timers(base, heads);
+}
 #endif /*
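
For readers following along outside the kernel tree, the fast path added by
this patch can be modeled in a few lines of user-space C. The sketch below is
a toy single-level wheel with a 64-bucket pending bitmap; the names
(toy_base, next_pending_slot(), forward_clk()) are invented for illustration,
and the locking, the multi-level scan and the round-down handling of the real
wheel are deliberately omitted. It only demonstrates the core idea: one
bitmap scan replaces a per-tick catchup loop.

#include <stdio.h>

#define SLOTS 64UL			/* toy wheel: one level, 64 buckets */

struct toy_base {
	unsigned long clk;		/* last processed tick */
	unsigned long long pending;	/* bit n set: bucket n holds a timer */
};

/* Return the absolute tick of the first pending bucket at or after @clk. */
static long next_pending_slot(struct toy_base *b, unsigned long clk)
{
	unsigned long i;

	for (i = 0; i < SLOTS; i++) {
		unsigned long slot = (clk + i) % SLOTS;

		if (b->pending & (1ULL << slot))
			return (long)(clk + i);
	}
	return -1;			/* no timer armed at all */
}

/*
 * Forward the wheel clock after an idle gap: one bitmap scan instead of
 * (now - clk) per-tick iterations, mirroring the shape of the NOHZ fast
 * path in collect_expired_timers() above.
 */
static void forward_clk(struct toy_base *b, unsigned long now)
{
	long next = next_pending_slot(b, b->clk);

	if (next < 0 || (unsigned long)next > now)
		b->clk = now;		/* nothing expired: jump straight to now */
	else
		b->clk = (unsigned long)next; /* collect from the first expiry */
}

int main(void)
{
	struct toy_base b = { .clk = 3, .pending = 1ULL << 40 };

	forward_clk(&b, 1000);		/* woke up long after tick 3 */
	printf("clk forwarded to %lu\n", b.clk); /* 40, not 4, 5, 6, ... */
	return 0;
}

In the real patch the per-level scan is done by next_pending_bucket() on the
base's pending bitmask, and jiffies is rounded down (BASE_RND_DN) because the
call site increments base->clk once more; the toy skips both details.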