* [RFC][PATCH] cache __next_timer_interrupt result
@ 2009-07-21 18:25 Martin Schwidefsky
  2009-07-22 14:38 ` Thomas Gleixner
                   ` (2 more replies)
  0 siblings, 3 replies; 6+ messages in thread
From: Martin Schwidefsky @ 2009-07-21 18:25 UTC (permalink / raw)
  To: linux-kernel, Ingo Molnar, Thomas Gleixner, john stultz, Venki Pallipadi

From: Martin Schwidefsky <schwidefsky@de.ibm.com>

Each time a cpu goes to sleep on a NOHZ=y system the timer wheel is
searched for the next timer interrupt. It can take quite a few cycles
to find the next pending timer. This patch adds a field to tvec_base
that caches the result of __next_timer_interrupt. The hit ratio is
around 80% on my thinkpad under normal use; on a server I've seen
hit ratios from 5% to 95%, depending on the workload.

Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: john stultz <johnstul@us.ibm.com>
Cc: Venki Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
---
 kernel/timer.c |   24 +++++++++++++++++++++++-
 1 file changed, 23 insertions(+), 1 deletion(-)

Index: linux-2.6/kernel/timer.c
===================================================================
--- linux-2.6.orig/kernel/timer.c
+++ linux-2.6/kernel/timer.c
@@ -72,6 +72,7 @@ struct tvec_base {
 	spinlock_t lock;
 	struct timer_list *running_timer;
 	unsigned long timer_jiffies;
+	unsigned long next_timer;
 	struct tvec_root tv1;
 	struct tvec tv2;
 	struct tvec tv3;
@@ -622,6 +623,9 @@ __mod_timer(struct timer_list *timer, un
 
 	if (timer_pending(timer)) {
 		detach_timer(timer, 0);
+		if (timer->expires == base->next_timer &&
+		    !tbase_get_deferrable(timer->base))
+			base->next_timer = base->timer_jiffies;
 		ret = 1;
 	} else {
 		if (pending_only)
@@ -663,6 +667,9 @@ __mod_timer(struct timer_list *timer, un
 	}
 
 	timer->expires = expires;
+	if (timer->expires < base->next_timer &&
+	    !tbase_get_deferrable(timer->base))
+		base->next_timer = timer->expires;
 	internal_add_timer(base, timer);
 
 out_unlock:
@@ -781,6 +788,9 @@ void add_timer_on(struct timer_list *tim
 	spin_lock_irqsave(&base->lock, flags);
 	timer_set_base(timer, base);
 	debug_timer_activate(timer);
+	if (timer->expires < base->next_timer &&
+	    !tbase_get_deferrable(timer->base))
+		base->next_timer = timer->expires;
 	internal_add_timer(base, timer);
 	/*
 	 * Check whether the other CPU is idle and needs to be
@@ -817,6 +827,9 @@ int del_timer(struct timer_list *timer)
 		base = lock_timer_base(timer, &flags);
 		if (timer_pending(timer)) {
 			detach_timer(timer, 1);
+			if (timer->expires == base->next_timer &&
+			    !tbase_get_deferrable(timer->base))
+				base->next_timer = base->timer_jiffies;
 			ret = 1;
 		}
 		spin_unlock_irqrestore(&base->lock, flags);
@@ -850,6 +863,9 @@ int try_to_del_timer_sync(struct timer_l
 	ret = 0;
 	if (timer_pending(timer)) {
 		detach_timer(timer, 1);
+		if (timer->expires == base->next_timer &&
+		    !tbase_get_deferrable(timer->base))
+			base->next_timer = base->timer_jiffies;
 		ret = 1;
 	}
 out:
@@ -1134,7 +1150,9 @@ unsigned long get_next_timer_interrupt(u
 	unsigned long expires;
 
 	spin_lock(&base->lock);
-	expires = __next_timer_interrupt(base);
+	if (base->next_timer <= base->timer_jiffies)
+		base->next_timer = __next_timer_interrupt(base);
+	expires = base->next_timer;
 	spin_unlock(&base->lock);
 
 	if (time_before_eq(expires, now))
@@ -1523,6 +1541,7 @@ static int __cpuinit init_timers_cpu(int
 		INIT_LIST_HEAD(base->tv1.vec + j);
 
 	base->timer_jiffies = jiffies;
+	base->next_timer = base->timer_jiffies;
 	return 0;
 }
 
@@ -1535,6 +1554,9 @@ static void migrate_timer_list(struct tv
 		timer = list_first_entry(head, struct timer_list, entry);
 		detach_timer(timer, 0);
 		timer_set_base(timer, new_base);
+		if (timer->expires < new_base->next_timer &&
+		    !tbase_get_deferrable(timer->base))
+			new_base->next_timer = timer->expires;
 		internal_add_timer(new_base, timer);
 	}
 }
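
For readers unfamiliar with the timer wheel, the following is a minimal
user-space sketch of the cache-and-invalidate scheme the patch implements.
It is not kernel code and not part of the patch; all names (struct wheel,
scan_wheel(), on_add(), on_remove(), next_event()) are hypothetical
stand-ins for tvec_base, __next_timer_interrupt() and the add/del paths.
Adding a non-deferrable timer can only pull the cached expiry earlier;
removing or re-arming the cached timer marks the cache stale by resetting
it to the current tick; the idle-path lookup rescans the wheel only when
the cache is stale. Like v1 of the patch, the sketch uses plain integer
comparisons; the wraparound-safe variant is discussed further down the
thread.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical user-space stand-in for struct tvec_base. */
struct wheel {
	unsigned long now;		/* analogue of base->timer_jiffies */
	unsigned long next_cached;	/* analogue of base->next_timer    */
};

/* Stand-in for __next_timer_interrupt(): the expensive full scan. */
static unsigned long scan_wheel(struct wheel *w)
{
	/* ... walking tv1..tv5 would happen here ... */
	return w->now + 100;		/* dummy result for the sketch */
}

static void wheel_init(struct wheel *w, unsigned long now)
{
	w->now = now;
	w->next_cached = now;		/* cache <= now means "stale" */
}

/* Adding a timer can only move the next expiry earlier. */
static void on_add(struct wheel *w, unsigned long expires, bool deferrable)
{
	if (!deferrable && expires < w->next_cached)
		w->next_cached = expires;
}

/* Removing or re-arming the cached timer invalidates the cache. */
static void on_remove(struct wheel *w, unsigned long expires, bool deferrable)
{
	if (!deferrable && expires == w->next_cached)
		w->next_cached = w->now;	/* mark stale */
}

/* The NOHZ idle path: rescan only when the cache is stale. */
static unsigned long next_event(struct wheel *w)
{
	if (w->next_cached <= w->now)
		w->next_cached = scan_wheel(w);
	return w->next_cached;
}

int main(void)
{
	struct wheel w;

	wheel_init(&w, 1000);
	on_add(&w, 1050, false);
	printf("next event: %lu\n", next_event(&w));	/* hit: 1050 */
	on_remove(&w, 1050, false);
	printf("next event: %lu\n", next_event(&w));	/* miss, rescan: 1100 */
	return 0;
}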


* Re: [RFC][PATCH] cache __next_timer_interrupt result
  2009-07-21 18:25 [RFC][PATCH] cache __next_timer_interrupt result Martin Schwidefsky
@ 2009-07-22 14:38 ` Thomas Gleixner
  2009-07-22 16:02   ` Martin Schwidefsky
  2009-08-04 14:16 ` [tip:timers/core] timers: Cache " tip-bot for Martin Schwidefsky
  2009-08-04 18:30 ` tip-bot for Martin Schwidefsky
  2 siblings, 1 reply; 6+ messages in thread
From: Thomas Gleixner @ 2009-07-22 14:38 UTC (permalink / raw)
  To: Martin Schwidefsky
  Cc: linux-kernel, Ingo Molnar, john stultz, Venki Pallipadi

On Tue, 21 Jul 2009, Martin Schwidefsky wrote:

> From: Martin Schwidefsky <schwidefsky@de.ibm.com>
> 
> Each time a cpu goes to sleep on a NOHZ=y system the timer wheel is
> searched for the next timer interrupt. It can take quite a few cycles
> to find the next pending timer. This patch adds a field to tvec_base
> that caches the result of __next_timer_interrupt. The hit ratio is
> around 80% on my thinkpad under normal use, on a server I've seen

Nice, I like it.

> hit ratios from 5% to 95% dependent on the workload.

Which workloads result in lower hit ratios ? Heavy networking ?
 
Thanks,

	tglx


* Re: [RFC][PATCH] cache __next_timer_interrupt result
  2009-07-22 14:38 ` Thomas Gleixner
@ 2009-07-22 16:02   ` Martin Schwidefsky
  0 siblings, 0 replies; 6+ messages in thread
From: Martin Schwidefsky @ 2009-07-22 16:02 UTC (permalink / raw)
  To: Thomas Gleixner; +Cc: linux-kernel, Ingo Molnar, john stultz, Venki Pallipadi

On Wed, 22 Jul 2009 16:38:18 +0200 (CEST)
Thomas Gleixner <tglx@linutronix.de> wrote:

> On Tue, 21 Jul 2009, Martin Schwidefsky wrote:
> 
> > From: Martin Schwidefsky <schwidefsky@de.ibm.com>
> > 
> > Each time a cpu goes to sleep on a NOHZ=y system the timer wheel is
> > searched for the next timer interrupt. It can take quite a few cycles
> > to find the next pending timer. This patch adds a field to tvec_base
> > that caches the result of __next_timer_interrupt. The hit ratio is
> > around 80% on my thinkpad under normal use, on a server I've seen
> 
> Nice, I like it.

Thanks :-)
 
> > hit ratios from 5% to 95% dependent on the workload.
> 
> Which workloads result in lower hit ratios ? Heavy networking ?

The 5% case was a packet ping-pong over loopback between two cpus. So yes, networking.

-- 
blue skies,
   Martin.

"Reality continues to ruin my life." - Calvin.



* [tip:timers/core] timers: Cache __next_timer_interrupt result
  2009-07-21 18:25 [RFC][PATCH] cache __next_timer_interrupt result Martin Schwidefsky
  2009-07-22 14:38 ` Thomas Gleixner
@ 2009-08-04 14:16 ` tip-bot for Martin Schwidefsky
  2009-08-04 17:47   ` Martin Schwidefsky
  2009-08-04 18:30 ` tip-bot for Martin Schwidefsky
  2 siblings, 1 reply; 6+ messages in thread
From: tip-bot for Martin Schwidefsky @ 2009-08-04 14:16 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: linux-kernel, hpa, mingo, johnstul, venkatesh.pallipadi,
	schwidefsky, tglx, mingo

Commit-ID:  91ff44bdb806a3d26436cc4f5e4816d1ea75b34b
Gitweb:     http://git.kernel.org/tip/91ff44bdb806a3d26436cc4f5e4816d1ea75b34b
Author:     Martin Schwidefsky <schwidefsky@de.ibm.com>
AuthorDate: Tue, 21 Jul 2009 20:25:05 +0200
Committer:  Ingo Molnar <mingo@elte.hu>
CommitDate: Tue, 4 Aug 2009 16:07:51 +0200

timers: Cache __next_timer_interrupt result

Each time a cpu goes to sleep on a NOHZ=y system the timer
wheel is searched for the next timer interrupt. It can take
quite a few cycles to find the next pending timer.

This patch adds a field to tvec_base that caches the result of
__next_timer_interrupt.

The hit ratio is around 80% on my thinkpad under normal use; on
a server I've seen hit ratios from 5% to 95%, depending on the
workload.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Cc: john stultz <johnstul@us.ibm.com>
Cc: Venki Pallipadi <venkatesh.pallipadi@intel.com>
LKML-Reference: <20090721202505.7d56a079@skybase>
Signed-off-by: Ingo Molnar <mingo@elte.hu>


---
 kernel/timer.c |   24 +++++++++++++++++++++++-
 1 files changed, 23 insertions(+), 1 deletions(-)

diff --git a/kernel/timer.c b/kernel/timer.c
index 0b36b9e..7769923 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -72,6 +72,7 @@ struct tvec_base {
 	spinlock_t lock;
 	struct timer_list *running_timer;
 	unsigned long timer_jiffies;
+	unsigned long next_timer;
 	struct tvec_root tv1;
 	struct tvec tv2;
 	struct tvec tv3;
@@ -622,6 +623,9 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
 
 	if (timer_pending(timer)) {
 		detach_timer(timer, 0);
+		if (timer->expires == base->next_timer &&
+		    !tbase_get_deferrable(timer->base))
+			base->next_timer = base->timer_jiffies;
 		ret = 1;
 	} else {
 		if (pending_only)
@@ -663,6 +667,9 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
 	}
 
 	timer->expires = expires;
+	if (timer->expires < base->next_timer &&
+	    !tbase_get_deferrable(timer->base))
+		base->next_timer = timer->expires;
 	internal_add_timer(base, timer);
 
 out_unlock:
@@ -781,6 +788,9 @@ void add_timer_on(struct timer_list *timer, int cpu)
 	spin_lock_irqsave(&base->lock, flags);
 	timer_set_base(timer, base);
 	debug_timer_activate(timer);
+	if (timer->expires < base->next_timer &&
+	    !tbase_get_deferrable(timer->base))
+		base->next_timer = timer->expires;
 	internal_add_timer(base, timer);
 	/*
 	 * Check whether the other CPU is idle and needs to be
@@ -817,6 +827,9 @@ int del_timer(struct timer_list *timer)
 		base = lock_timer_base(timer, &flags);
 		if (timer_pending(timer)) {
 			detach_timer(timer, 1);
+			if (timer->expires == base->next_timer &&
+			    !tbase_get_deferrable(timer->base))
+				base->next_timer = base->timer_jiffies;
 			ret = 1;
 		}
 		spin_unlock_irqrestore(&base->lock, flags);
@@ -850,6 +863,9 @@ int try_to_del_timer_sync(struct timer_list *timer)
 	ret = 0;
 	if (timer_pending(timer)) {
 		detach_timer(timer, 1);
+		if (timer->expires == base->next_timer &&
+		    !tbase_get_deferrable(timer->base))
+			base->next_timer = base->timer_jiffies;
 		ret = 1;
 	}
 out:
@@ -1134,7 +1150,9 @@ unsigned long get_next_timer_interrupt(unsigned long now)
 	unsigned long expires;
 
 	spin_lock(&base->lock);
-	expires = __next_timer_interrupt(base);
+	if (base->next_timer <= base->timer_jiffies)
+		base->next_timer = __next_timer_interrupt(base);
+	expires = base->next_timer;
 	spin_unlock(&base->lock);
 
 	if (time_before_eq(expires, now))
@@ -1523,6 +1541,7 @@ static int __cpuinit init_timers_cpu(int cpu)
 		INIT_LIST_HEAD(base->tv1.vec + j);
 
 	base->timer_jiffies = jiffies;
+	base->next_timer = base->timer_jiffies;
 	return 0;
 }
 
@@ -1535,6 +1554,9 @@ static void migrate_timer_list(struct tvec_base *new_base, struct list_head *hea
 		timer = list_first_entry(head, struct timer_list, entry);
 		detach_timer(timer, 0);
 		timer_set_base(timer, new_base);
+		if (timer->expires < new_base->next_timer &&
+		    !tbase_get_deferrable(timer->base))
+			new_base->next_timer = timer->expires;
 		internal_add_timer(new_base, timer);
 	}
 }


* Re: [tip:timers/core] timers: Cache __next_timer_interrupt result
  2009-08-04 14:16 ` [tip:timers/core] timers: Cache " tip-bot for Martin Schwidefsky
@ 2009-08-04 17:47   ` Martin Schwidefsky
  0 siblings, 0 replies; 6+ messages in thread
From: Martin Schwidefsky @ 2009-08-04 17:47 UTC (permalink / raw)
  To: mingo, hpa, linux-kernel, johnstul, venkatesh.pallipadi, tglx, mingo

On Tue, 4 Aug 2009 14:16:04 GMT
tip-bot for Martin Schwidefsky <schwidefsky@de.ibm.com> wrote:

> Commit-ID:  91ff44bdb806a3d26436cc4f5e4816d1ea75b34b
> Gitweb:     http://git.kernel.org/tip/91ff44bdb806a3d26436cc4f5e4816d1ea75b34b
> Author:     Martin Schwidefsky <schwidefsky@de.ibm.com>
> AuthorDate: Tue, 21 Jul 2009 20:25:05 +0200
> Committer:  Ingo Molnar <mingo@elte.hu>
> CommitDate: Tue, 4 Aug 2009 16:07:51 +0200
> 
> timers: Cache __next_timer_interrupt result

Seeing that patch again after a few days, all of a sudden I find the
bugs... I really should use time_before and time_before_eq instead of
comparing the expires values directly. New patch:
--
Subject: [PATCH] cache __next_timer_interrupt result

From: Martin Schwidefsky <schwidefsky@de.ibm.com>

Each time a cpu goes to sleep on a NOHZ=y system the timer wheel is
searched for the next timer interrupt. It can take quite a few cycles
to find the next pending timer. This patch adds a field to tvec_base
that caches the result of __next_timer_interrupt. The hit ratio is
around 80% on my thinkpad under normal use; on a server I've seen
hit ratios from 5% to 95%, depending on the workload.

Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: john stultz <johnstul@us.ibm.com>
Cc: Venki Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
---

 kernel/timer.c |   24 +++++++++++++++++++++++-
 1 file changed, 23 insertions(+), 1 deletion(-)

diff -urpN linux-2.6/kernel/timer.c linux-2.6-patched/kernel/timer.c
--- linux-2.6/kernel/timer.c	2009-08-04 19:45:03.000000000 +0200
+++ linux-2.6-patched/kernel/timer.c	2009-08-04 19:45:19.000000000 +0200
@@ -72,6 +72,7 @@ struct tvec_base {
 	spinlock_t lock;
 	struct timer_list *running_timer;
 	unsigned long timer_jiffies;
+	unsigned long next_timer;
 	struct tvec_root tv1;
 	struct tvec tv2;
 	struct tvec tv3;
@@ -622,6 +623,9 @@ __mod_timer(struct timer_list *timer, un
 
 	if (timer_pending(timer)) {
 		detach_timer(timer, 0);
+		if (timer->expires == base->next_timer &&
+		    !tbase_get_deferrable(timer->base))
+			base->next_timer = base->timer_jiffies;
 		ret = 1;
 	} else {
 		if (pending_only)
@@ -663,6 +667,9 @@ __mod_timer(struct timer_list *timer, un
 	}
 
 	timer->expires = expires;
+	if (time_before(timer->expires, base->next_timer) &&
+	    !tbase_get_deferrable(timer->base))
+		base->next_timer = timer->expires;
 	internal_add_timer(base, timer);
 
 out_unlock:
@@ -781,6 +788,9 @@ void add_timer_on(struct timer_list *tim
 	spin_lock_irqsave(&base->lock, flags);
 	timer_set_base(timer, base);
 	debug_timer_activate(timer);
+	if (time_before(timer->expires, base->next_timer) &&
+	    !tbase_get_deferrable(timer->base))
+		base->next_timer = timer->expires;
 	internal_add_timer(base, timer);
 	/*
 	 * Check whether the other CPU is idle and needs to be
@@ -817,6 +827,9 @@ int del_timer(struct timer_list *timer)
 		base = lock_timer_base(timer, &flags);
 		if (timer_pending(timer)) {
 			detach_timer(timer, 1);
+			if (timer->expires == base->next_timer &&
+			    !tbase_get_deferrable(timer->base))
+				base->next_timer = base->timer_jiffies;
 			ret = 1;
 		}
 		spin_unlock_irqrestore(&base->lock, flags);
@@ -850,6 +863,9 @@ int try_to_del_timer_sync(struct timer_l
 	ret = 0;
 	if (timer_pending(timer)) {
 		detach_timer(timer, 1);
+		if (timer->expires == base->next_timer &&
+		    !tbase_get_deferrable(timer->base))
+			base->next_timer = base->timer_jiffies;
 		ret = 1;
 	}
 out:
@@ -1134,7 +1150,9 @@ unsigned long get_next_timer_interrupt(u
 	unsigned long expires;
 
 	spin_lock(&base->lock);
-	expires = __next_timer_interrupt(base);
+	if (time_before_eq(base->next_timer, base->timer_jiffies))
+		base->next_timer = __next_timer_interrupt(base);
+	expires = base->next_timer;
 	spin_unlock(&base->lock);
 
 	if (time_before_eq(expires, now))
@@ -1523,6 +1541,7 @@ static int __cpuinit init_timers_cpu(int
 		INIT_LIST_HEAD(base->tv1.vec + j);
 
 	base->timer_jiffies = jiffies;
+	base->next_timer = base->timer_jiffies;
 	return 0;
 }
 
@@ -1535,6 +1554,9 @@ static void migrate_timer_list(struct tv
 		timer = list_first_entry(head, struct timer_list, entry);
 		detach_timer(timer, 0);
 		timer_set_base(timer, new_base);
+		if (time_before(timer->expires, new_base->next_timer) &&
+		    !tbase_get_deferrable(timer->base))
+			new_base->next_timer = timer->expires;
 		internal_add_timer(new_base, timer);
 	}
 }


-- 
blue skies,
   Martin.

"Reality continues to ruin my life." - Calvin.


* [tip:timers/core] timers: Cache __next_timer_interrupt result
  2009-07-21 18:25 [RFC][PATCH] cache __next_timer_interrupt result Martin Schwidefsky
  2009-07-22 14:38 ` Thomas Gleixner
  2009-08-04 14:16 ` [tip:timers/core] timers: Cache " tip-bot for Martin Schwidefsky
@ 2009-08-04 18:30 ` tip-bot for Martin Schwidefsky
  2 siblings, 0 replies; 6+ messages in thread
From: tip-bot for Martin Schwidefsky @ 2009-08-04 18:30 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: linux-kernel, hpa, mingo, johnstul, venkatesh.pallipadi,
	schwidefsky, tglx, mingo

Commit-ID:  97fd9ed48ce2b807edc363bef3e817aeeb5cd5e6
Gitweb:     http://git.kernel.org/tip/97fd9ed48ce2b807edc363bef3e817aeeb5cd5e6
Author:     Martin Schwidefsky <schwidefsky@de.ibm.com>
AuthorDate: Tue, 21 Jul 2009 20:25:05 +0200
Committer:  Ingo Molnar <mingo@elte.hu>
CommitDate: Tue, 4 Aug 2009 20:28:25 +0200

timers: Cache __next_timer_interrupt result

Each time a cpu goes to sleep on a NOHZ=y system the timer
wheel is searched for the next timer interrupt. It can take
quite a few cycles to find the next pending timer.

This patch adds a field to tvec_base that caches the result of
__next_timer_interrupt.

The hit ratio is around 80% on my thinkpad under normal use; on
a server I've seen hit ratios from 5% to 95%, depending on the
workload.

-v2: jiffies wrap fixes

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Cc: john stultz <johnstul@us.ibm.com>
Cc: Venki Pallipadi <venkatesh.pallipadi@intel.com>
LKML-Reference: <20090721202505.7d56a079@skybase>
Signed-off-by: Ingo Molnar <mingo@elte.hu>


---
 kernel/timer.c |   24 +++++++++++++++++++++++-
 1 files changed, 23 insertions(+), 1 deletions(-)

diff --git a/kernel/timer.c b/kernel/timer.c
index 0b36b9e..5c1e49e 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -72,6 +72,7 @@ struct tvec_base {
 	spinlock_t lock;
 	struct timer_list *running_timer;
 	unsigned long timer_jiffies;
+	unsigned long next_timer;
 	struct tvec_root tv1;
 	struct tvec tv2;
 	struct tvec tv3;
@@ -622,6 +623,9 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
 
 	if (timer_pending(timer)) {
 		detach_timer(timer, 0);
+		if (timer->expires == base->next_timer &&
+		    !tbase_get_deferrable(timer->base))
+			base->next_timer = base->timer_jiffies;
 		ret = 1;
 	} else {
 		if (pending_only)
@@ -663,6 +667,9 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
 	}
 
 	timer->expires = expires;
+	if (time_before(timer->expires, base->next_timer) &&
+	    !tbase_get_deferrable(timer->base))
+		base->next_timer = timer->expires;
 	internal_add_timer(base, timer);
 
 out_unlock:
@@ -781,6 +788,9 @@ void add_timer_on(struct timer_list *timer, int cpu)
 	spin_lock_irqsave(&base->lock, flags);
 	timer_set_base(timer, base);
 	debug_timer_activate(timer);
+	if (time_before(timer->expires, base->next_timer) &&
+	    !tbase_get_deferrable(timer->base))
+		base->next_timer = timer->expires;
 	internal_add_timer(base, timer);
 	/*
 	 * Check whether the other CPU is idle and needs to be
@@ -817,6 +827,9 @@ int del_timer(struct timer_list *timer)
 		base = lock_timer_base(timer, &flags);
 		if (timer_pending(timer)) {
 			detach_timer(timer, 1);
+			if (timer->expires == base->next_timer &&
+			    !tbase_get_deferrable(timer->base))
+				base->next_timer = base->timer_jiffies;
 			ret = 1;
 		}
 		spin_unlock_irqrestore(&base->lock, flags);
@@ -850,6 +863,9 @@ int try_to_del_timer_sync(struct timer_list *timer)
 	ret = 0;
 	if (timer_pending(timer)) {
 		detach_timer(timer, 1);
+		if (timer->expires == base->next_timer &&
+		    !tbase_get_deferrable(timer->base))
+			base->next_timer = base->timer_jiffies;
 		ret = 1;
 	}
 out:
@@ -1134,7 +1150,9 @@ unsigned long get_next_timer_interrupt(unsigned long now)
 	unsigned long expires;
 
 	spin_lock(&base->lock);
-	expires = __next_timer_interrupt(base);
+	if (time_before_eq(base->next_timer, base->timer_jiffies))
+		base->next_timer = __next_timer_interrupt(base);
+	expires = base->next_timer;
 	spin_unlock(&base->lock);
 
 	if (time_before_eq(expires, now))
@@ -1523,6 +1541,7 @@ static int __cpuinit init_timers_cpu(int cpu)
 		INIT_LIST_HEAD(base->tv1.vec + j);
 
 	base->timer_jiffies = jiffies;
+	base->next_timer = base->timer_jiffies;
 	return 0;
 }
 
@@ -1535,6 +1554,9 @@ static void migrate_timer_list(struct tvec_base *new_base, struct list_head *hea
 		timer = list_first_entry(head, struct timer_list, entry);
 		detach_timer(timer, 0);
 		timer_set_base(timer, new_base);
+		if (time_before(timer->expires, new_base->next_timer) &&
+		    !tbase_get_deferrable(timer->base))
+			new_base->next_timer = timer->expires;
 		internal_add_timer(new_base, timer);
 	}
 }

