* [patch 4 00/22] timer: Refactor the timer wheel
@ 2016-07-04  9:50 Thomas Gleixner
  2016-07-04  9:50 ` [patch 4 01/22] timer: Make pinned a timer property Thomas Gleixner
                   ` (21 more replies)
  0 siblings, 22 replies; 60+ messages in thread
From: Thomas Gleixner @ 2016-07-04  9:50 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Paul McKenney, Frederic Weisbecker,
	Chris Mason, Arjan van de Ven, rt, Rik van Riel, George Spelvin,
	Len Brown, Josh Triplett

This is the fourth version of the timer wheel rework series. The previous
versions can be found here:

V1:   http://lkml.kernel.org/r/20160613070440.950649741@linutronix.de
V2:   http://lkml.kernel.org/r/20160617121134.417319325@linutronix.de
V3:   http://lkml.kernel.org/r/20160624140325.554996200@linutronix.de

The series is also available in git:

   git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git WIP.timers

Changes vs. V3:

  - Folded the delta fixes into the affected patches

Thanks,

	tglx

---
 arch/x86/kernel/apic/x2apic_uv_x.c  |    4 
 arch/x86/kernel/cpu/mcheck/mce.c    |    4 
 block/genhd.c                       |    5 
 drivers/cpufreq/powernv-cpufreq.c   |    5 
 drivers/mmc/host/jz4740_mmc.c       |    2 
 drivers/net/ethernet/tile/tilepro.c |    4 
 drivers/power/bq27xxx_battery.c     |    5 
 drivers/tty/metag_da.c              |    4 
 drivers/tty/mips_ejtag_fdc.c        |    4 
 drivers/usb/host/ohci-hcd.c         |    1 
 drivers/usb/host/xhci.c             |    2 
 include/linux/list.h                |   10 
 include/linux/timer.h               |   34 -
 kernel/signal.c                     |   24 
 kernel/time/tick-internal.h         |    1 
 kernel/time/tick-sched.c            |   45 -
 kernel/time/timer.c                 | 1101 +++++++++++++++++++++---------------
 lib/random32.c                      |    1 
 net/ipv4/inet_connection_sock.c     |    7 
 net/ipv4/inet_timewait_sock.c       |    5 
 20 files changed, 740 insertions(+), 528 deletions(-)

* [patch 4 01/22] timer: Make pinned a timer property
  2016-07-04  9:50 [patch 4 00/22] timer: Refactor the timer wheel Thomas Gleixner
@ 2016-07-04  9:50 ` Thomas Gleixner
  2016-07-07  8:39   ` [tip:timers/core] timers: Make 'pinned' " tip-bot for Thomas Gleixner
  2016-07-04  9:50 ` [patch 4 02/22] x86/apic/uv: Initialize timer as pinned Thomas Gleixner
                   ` (20 subsequent siblings)
  21 siblings, 1 reply; 60+ messages in thread
From: Thomas Gleixner @ 2016-07-04  9:50 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Paul McKenney, Frederic Weisbecker,
	Chris Mason, Arjan van de Ven, rt, Rik van Riel, George Spelvin,
	Len Brown, Josh Triplett, Eric Dumazet

[-- Attachment #1: timer_Make_pinned_a_timer_property.patch --]
[-- Type: text/plain, Size: 5247 bytes --]

We want to move the timer migration from a push to a pull model. This requires
storing the pinned attribute of a timer in the timer itself. This must happen
at initialization time.

Add the helper macros for this.

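For illustration, a CPU-local timer then carries the attribute from
initialization time on. A hypothetical driver (priv, poll_fn and
POLL_INTERVAL are made up for this sketch) would do:

	setup_pinned_timer(&priv->poll_timer, poll_fn, (unsigned long)priv);
	priv->poll_timer.expires = jiffies + POLL_INTERVAL;
	add_timer_on(&priv->poll_timer, cpu);

	/* later, e.g. from the timer callback: rearming stays on this CPU */
	mod_timer(&priv->poll_timer, jiffies + POLL_INTERVAL);
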
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Chris Mason <clm@fb.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: rt@linutronix.de
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Arjan van de Ven <arjan@infradead.org>


---
 include/linux/timer.h |   25 ++++++++++++++++++++++---
 kernel/time/timer.c   |   10 +++++-----
 2 files changed, 27 insertions(+), 8 deletions(-)

--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -62,7 +62,8 @@ struct timer_list {
 #define TIMER_MIGRATING		0x00080000
 #define TIMER_BASEMASK		(TIMER_CPUMASK | TIMER_MIGRATING)
 #define TIMER_DEFERRABLE	0x00100000
-#define TIMER_IRQSAFE		0x00200000
+#define TIMER_PINNED		0x00200000
+#define TIMER_IRQSAFE		0x00400000
 
 #define __TIMER_INITIALIZER(_function, _expires, _data, _flags) { \
 		.entry = { .next = TIMER_ENTRY_STATIC },	\
@@ -78,9 +79,15 @@ struct timer_list {
 #define TIMER_INITIALIZER(_function, _expires, _data)		\
 	__TIMER_INITIALIZER((_function), (_expires), (_data), 0)
 
+#define TIMER_PINNED_INITIALIZER(_function, _expires, _data)	\
+	__TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_PINNED)
+
 #define TIMER_DEFERRED_INITIALIZER(_function, _expires, _data)	\
 	__TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_DEFERRABLE)
 
+#define TIMER_PINNED_DEFERRED_INITIALIZER(_function, _expires, _data)	\
+	__TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_DEFERRABLE | TIMER_PINNED)
+
 #define DEFINE_TIMER(_name, _function, _expires, _data)		\
 	struct timer_list _name =				\
 		TIMER_INITIALIZER(_function, _expires, _data)
@@ -124,8 +131,12 @@ static inline void init_timer_on_stack_k
 
 #define init_timer(timer)						\
 	__init_timer((timer), 0)
+#define init_timer_pinned(timer)					\
+	__init_timer((timer), TIMER_PINNED)
 #define init_timer_deferrable(timer)					\
 	__init_timer((timer), TIMER_DEFERRABLE)
+#define init_timer_pinned_deferrable(timer)				\
+	__init_timer((timer), TIMER_DEFERRABLE | TIMER_PINNED)
 #define init_timer_on_stack(timer)					\
 	__init_timer_on_stack((timer), 0)
 
@@ -145,12 +156,20 @@ static inline void init_timer_on_stack_k
 
 #define setup_timer(timer, fn, data)					\
 	__setup_timer((timer), (fn), (data), 0)
+#define setup_pinned_timer(timer, fn, data)				\
+	__setup_timer((timer), (fn), (data), TIMER_PINNED)
 #define setup_deferrable_timer(timer, fn, data)				\
 	__setup_timer((timer), (fn), (data), TIMER_DEFERRABLE)
+#define setup_pinned_deferrable_timer(timer, fn, data)			\
+	__setup_timer((timer), (fn), (data), TIMER_DEFERRABLE | TIMER_PINNED)
 #define setup_timer_on_stack(timer, fn, data)				\
 	__setup_timer_on_stack((timer), (fn), (data), 0)
+#define setup_pinned_timer_on_stack(timer, fn, data)			\
+	__setup_timer_on_stack((timer), (fn), (data), TIMER_PINNED)
 #define setup_deferrable_timer_on_stack(timer, fn, data)		\
 	__setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE)
+#define setup_pinned_deferrable_timer_on_stack(timer, fn, data)		\
+	__setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE | TIMER_PINNED)
 
 /**
  * timer_pending - is a timer pending?
@@ -175,8 +194,8 @@ extern int mod_timer_pinned(struct timer
 
 extern void set_timer_slack(struct timer_list *time, int slack_hz);
 
-#define TIMER_NOT_PINNED	0
-#define TIMER_PINNED		1
+#define MOD_TIMER_NOT_PINNED	0
+#define MOD_TIMER_PINNED	1
 /*
  * The jiffies value which is added to now, when there is no timer
  * in the timer wheel:
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -782,7 +782,7 @@ static inline int
 
 	debug_activate(timer, expires);
 
-	new_base = get_target_base(base, pinned);
+	new_base = get_target_base(base, pinned || timer->flags & TIMER_PINNED);
 
 	if (base != new_base) {
 		/*
@@ -825,7 +825,7 @@ static inline int
  */
 int mod_timer_pending(struct timer_list *timer, unsigned long expires)
 {
-	return __mod_timer(timer, expires, true, TIMER_NOT_PINNED);
+	return __mod_timer(timer, expires, true, MOD_TIMER_NOT_PINNED);
 }
 EXPORT_SYMBOL(mod_timer_pending);
 
@@ -900,7 +900,7 @@ int mod_timer(struct timer_list *timer,
 	if (timer_pending(timer) && timer->expires == expires)
 		return 1;
 
-	return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
+	return __mod_timer(timer, expires, false, MOD_TIMER_NOT_PINNED);
 }
 EXPORT_SYMBOL(mod_timer);
 
@@ -928,7 +928,7 @@ int mod_timer_pinned(struct timer_list *
 	if (timer->expires == expires && timer_pending(timer))
 		return 1;
 
-	return __mod_timer(timer, expires, false, TIMER_PINNED);
+	return __mod_timer(timer, expires, false, MOD_TIMER_PINNED);
 }
 EXPORT_SYMBOL(mod_timer_pinned);
 
@@ -1512,7 +1512,7 @@ signed long __sched schedule_timeout(sig
 	expire = timeout + jiffies;
 
 	setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
-	__mod_timer(&timer, expire, false, TIMER_NOT_PINNED);
+	__mod_timer(&timer, expire, false, MOD_TIMER_NOT_PINNED);
 	schedule();
 	del_singleshot_timer_sync(&timer);
 

* [patch 4 02/22] x86/apic/uv: Initialize timer as pinned
  2016-07-04  9:50 [patch 4 00/22] timer: Refactor the timer wheel Thomas Gleixner
  2016-07-04  9:50 ` [patch 4 01/22] timer: Make pinned a timer property Thomas Gleixner
@ 2016-07-04  9:50 ` Thomas Gleixner
  2016-07-07  8:40   ` [tip:timers/core] timers, x86/apic/uv: Initialize the UV heartbeat " tip-bot for Thomas Gleixner
  2016-07-04  9:50 ` [patch 4 03/22] x86/mce: Initialize " Thomas Gleixner
                   ` (19 subsequent siblings)
  21 siblings, 1 reply; 60+ messages in thread
From: Thomas Gleixner @ 2016-07-04  9:50 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Paul McKenney, Frederic Weisbecker,
	Chris Mason, Arjan van de Ven, rt, Rik van Riel, George Spelvin,
	Len Brown, Josh Triplett, Eric Dumazet

[-- Attachment #1: x86apicuv_Initialize_timer_as_pinned.patch --]
[-- Type: text/plain, Size: 1285 bytes --]

Pinned timers must carry that attribute in the timer itself. No functional
change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Chris Mason <clm@fb.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: rt@linutronix.de
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Arjan van de Ven <arjan@infradead.org>


---
 arch/x86/kernel/apic/x2apic_uv_x.c |    4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -919,7 +919,7 @@ static void uv_heartbeat(unsigned long i
 	uv_set_scir_bits(bits);
 
 	/* enable next timer period */
-	mod_timer_pinned(timer, jiffies + SCIR_CPU_HB_INTERVAL);
+	mod_timer(timer, jiffies + SCIR_CPU_HB_INTERVAL);
 }
 
 static void uv_heartbeat_enable(int cpu)
@@ -928,7 +928,7 @@ static void uv_heartbeat_enable(int cpu)
 		struct timer_list *timer = &uv_cpu_scir_info(cpu)->timer;
 
 		uv_set_cpu_scir_bits(cpu, SCIR_CPU_HEARTBEAT|SCIR_CPU_ACTIVITY);
-		setup_timer(timer, uv_heartbeat, cpu);
+		setup_pinned_timer(timer, uv_heartbeat, cpu);
 		timer->expires = jiffies + SCIR_CPU_HB_INTERVAL;
 		add_timer_on(timer, cpu);
 		uv_cpu_scir_info(cpu)->enabled = 1;

* [patch 4 03/22] x86/mce: Initialize timer as pinned
  2016-07-04  9:50 [patch 4 00/22] timer: Refactor the timer wheel Thomas Gleixner
  2016-07-04  9:50 ` [patch 4 01/22] timer: Make pinned a timer property Thomas Gleixner
  2016-07-04  9:50 ` [patch 4 02/22] x86/apic/uv: Initialize timer as pinned Thomas Gleixner
@ 2016-07-04  9:50 ` Thomas Gleixner
  2016-07-07  8:40   ` [tip:timers/core] timers, x86/mce: Initialize MCE restart " tip-bot for Thomas Gleixner
  2016-07-04  9:50 ` [patch 4 04/22] cpufreq/powernv: Initialize " Thomas Gleixner
                   ` (18 subsequent siblings)
  21 siblings, 1 reply; 60+ messages in thread
From: Thomas Gleixner @ 2016-07-04  9:50 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Paul McKenney, Frederic Weisbecker,
	Chris Mason, Arjan van de Ven, rt, Rik van Riel, George Spelvin,
	Len Brown, Josh Triplett, Eric Dumazet

[-- Attachment #1: x86mce_Initialize_timer_as_pinned.patch --]
[-- Type: text/plain, Size: 1137 bytes --]

Pinned timers must carry that attribute in the timer itself. No functional
change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Chris Mason <clm@fb.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: rt@linutronix.de
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Arjan van de Ven <arjan@infradead.org>

---
 arch/x86/kernel/cpu/mcheck/mce.c |    4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1309,7 +1309,7 @@ static void __restart_timer(struct timer
 
 	if (timer_pending(t)) {
 		if (time_before(when, t->expires))
-			mod_timer_pinned(t, when);
+			mod_timer(t, when);
 	} else {
 		t->expires = round_jiffies(when);
 		add_timer_on(t, smp_processor_id());
@@ -1735,7 +1735,7 @@ static void __mcheck_cpu_init_timer(void
 	struct timer_list *t = this_cpu_ptr(&mce_timer);
 	unsigned int cpu = smp_processor_id();
 
-	setup_timer(t, mce_timer_fn, cpu);
+	setup_pinned_timer(t, mce_timer_fn, cpu);
 	mce_start_timer(cpu, t);
 }
 

* [patch 4 04/22] cpufreq/powernv: Initialize timer as pinned
  2016-07-04  9:50 [patch 4 00/22] timer: Refactor the timer wheel Thomas Gleixner
                   ` (2 preceding siblings ...)
  2016-07-04  9:50 ` [patch 4 03/22] x86/mce: Initialize " Thomas Gleixner
@ 2016-07-04  9:50 ` Thomas Gleixner
  2016-07-07  8:41   ` [tip:timers/core] timers, cpufreq/powernv: Initialize the gpstate " tip-bot for Thomas Gleixner
  2016-07-04  9:50 ` [patch 4 05/22] driver/net/ethernet/tile: Initialize " Thomas Gleixner
                   ` (17 subsequent siblings)
  21 siblings, 1 reply; 60+ messages in thread
From: Thomas Gleixner @ 2016-07-04  9:50 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Paul McKenney, Frederic Weisbecker,
	Chris Mason, Arjan van de Ven, rt, Rik van Riel, George Spelvin,
	Len Brown, Josh Triplett, Eric Dumazet

[-- Attachment #1: cpufreqpowernv_Initialize_timer_as_pinned.patch --]
[-- Type: text/plain, Size: 1237 bytes --]

Pinned timers must carry that attribute in the timer itself. No functional
change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Chris Mason <clm@fb.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: rt@linutronix.de
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Arjan van de Ven <arjan@infradead.org>

---
 drivers/cpufreq/powernv-cpufreq.c |    5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -530,8 +530,7 @@ static inline void  queue_gpstate_timer(
 	else
 		timer_interval = GPSTATE_TIMER_INTERVAL;
 
-	mod_timer_pinned(&gpstates->timer, jiffies +
-			msecs_to_jiffies(timer_interval));
+	mod_timer(&gpstates->timer, jiffies + msecs_to_jiffies(timer_interval));
 }
 
 /**
@@ -699,7 +698,7 @@ static int powernv_cpufreq_cpu_init(stru
 	policy->driver_data = gpstates;
 
 	/* initialize timer */
-	init_timer_deferrable(&gpstates->timer);
+	init_timer_pinned_deferrable(&gpstates->timer);
 	gpstates->timer.data = (unsigned long)policy;
 	gpstates->timer.function = gpstate_timer_handler;
 	gpstates->timer.expires = jiffies +

* [patch 4 05/22] driver/net/ethernet/tile: Initialize timer as pinned
  2016-07-04  9:50 [patch 4 00/22] timer: Refactor the timer wheel Thomas Gleixner
                   ` (3 preceding siblings ...)
  2016-07-04  9:50 ` [patch 4 04/22] cpufreq/powernv: Initialize " Thomas Gleixner
@ 2016-07-04  9:50 ` Thomas Gleixner
  2016-07-07  8:41   ` [tip:timers/core] timers, driver/net/ethernet/tile: Initialize the egress " tip-bot for Thomas Gleixner
  2016-07-04  9:50 ` [patch 4 06/22] drivers/tty/metag_da: Initialize " Thomas Gleixner
                   ` (16 subsequent siblings)
  21 siblings, 1 reply; 60+ messages in thread
From: Thomas Gleixner @ 2016-07-04  9:50 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Paul McKenney, Frederic Weisbecker,
	Chris Mason, Arjan van de Ven, rt, Rik van Riel, George Spelvin,
	Len Brown, Josh Triplett, Eric Dumazet

[-- Attachment #1: drivernetethernettile_Initialize_timer_as_pinned.patch --]
[-- Type: text/plain, Size: 1221 bytes --]

Pinned timers must carry that attribute in the timer itself. No functional
change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Chris Mason <clm@fb.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: rt@linutronix.de
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Arjan van de Ven <arjan@infradead.org>

---
 drivers/net/ethernet/tile/tilepro.c |    4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -588,7 +588,7 @@ static bool tile_net_lepp_free_comps(str
 static void tile_net_schedule_egress_timer(struct tile_net_cpu *info)
 {
 	if (!info->egress_timer_scheduled) {
-		mod_timer_pinned(&info->egress_timer, jiffies + 1);
+		mod_timer(&info->egress_timer, jiffies + 1);
 		info->egress_timer_scheduled = true;
 	}
 }
@@ -1004,7 +1004,7 @@ static void tile_net_register(void *dev_
 		BUG();
 
 	/* Initialize the egress timer. */
-	init_timer(&info->egress_timer);
+	init_timer_pinned(&info->egress_timer);
 	info->egress_timer.data = (long)info;
 	info->egress_timer.function = tile_net_handle_egress_timer;
 

* [patch 4 06/22] drivers/tty/metag_da: Initialize timer as pinned
  2016-07-04  9:50 [patch 4 00/22] timer: Refactor the timer wheel Thomas Gleixner
                   ` (4 preceding siblings ...)
  2016-07-04  9:50 ` [patch 4 05/22] driver/net/ethernet/tile: Initialize " Thomas Gleixner
@ 2016-07-04  9:50 ` Thomas Gleixner
  2016-07-07  8:42   ` [tip:timers/core] timers, drivers/tty/metag_da: Initialize the poll " tip-bot for Thomas Gleixner
  2016-07-04  9:50 ` [patch 4 07/22] drivers/tty/mips_ejtag: Initialize " Thomas Gleixner
                   ` (15 subsequent siblings)
  21 siblings, 1 reply; 60+ messages in thread
From: Thomas Gleixner @ 2016-07-04  9:50 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Paul McKenney, Frederic Weisbecker,
	Chris Mason, Arjan van de Ven, rt, Rik van Riel, George Spelvin,
	Len Brown, Josh Triplett, Eric Dumazet

[-- Attachment #1: driversttymetag_da_Initialize_timer_as_pinned.patch --]
[-- Type: text/plain, Size: 996 bytes --]

Pinned timers must carry that attribute in the timer itself. No functional
change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Chris Mason <clm@fb.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: rt@linutronix.de
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Arjan van de Ven <arjan@infradead.org>

---
 drivers/tty/metag_da.c |    4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

--- a/drivers/tty/metag_da.c
+++ b/drivers/tty/metag_da.c
@@ -323,12 +323,12 @@ static void dashtty_timer(unsigned long
 	if (channel >= 0)
 		fetch_data(channel);
 
-	mod_timer_pinned(&poll_timer, jiffies + DA_TTY_POLL);
+	mod_timer(&poll_timer, jiffies + DA_TTY_POLL);
 }
 
 static void add_poll_timer(struct timer_list *poll_timer)
 {
-	setup_timer(poll_timer, dashtty_timer, 0);
+	setup_pinned_timer(poll_timer, dashtty_timer, 0);
 	poll_timer->expires = jiffies + DA_TTY_POLL;
 
 	/*

* [patch 4 07/22] drivers/tty/mips_ejtag: Initialize timer as pinned
  2016-07-04  9:50 [patch 4 00/22] timer: Refactor the timer wheel Thomas Gleixner
                   ` (5 preceding siblings ...)
  2016-07-04  9:50 ` [patch 4 06/22] drivers/tty/metag_da: Initialize " Thomas Gleixner
@ 2016-07-04  9:50 ` Thomas Gleixner
  2016-07-07  8:42   ` [tip:timers/core] timers, drivers/tty/mips_ejtag: Initialize the poll " tip-bot for Thomas Gleixner
  2016-07-04  9:50 ` [patch 4 08/22] net/ipv4/inet: Initialize timers " Thomas Gleixner
                   ` (14 subsequent siblings)
  21 siblings, 1 reply; 60+ messages in thread
From: Thomas Gleixner @ 2016-07-04  9:50 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Paul McKenney, Frederic Weisbecker,
	Chris Mason, Arjan van de Ven, rt, Rik van Riel, George Spelvin,
	Len Brown, Josh Triplett, Eric Dumazet

[-- Attachment #1: driversttymips_ejtag_Initialize_timer_as_pinned.patch --]
[-- Type: text/plain, Size: 1235 bytes --]

Pinned timers must carry that attribute in the timer itself. No functional
change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Chris Mason <clm@fb.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: rt@linutronix.de
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Arjan van de Ven <arjan@infradead.org>

---
 drivers/tty/mips_ejtag_fdc.c |    4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

--- a/drivers/tty/mips_ejtag_fdc.c
+++ b/drivers/tty/mips_ejtag_fdc.c
@@ -689,7 +689,7 @@ static void mips_ejtag_fdc_tty_timer(uns
 
 	mips_ejtag_fdc_handle(priv);
 	if (!priv->removing)
-		mod_timer_pinned(&priv->poll_timer, jiffies + FDC_TTY_POLL);
+		mod_timer(&priv->poll_timer, jiffies + FDC_TTY_POLL);
 }
 
 /* TTY Port operations */
@@ -1002,7 +1002,7 @@ static int mips_ejtag_fdc_tty_probe(stru
 		raw_spin_unlock_irq(&priv->lock);
 	} else {
 		/* If we didn't get an usable IRQ, poll instead */
-		setup_timer(&priv->poll_timer, mips_ejtag_fdc_tty_timer,
+		setup_pinned_timer(&priv->poll_timer, mips_ejtag_fdc_tty_timer,
 			    (unsigned long)priv);
 		priv->poll_timer.expires = jiffies + FDC_TTY_POLL;
 		/*

* [patch 4 08/22] net/ipv4/inet: Initialize timers as pinned
  2016-07-04  9:50 [patch 4 00/22] timer: Refactor the timer wheel Thomas Gleixner
                   ` (6 preceding siblings ...)
  2016-07-04  9:50 ` [patch 4 07/22] drivers/tty/mips_ejtag: Initialize " Thomas Gleixner
@ 2016-07-04  9:50 ` Thomas Gleixner
  2016-07-07  8:43   ` [tip:timers/core] timers, net/ipv4/inet: Initialize connection request " tip-bot for Thomas Gleixner
  2016-07-04  9:50 ` [patch 4 09/22] timer: Remove mod_timer_pinned Thomas Gleixner
                   ` (13 subsequent siblings)
  21 siblings, 1 reply; 60+ messages in thread
From: Thomas Gleixner @ 2016-07-04  9:50 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Paul McKenney, Frederic Weisbecker,
	Chris Mason, Arjan van de Ven, rt, Rik van Riel, George Spelvin,
	Len Brown, Josh Triplett, Eric Dumazet

[-- Attachment #1: netipv4inet_Initialize_timers_as_pinned.patch --]
[-- Type: text/plain, Size: 2317 bytes --]

Pinned timers must carry that attribute in the timer itself. No functional
change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Chris Mason <clm@fb.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: rt@linutronix.de
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Arjan van de Ven <arjan@infradead.org>

---
 net/ipv4/inet_connection_sock.c |    7 ++++---
 net/ipv4/inet_timewait_sock.c   |    5 +++--
 2 files changed, 7 insertions(+), 5 deletions(-)

--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -603,7 +603,7 @@ static void reqsk_timer_handler(unsigned
 		if (req->num_timeout++ == 0)
 			atomic_dec(&queue->young);
 		timeo = min(TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
-		mod_timer_pinned(&req->rsk_timer, jiffies + timeo);
+		mod_timer(&req->rsk_timer, jiffies + timeo);
 		return;
 	}
 drop:
@@ -617,8 +617,9 @@ static void reqsk_queue_hash_req(struct
 	req->num_timeout = 0;
 	req->sk = NULL;
 
-	setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req);
-	mod_timer_pinned(&req->rsk_timer, jiffies + timeout);
+	setup_pinned_timer(&req->rsk_timer, reqsk_timer_handler,
+			    (unsigned long)req);
+	mod_timer(&req->rsk_timer, jiffies + timeout);
 
 	inet_ehash_insert(req_to_sk(req), NULL);
 	/* before letting lookups find us, make sure all req fields
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -188,7 +188,8 @@ struct inet_timewait_sock *inet_twsk_all
 		tw->tw_prot	    = sk->sk_prot_creator;
 		atomic64_set(&tw->tw_cookie, atomic64_read(&sk->sk_cookie));
 		twsk_net_set(tw, sock_net(sk));
-		setup_timer(&tw->tw_timer, tw_timer_handler, (unsigned long)tw);
+		setup_pinned_timer(&tw->tw_timer, tw_timer_handler,
+				   (unsigned long)tw);
 		/*
 		 * Because we use RCU lookups, we should not set tw_refcnt
 		 * to a non null value before everything is setup for this
@@ -248,7 +249,7 @@ void __inet_twsk_schedule(struct inet_ti
 
 	tw->tw_kill = timeo <= 4*HZ;
 	if (!rearm) {
-		BUG_ON(mod_timer_pinned(&tw->tw_timer, jiffies + timeo));
+		BUG_ON(mod_timer(&tw->tw_timer, jiffies + timeo));
 		atomic_inc(&tw->tw_dr->tw_count);
 	} else {
 		mod_timer_pending(&tw->tw_timer, jiffies + timeo);

* [patch 4 09/22] timer: Remove mod_timer_pinned
  2016-07-04  9:50 [patch 4 00/22] timer: Refactor the timer wheel Thomas Gleixner
                   ` (7 preceding siblings ...)
  2016-07-04  9:50 ` [patch 4 08/22] net/ipv4/inet: Initialize timers " Thomas Gleixner
@ 2016-07-04  9:50 ` Thomas Gleixner
  2016-07-07  8:43   ` [tip:timers/core] timers: Remove the deprecated mod_timer_pinned() API tip-bot for Thomas Gleixner
  2016-07-04  9:50 ` [patch 4 10/22] signal: Use hrtimer for sigtimedwait Thomas Gleixner
                   ` (12 subsequent siblings)
  21 siblings, 1 reply; 60+ messages in thread
From: Thomas Gleixner @ 2016-07-04  9:50 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Paul McKenney, Frederic Weisbecker,
	Chris Mason, Arjan van de Ven, rt, Rik van Riel, George Spelvin,
	Len Brown, Josh Triplett, Eric Dumazet

[-- Attachment #1: timer_Remove_mod_timer_pinned.patch --]
[-- Type: text/plain, Size: 3790 bytes --]

We switched all users to initialize the timers as pinned and call
mod_timer(). Remove the now unused function.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Chris Mason <clm@fb.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: rt@linutronix.de
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Arjan van de Ven <arjan@infradead.org>
---
 include/linux/timer.h |    3 ---
 kernel/time/timer.c   |   39 +++++----------------------------------
 2 files changed, 5 insertions(+), 37 deletions(-)

--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -190,12 +190,9 @@ extern void add_timer_on(struct timer_li
 extern int del_timer(struct timer_list * timer);
 extern int mod_timer(struct timer_list *timer, unsigned long expires);
 extern int mod_timer_pending(struct timer_list *timer, unsigned long expires);
-extern int mod_timer_pinned(struct timer_list *timer, unsigned long expires);
 
 extern void set_timer_slack(struct timer_list *time, int slack_hz);
 
-#define MOD_TIMER_NOT_PINNED	0
-#define MOD_TIMER_PINNED	1
 /*
  * The jiffies value which is added to now, when there is no timer
  * in the timer wheel:
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -764,8 +764,7 @@ static struct tvec_base *lock_timer_base
 }
 
 static inline int
-__mod_timer(struct timer_list *timer, unsigned long expires,
-	    bool pending_only, int pinned)
+__mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
 {
 	struct tvec_base *base, *new_base;
 	unsigned long flags;
@@ -782,7 +781,7 @@ static inline int
 
 	debug_activate(timer, expires);
 
-	new_base = get_target_base(base, pinned || timer->flags & TIMER_PINNED);
+	new_base = get_target_base(base, timer->flags & TIMER_PINNED);
 
 	if (base != new_base) {
 		/*
@@ -825,7 +824,7 @@ static inline int
  */
 int mod_timer_pending(struct timer_list *timer, unsigned long expires)
 {
-	return __mod_timer(timer, expires, true, MOD_TIMER_NOT_PINNED);
+	return __mod_timer(timer, expires, true);
 }
 EXPORT_SYMBOL(mod_timer_pending);
 
@@ -900,39 +899,11 @@ int mod_timer(struct timer_list *timer,
 	if (timer_pending(timer) && timer->expires == expires)
 		return 1;
 
-	return __mod_timer(timer, expires, false, MOD_TIMER_NOT_PINNED);
+	return __mod_timer(timer, expires, false);
 }
 EXPORT_SYMBOL(mod_timer);
 
 /**
- * mod_timer_pinned - modify a timer's timeout
- * @timer: the timer to be modified
- * @expires: new timeout in jiffies
- *
- * mod_timer_pinned() is a way to update the expire field of an
- * active timer (if the timer is inactive it will be activated)
- * and to ensure that the timer is scheduled on the current CPU.
- *
- * Note that this does not prevent the timer from being migrated
- * when the current CPU goes offline.  If this is a problem for
- * you, use CPU-hotplug notifiers to handle it correctly, for
- * example, cancelling the timer when the corresponding CPU goes
- * offline.
- *
- * mod_timer_pinned(timer, expires) is equivalent to:
- *
- *     del_timer(timer); timer->expires = expires; add_timer(timer);
- */
-int mod_timer_pinned(struct timer_list *timer, unsigned long expires)
-{
-	if (timer->expires == expires && timer_pending(timer))
-		return 1;
-
-	return __mod_timer(timer, expires, false, MOD_TIMER_PINNED);
-}
-EXPORT_SYMBOL(mod_timer_pinned);
-
-/**
  * add_timer - start a timer
  * @timer: the timer to be added
  *
@@ -1512,7 +1483,7 @@ signed long __sched schedule_timeout(sig
 	expire = timeout + jiffies;
 
 	setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
-	__mod_timer(&timer, expire, false, MOD_TIMER_NOT_PINNED);
+	__mod_timer(&timer, expire, false);
 	schedule();
 	del_singleshot_timer_sync(&timer);
 

* [patch 4 10/22] signal: Use hrtimer for sigtimedwait
  2016-07-04  9:50 [patch 4 00/22] timer: Refactor the timer wheel Thomas Gleixner
                   ` (8 preceding siblings ...)
  2016-07-04  9:50 ` [patch 4 09/22] timer: Remove mod_timer_pinned Thomas Gleixner
@ 2016-07-04  9:50 ` Thomas Gleixner
  2016-07-07  8:43   ` [tip:timers/core] signals: Use hrtimer for sigtimedwait() tip-bot for Thomas Gleixner
  2016-07-04  9:50 ` [patch 4 11/22] hlist: Add hlist_is_singular_node() helper Thomas Gleixner
                   ` (11 subsequent siblings)
  21 siblings, 1 reply; 60+ messages in thread
From: Thomas Gleixner @ 2016-07-04  9:50 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Paul McKenney, Frederic Weisbecker,
	Chris Mason, Arjan van de Ven, rt, Rik van Riel, George Spelvin,
	Len Brown, Josh Triplett, Cyril Hrubis

[-- Attachment #1: signal--Use-hrtimer-for-sigtimedwait.patch --]
[-- Type: text/plain, Size: 2249 bytes --]

We've converted most timeout-related syscalls to hrtimers, but sigtimedwait()
did not get this treatment. Convert it so we get reasonable accuracy and
remove the user space exposure to the timer wheel properties.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Cyril Hrubis <chrubis@suse.cz>
---
 kernel/signal.c |   24 ++++++++++--------------
 1 file changed, 10 insertions(+), 14 deletions(-)

--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2751,23 +2751,18 @@ int copy_siginfo_to_user(siginfo_t __use
  *  @ts: upper bound on process time suspension
  */
 int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
-			const struct timespec *ts)
+		    const struct timespec *ts)
 {
+	ktime_t *to = NULL, timeout = { .tv64 = KTIME_MAX };
 	struct task_struct *tsk = current;
-	long timeout = MAX_SCHEDULE_TIMEOUT;
 	sigset_t mask = *which;
-	int sig;
+	int sig, ret = 0;
 
 	if (ts) {
 		if (!timespec_valid(ts))
 			return -EINVAL;
-		timeout = timespec_to_jiffies(ts);
-		/*
-		 * We can be close to the next tick, add another one
-		 * to ensure we will wait at least the time asked for.
-		 */
-		if (ts->tv_sec || ts->tv_nsec)
-			timeout++;
+		timeout = timespec_to_ktime(*ts);
+		to = &timeout;
 	}
 
 	/*
@@ -2778,7 +2773,7 @@ int do_sigtimedwait(const sigset_t *whic
 
 	spin_lock_irq(&tsk->sighand->siglock);
 	sig = dequeue_signal(tsk, &mask, info);
-	if (!sig && timeout) {
+	if (!sig && timeout.tv64) {
 		/*
 		 * None ready, temporarily unblock those we're interested
 		 * while we are sleeping in so that we'll be awakened when
@@ -2790,8 +2785,9 @@ int do_sigtimedwait(const sigset_t *whic
 		recalc_sigpending();
 		spin_unlock_irq(&tsk->sighand->siglock);
 
-		timeout = freezable_schedule_timeout_interruptible(timeout);
-
+		__set_current_state(TASK_INTERRUPTIBLE);
+		ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
+							 HRTIMER_MODE_REL);
 		spin_lock_irq(&tsk->sighand->siglock);
 		__set_task_blocked(tsk, &tsk->real_blocked);
 		sigemptyset(&tsk->real_blocked);
@@ -2801,7 +2797,7 @@ int do_sigtimedwait(const sigset_t *whic
 
 	if (sig)
 		return sig;
-	return timeout ? -EINTR : -EAGAIN;
+	return ret ? -EINTR : -EAGAIN;
 }
 
 /**

* [patch 4 11/22] hlist: Add hlist_is_singular_node() helper
  2016-07-04  9:50 [patch 4 00/22] timer: Refactor the timer wheel Thomas Gleixner
                   ` (9 preceding siblings ...)
  2016-07-04  9:50 ` [patch 4 10/22] signal: Use hrtimer for sigtimedwait Thomas Gleixner
@ 2016-07-04  9:50 ` Thomas Gleixner
  2016-07-07  8:44   ` [tip:timers/core] " tip-bot for Thomas Gleixner
  2016-07-04  9:50 ` [patch 4 12/22] timer: Give a few structs and members proper names Thomas Gleixner
                   ` (10 subsequent siblings)
  21 siblings, 1 reply; 60+ messages in thread
From: Thomas Gleixner @ 2016-07-04  9:50 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Paul McKenney, Frederic Weisbecker,
	Chris Mason, Arjan van de Ven, rt, Rik van Riel, George Spelvin,
	Len Brown, Josh Triplett, Eric Dumazet

[-- Attachment #1: hlist_Add_hlist_is_last_node_helper.patch --]
[-- Type: text/plain, Size: 964 bytes --]

Add a helper to figure out whether an entry is the only node in an hlist.

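A minimal user space sketch of the check (the structs are simplified
stand-ins for the kernel's hlist types, shown only to illustrate why
'!n->next && n->pprev == &h->first' identifies a singular node):

	#include <stdbool.h>
	#include <stdio.h>

	struct hlist_node { struct hlist_node *next, **pprev; };
	struct hlist_head { struct hlist_node *first; };

	static bool hlist_is_singular_node(struct hlist_node *n,
					   struct hlist_head *h)
	{
		/* no successor and pprev points at the head's first slot */
		return !n->next && n->pprev == &h->first;
	}

	int main(void)
	{
		struct hlist_head head = { NULL };
		struct hlist_node a;

		/* open-coded hlist_add_head(&a, &head) on an empty list */
		a.next = NULL;
		a.pprev = &head.first;
		head.first = &a;

		printf("%d\n", hlist_is_singular_node(&a, &head)); /* 1 */
		return 0;
	}
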
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Chris Mason <clm@fb.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: rt@linutronix.de
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Arjan van de Ven <arjan@infradead.org>

---
 include/linux/list.h |   10 ++++++++++
 1 file changed, 10 insertions(+)

--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -679,6 +679,16 @@ static inline bool hlist_fake(struct hli
 }
 
 /*
+ * Check whether the node is the only node of the head without
+ * accessing head.
+ */
+static inline bool hlist_is_singular_node(struct hlist_node *n,
+					  struct hlist_head *h)
+{
+	return !n->next && n->pprev == &h->first;
+}
+
+/*
  * Move a list from one list head to another. Fixup the pprev
  * reference of the first entry if it exists.
  */

* [patch 4 12/22] timer: Give a few structs and members proper names
  2016-07-04  9:50 [patch 4 00/22] timer: Refactor the timer wheel Thomas Gleixner
                   ` (10 preceding siblings ...)
  2016-07-04  9:50 ` [patch 4 11/22] hlist: Add hlist_is_singular_node() helper Thomas Gleixner
@ 2016-07-04  9:50 ` Thomas Gleixner
  2016-07-07  8:44   ` [tip:timers/core] timers: " tip-bot for Thomas Gleixner
  2016-07-04  9:50 ` [patch 4 13/22] timer: Reduce the CPU index space to 256k Thomas Gleixner
                   ` (9 subsequent siblings)
  21 siblings, 1 reply; 60+ messages in thread
From: Thomas Gleixner @ 2016-07-04  9:50 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Paul McKenney, Frederic Weisbecker,
	Chris Mason, Arjan van de Ven, rt, Rik van Riel, George Spelvin,
	Len Brown, Josh Triplett, Eric Dumazet

[-- Attachment #1: timer_Give_a_few_structs_and_members_proper_names.patch --]
[-- Type: text/plain, Size: 13267 bytes --]

Some of the names are no longer correct and others are simply too long to
type. Clean them up before we switch the wheel implementation over to the new
scheme.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Chris Mason <clm@fb.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: rt@linutronix.de
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Arjan van de Ven <arjan@infradead.org>


---
 kernel/time/timer.c |  118 ++++++++++++++++++++++++++--------------------------
 1 file changed, 59 insertions(+), 59 deletions(-)

--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -77,10 +77,10 @@ struct tvec_root {
 	struct hlist_head vec[TVR_SIZE];
 };
 
-struct tvec_base {
+struct timer_base {
 	spinlock_t lock;
 	struct timer_list *running_timer;
-	unsigned long timer_jiffies;
+	unsigned long clk;
 	unsigned long next_timer;
 	unsigned long active_timers;
 	unsigned long all_timers;
@@ -95,7 +95,7 @@ struct tvec_base {
 } ____cacheline_aligned;
 
 
-static DEFINE_PER_CPU(struct tvec_base, tvec_bases);
+static DEFINE_PER_CPU(struct timer_base, timer_bases);
 
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
 unsigned int sysctl_timer_migration = 1;
@@ -106,15 +106,15 @@ void timers_update_migration(bool update
 	unsigned int cpu;
 
 	/* Avoid the loop, if nothing to update */
-	if (this_cpu_read(tvec_bases.migration_enabled) == on)
+	if (this_cpu_read(timer_bases.migration_enabled) == on)
 		return;
 
 	for_each_possible_cpu(cpu) {
-		per_cpu(tvec_bases.migration_enabled, cpu) = on;
+		per_cpu(timer_bases.migration_enabled, cpu) = on;
 		per_cpu(hrtimer_bases.migration_enabled, cpu) = on;
 		if (!update_nohz)
 			continue;
-		per_cpu(tvec_bases.nohz_active, cpu) = true;
+		per_cpu(timer_bases.nohz_active, cpu) = true;
 		per_cpu(hrtimer_bases.nohz_active, cpu) = true;
 	}
 }
@@ -134,18 +134,18 @@ int timer_migration_handler(struct ctl_t
 	return ret;
 }
 
-static inline struct tvec_base *get_target_base(struct tvec_base *base,
+static inline struct timer_base *get_target_base(struct timer_base *base,
 						int pinned)
 {
 	if (pinned || !base->migration_enabled)
-		return this_cpu_ptr(&tvec_bases);
-	return per_cpu_ptr(&tvec_bases, get_nohz_timer_target());
+		return this_cpu_ptr(&timer_bases);
+	return per_cpu_ptr(&timer_bases, get_nohz_timer_target());
 }
 #else
-static inline struct tvec_base *get_target_base(struct tvec_base *base,
+static inline struct timer_base *get_target_base(struct timer_base *base,
 						int pinned)
 {
-	return this_cpu_ptr(&tvec_bases);
+	return this_cpu_ptr(&timer_bases);
 }
 #endif
 
@@ -371,10 +371,10 @@ void set_timer_slack(struct timer_list *
 EXPORT_SYMBOL_GPL(set_timer_slack);
 
 static void
-__internal_add_timer(struct tvec_base *base, struct timer_list *timer)
+__internal_add_timer(struct timer_base *base, struct timer_list *timer)
 {
 	unsigned long expires = timer->expires;
-	unsigned long idx = expires - base->timer_jiffies;
+	unsigned long idx = expires - base->clk;
 	struct hlist_head *vec;
 
 	if (idx < TVR_SIZE) {
@@ -394,7 +394,7 @@ static void
 		 * Can happen if you add a timer with expires == jiffies,
 		 * or you set a timer to go off in the past
 		 */
-		vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
+		vec = base->tv1.vec + (base->clk & TVR_MASK);
 	} else {
 		int i;
 		/* If the timeout is larger than MAX_TVAL (on 64-bit
@@ -403,7 +403,7 @@ static void
 		 */
 		if (idx > MAX_TVAL) {
 			idx = MAX_TVAL;
-			expires = idx + base->timer_jiffies;
+			expires = idx + base->clk;
 		}
 		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
 		vec = base->tv5.vec + i;
@@ -412,11 +412,11 @@ static void
 	hlist_add_head(&timer->entry, vec);
 }
 
-static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
+static void internal_add_timer(struct timer_base *base, struct timer_list *timer)
 {
 	/* Advance base->jiffies, if the base is empty */
 	if (!base->all_timers++)
-		base->timer_jiffies = jiffies;
+		base->clk = jiffies;
 
 	__internal_add_timer(base, timer);
 	/*
@@ -707,7 +707,7 @@ static inline void detach_timer(struct t
 }
 
 static inline void
-detach_expired_timer(struct timer_list *timer, struct tvec_base *base)
+detach_expired_timer(struct timer_list *timer, struct timer_base *base)
 {
 	detach_timer(timer, true);
 	if (!(timer->flags & TIMER_DEFERRABLE))
@@ -715,7 +715,7 @@ detach_expired_timer(struct timer_list *
 	base->all_timers--;
 }
 
-static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
+static int detach_if_pending(struct timer_list *timer, struct timer_base *base,
 			     bool clear_pending)
 {
 	if (!timer_pending(timer))
@@ -725,16 +725,16 @@ static int detach_if_pending(struct time
 	if (!(timer->flags & TIMER_DEFERRABLE)) {
 		base->active_timers--;
 		if (timer->expires == base->next_timer)
-			base->next_timer = base->timer_jiffies;
+			base->next_timer = base->clk;
 	}
 	/* If this was the last timer, advance base->jiffies */
 	if (!--base->all_timers)
-		base->timer_jiffies = jiffies;
+		base->clk = jiffies;
 	return 1;
 }
 
 /*
- * We are using hashed locking: holding per_cpu(tvec_bases).lock
+ * We are using hashed locking: holding per_cpu(timer_bases).lock
  * means that all timers which are tied to this base via timer->base are
  * locked, and the base itself is locked too.
  *
@@ -744,16 +744,16 @@ static int detach_if_pending(struct time
  * When the timer's base is locked and removed from the list, the
  * TIMER_MIGRATING flag is set, FIXME
  */
-static struct tvec_base *lock_timer_base(struct timer_list *timer,
+static struct timer_base *lock_timer_base(struct timer_list *timer,
 					unsigned long *flags)
 	__acquires(timer->base->lock)
 {
 	for (;;) {
 		u32 tf = timer->flags;
-		struct tvec_base *base;
+		struct timer_base *base;
 
 		if (!(tf & TIMER_MIGRATING)) {
-			base = per_cpu_ptr(&tvec_bases, tf & TIMER_CPUMASK);
+			base = per_cpu_ptr(&timer_bases, tf & TIMER_CPUMASK);
 			spin_lock_irqsave(&base->lock, *flags);
 			if (timer->flags == tf)
 				return base;
@@ -766,7 +766,7 @@ static struct tvec_base *lock_timer_base
 static inline int
 __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
 {
-	struct tvec_base *base, *new_base;
+	struct timer_base *base, *new_base;
 	unsigned long flags;
 	int ret = 0;
 
@@ -933,8 +933,8 @@ EXPORT_SYMBOL(add_timer);
  */
 void add_timer_on(struct timer_list *timer, int cpu)
 {
-	struct tvec_base *new_base = per_cpu_ptr(&tvec_bases, cpu);
-	struct tvec_base *base;
+	struct timer_base *new_base = per_cpu_ptr(&timer_bases, cpu);
+	struct timer_base *base;
 	unsigned long flags;
 
 	timer_stats_timer_set_start_info(timer);
@@ -975,7 +975,7 @@ EXPORT_SYMBOL_GPL(add_timer_on);
  */
 int del_timer(struct timer_list *timer)
 {
-	struct tvec_base *base;
+	struct timer_base *base;
 	unsigned long flags;
 	int ret = 0;
 
@@ -1001,7 +1001,7 @@ EXPORT_SYMBOL(del_timer);
  */
 int try_to_del_timer_sync(struct timer_list *timer)
 {
-	struct tvec_base *base;
+	struct timer_base *base;
 	unsigned long flags;
 	int ret = -1;
 
@@ -1085,7 +1085,7 @@ int del_timer_sync(struct timer_list *ti
 EXPORT_SYMBOL(del_timer_sync);
 #endif
 
-static int cascade(struct tvec_base *base, struct tvec *tv, int index)
+static int cascade(struct timer_base *base, struct tvec *tv, int index)
 {
 	/* cascade all the timers from tv up one level */
 	struct timer_list *timer;
@@ -1149,7 +1149,7 @@ static void call_timer_fn(struct timer_l
 	}
 }
 
-#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
+#define INDEX(N) ((base->clk >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
 
 /**
  * __run_timers - run all expired timers (if any) on this CPU.
@@ -1158,23 +1158,23 @@ static void call_timer_fn(struct timer_l
  * This function cascades all vectors and executes all expired timer
  * vectors.
  */
-static inline void __run_timers(struct tvec_base *base)
+static inline void __run_timers(struct timer_base *base)
 {
 	struct timer_list *timer;
 
 	spin_lock_irq(&base->lock);
 
-	while (time_after_eq(jiffies, base->timer_jiffies)) {
+	while (time_after_eq(jiffies, base->clk)) {
 		struct hlist_head work_list;
 		struct hlist_head *head = &work_list;
 		int index;
 
 		if (!base->all_timers) {
-			base->timer_jiffies = jiffies;
+			base->clk = jiffies;
 			break;
 		}
 
-		index = base->timer_jiffies & TVR_MASK;
+		index = base->clk & TVR_MASK;
 
 		/*
 		 * Cascade timers:
@@ -1184,7 +1184,7 @@ static inline void __run_timers(struct t
 				(!cascade(base, &base->tv3, INDEX(1))) &&
 					!cascade(base, &base->tv4, INDEX(2)))
 			cascade(base, &base->tv5, INDEX(3));
-		++base->timer_jiffies;
+		++base->clk;
 		hlist_move_list(base->tv1.vec + index, head);
 		while (!hlist_empty(head)) {
 			void (*fn)(unsigned long);
@@ -1222,16 +1222,16 @@ static inline void __run_timers(struct t
  * is used on S/390 to stop all activity when a CPU is idle.
  * This function needs to be called with interrupts disabled.
  */
-static unsigned long __next_timer_interrupt(struct tvec_base *base)
+static unsigned long __next_timer_interrupt(struct timer_base *base)
 {
-	unsigned long timer_jiffies = base->timer_jiffies;
-	unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
+	unsigned long clk = base->clk;
+	unsigned long expires = clk + NEXT_TIMER_MAX_DELTA;
 	int index, slot, array, found = 0;
 	struct timer_list *nte;
 	struct tvec *varray[4];
 
 	/* Look for timer events in tv1. */
-	index = slot = timer_jiffies & TVR_MASK;
+	index = slot = clk & TVR_MASK;
 	do {
 		hlist_for_each_entry(nte, base->tv1.vec + slot, entry) {
 			if (nte->flags & TIMER_DEFERRABLE)
@@ -1250,8 +1250,8 @@ static unsigned long __next_timer_interr
 cascade:
 	/* Calculate the next cascade event */
 	if (index)
-		timer_jiffies += TVR_SIZE - index;
-	timer_jiffies >>= TVR_BITS;
+		clk += TVR_SIZE - index;
+	clk >>= TVR_BITS;
 
 	/* Check tv2-tv5. */
 	varray[0] = &base->tv2;
@@ -1262,7 +1262,7 @@ static unsigned long __next_timer_interr
 	for (array = 0; array < 4; array++) {
 		struct tvec *varp = varray[array];
 
-		index = slot = timer_jiffies & TVN_MASK;
+		index = slot = clk & TVN_MASK;
 		do {
 			hlist_for_each_entry(nte, varp->vec + slot, entry) {
 				if (nte->flags & TIMER_DEFERRABLE)
@@ -1286,8 +1286,8 @@ static unsigned long __next_timer_interr
 		} while (slot != index);
 
 		if (index)
-			timer_jiffies += TVN_SIZE - index;
-		timer_jiffies >>= TVN_BITS;
+			clk += TVN_SIZE - index;
+		clk >>= TVN_BITS;
 	}
 	return expires;
 }
@@ -1335,7 +1335,7 @@ static u64 cmp_next_hrtimer_event(u64 ba
  */
 u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
 {
-	struct tvec_base *base = this_cpu_ptr(&tvec_bases);
+	struct timer_base *base = this_cpu_ptr(&timer_bases);
 	u64 expires = KTIME_MAX;
 	unsigned long nextevt;
 
@@ -1348,7 +1348,7 @@ u64 get_next_timer_interrupt(unsigned lo
 
 	spin_lock(&base->lock);
 	if (base->active_timers) {
-		if (time_before_eq(base->next_timer, base->timer_jiffies))
+		if (time_before_eq(base->next_timer, base->clk))
 			base->next_timer = __next_timer_interrupt(base);
 		nextevt = base->next_timer;
 		if (time_before_eq(nextevt, basej))
@@ -1387,9 +1387,9 @@ void update_process_times(int user_tick)
  */
 static void run_timer_softirq(struct softirq_action *h)
 {
-	struct tvec_base *base = this_cpu_ptr(&tvec_bases);
+	struct timer_base *base = this_cpu_ptr(&timer_bases);
 
-	if (time_after_eq(jiffies, base->timer_jiffies))
+	if (time_after_eq(jiffies, base->clk))
 		__run_timers(base);
 }
 
@@ -1534,7 +1534,7 @@ signed long __sched schedule_timeout_idl
 EXPORT_SYMBOL(schedule_timeout_idle);
 
 #ifdef CONFIG_HOTPLUG_CPU
-static void migrate_timer_list(struct tvec_base *new_base, struct hlist_head *head)
+static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *head)
 {
 	struct timer_list *timer;
 	int cpu = new_base->cpu;
@@ -1550,13 +1550,13 @@ static void migrate_timer_list(struct tv
 
 static void migrate_timers(int cpu)
 {
-	struct tvec_base *old_base;
-	struct tvec_base *new_base;
+	struct timer_base *old_base;
+	struct timer_base *new_base;
 	int i;
 
 	BUG_ON(cpu_online(cpu));
-	old_base = per_cpu_ptr(&tvec_bases, cpu);
-	new_base = get_cpu_ptr(&tvec_bases);
+	old_base = per_cpu_ptr(&timer_bases, cpu);
+	new_base = get_cpu_ptr(&timer_bases);
 	/*
 	 * The caller is globally serialized and nobody else
 	 * takes two locks at once, deadlock is not possible.
@@ -1580,7 +1580,7 @@ static void migrate_timers(int cpu)
 
 	spin_unlock(&old_base->lock);
 	spin_unlock_irq(&new_base->lock);
-	put_cpu_ptr(&tvec_bases);
+	put_cpu_ptr(&timer_bases);
 }
 
 static int timer_cpu_notify(struct notifier_block *self,
@@ -1608,13 +1608,13 @@ static inline void timer_register_cpu_no
 
 static void __init init_timer_cpu(int cpu)
 {
-	struct tvec_base *base = per_cpu_ptr(&tvec_bases, cpu);
+	struct timer_base *base = per_cpu_ptr(&timer_bases, cpu);
 
 	base->cpu = cpu;
 	spin_lock_init(&base->lock);
 
-	base->timer_jiffies = jiffies;
-	base->next_timer = base->timer_jiffies;
+	base->clk = jiffies;
+	base->next_timer = base->clk;
 }
 
 static void __init init_timer_cpus(void)

* [patch 4 13/22] timer: Reduce the CPU index space to 256k
  2016-07-04  9:50 [patch 4 00/22] timer: Refactor the timer wheel Thomas Gleixner
                   ` (11 preceding siblings ...)
  2016-07-04  9:50 ` [patch 4 12/22] timer: Give a few structs and members proper names Thomas Gleixner
@ 2016-07-04  9:50 ` Thomas Gleixner
  2016-07-07  8:45   ` [tip:timers/core] timers: " tip-bot for Thomas Gleixner
  2016-07-04  9:50 ` [patch 4 14/22] timer: Switch to a non cascading wheel Thomas Gleixner
                   ` (8 subsequent siblings)
  21 siblings, 1 reply; 60+ messages in thread
From: Thomas Gleixner @ 2016-07-04  9:50 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Paul McKenney, Frederic Weisbecker,
	Chris Mason, Arjan van de Ven, rt, Rik van Riel, George Spelvin,
	Len Brown, Josh Triplett

[-- Attachment #1: timer--Reduce-the-CPU-index-space-to-256k.patch --]
[-- Type: text/plain, Size: 1049 bytes --]

We want to store the array index in the flags space. 256k CPUs should be
enough for a while.

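For illustration, the resulting flags layout (the freed bits are claimed for
the wheel array index by a later patch in this series):

	/*
	 * timer->flags layout after this change:
	 *   bits  0-17  CPU number (TIMER_CPUMASK, up to 256k CPUs)
	 *   bit     18  TIMER_MIGRATING
	 *   bits 19-21  TIMER_DEFERRABLE / TIMER_PINNED / TIMER_IRQSAFE
	 *   bits 22-31  free for the wheel array index
	 */
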
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
---
 include/linux/timer.h |   10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -58,12 +58,12 @@ struct timer_list {
  * workqueue locking issues. It's not meant for executing random crap
  * with interrupts disabled. Abuse is monitored!
  */
-#define TIMER_CPUMASK		0x0007FFFF
-#define TIMER_MIGRATING		0x00080000
+#define TIMER_CPUMASK		0x0003FFFF
+#define TIMER_MIGRATING		0x00040000
 #define TIMER_BASEMASK		(TIMER_CPUMASK | TIMER_MIGRATING)
-#define TIMER_DEFERRABLE	0x00100000
-#define TIMER_PINNED		0x00200000
-#define TIMER_IRQSAFE		0x00400000
+#define TIMER_DEFERRABLE	0x00080000
+#define TIMER_PINNED		0x00100000
+#define TIMER_IRQSAFE		0x00200000
 
 #define __TIMER_INITIALIZER(_function, _expires, _data, _flags) { \
 		.entry = { .next = TIMER_ENTRY_STATIC },	\

* [patch 4 14/22] timer: Switch to a non cascading wheel
  2016-07-04  9:50 [patch 4 00/22] timer: Refactor the timer wheel Thomas Gleixner
                   ` (12 preceding siblings ...)
  2016-07-04  9:50 ` [patch 4 13/22] timer: Reduce the CPU index space to 256k Thomas Gleixner
@ 2016-07-04  9:50 ` Thomas Gleixner
  2016-07-07  8:45   ` [tip:timers/core] timers: Switch to a non-cascading wheel tip-bot for Thomas Gleixner
  2016-08-11 15:21   ` [patch 4 14/22] timer: Switch to a non cascading wheel Jouni Malinen
  2016-07-04  9:50 ` [patch 4 15/22] timer: Remove slack leftovers Thomas Gleixner
                   ` (7 subsequent siblings)
  21 siblings, 2 replies; 60+ messages in thread
From: Thomas Gleixner @ 2016-07-04  9:50 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Paul McKenney, Frederic Weisbecker,
	Chris Mason, Arjan van de Ven, rt, Rik van Riel, George Spelvin,
	Len Brown, Josh Triplett, Eric Dumazet

[-- Attachment #1: timer_Switch_to_a_non_cascading_wheel.patch --]
[-- Type: text/plain, Size: 38351 bytes --]

The current timer wheel has some drawbacks:

1) Cascading

   Cascading can be an unbounded operation and is completely pointless in most
   cases because the vast majority of the timer wheel timers are canceled or
   rearmed before expiration.

2) No fast lookup of the next expiring timer

   In NOHZ scenarios the first timer soft interrupt after a long NOHZ period
   must fast-forward the base time to the current jiffies value. As we have no
   way to find the next expiring timer fast, the code loops, incrementing the
   base time by one and checking for expired timers in each step.

After a thorough analysis of real world data gathered on laptops,
workstations, webservers and other machines (thanks Chris!) I came to the
conclusion that the current 'classic' timer wheel implementation can be
modified to address the above issues.

The vast majority of timer wheel timers are canceled or rearmed before
expiry. Most of them are timeouts for networking and other I/O tasks. The
nature of timeouts is to catch the exception from normal operation (TCP ack
timed out, disk does not respond, etc.). For these kinds of timeouts the
accuracy is not really a concern. In case the timeout fires, performance is
down the drain already.

The few timers which actually expire can be split into two categories:

 1) Short expiry times which expect halfway accurate expiry

 2) Long term expiry times are already inaccurate today due to the batching
    which is done for NOHZ.

So for long term expiry timers we can avoid the cascading property and just
leave them in the less granular outer wheels until expiry or
cancellation. Timers which are armed with a timeout larger than the wheel
capacity are no longer cascaded. We expire them with the longest possible
timeout (6+ days). We have not observed such timeouts in our data collection,
but at least we handle them with the least surprising effect.

To accommodate the longest observed timeouts (5 days in the network conntrack
code) without extending the wheel levels for HZ=1000, we reduce the first
level granularity on HZ=1000 to 4ms, which effectively is the same as the
HZ=250 behaviour. From our data analysis there is nothing which relies on
that 1ms granularity and as a side effect we get better batching and timer
locality for the networking code as well.

Contrary to the classic wheel, the granularity of the next level is not the
capacity of the previous one. With the currently chosen setting the
granularity of each level is 8 times the granularity of the previous
level. So for HZ=250 we end up with the following granularity levels:

Level Offset  Granularity            Range
  0   0          4 ms                 0 ms -        252 ms
  1  64         32 ms               256 ms -       2044 ms (256ms - ~2s)
  2 128        256 ms              2048 ms -      16380 ms (~2s - ~16s)
  3 192       2048 ms (~2s)       16384 ms -     131068 ms (~16s - ~2m)
  4 256      16384 ms (~16s)     131072 ms -    1048572 ms (~2m - ~17m)
  5 320     131072 ms (~2m)     1048576 ms -    8388604 ms (~17m - ~2h)
  6 384    1048576 ms (~17m)    8388608 ms -   67108863 ms (~2h - ~18h)
  7 448    8388608 ms (~2h)    67108864 ms -  536870911 ms (~18h - ~6d)

That's a worst-case inaccuracy of 12.5% for the timers which are queued at
the beginning of a level.

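To make the level selection concrete, here is a small self-contained sketch.
The constants mirror the description above (64 buckets per level, each level
8 times coarser); the patch itself uses equivalent LVL_* constants, so
pick_level() is an assumption for illustration, not the patch's code:

	#include <stdio.h>

	#define LVL_BITS	6	/* 64 buckets per level */
	#define LVL_CLK_SHIFT	3	/* each level is 8x coarser */

	/* A relative expiry 'delta' (in ticks) fits level 'lvl' once
	 * delta < 64 * 8^lvl. Deltas beyond the last level's capacity
	 * are clamped; the wheel force-expires those at its maximum. */
	static unsigned int pick_level(unsigned long delta)
	{
		unsigned int lvl = 0;

		while (lvl < 7 &&
		       delta >= (1UL << (LVL_BITS + lvl * LVL_CLK_SHIFT)))
			lvl++;
		return lvl;
	}

	int main(void)
	{
		/* HZ=250: one tick is 4ms, so a 300ms timeout is 75 ticks.
		 * 75 >= 64 and 75 < 512 -> level 1 (32ms buckets, see table)
		 */
		printf("%u\n", pick_level(75));
		return 0;
	}
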
So the new wheel concept addresses the old issues:

1) Cascading is avoided (except for extremely long timeouts)

2) By keeping the timers in the bucket until expiry/cancellation we can track
   the buckets which have timers enqueued in a bucket bitmap and therefore can
   look up the next expiring timer in a fast and time bound fashion (see the
   sketch below).

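As a sketch of what the bucket bitmap enables (simplified to a single
64-bucket level; the real data structure and the actual lookup land in the
follow-up patches, so this is illustrative only):

	#include <stdint.h>
	#include <stdio.h>

	/* Distance, in buckets, from position 'clk' (0-63) to the next
	 * non-empty bucket: one rotate plus one ffs-style scan, instead
	 * of stepping the clock bucket by bucket. -1 if all empty. */
	static int next_pending_bucket(uint64_t pending, unsigned int clk)
	{
		uint64_t rot = (pending >> clk) |
			       (pending << ((64 - clk) & 63));

		return rot ? __builtin_ctzll(rot) : -1;
	}

	int main(void)
	{
		uint64_t pending = 1ULL << 10 | 1ULL << 50;

		/* From bucket 12 the next armed bucket is 50: 38 away */
		printf("%d\n", next_pending_bucket(pending, 12));
		return 0;
	}
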
A further benefit of the concept is that the slack calculation, which is done
on every timer start, is no longer necessary because the granularity levels
provide natural batching already.

Our extensive testing with various loads did not show any performance
degradation vs. the current wheel implementation.

This patch does not address the 'fast lookup' issue as we wanted to make sure
that no regression is introduced by the wheel redesign. The optimizations are
in follow-up patches.

[ Contains fixes from Anna-Maria Gleixner and Richard Cochran ]

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Chris Mason <clm@fb.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: rt@linutronix.de
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Arjan van de Ven <arjan@infradead.org>

---
v4: Simplify wheel constants handling as pointed out by George Spelvin. Switch
    to a non-cascading wheel and let HZ=1000 have reduced granularity to
    accommodate the 5-day timeouts of the networking code.

v3: fix return value of __next_timer_interrupt()
v2: change HASH_SIZE to TOT_HASH_SIZE (as Richard mentioned)

 include/linux/timer.h |    2 
 kernel/time/timer.c   |  825 ++++++++++++++++++++++++++++----------------------
 2 files changed, 467 insertions(+), 360 deletions(-)

--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -64,6 +64,8 @@ struct timer_list {
 #define TIMER_DEFERRABLE	0x00080000
 #define TIMER_PINNED		0x00100000
 #define TIMER_IRQSAFE		0x00200000
+#define TIMER_ARRAYSHIFT	22
+#define TIMER_ARRAYMASK		0xFFC00000
 
 #define __TIMER_INITIALIZER(_function, _expires, _data, _flags) { \
 		.entry = { .next = TIMER_ENTRY_STATIC },	\
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -59,43 +59,151 @@
 EXPORT_SYMBOL(jiffies_64);
 
 /*
- * per-CPU timer vector definitions:
+ * The timer wheel has LVL_DEPTH array levels. Each level provides an array of
+ * LVL_SIZE buckets. Each level is driven by its own clock and therefore
+ * each level has a different granularity.
+ *
+ * The level granularity is:		LVL_CLK_DIV ^ lvl
+ * The level clock frequency is:	HZ / (LVL_CLK_DIV ^ level)
+ *
+ * The array level of a newly armed timer depends on the relative expiry
+ * time. The farther away the expiry time is, the higher the array level and
+ * therefore the coarser the granularity becomes.
+ *
+ * Contrary to the original timer wheel implementation, which aims for 'exact'
+ * expiry of the timers, this implementation removes the need for recascading
+ * the timers into the lower array levels. The previous 'classic' timer wheel
+ * implementation of the kernel already violated the 'exact' expiry by adding
+ * slack to the expiry time to provide batched expiration. The granularity
+ * levels provide implicit batching.
+ *
+ * This is an optimization of the original timer wheel implementation for the
+ * majority of the timer wheel use cases: timeouts. The vast majority of
+ * timeout timers (networking, disk I/O ...) are canceled before expiry. If
+ * the timeout expires it indicates that normal operation is disturbed, so it
+ * does not matter much whether the timeout comes with a slight delay.
+ *
+ * The only exception to this are networking timers with a small expiry
+ * time. They rely on the granularity. Those fit into the first wheel level,
+ * which has HZ granularity.
+ *
+ * We don't have cascading anymore. Timers with an expiry time above the
+ * capacity of the last wheel level are force expired at the maximum timeout
+ * value of the last wheel level. From data sampling we know that the maximum
+ * value observed is 5 days (network connection tracking), so this should not
+ * be an issue.
+ *
+ * The currently chosen array constants are a good compromise between
+ * array size and granularity.
+ *
+ * This results in the following granularity and range levels:
+ *
+ * HZ 1000 steps
+ * Level Offset  Granularity            Range
+ *  0      0         1 ms                0 ms -         63 ms
+ *  1     64         8 ms               64 ms -        511 ms
+ *  2    128        64 ms              512 ms -       4095 ms (512ms - ~4s)
+ *  3    192       512 ms             4096 ms -      32767 ms (~4s - ~32s)
+ *  4    256      4096 ms (~4s)      32768 ms -     262143 ms (~32s - ~4m)
+ *  5    320     32768 ms (~32s)    262144 ms -    2097151 ms (~4m - ~34m)
+ *  6    384    262144 ms (~4m)    2097152 ms -   16777215 ms (~34m - ~4h)
+ *  7    448   2097152 ms (~34m)  16777216 ms -  134217727 ms (~4h - ~1d)
+ *  8    512  16777216 ms (~4h)  134217728 ms - 1073741822 ms (~1d - ~12d)
+ *
+ * HZ  300
+ * Level Offset  Granularity            Range
+ *  0	   0         3 ms                0 ms -        210 ms
+ *  1	  64        26 ms              213 ms -       1703 ms (213ms - ~1s)
+ *  2	 128       213 ms             1706 ms -      13650 ms (~1s - ~13s)
+ *  3	 192      1706 ms (~1s)      13653 ms -     109223 ms (~13s - ~1m)
+ *  4	 256     13653 ms (~13s)    109226 ms -     873810 ms (~1m - ~14m)
+ *  5	 320    109226 ms (~1m)     873813 ms -    6990503 ms (~14m - ~1h)
+ *  6	 384    873813 ms (~14m)   6990506 ms -   55924050 ms (~1h - ~15h)
+ *  7	 448   6990506 ms (~1h)   55924053 ms -  447392423 ms (~15h - ~5d)
+ *  8    512  55924053 ms (~15h) 447392426 ms - 3579139406 ms (~5d - ~41d)
+ *
+ * HZ  250
+ * Level Offset  Granularity            Range
+ *  0	   0         4 ms                0 ms -        255 ms
+ *  1	  64        32 ms              256 ms -       2047 ms (256ms - ~2s)
+ *  2	 128       256 ms             2048 ms -      16383 ms (~2s - ~16s)
+ *  3	 192      2048 ms (~2s)      16384 ms -     131071 ms (~16s - ~2m)
+ *  4	 256     16384 ms (~16s)    131072 ms -    1048575 ms (~2m - ~17m)
+ *  5	 320    131072 ms (~2m)    1048576 ms -    8388607 ms (~17m - ~2h)
+ *  6	 384   1048576 ms (~17m)   8388608 ms -   67108863 ms (~2h - ~18h)
+ *  7	 448   8388608 ms (~2h)   67108864 ms -  536870911 ms (~18h - ~6d)
+ *  8    512  67108864 ms (~18h) 536870912 ms - 4294967288 ms (~6d - ~49d)
+ *
+ * HZ  100
+ * Level Offset  Granularity            Range
+ *  0	   0         10 ms               0 ms -        630 ms
+ *  1	  64         80 ms             640 ms -       5110 ms (640ms - ~5s)
+ *  2	 128        640 ms            5120 ms -      40950 ms (~5s - ~40s)
+ *  3	 192       5120 ms (~5s)     40960 ms -     327670 ms (~40s - ~5m)
+ *  4	 256      40960 ms (~40s)   327680 ms -    2621430 ms (~5m - ~43m)
+ *  5	 320     327680 ms (~5m)   2621440 ms -   20971510 ms (~43m - ~5h)
+ *  6	 384    2621440 ms (~43m) 20971520 ms -  167772150 ms (~5h - ~1d)
+ *  7	 448   20971520 ms (~5h) 167772160 ms - 1342177270 ms (~1d - ~15d)
+ */
+
+/* Clock divisor for the next level */
+#define LVL_CLK_SHIFT	3
+#define LVL_CLK_DIV	(1UL << LVL_CLK_SHIFT)
+#define LVL_CLK_MASK	(LVL_CLK_DIV - 1)
+#define LVL_SHIFT(n)	((n) * LVL_CLK_SHIFT)
+#define LVL_GRAN(n)	(1UL << LVL_SHIFT(n))
+
+/*
+ * The time start value for each level to select the bucket at enqueue
+ * time.
  */
-#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
-#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
-#define TVN_SIZE (1 << TVN_BITS)
-#define TVR_SIZE (1 << TVR_BITS)
-#define TVN_MASK (TVN_SIZE - 1)
-#define TVR_MASK (TVR_SIZE - 1)
-#define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1))
+#define LVL_START(n)	((LVL_SIZE - 1) << (((n) - 1) * LVL_CLK_SHIFT))
 
-struct tvec {
-	struct hlist_head vec[TVN_SIZE];
-};
+/* Size of each clock level */
+#define LVL_BITS	6
+#define LVL_SIZE	(1UL << LVL_BITS)
+#define LVL_MASK	(LVL_SIZE - 1)
+#define LVL_OFFS(n)	((n) * LVL_SIZE)
+
+/* Level depth */
+#if HZ > 100
+# define LVL_DEPTH	9
+# else
+# define LVL_DEPTH	8
+#endif
 
-struct tvec_root {
-	struct hlist_head vec[TVR_SIZE];
-};
+/* The cutoff (max. capacity of the wheel) */
+#define WHEEL_TIMEOUT_CUTOFF	(LVL_START(LVL_DEPTH))
+#define WHEEL_TIMEOUT_MAX	(WHEEL_TIMEOUT_CUTOFF - LVL_GRAN(LVL_DEPTH - 1))
+
+/*
+ * The resulting wheel size. If NOHZ is configured we allocate two
+ * wheels so we have a separate storage for the deferrable timers.
+ */
+#define WHEEL_SIZE	(LVL_SIZE * LVL_DEPTH)
+
+#ifdef CONFIG_NO_HZ_COMMON
+# define NR_BASES	2
+# define BASE_STD	0
+# define BASE_DEF	1
+#else
+# define NR_BASES	1
+# define BASE_STD	0
+# define BASE_DEF	0
+#endif
 
 struct timer_base {
-	spinlock_t lock;
-	struct timer_list *running_timer;
-	unsigned long clk;
-	unsigned long next_timer;
-	unsigned long active_timers;
-	unsigned long all_timers;
-	int cpu;
-	bool migration_enabled;
-	bool nohz_active;
-	struct tvec_root tv1;
-	struct tvec tv2;
-	struct tvec tv3;
-	struct tvec tv4;
-	struct tvec tv5;
+	spinlock_t		lock;
+	struct timer_list	*running_timer;
+	unsigned long		clk;
+	unsigned int		cpu;
+	bool			migration_enabled;
+	bool			nohz_active;
+	DECLARE_BITMAP(pending_map, WHEEL_SIZE);
+	struct hlist_head	vectors[WHEEL_SIZE];
 } ____cacheline_aligned;
 
-
-static DEFINE_PER_CPU(struct timer_base, timer_bases);
+static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);
 
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
 unsigned int sysctl_timer_migration = 1;
@@ -106,15 +214,17 @@ void timers_update_migration(bool update
 	unsigned int cpu;
 
 	/* Avoid the loop, if nothing to update */
-	if (this_cpu_read(timer_bases.migration_enabled) == on)
+	if (this_cpu_read(timer_bases[BASE_STD].migration_enabled) == on)
 		return;
 
 	for_each_possible_cpu(cpu) {
-		per_cpu(timer_bases.migration_enabled, cpu) = on;
+		per_cpu(timer_bases[BASE_STD].migration_enabled, cpu) = on;
+		per_cpu(timer_bases[BASE_DEF].migration_enabled, cpu) = on;
 		per_cpu(hrtimer_bases.migration_enabled, cpu) = on;
 		if (!update_nohz)
 			continue;
-		per_cpu(timer_bases.nohz_active, cpu) = true;
+		per_cpu(timer_bases[BASE_STD].nohz_active, cpu) = true;
+		per_cpu(timer_bases[BASE_DEF].nohz_active, cpu) = true;
 		per_cpu(hrtimer_bases.nohz_active, cpu) = true;
 	}
 }
@@ -133,20 +243,6 @@ int timer_migration_handler(struct ctl_t
 	mutex_unlock(&mutex);
 	return ret;
 }
-
-static inline struct timer_base *get_target_base(struct timer_base *base,
-						int pinned)
-{
-	if (pinned || !base->migration_enabled)
-		return this_cpu_ptr(&timer_bases);
-	return per_cpu_ptr(&timer_bases, get_nohz_timer_target());
-}
-#else
-static inline struct timer_base *get_target_base(struct timer_base *base,
-						int pinned)
-{
-	return this_cpu_ptr(&timer_bases);
-}
 #endif
 
 static unsigned long round_jiffies_common(unsigned long j, int cpu,
@@ -370,78 +466,91 @@ void set_timer_slack(struct timer_list *
 }
 EXPORT_SYMBOL_GPL(set_timer_slack);
 
+static inline unsigned int timer_get_idx(struct timer_list *timer)
+{
+	return (timer->flags & TIMER_ARRAYMASK) >> TIMER_ARRAYSHIFT;
+}
+
+static inline void timer_set_idx(struct timer_list *timer, unsigned int idx)
+{
+	timer->flags = (timer->flags & ~TIMER_ARRAYMASK) |
+			idx << TIMER_ARRAYSHIFT;
+}
+
+/*
+ * Helper function to calculate the array index for a given expiry
+ * time.
+ */
+static inline unsigned calc_index(unsigned expires, unsigned lvl)
+{
+	expires = (expires + LVL_GRAN(lvl)) >> LVL_SHIFT(lvl);
+	return LVL_OFFS(lvl) + (expires & LVL_MASK);
+}
+
 static void
 __internal_add_timer(struct timer_base *base, struct timer_list *timer)
 {
 	unsigned long expires = timer->expires;
-	unsigned long idx = expires - base->clk;
+	unsigned long delta = expires - base->clk;
 	struct hlist_head *vec;
+	unsigned int idx;
 
-	if (idx < TVR_SIZE) {
-		int i = expires & TVR_MASK;
-		vec = base->tv1.vec + i;
-	} else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
-		int i = (expires >> TVR_BITS) & TVN_MASK;
-		vec = base->tv2.vec + i;
-	} else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
-		int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
-		vec = base->tv3.vec + i;
-	} else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
-		int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
-		vec = base->tv4.vec + i;
-	} else if ((signed long) idx < 0) {
-		/*
-		 * Can happen if you add a timer with expires == jiffies,
-		 * or you set a timer to go off in the past
-		 */
-		vec = base->tv1.vec + (base->clk & TVR_MASK);
+	if (delta < LVL_START(1)) {
+		idx = calc_index(expires, 0);
+	} else if (delta < LVL_START(2)) {
+		idx = calc_index(expires, 1);
+	} else if (delta < LVL_START(3)) {
+		idx = calc_index(expires, 2);
+	} else if (delta < LVL_START(4)) {
+		idx = calc_index(expires, 3);
+	} else if (delta < LVL_START(5)) {
+		idx = calc_index(expires, 4);
+	} else if (delta < LVL_START(6)) {
+		idx = calc_index(expires, 5);
+	} else if (delta < LVL_START(7)) {
+		idx = calc_index(expires, 6);
+	} else if (LVL_DEPTH > 8 && delta < LVL_START(8)) {
+		idx = calc_index(expires, 7);
+	} else if ((long) delta < 0) {
+		idx = base->clk & LVL_MASK;
 	} else {
-		int i;
-		/* If the timeout is larger than MAX_TVAL (on 64-bit
-		 * architectures or with CONFIG_BASE_SMALL=1) then we
-		 * use the maximum timeout.
+		/*
+		 * Force obscenely large timeouts to expire at the
+		 * capacity limit of the wheel.
 		 */
-		if (idx > MAX_TVAL) {
-			idx = MAX_TVAL;
-			expires = idx + base->clk;
-		}
-		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
-		vec = base->tv5.vec + i;
-	}
+		if (expires >= WHEEL_TIMEOUT_CUTOFF)
+			expires = WHEEL_TIMEOUT_MAX;
 
+		idx = calc_index(expires, LVL_DEPTH - 1);
+	}
+	/*
+	 * Enqueue the timer into the array bucket, mark it pending in
+	 * the bitmap and store the index in the timer flags.
+	 */
+	vec = base->vectors + idx;
 	hlist_add_head(&timer->entry, vec);
+	__set_bit(idx, base->pending_map);
+	timer_set_idx(timer, idx);
 }
 
 static void internal_add_timer(struct timer_base *base, struct timer_list *timer)
 {
-	/* Advance base->jiffies, if the base is empty */
-	if (!base->all_timers++)
-		base->clk = jiffies;
-
 	__internal_add_timer(base, timer);
-	/*
-	 * Update base->active_timers and base->next_timer
-	 */
-	if (!(timer->flags & TIMER_DEFERRABLE)) {
-		if (!base->active_timers++ ||
-		    time_before(timer->expires, base->next_timer))
-			base->next_timer = timer->expires;
-	}
 
 	/*
 	 * Check whether the other CPU is in dynticks mode and needs
-	 * to be triggered to reevaluate the timer wheel.
-	 * We are protected against the other CPU fiddling
-	 * with the timer by holding the timer base lock. This also
-	 * makes sure that a CPU on the way to stop its tick can not
-	 * evaluate the timer wheel.
+	 * to be triggered to reevaluate the timer wheel.  We are
+	 * protected against the other CPU fiddling with the timer by
+	 * holding the timer base lock. This also makes sure that a
+	 * CPU on the way to stop its tick can not evaluate the timer
+	 * wheel.
 	 *
 	 * Spare the IPI for deferrable timers on idle targets though.
 	 * The next busy ticks will take care of it. Except full dynticks
 	 * require special care against races with idle_cpu(), lets deal
 	 * with that later.
 	 */
-	if (base->nohz_active) {
+	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active) {
 		if (!(timer->flags & TIMER_DEFERRABLE) ||
 		    tick_nohz_full_cpu(base->cpu))
 			wake_up_nohz_cpu(base->cpu);
@@ -706,54 +815,87 @@ static inline void detach_timer(struct t
 	entry->next = LIST_POISON2;
 }
 
-static inline void
-detach_expired_timer(struct timer_list *timer, struct timer_base *base)
-{
-	detach_timer(timer, true);
-	if (!(timer->flags & TIMER_DEFERRABLE))
-		base->active_timers--;
-	base->all_timers--;
-}
-
 static int detach_if_pending(struct timer_list *timer, struct timer_base *base,
 			     bool clear_pending)
 {
+	unsigned idx = timer_get_idx(timer);
+
 	if (!timer_pending(timer))
 		return 0;
 
+	if (hlist_is_singular_node(&timer->entry, base->vectors + idx))
+		__clear_bit(idx, base->pending_map);
+
 	detach_timer(timer, clear_pending);
-	if (!(timer->flags & TIMER_DEFERRABLE)) {
-		base->active_timers--;
-		if (timer->expires == base->next_timer)
-			base->next_timer = base->clk;
-	}
-	/* If this was the last timer, advance base->jiffies */
-	if (!--base->all_timers)
-		base->clk = jiffies;
 	return 1;
 }
 
+static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu)
+{
+	struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_STD], cpu);
+
+	/*
+	 * If the timer is deferrable and nohz is active then we need to use
+	 * the deferrable base.
+	 */
+	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active &&
+	    (tflags & TIMER_DEFERRABLE))
+		base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu);
+	return base;
+}
+
+static inline struct timer_base *get_timer_this_cpu_base(u32 tflags)
+{
+	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
+
+	/*
+	 * If the timer is deferrable and nohz is active then we need to use
+	 * the deferrable base.
+	 */
+	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active &&
+	    (tflags & TIMER_DEFERRABLE))
+		base = this_cpu_ptr(&timer_bases[BASE_DEF]);
+	return base;
+}
+
+static inline struct timer_base *get_timer_base(u32 tflags)
+{
+	return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK);
+}
+
+static inline struct timer_base *get_target_base(struct timer_base *base,
+						 unsigned tflags)
+{
+#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
+	if ((tflags & TIMER_PINNED) || !base->migration_enabled)
+		return get_timer_this_cpu_base(tflags);
+	return get_timer_cpu_base(tflags, get_nohz_timer_target());
+#else
+	return get_timer_this_cpu_base(tflags);
+#endif
+}
+
 /*
- * We are using hashed locking: holding per_cpu(timer_bases).lock
- * means that all timers which are tied to this base via timer->base are
- * locked, and the base itself is locked too.
+ * We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means
+ * that all timers which are tied to this base are locked, and the base itself
+ * is locked too.
  *
  * So __run_timers/migrate_timers can safely modify all timers which could
- * be found on ->tvX lists.
+ * be found in the base->vectors array.
  *
- * When the timer's base is locked and removed from the list, the
- * TIMER_MIGRATING flag is set, FIXME
+ * When a timer is migrating then the TIMER_MIGRATING flag is set and we need
+ * to wait until the migration is done.
  */
 static struct timer_base *lock_timer_base(struct timer_list *timer,
-					unsigned long *flags)
+					  unsigned long *flags)
 	__acquires(timer->base->lock)
 {
 	for (;;) {
-		u32 tf = timer->flags;
 		struct timer_base *base;
+		u32 tf = timer->flags;
 
 		if (!(tf & TIMER_MIGRATING)) {
-			base = per_cpu_ptr(&timer_bases, tf & TIMER_CPUMASK);
+			base = get_timer_base(tf);
 			spin_lock_irqsave(&base->lock, *flags);
 			if (timer->flags == tf)
 				return base;
@@ -770,6 +912,27 @@ static inline int
 	unsigned long flags;
 	int ret = 0;
 
+	/*
+	 * TODO: Calculate the array bucket of the timer right here w/o
+	 * holding the base lock. This allows us to check not only
+	 * timer->expires == expires below, but also whether the timer
+	 * ends up in the same bucket. If we really need to requeue
+	 * the timer then we check whether base->clk has
+	 * advanced between here and locking the timer base. If
+	 * jiffies advanced we have to recalc the array bucket with the
+	 * lock held.
+	 */
+
+	/*
+	 * This is a common optimization triggered by the
+	 * networking code - if the timer is re-modified
+	 * to be the same thing then just return:
+	 */
+	if (timer_pending(timer)) {
+		if (timer->expires == expires)
+			return 1;
+	}
+
 	timer_stats_timer_set_start_info(timer);
 	BUG_ON(!timer->function);
 
@@ -781,15 +944,15 @@ static inline int
 
 	debug_activate(timer, expires);
 
-	new_base = get_target_base(base, timer->flags & TIMER_PINNED);
+	new_base = get_target_base(base, timer->flags);
 
 	if (base != new_base) {
 		/*
-		 * We are trying to schedule the timer on the local CPU.
+		 * We are trying to schedule the timer on the new base.
 		 * However we can't change timer's base while it is running,
 		 * otherwise del_timer_sync() can't detect that the timer's
-		 * handler yet has not finished. This also guarantees that
-		 * the timer is serialized wrt itself.
+		 * handler has not yet finished. This also guarantees that the
+		 * timer is serialized wrt itself.
 		 */
 		if (likely(base->running_timer != timer)) {
 			/* See the comment in lock_timer_base() */
@@ -828,45 +991,6 @@ int mod_timer_pending(struct timer_list
 }
 EXPORT_SYMBOL(mod_timer_pending);
 
-/*
- * Decide where to put the timer while taking the slack into account
- *
- * Algorithm:
- *   1) calculate the maximum (absolute) time
- *   2) calculate the highest bit where the expires and new max are different
- *   3) use this bit to make a mask
- *   4) use the bitmask to round down the maximum time, so that all last
- *      bits are zeros
- */
-static inline
-unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
-{
-	unsigned long expires_limit, mask;
-	int bit;
-
-	if (timer->slack >= 0) {
-		expires_limit = expires + timer->slack;
-	} else {
-		long delta = expires - jiffies;
-
-		if (delta < 256)
-			return expires;
-
-		expires_limit = expires + delta / 256;
-	}
-	mask = expires ^ expires_limit;
-	if (mask == 0)
-		return expires;
-
-	bit = __fls(mask);
-
-	mask = (1UL << bit) - 1;
-
-	expires_limit = expires_limit & ~(mask);
-
-	return expires_limit;
-}
-
 /**
  * mod_timer - modify a timer's timeout
  * @timer: the timer to be modified
@@ -889,16 +1013,6 @@ unsigned long apply_slack(struct timer_l
  */
 int mod_timer(struct timer_list *timer, unsigned long expires)
 {
-	expires = apply_slack(timer, expires);
-
-	/*
-	 * This is a common optimization triggered by the
-	 * networking code - if the timer is re-modified
-	 * to be the same thing then just return:
-	 */
-	if (timer_pending(timer) && timer->expires == expires)
-		return 1;
-
 	return __mod_timer(timer, expires, false);
 }
 EXPORT_SYMBOL(mod_timer);
@@ -933,13 +1047,14 @@ EXPORT_SYMBOL(add_timer);
  */
 void add_timer_on(struct timer_list *timer, int cpu)
 {
-	struct timer_base *new_base = per_cpu_ptr(&timer_bases, cpu);
-	struct timer_base *base;
+	struct timer_base *new_base, *base;
 	unsigned long flags;
 
 	timer_stats_timer_set_start_info(timer);
 	BUG_ON(timer_pending(timer) || !timer->function);
 
+	new_base = get_timer_cpu_base(timer->flags, cpu);
+
 	/*
 	 * If @timer was on a different CPU, it should be migrated with the
 	 * old base locked to prevent other operations proceeding with the
@@ -1085,27 +1200,6 @@ int del_timer_sync(struct timer_list *ti
 EXPORT_SYMBOL(del_timer_sync);
 #endif
 
-static int cascade(struct timer_base *base, struct tvec *tv, int index)
-{
-	/* cascade all the timers from tv up one level */
-	struct timer_list *timer;
-	struct hlist_node *tmp;
-	struct hlist_head tv_list;
-
-	hlist_move_list(tv->vec + index, &tv_list);
-
-	/*
-	 * We are removing _all_ timers from the list, so we
-	 * don't have to detach them individually.
-	 */
-	hlist_for_each_entry_safe(timer, tmp, &tv_list, entry) {
-		/* No accounting, while moving them */
-		__internal_add_timer(base, timer);
-	}
-
-	return index;
-}
-
 static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
 			  unsigned long data)
 {
@@ -1149,68 +1243,80 @@ static void call_timer_fn(struct timer_l
 	}
 }
 
-#define INDEX(N) ((base->clk >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
+static void expire_timers(struct timer_base *base, struct hlist_head *head)
+{
+	while (!hlist_empty(head)) {
+		struct timer_list *timer;
+		void (*fn)(unsigned long);
+		unsigned long data;
+
+		timer = hlist_entry(head->first, struct timer_list, entry);
+		timer_stats_account_timer(timer);
+
+		base->running_timer = timer;
+		detach_timer(timer, true);
+
+		fn = timer->function;
+		data = timer->data;
+
+		if (timer->flags & TIMER_IRQSAFE) {
+			spin_unlock(&base->lock);
+			call_timer_fn(timer, fn, data);
+			spin_lock(&base->lock);
+		} else {
+			spin_unlock_irq(&base->lock);
+			call_timer_fn(timer, fn, data);
+			spin_lock_irq(&base->lock);
+		}
+	}
+}
+
+static int collect_expired_timers(struct timer_base *base,
+				  struct hlist_head *heads)
+{
+	unsigned long clk = base->clk;
+	struct hlist_head *vec;
+	int i, levels = 0;
+	unsigned int idx;
+
+	for (i = 0; i < LVL_DEPTH; i++) {
+		idx = (clk & LVL_MASK) + i * LVL_SIZE;
+
+		if (__test_and_clear_bit(idx, base->pending_map)) {
+			vec = base->vectors + idx;
+			hlist_move_list(vec, heads++);
+			levels++;
+		}
+		/* Is it time to look at the next level? */
+		if (clk & LVL_CLK_MASK)
+			break;
+		/* Shift clock for the next level granularity */
+		clk >>= LVL_CLK_SHIFT;
+	}
+	return levels;
+}
 
 /**
  * __run_timers - run all expired timers (if any) on this CPU.
  * @base: the timer vector to be processed.
- *
- * This function cascades all vectors and executes all expired timer
- * vectors.
  */
 static inline void __run_timers(struct timer_base *base)
 {
-	struct timer_list *timer;
+	struct hlist_head heads[LVL_DEPTH];
+	int levels;
+
+	if (!time_after_eq(jiffies, base->clk))
+		return;
 
 	spin_lock_irq(&base->lock);
 
 	while (time_after_eq(jiffies, base->clk)) {
-		struct hlist_head work_list;
-		struct hlist_head *head = &work_list;
-		int index;
 
-		if (!base->all_timers) {
-			base->clk = jiffies;
-			break;
-		}
-
-		index = base->clk & TVR_MASK;
+		levels = collect_expired_timers(base, heads);
+		base->clk++;
 
-		/*
-		 * Cascade timers:
-		 */
-		if (!index &&
-			(!cascade(base, &base->tv2, INDEX(0))) &&
-				(!cascade(base, &base->tv3, INDEX(1))) &&
-					!cascade(base, &base->tv4, INDEX(2)))
-			cascade(base, &base->tv5, INDEX(3));
-		++base->clk;
-		hlist_move_list(base->tv1.vec + index, head);
-		while (!hlist_empty(head)) {
-			void (*fn)(unsigned long);
-			unsigned long data;
-			bool irqsafe;
-
-			timer = hlist_entry(head->first, struct timer_list, entry);
-			fn = timer->function;
-			data = timer->data;
-			irqsafe = timer->flags & TIMER_IRQSAFE;
-
-			timer_stats_account_timer(timer);
-
-			base->running_timer = timer;
-			detach_expired_timer(timer, base);
-
-			if (irqsafe) {
-				spin_unlock(&base->lock);
-				call_timer_fn(timer, fn, data);
-				spin_lock(&base->lock);
-			} else {
-				spin_unlock_irq(&base->lock);
-				call_timer_fn(timer, fn, data);
-				spin_lock_irq(&base->lock);
-			}
-		}
+		while (levels--)
+			expire_timers(base, heads + levels);
 	}
 	base->running_timer = NULL;
 	spin_unlock_irq(&base->lock);
@@ -1218,78 +1324,87 @@ static inline void __run_timers(struct t
 
 #ifdef CONFIG_NO_HZ_COMMON
 /*
- * Find out when the next timer event is due to happen. This
- * is used on S/390 to stop all activity when a CPU is idle.
- * This function needs to be called with interrupts disabled.
+ * Find the next pending bucket of a level. Search from @offset + @clk upwards
+ * and if nothing there, search from start of the level (@offset) up to
+ * @offset + clk.
+ */
+static int next_pending_bucket(struct timer_base *base, unsigned offset,
+			       unsigned clk)
+{
+	unsigned pos, start = offset + clk;
+	unsigned end = offset + LVL_SIZE;
+
+	pos = find_next_bit(base->pending_map, end, start);
+	if (pos < end)
+		return pos - start;
+
+	pos = find_next_bit(base->pending_map, start, offset);
+	return pos < start ? pos + LVL_SIZE - start : -1;
+}
+
+/*
+ * Search the first expiring timer in the various clock levels.
  */
 static unsigned long __next_timer_interrupt(struct timer_base *base)
 {
-	unsigned long clk = base->clk;
-	unsigned long expires = clk + NEXT_TIMER_MAX_DELTA;
-	int index, slot, array, found = 0;
-	struct timer_list *nte;
-	struct tvec *varray[4];
-
-	/* Look for timer events in tv1. */
-	index = slot = clk & TVR_MASK;
-	do {
-		hlist_for_each_entry(nte, base->tv1.vec + slot, entry) {
-			if (nte->flags & TIMER_DEFERRABLE)
-				continue;
-
-			found = 1;
-			expires = nte->expires;
-			/* Look at the cascade bucket(s)? */
-			if (!index || slot < index)
-				goto cascade;
-			return expires;
-		}
-		slot = (slot + 1) & TVR_MASK;
-	} while (slot != index);
+	unsigned long clk, next, adj;
+	unsigned lvl, offset = 0;
 
-cascade:
-	/* Calculate the next cascade event */
-	if (index)
-		clk += TVR_SIZE - index;
-	clk >>= TVR_BITS;
-
-	/* Check tv2-tv5. */
-	varray[0] = &base->tv2;
-	varray[1] = &base->tv3;
-	varray[2] = &base->tv4;
-	varray[3] = &base->tv5;
-
-	for (array = 0; array < 4; array++) {
-		struct tvec *varp = varray[array];
-
-		index = slot = clk & TVN_MASK;
-		do {
-			hlist_for_each_entry(nte, varp->vec + slot, entry) {
-				if (nte->flags & TIMER_DEFERRABLE)
-					continue;
-
-				found = 1;
-				if (time_before(nte->expires, expires))
-					expires = nte->expires;
-			}
-			/*
-			 * Do we still search for the first timer or are
-			 * we looking up the cascade buckets ?
-			 */
-			if (found) {
-				/* Look at the cascade bucket(s)? */
-				if (!index || slot < index)
-					break;
-				return expires;
-			}
-			slot = (slot + 1) & TVN_MASK;
-		} while (slot != index);
-
-		if (index)
-			clk += TVN_SIZE - index;
-		clk >>= TVN_BITS;
+	spin_lock(&base->lock);
+	next = base->clk + NEXT_TIMER_MAX_DELTA;
+	clk = base->clk;
+	for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) {
+		int pos = next_pending_bucket(base, offset, clk & LVL_MASK);
+
+		if (pos >= 0) {
+			unsigned long tmp = clk + (unsigned long) pos;
+
+			tmp <<= LVL_SHIFT(lvl);
+			if (time_before(tmp, next))
+				next = tmp;
+		}
+		/*
+		 * Clock for the next level. If the current level clock lower
+		 * bits are zero, we look at the next level as is. If not we
+		 * need to advance it by one because that's going to be the
+		 * next expiring bucket in that level. base->clk is the next
+		 * expiring jiffy. So in case of:
+		 *
+		 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
+		 *  0    0    0    0    0    0
+		 *
+		 * we have to look at all levels @index 0. With
+		 *
+		 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
+		 *  0    0    0    0    0    2
+		 *
+		 * LVL0 has the next expiring bucket @index 2. The upper
+		 * levels have the next expiring bucket @index 1.
+		 *
+		 * In case that the propagation wraps the next level the same
+		 * rules apply:
+		 *
+		 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
+		 *  0    0    0    0    F    2
+		 *
+		 * So after looking at LVL0 we get:
+		 *
+		 * LVL5 LVL4 LVL3 LVL2 LVL1
+		 *  0    0    0    1    0
+		 *
+		 * So no propagation from LVL1 to LVL2 because that happened
+		 * with the add already, but then we need to propagate further
+		 * from LVL2 to LVL3.
+		 *
+		 * So the simple check whether the lower bits of the current
+		 * level are 0 or not is sufficient for all cases.
+		 */
+		adj = clk & LVL_CLK_MASK ? 1 : 0;
+		clk >>= LVL_CLK_SHIFT;
+		clk += adj;
 	}
-	return expires;
+	spin_unlock(&base->lock);
+	return next;
 }
 
 /*
@@ -1335,7 +1450,7 @@ static u64 cmp_next_hrtimer_event(u64 ba
  */
 u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
 {
-	struct timer_base *base = this_cpu_ptr(&timer_bases);
+	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
 	u64 expires = KTIME_MAX;
 	unsigned long nextevt;
 
@@ -1346,17 +1461,11 @@ u64 get_next_timer_interrupt(unsigned lo
 	if (cpu_is_offline(smp_processor_id()))
 		return expires;
 
-	spin_lock(&base->lock);
-	if (base->active_timers) {
-		if (time_before_eq(base->next_timer, base->clk))
-			base->next_timer = __next_timer_interrupt(base);
-		nextevt = base->next_timer;
-		if (time_before_eq(nextevt, basej))
-			expires = basem;
-		else
-			expires = basem + (nextevt - basej) * TICK_NSEC;
-	}
-	spin_unlock(&base->lock);
+	nextevt = __next_timer_interrupt(base);
+	if (time_before_eq(nextevt, basej))
+		expires = basem;
+	else
+		expires = basem + (nextevt - basej) * TICK_NSEC;
 
 	return cmp_next_hrtimer_event(basem, expires);
 }
@@ -1387,10 +1496,11 @@ void update_process_times(int user_tick)
  */
 static void run_timer_softirq(struct softirq_action *h)
 {
-	struct timer_base *base = this_cpu_ptr(&timer_bases);
+	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
 
-	if (time_after_eq(jiffies, base->clk))
-		__run_timers(base);
+	__run_timers(base);
+	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active)
+		__run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
 }
 
 /*
@@ -1541,7 +1651,6 @@ static void migrate_timer_list(struct ti
 
 	while (!hlist_empty(head)) {
 		timer = hlist_entry(head->first, struct timer_list, entry);
-		/* We ignore the accounting on the dying cpu */
 		detach_timer(timer, false);
 		timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu;
 		internal_add_timer(new_base, timer);
@@ -1552,35 +1661,29 @@ static void migrate_timers(int cpu)
 {
 	struct timer_base *old_base;
 	struct timer_base *new_base;
-	int i;
+	int b, i;
 
 	BUG_ON(cpu_online(cpu));
-	old_base = per_cpu_ptr(&timer_bases, cpu);
-	new_base = get_cpu_ptr(&timer_bases);
-	/*
-	 * The caller is globally serialized and nobody else
-	 * takes two locks at once, deadlock is not possible.
-	 */
-	spin_lock_irq(&new_base->lock);
-	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 
-	BUG_ON(old_base->running_timer);
+	for (b = 0; b < NR_BASES; b++) {
+		old_base = per_cpu_ptr(&timer_bases[b], cpu);
+		new_base = get_cpu_ptr(&timer_bases[b]);
+		/*
+		 * The caller is globally serialized and nobody else
+		 * takes two locks at once, deadlock is not possible.
+		 */
+		spin_lock_irq(&new_base->lock);
+		spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
+
+		BUG_ON(old_base->running_timer);
+
+		for (i = 0; i < WHEEL_SIZE; i++)
+			migrate_timer_list(new_base, old_base->vectors + i);
 
-	for (i = 0; i < TVR_SIZE; i++)
-		migrate_timer_list(new_base, old_base->tv1.vec + i);
-	for (i = 0; i < TVN_SIZE; i++) {
-		migrate_timer_list(new_base, old_base->tv2.vec + i);
-		migrate_timer_list(new_base, old_base->tv3.vec + i);
-		migrate_timer_list(new_base, old_base->tv4.vec + i);
-		migrate_timer_list(new_base, old_base->tv5.vec + i);
-	}
-
-	old_base->active_timers = 0;
-	old_base->all_timers = 0;
-
-	spin_unlock(&old_base->lock);
-	spin_unlock_irq(&new_base->lock);
-	put_cpu_ptr(&timer_bases);
+		spin_unlock(&old_base->lock);
+		spin_unlock_irq(&new_base->lock);
+		put_cpu_ptr(&timer_bases);
+	}
 }
 
 static int timer_cpu_notify(struct notifier_block *self,
@@ -1608,13 +1711,15 @@ static inline void timer_register_cpu_no
 
 static void __init init_timer_cpu(int cpu)
 {
-	struct timer_base *base = per_cpu_ptr(&timer_bases, cpu);
-
-	base->cpu = cpu;
-	spin_lock_init(&base->lock);
+	struct timer_base *base;
+	int i;
 
-	base->clk = jiffies;
-	base->next_timer = base->clk;
+	for (i = 0; i < NR_BASES; i++) {
+		base = per_cpu_ptr(&timer_bases[i], cpu);
+		base->cpu = cpu;
+		spin_lock_init(&base->lock);
+		base->clk = jiffies;
+	}
 }
 
 static void __init init_timer_cpus(void)

^ permalink raw reply	[flat|nested] 60+ messages in thread

* [patch 4 15/22] timer: Remove slack leftovers
  2016-07-04  9:50 [patch 4 00/22] timer: Refactor the timer wheel Thomas Gleixner
                   ` (13 preceding siblings ...)
  2016-07-04  9:50 ` [patch 4 14/22] timer: Switch to a non cascading wheel Thomas Gleixner
@ 2016-07-04  9:50 ` Thomas Gleixner
  2016-07-07  8:46   ` [tip:timers/core] timers: Remove set_timer_slack() leftovers tip-bot for Thomas Gleixner
  2016-07-22 11:31   ` [patch 4 15/22] timer: Remove slack leftovers Jason A. Donenfeld
  2016-07-04  9:50 ` [patch 4 16/22] timer: Move __run_timers() function Thomas Gleixner
                   ` (6 subsequent siblings)
  21 siblings, 2 replies; 60+ messages in thread
From: Thomas Gleixner @ 2016-07-04  9:50 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Paul McKenney, Frederic Weisbecker,
	Chris Mason, Arjan van de Ven, rt, Rik van Riel, George Spelvin,
	Len Brown, Josh Triplett, Eric Dumazet

[-- Attachment #1: timer_Remove_slack_leftovers.patch --]
[-- Type: text/plain, Size: 5252 bytes --]

We now have implicit batching in the timer wheel. The slack is no longer
used. Remove it.
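
For driver code the conversion is purely mechanical, e.g. (hypothetical
snippet; my_timeout_fn and host are made-up names):

	/* Before: the driver hinted at acceptable imprecision itself */
	setup_timer(&host->timer, my_timeout_fn, (unsigned long)host);
	set_timer_slack(&host->timer, HZ);
	mod_timer(&host->timer, jiffies + 10 * HZ);

	/* After: the wheel's granularity levels batch implicitly */
	setup_timer(&host->timer, my_timeout_fn, (unsigned long)host);
	mod_timer(&host->timer, jiffies + 10 * HZ);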

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Chris Mason <clm@fb.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: rt@linutronix.de
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Arjan van de Ven <arjan@infradead.org>


---
 block/genhd.c                   |    5 -----
 drivers/mmc/host/jz4740_mmc.c   |    2 --
 drivers/power/bq27xxx_battery.c |    5 +----
 drivers/usb/host/ohci-hcd.c     |    1 -
 drivers/usb/host/xhci.c         |    2 --
 include/linux/timer.h           |    4 ----
 kernel/time/timer.c             |   19 -------------------
 lib/random32.c                  |    1 -
 8 files changed, 1 insertion(+), 38 deletions(-)

--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1523,12 +1523,7 @@ static void __disk_unblock_events(struct
 	if (--ev->block)
 		goto out_unlock;
 
-	/*
-	 * Not exactly a latency critical operation, set poll timer
-	 * slack to 25% and kick event check.
-	 */
 	intv = disk_events_poll_jiffies(disk);
-	set_timer_slack(&ev->dwork.timer, intv / 4);
 	if (check_now)
 		queue_delayed_work(system_freezable_power_efficient_wq,
 				&ev->dwork, 0);
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -1068,8 +1068,6 @@ static int jz4740_mmc_probe(struct platf
 	jz4740_mmc_clock_disable(host);
 	setup_timer(&host->timeout_timer, jz4740_mmc_timeout,
 			(unsigned long)host);
-	/* It is not important when it times out, it just needs to timeout. */
-	set_timer_slack(&host->timeout_timer, HZ);
 
 	host->use_dma = true;
 	if (host->use_dma && jz4740_mmc_acquire_dma_channels(host) != 0)
--- a/drivers/power/bq27xxx_battery.c
+++ b/drivers/power/bq27xxx_battery.c
@@ -735,11 +735,8 @@ static void bq27xxx_battery_poll(struct
 
 	bq27xxx_battery_update(di);
 
-	if (poll_interval > 0) {
-		/* The timer does not have to be accurate. */
-		set_timer_slack(&di->work.timer, poll_interval * HZ / 4);
+	if (poll_interval > 0)
 		schedule_delayed_work(&di->work, poll_interval * HZ);
-	}
 }
 
 /*
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -500,7 +500,6 @@ static int ohci_init (struct ohci_hcd *o
 
 	setup_timer(&ohci->io_watchdog, io_watchdog_func,
 			(unsigned long) ohci);
-	set_timer_slack(&ohci->io_watchdog, msecs_to_jiffies(20));
 
 	ohci->hcca = dma_alloc_coherent (hcd->self.controller,
 			sizeof(*ohci->hcca), &ohci->hcca_dma, GFP_KERNEL);
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -490,8 +490,6 @@ static void compliance_mode_recovery_tim
 	xhci->comp_mode_recovery_timer.expires = jiffies +
 			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);
 
-	set_timer_slack(&xhci->comp_mode_recovery_timer,
-			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
 	add_timer(&xhci->comp_mode_recovery_timer);
 	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
 			"Compliance mode recovery timer initialized");
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -19,7 +19,6 @@ struct timer_list {
 	void			(*function)(unsigned long);
 	unsigned long		data;
 	u32			flags;
-	int			slack;
 
 #ifdef CONFIG_TIMER_STATS
 	int			start_pid;
@@ -73,7 +72,6 @@ struct timer_list {
 		.expires = (_expires),				\
 		.data = (_data),				\
 		.flags = (_flags),				\
-		.slack = -1,					\
 		__TIMER_LOCKDEP_MAP_INITIALIZER(		\
 			__FILE__ ":" __stringify(__LINE__))	\
 	}
@@ -193,8 +191,6 @@ extern int del_timer(struct timer_list *
 extern int mod_timer(struct timer_list *timer, unsigned long expires);
 extern int mod_timer_pending(struct timer_list *timer, unsigned long expires);
 
-extern void set_timer_slack(struct timer_list *time, int slack_hz);
-
 /*
  * The jiffies value which is added to now, when there is no timer
  * in the timer wheel:
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -447,24 +447,6 @@ unsigned long round_jiffies_up_relative(
 }
 EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
 
-/**
- * set_timer_slack - set the allowed slack for a timer
- * @timer: the timer to be modified
- * @slack_hz: the amount of time (in jiffies) allowed for rounding
- *
- * Set the amount of time, in jiffies, that a certain timer has
- * in terms of slack. By setting this value, the timer subsystem
- * will schedule the actual timer somewhere between
- * the time mod_timer() asks for, and that time plus the slack.
- *
- * By setting the slack to -1, a percentage of the delay is used
- * instead.
- */
-void set_timer_slack(struct timer_list *timer, int slack_hz)
-{
-	timer->slack = slack_hz;
-}
-EXPORT_SYMBOL_GPL(set_timer_slack);
 
 static inline unsigned int timer_get_idx(struct timer_list *timer)
 {
@@ -775,7 +757,6 @@ static void do_init_timer(struct timer_l
 {
 	timer->entry.pprev = NULL;
 	timer->flags = flags | raw_smp_processor_id();
-	timer->slack = -1;
 #ifdef CONFIG_TIMER_STATS
 	timer->start_site = NULL;
 	timer->start_pid = -1;
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -233,7 +233,6 @@ static void __prandom_timer(unsigned lon
 
 static void __init __prandom_start_seed_timer(void)
 {
-	set_timer_slack(&seed_timer, HZ);
 	seed_timer.expires = jiffies + msecs_to_jiffies(40 * MSEC_PER_SEC);
 	add_timer(&seed_timer);
 }

^ permalink raw reply	[flat|nested] 60+ messages in thread

* [patch 4 16/22] timer: Move __run_timers() function
  2016-07-04  9:50 [patch 4 00/22] timer: Refactor the timer wheel Thomas Gleixner
                   ` (14 preceding siblings ...)
  2016-07-04  9:50 ` [patch 4 15/22] timer: Remove slack leftovers Thomas Gleixner
@ 2016-07-04  9:50 ` Thomas Gleixner
  2016-07-07  8:46   ` [tip:timers/core] timers: " tip-bot for Anna-Maria Gleixner
  2016-07-04  9:50 ` [patch 4 17/22] timer: Optimize collect timers for NOHZ Thomas Gleixner
                   ` (5 subsequent siblings)
  21 siblings, 1 reply; 60+ messages in thread
From: Thomas Gleixner @ 2016-07-04  9:50 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Paul McKenney, Frederic Weisbecker,
	Chris Mason, Arjan van de Ven, rt, Rik van Riel, George Spelvin,
	Len Brown, Josh Triplett, Anna-Maria Gleixner, Eric Dumazet

[-- Attachment #1: timer_Move___run_timers_function.patch --]
[-- Type: text/plain, Size: 2293 bytes --]

From: Anna-Maria Gleixner <anna-maria@linutronix.de>

Move __run_timers() below __next_timer_interrupt() and next_pending_bucket()
in preparation for the __run_timers() NOHZ optimization.

No functional change.

Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Chris Mason <clm@fb.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: rt@linutronix.de
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Arjan van de Ven <arjan@infradead.org>

---
 kernel/time/timer.c |   52 ++++++++++++++++++++++++++--------------------------
 1 file changed, 26 insertions(+), 26 deletions(-)

--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1277,32 +1277,6 @@ static int collect_expired_timers(struct
 	return levels;
 }
 
-/**
- * __run_timers - run all expired timers (if any) on this CPU.
- * @base: the timer vector to be processed.
- */
-static inline void __run_timers(struct timer_base *base)
-{
-	struct hlist_head heads[LVL_DEPTH];
-	int levels;
-
-	if (!time_after_eq(jiffies, base->clk))
-		return;
-
-	spin_lock_irq(&base->lock);
-
-	while (time_after_eq(jiffies, base->clk)) {
-
-		levels = collect_expired_timers(base, heads);
-		base->clk++;
-
-		while (levels--)
-			expire_timers(base, heads + levels);
-	}
-	base->running_timer = NULL;
-	spin_unlock_irq(&base->lock);
-}
-
 #ifdef CONFIG_NO_HZ_COMMON
 /*
  * Find the next pending bucket of a level. Search from @offset + @clk upwards
@@ -1472,6 +1446,32 @@ void update_process_times(int user_tick)
 	run_posix_cpu_timers(p);
 }
 
+/**
+ * __run_timers - run all expired timers (if any) on this CPU.
+ * @base: the timer vector to be processed.
+ */
+static inline void __run_timers(struct timer_base *base)
+{
+	struct hlist_head heads[LVL_DEPTH];
+	int levels;
+
+	if (!time_after_eq(jiffies, base->clk))
+		return;
+
+	spin_lock_irq(&base->lock);
+
+	while (time_after_eq(jiffies, base->clk)) {
+
+		levels = collect_expired_timers(base, heads);
+		base->clk++;
+
+		while (levels--)
+			expire_timers(base, heads + levels);
+	}
+	base->running_timer = NULL;
+	spin_unlock_irq(&base->lock);
+}
+
 /*
  * This function runs timers and the timer-tq in bottom half context.
  */

^ permalink raw reply	[flat|nested] 60+ messages in thread

* [patch 4 17/22] timer: Optimize collect timers for NOHZ
  2016-07-04  9:50 [patch 4 00/22] timer: Refactor the timer wheel Thomas Gleixner
                   ` (15 preceding siblings ...)
  2016-07-04  9:50 ` [patch 4 16/22] timer: Move __run_timers() function Thomas Gleixner
@ 2016-07-04  9:50 ` Thomas Gleixner
  2016-07-07  8:47   ` [tip:timers/core] timers: Optimize collect_expired_timers() " tip-bot for Anna-Maria Gleixner
  2016-07-04  9:50 ` [patch 4 18/22] tick/sched: Remove pointless empty function Thomas Gleixner
                   ` (4 subsequent siblings)
  21 siblings, 1 reply; 60+ messages in thread
From: Thomas Gleixner @ 2016-07-04  9:50 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Paul McKenney, Frederic Weisbecker,
	Chris Mason, Arjan van de Ven, rt, Rik van Riel, George Spelvin,
	Len Brown, Josh Triplett, Anna-Maria Gleixner, Eric Dumazet

[-- Attachment #1: timer_Optimize_collect_timers_for_NOHZ.patch --]
[-- Type: text/plain, Size: 3984 bytes --]

From: Anna-Maria Gleixner <anna-maria@linutronix.de>

After a NOHZ idle sleep the wheel must be forwarded to current jiffies. There
might be expired timers, so the current code loops and checks the expired
buckets for timers. This can take quite some time for long NOHZ idle periods.

The pending bitmask in the timer base allows us to do a quick search for the
next expiring timer and therefore a fast forward of the base time, which
prevents pointless long-lasting loops.

For a 3 second idle sleep this reduces the catchup time from ~1ms to 5us.
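
Condensed, the new logic in collect_expired_timers() below is:

	if ((long)(jiffies - base->clk) > 2) {
		unsigned long next = __next_timer_interrupt(base);

		/* Nothing expired in between: jump straight to now */
		if (time_after(next, jiffies))
			base->clk = jiffies - 1;	/* call site increments */
		else
			base->clk = next;
	}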

Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Chris Mason <clm@fb.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: rt@linutronix.de
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Arjan van de Ven <arjan@infradead.org>

---
 kernel/time/timer.c |   49 +++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 41 insertions(+), 8 deletions(-)

--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1252,8 +1252,8 @@ static void expire_timers(struct timer_b
 	}
 }
 
-static int collect_expired_timers(struct timer_base *base,
-				  struct hlist_head *heads)
+static int __collect_expired_timers(struct timer_base *base,
+				    struct hlist_head *heads)
 {
 	unsigned long clk = base->clk;
 	struct hlist_head *vec;
@@ -1279,9 +1279,9 @@ static int collect_expired_timers(struct
 
 #ifdef CONFIG_NO_HZ_COMMON
 /*
- * Find the next pending bucket of a level. Search from @offset + @clk upwards
- * and if nothing there, search from start of the level (@offset) up to
- * @offset + clk.
+ * Find the next pending bucket of a level. Search from level start (@offset)
+ * + @clk upwards and if nothing there, search from start of the level
+ * (@offset) up to @offset + clk.
  */
 static int next_pending_bucket(struct timer_base *base, unsigned offset,
 			       unsigned clk)
@@ -1298,14 +1298,14 @@ static int next_pending_bucket(struct ti
 }
 
 /*
- * Search the first expiring timer in the various clock levels.
+ * Search the first expiring timer in the various clock levels. Caller must
+ * hold base->lock.
  */
 static unsigned long __next_timer_interrupt(struct timer_base *base)
 {
 	unsigned long clk, next, adj;
 	unsigned lvl, offset = 0;
 
-	spin_lock(&base->lock);
 	next = base->clk + NEXT_TIMER_MAX_DELTA;
 	clk = base->clk;
 	for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) {
@@ -1358,7 +1358,6 @@ static unsigned long __next_timer_interr
 		clk >>= LVL_CLK_SHIFT;
 		clk += adj;
 	}
-	spin_unlock(&base->lock);
 	return next;
 }
 
@@ -1416,7 +1415,10 @@ u64 get_next_timer_interrupt(unsigned lo
 	if (cpu_is_offline(smp_processor_id()))
 		return expires;
 
+	spin_lock(&base->lock);
 	nextevt = __next_timer_interrupt(base);
+	spin_unlock(&base->lock);
+
 	if (time_before_eq(nextevt, basej))
 		expires = basem;
 	else
@@ -1424,6 +1426,37 @@ u64 get_next_timer_interrupt(unsigned lo
 
 	return cmp_next_hrtimer_event(basem, expires);
 }
+
+static int collect_expired_timers(struct timer_base *base,
+				  struct hlist_head *heads)
+{
+	/*
+	 * NOHZ optimization. After a long idle sleep we need to forward the
+	 * base to current jiffies. Avoid a loop by searching the bitfield for
+	 * the next expiring timer.
+	 */
+	if ((long)(jiffies - base->clk) > 2) {
+		unsigned long next = __next_timer_interrupt(base);
+
+		/*
+		 * If the next timer is ahead of time forward to current
+		 * jiffies, otherwise forward to the next expiry time.
+		 */
+		if (time_after(next, jiffies)) {
+			/* The call site will increment clock! */
+			base->clk = jiffies - 1;
+			return 0;
+		}
+		base->clk = next;
+	}
+	return __collect_expired_timers(base, heads);
+}
+#else
+static inline int collect_expired_timers(struct timer_base *base,
+					 struct hlist_head *heads)
+{
+	return __collect_expired_timers(base, heads);
+}
 #endif
 
 /*

^ permalink raw reply	[flat|nested] 60+ messages in thread

* [patch 4 18/22] tick/sched: Remove pointless empty function
  2016-07-04  9:50 [patch 4 00/22] timer: Refactor the timer wheel Thomas Gleixner
                   ` (16 preceding siblings ...)
  2016-07-04  9:50 ` [patch 4 17/22] timer: Optimize collect timers for NOHZ Thomas Gleixner
@ 2016-07-04  9:50 ` Thomas Gleixner
  2016-07-07  8:47   ` [tip:timers/core] timers/nohz: Remove pointless tick_nohz_kick_tick() function tip-bot for Thomas Gleixner
  2016-07-04  9:50 ` [patch 4 19/22] timer: Forward wheel clock whenever possible Thomas Gleixner
                   ` (3 subsequent siblings)
  21 siblings, 1 reply; 60+ messages in thread
From: Thomas Gleixner @ 2016-07-04  9:50 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Paul McKenney, Frederic Weisbecker,
	Chris Mason, Arjan van de Ven, rt, Rik van Riel, George Spelvin,
	Len Brown, Josh Triplett, Eric Dumazet

[-- Attachment #1: ticksched_Remove_pointless_empty_function.patch --]
[-- Type: text/plain, Size: 2021 bytes --]

This was a failed attempt to optimize the timer expiry in idle, which was
disabled and never revisited. Remove the cruft.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Chris Mason <clm@fb.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: rt@linutronix.de
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Arjan van de Ven <arjan@infradead.org>

---
 kernel/time/tick-sched.c |   33 +--------------------------------
 1 file changed, 1 insertion(+), 32 deletions(-)

--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -1092,35 +1092,6 @@ static void tick_nohz_switch_to_nohz(voi
 	tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
 }
 
-/*
- * When NOHZ is enabled and the tick is stopped, we need to kick the
- * tick timer from irq_enter() so that the jiffies update is kept
- * alive during long running softirqs. That's ugly as hell, but
- * correctness is key even if we need to fix the offending softirq in
- * the first place.
- *
- * Note, this is different to tick_nohz_restart. We just kick the
- * timer and do not touch the other magic bits which need to be done
- * when idle is left.
- */
-static void tick_nohz_kick_tick(struct tick_sched *ts, ktime_t now)
-{
-#if 0
-	/* Switch back to 2.6.27 behaviour */
-	ktime_t delta;
-
-	/*
-	 * Do not touch the tick device, when the next expiry is either
-	 * already reached or less/equal than the tick period.
-	 */
-	delta =	ktime_sub(hrtimer_get_expires(&ts->sched_timer), now);
-	if (delta.tv64 <= tick_period.tv64)
-		return;
-
-	tick_nohz_restart(ts, now);
-#endif
-}
-
 static inline void tick_nohz_irq_enter(void)
 {
 	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
@@ -1131,10 +1102,8 @@ static inline void tick_nohz_irq_enter(v
 	now = ktime_get();
 	if (ts->idle_active)
 		tick_nohz_stop_idle(ts, now);
-	if (ts->tick_stopped) {
+	if (ts->tick_stopped)
 		tick_nohz_update_jiffies(now);
-		tick_nohz_kick_tick(ts, now);
-	}
 }
 
 #else

^ permalink raw reply	[flat|nested] 60+ messages in thread

* [patch 4 19/22] timer: Forward wheel clock whenever possible
  2016-07-04  9:50 [patch 4 00/22] timer: Refactor the timer wheel Thomas Gleixner
                   ` (17 preceding siblings ...)
  2016-07-04  9:50 ` [patch 4 18/22] tick/sched: Remove pointless empty function Thomas Gleixner
@ 2016-07-04  9:50 ` Thomas Gleixner
  2016-07-07  8:48   ` [tip:timers/core] timers: Forward the " tip-bot for Thomas Gleixner
  2016-07-04  9:50 ` [patch 4 20/22] timer: Only wake softirq if necessary Thomas Gleixner
                   ` (2 subsequent siblings)
  21 siblings, 1 reply; 60+ messages in thread
From: Thomas Gleixner @ 2016-07-04  9:50 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Paul McKenney, Frederic Weisbecker,
	Chris Mason, Arjan van de Ven, rt, Rik van Riel, George Spelvin,
	Len Brown, Josh Triplett, Eric Dumazet

[-- Attachment #1: timer_Forward_wheel_clock_whenever_possible.patch --]
[-- Type: text/plain, Size: 7321 bytes --]

The wheel clock is stale when a cpu goes into a long idle sleep. This has the
side effect that queued timers end up in the outer wheel levels, which
results in coarser granularity.

To solve this, we keep track of the idle state and forward the wheel clock
whenever possible.
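
In essence the enqueue path gains a forwarding step before the bucket
calculation (condensed from forward_timer_base() below):

	/* Only an idle base with a stale clock needs forwarding */
	if (base->is_idle && (long)(jiffies - base->clk) >= 2) {
		if (time_after(base->next_expiry, jiffies))
			base->clk = jiffies;
		else
			base->clk = base->next_expiry;
	}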

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Chris Mason <clm@fb.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: rt@linutronix.de
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Arjan van de Ven <arjan@infradead.org>

---
 kernel/time/tick-internal.h |    1 
 kernel/time/tick-sched.c    |   12 ++++
 kernel/time/timer.c         |  128 ++++++++++++++++++++++++++++++++++++--------
 3 files changed, 120 insertions(+), 21 deletions(-)

--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -164,3 +164,4 @@ static inline void timers_update_migrati
 DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
 
 extern u64 get_next_timer_interrupt(unsigned long basej, u64 basem);
+void timer_clear_idle(void);
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -700,6 +700,12 @@ static ktime_t tick_nohz_stop_sched_tick
 	delta = next_tick - basemono;
 	if (delta <= (u64)TICK_NSEC) {
 		tick.tv64 = 0;
+
+		/*
+		 * Tell the timer code that the base is not idle, i.e. undo
+		 * the effect of get_next_timer_interrupt().
+		 */
+		timer_clear_idle();
 		/*
 		 * We've not stopped the tick yet, and there's a timer in the
 		 * next period, so no point in stopping it either, bail.
@@ -809,6 +815,12 @@ static void tick_nohz_restart_sched_tick
 	tick_do_update_jiffies64(now);
 	cpu_load_update_nohz_stop();
 
+	/*
+	 * Clear the timer idle flag, so we avoid IPIs on remote queueing and
+	 * the clock forward checks in the enqueue path.
+	 */
+	timer_clear_idle();
+
 	calc_load_exit_idle();
 	touch_softlockup_watchdog_sched();
 	/*
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -196,9 +196,11 @@ struct timer_base {
 	spinlock_t		lock;
 	struct timer_list	*running_timer;
 	unsigned long		clk;
+	unsigned long		next_expiry;
 	unsigned int		cpu;
 	bool			migration_enabled;
 	bool			nohz_active;
+	bool			is_idle;
 	DECLARE_BITMAP(pending_map, WHEEL_SIZE);
 	struct hlist_head	vectors[WHEEL_SIZE];
 } ____cacheline_aligned;
@@ -519,24 +521,37 @@ static void internal_add_timer(struct ti
 {
 	__internal_add_timer(base, timer);
 
+	if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active)
+		return;
+
 	/*
-	 * Check whether the other CPU is in dynticks mode and needs
-	 * to be triggered to reevaluate the timer wheel.  We are
-	 * protected against the other CPU fiddling with the timer by
-	 * holding the timer base lock. This also makes sure that a
-	 * CPU on the way to stop its tick can not evaluate the timer
-	 * wheel.
-	 *
-	 * Spare the IPI for deferrable timers on idle targets though.
-	 * The next busy ticks will take care of it. Except full dynticks
-	 * require special care against races with idle_cpu(), lets deal
-	 * with that later.
-	 */
-	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active) {
-		if (!(timer->flags & TIMER_DEFERRABLE) ||
-		    tick_nohz_full_cpu(base->cpu))
+	 * This wants some optimizing similar to the below, but we do that
+	 * when we switch from push to pull for deferrable timers.
+	 */
+	if (timer->flags & TIMER_DEFERRABLE) {
+		if (tick_nohz_full_cpu(base->cpu))
 			wake_up_nohz_cpu(base->cpu);
+		return;
 	}
+
+	/*
+	 * We might have to IPI the remote CPU if the base is idle and the
+	 * timer is not deferrable. If the other cpu is on the way to idle
+	 * then it can't set base->is_idle as we hold base lock.
+	 */
+	if (!base->is_idle)
+		return;
+
+	/* Check whether this is the new first expiring timer */
+	if (time_after_eq(timer->expires, base->next_expiry))
+		return;
+
+	/*
+	 * Set the next expiry time and kick the cpu so it can reevaluate the
+	 * wheel
+	 */
+	base->next_expiry = timer->expires;
+	wake_up_nohz_cpu(base->cpu);
 }
 
 #ifdef CONFIG_TIMER_STATS
@@ -844,10 +859,11 @@ static inline struct timer_base *get_tim
 	return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK);
 }
 
-static inline struct timer_base *get_target_base(struct timer_base *base,
-						 unsigned tflags)
+#ifdef CONFIG_NO_HZ_COMMON
+static inline struct timer_base *__get_target_base(struct timer_base *base,
+						   unsigned tflags)
 {
-#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
+#ifdef CONFIG_SMP
 	if ((tflags & TIMER_PINNED) || !base->migration_enabled)
 		return get_timer_this_cpu_base(tflags);
 	return get_timer_cpu_base(tflags, get_nohz_timer_target());
@@ -856,6 +872,43 @@ static inline struct timer_base *get_tar
 #endif
 }
 
+static inline void forward_timer_base(struct timer_base *base)
+{
+	/*
+	 * We only forward the base when it's idle and we have a delta between
+	 * base clock and jiffies.
+	 */
+	if (!base->is_idle || (long) (jiffies - base->clk) < 2)
+		return;
+
+	/*
+	 * If the next expiry value is > jiffies, then we fast forward to
+	 * jiffies, otherwise we forward to the next expiry value.
+	 */
+	if (time_after(base->next_expiry, jiffies))
+		base->clk = jiffies;
+	else
+		base->clk = base->next_expiry;
+}
+#else
+static inline struct timer_base *__get_target_base(struct timer_base *base,
+						   unsigned tflags)
+{
+	return get_timer_this_cpu_base(tflags);
+}
+
+static inline void forward_timer_base(struct timer_base *base) { }
+#endif
+
+static inline struct timer_base *get_target_base(struct timer_base *base,
+						 unsigned tflags)
+{
+	struct timer_base *target = __get_target_base(base, tflags);
+
+	forward_timer_base(target);
+	return target;
+}
+
 /*
  * We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means
  * that all timers which are tied to this base are locked, and the base itself
@@ -1417,16 +1470,49 @@ u64 get_next_timer_interrupt(unsigned lo
 
 	spin_lock(&base->lock);
 	nextevt = __next_timer_interrupt(base);
-	spin_unlock(&base->lock);
+	base->next_expiry = nextevt;
+	/*
+	 * We have a fresh next event. Check whether we can forward the base.
+	 */
+	if (time_after(nextevt, jiffies))
+		base->clk = jiffies;
+	else if (time_after(nextevt, base->clk))
+		base->clk = nextevt;
 
-	if (time_before_eq(nextevt, basej))
+	if (time_before_eq(nextevt, basej)) {
 		expires = basem;
-	else
+		base->is_idle = false;
+	} else {
 		expires = basem + (nextevt - basej) * TICK_NSEC;
+		/*
+		 * If we expect to sleep more than a tick, mark the base idle.
+		 */
+		if ((expires - basem) > TICK_NSEC)
+			base->is_idle = true;
+	}
+	spin_unlock(&base->lock);
 
 	return cmp_next_hrtimer_event(basem, expires);
 }
 
+/**
+ * timer_clear_idle - Clear the idle state of the timer base
+ *
+ * Called with interrupts disabled
+ */
+void timer_clear_idle(void)
+{
+	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
+
+	/*
+	 * We do this unlocked. The worst outcome is a remote enqueue sending
+	 * a pointless IPI, but taking the lock would just make the window for
+	 * sending the IPI a few instructions smaller for the cost of taking
+	 * the lock in the exit from idle path.
+	 */
+	base->is_idle = false;
+}
+
 static int collect_expired_timers(struct timer_base *base,
 				  struct hlist_head *heads)
 {

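In condensed form, the idle tracking this patch adds looks like this (a
sketch of the two sides, not the literal diff; all names are taken from
the hunks above):

	/*
	 * Idle side, get_next_timer_interrupt() under base->lock:
	 * publish the next expiry and mark the base idle when the CPU
	 * expects to sleep for more than a tick.
	 */
	base->next_expiry = nextevt;
	base->is_idle = (expires - basem) > TICK_NSEC;

	/*
	 * Enqueue side, internal_add_timer() under the same lock: IPI
	 * the remote CPU only when a non-deferrable timer is queued
	 * into an idle base and expires before the published value.
	 */
	if (base->is_idle && !(timer->flags & TIMER_DEFERRABLE) &&
	    time_before(timer->expires, base->next_expiry)) {
		base->next_expiry = timer->expires;
		wake_up_nohz_cpu(base->cpu);
	}
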
^ permalink raw reply	[flat|nested] 60+ messages in thread

* [patch 4 20/22] timer: Only wake softirq if necessary
  2016-07-04  9:50 [patch 4 00/22] timer: Refactor the timer wheel Thomas Gleixner
                   ` (18 preceding siblings ...)
  2016-07-04  9:50 ` [patch 4 19/22] timer: Forward wheel clock whenever possible Thomas Gleixner
@ 2016-07-04  9:50 ` Thomas Gleixner
  2016-07-07  8:48   ` [tip:timers/core] timers: " tip-bot for Thomas Gleixner
  2016-07-04  9:50 ` [patch 4 21/22] timer: Split out index calculation Thomas Gleixner
  2016-07-04  9:50 ` [patch 4 22/22] timer: Optimization for same expiry time in mod_timer() Thomas Gleixner
  21 siblings, 1 reply; 60+ messages in thread
From: Thomas Gleixner @ 2016-07-04  9:50 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Paul McKenney, Frederic Weisbecker,
	Chris Mason, Arjan van de Ven, rt, Rik van Riel, George Spelvin,
	Len Brown, Josh Triplett

[-- Attachment #1: timer--Only-wake-softirq-if-necessary.patch --]
[-- Type: text/plain, Size: 802 bytes --]

With the wheel forwarding in place and with the HZ=1000 4ms folding we can
avoid running the softirq at all when no timer is due.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 kernel/time/timer.c |   11 +++++++++++
 1 file changed, 11 insertions(+)

--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1608,7 +1608,18 @@ static void run_timer_softirq(struct sof
  */
 void run_local_timers(void)
 {
+	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
+
 	hrtimer_run_queues();
+	/* Raise the softirq only if required. */
+	if (time_before(jiffies, base->clk)) {
+		if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active)
+			return;
+		/* CPU is awake, so check the deferrable base. */
+		base++;
+		if (time_before(jiffies, base->clk))
+			return;
+	}
 	raise_softirq(TIMER_SOFTIRQ);
 }
 

^ permalink raw reply	[flat|nested] 60+ messages in thread

* [patch 4 21/22] timer: Split out index calculation
  2016-07-04  9:50 [patch 4 00/22] timer: Refactor the timer wheel Thomas Gleixner
                   ` (19 preceding siblings ...)
  2016-07-04  9:50 ` [patch 4 20/22] timer: Only wake softirq if necessary Thomas Gleixner
@ 2016-07-04  9:50 ` Thomas Gleixner
  2016-07-07  8:48   ` [tip:timers/core] timers: " tip-bot for Anna-Maria Gleixner
  2016-07-04  9:50 ` [patch 4 22/22] timer: Optimization for same expiry time in mod_timer() Thomas Gleixner
  21 siblings, 1 reply; 60+ messages in thread
From: Thomas Gleixner @ 2016-07-04  9:50 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Paul McKenney, Frederic Weisbecker,
	Chris Mason, Arjan van de Ven, rt, Rik van Riel, George Spelvin,
	Len Brown, Josh Triplett, Anna-Maria Gleixner, Eric Dumazet

[-- Attachment #1: timer_Split_out_index_calculation.patch --]
[-- Type: text/plain, Size: 3011 bytes --]

From: Anna-Maria Gleixner <anna-maria@linutronix.de>

For further optimizations we need to separate index calculation and
queueing. No functional change.
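
After the split, the add path decomposes as sketched below (this mirrors
the structure the diff introduces; see the hunks for the full bodies):

	static void
	internal_add_timer(struct timer_base *base, struct timer_list *timer)
	{
		/* Pure computation, usable without requeueing: */
		unsigned int idx = calc_wheel_index(timer->expires, base->clk);

		/* Hash into the bucket and set the pending bit: */
		enqueue_timer(base, timer, idx);
		/* Kick a nohz idle target CPU if required: */
		trigger_dyntick_cpu(base, timer);
	}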

Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Chris Mason <clm@fb.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: rt@linutronix.de
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Arjan van de Ven <arjan@infradead.org>

---
 kernel/time/timer.c |   47 ++++++++++++++++++++++++++++++++---------------
 1 file changed, 32 insertions(+), 15 deletions(-)

--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -471,12 +471,9 @@ static inline unsigned calc_index(unsign
 	return LVL_OFFS(lvl) + (expires & LVL_MASK);
 }
 
-static void
-__internal_add_timer(struct timer_base *base, struct timer_list *timer)
+static int calc_wheel_index(unsigned long expires, unsigned long clk)
 {
-	unsigned long expires = timer->expires;
-	unsigned long delta = expires - base->clk;
-	struct hlist_head *vec;
+	unsigned long delta = expires - clk;
 	unsigned int idx;
 
 	if (delta < LVL_START(1)) {
@@ -496,7 +493,7 @@ static void
 	} else if (LVL_DEPTH > 8 && delta < LVL_START(8)) {
 		idx = calc_index(expires, 7);
 	} else if ((long) delta < 0) {
-		idx = base->clk & LVL_MASK;
+		idx = clk & LVL_MASK;
 	} else {
 		/*
 		 * Force expire obscene large timeouts to expire at the
@@ -507,20 +504,33 @@ static void
 
 		idx = calc_index(expires, LVL_DEPTH - 1);
 	}
-	/*
-	 * Enqueue the timer into the array bucket, mark it pending in
-	 * the bitmap and store the index in the timer flags.
-	 */
-	vec = base->vectors + idx;
-	hlist_add_head(&timer->entry, vec);
+	return idx;
+}
+
+/*
+ * Enqueue the timer into the hash bucket, mark it pending in
+ * the bitmap and store the index in the timer flags.
+ */
+static void enqueue_timer(struct timer_base *base, struct timer_list *timer,
+			  unsigned int idx)
+{
+	hlist_add_head(&timer->entry, base->vectors + idx);
 	__set_bit(idx, base->pending_map);
 	timer_set_idx(timer, idx);
 }
 
-static void internal_add_timer(struct timer_base *base, struct timer_list *timer)
+static void
+__internal_add_timer(struct timer_base *base, struct timer_list *timer)
 {
-	__internal_add_timer(base, timer);
+	unsigned int idx;
+
+	idx = calc_wheel_index(timer->expires, base->clk);
+	enqueue_timer(base, timer, idx);
+}
 
+static void
+trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
+{
 	if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active)
 		return;
 
@@ -551,7 +561,13 @@ static void internal_add_timer(struct ti
 	 * wheel
 	 */
 	base->next_expiry = timer->expires;
 	wake_up_nohz_cpu(base->cpu);
+}
+
+static void
+internal_add_timer(struct timer_base *base, struct timer_list *timer)
+{
+	__internal_add_timer(base, timer);
+	trigger_dyntick_cpu(base, timer);
 }
 
 #ifdef CONFIG_TIMER_STATS

^ permalink raw reply	[flat|nested] 60+ messages in thread

* [patch 4 22/22] timer: Optimization for same expiry time in mod_timer()
  2016-07-04  9:50 [patch 4 00/22] timer: Refactor the timer wheel Thomas Gleixner
                   ` (20 preceding siblings ...)
  2016-07-04  9:50 ` [patch 4 21/22] timer: Split out index calculation Thomas Gleixner
@ 2016-07-04  9:50 ` Thomas Gleixner
  2016-07-07  8:49   ` [tip:timers/core] timers: Implement optimization " tip-bot for Anna-Maria Gleixner
  21 siblings, 1 reply; 60+ messages in thread
From: Thomas Gleixner @ 2016-07-04  9:50 UTC (permalink / raw)
  To: LKML
  Cc: Ingo Molnar, Peter Zijlstra, Paul McKenney, Frederic Weisbecker,
	Chris Mason, Arjan van de Ven, rt, Rik van Riel, George Spelvin,
	Len Brown, Josh Triplett, Anna-Maria Gleixner, Eric Dumazet

[-- Attachment #1: timer_Optimization_for_same_expiry_time_in_mod_timer.patch --]
[-- Type: text/plain, Size: 4535 bytes --]

From: Anna-Maria Gleixner <anna-maria@linutronix.de>

The existing optimization for same expiry time in mod_timer() checks whether
the timer expiry time is the same as the new requested expiry time. In the old
timer wheel implementation this does not take the slack batching into account,
nor does the new implementation evaluate whether the new expiry time will
requeue the timer to the same bucket.

To optimize that, we can calculate the resulting bucket and check whether it
differs from the timer's current bucket. This calculation happens without
holding the base lock. If the resulting bucket is the same
we can avoid taking the base lock and requeueing the timer.

If the timer needs to be requeued then we have to check under the base lock
whether the base time has changed between the lockless calculation and taking
the lock. If it has changed we need to recalculate under the lock.

This optimization takes effect for timers which are enqueued into the less
granular wheel levels (1 and above). With a simple test case the functionality
has been verified:

            Before     After
Match:        5.5%     86.6%
Requeue:     94.5%     13.4%
Recalc:          -    <0.01%

In the non-optimized case the timer is requeued in 94.5% of the cases. With
the index optimization in place the requeue rate drops to 13.4%. The case
where the lockless index calculation has to be redone is less than 0.01%.

With a real world test case (networking) we observed the following changes:

            Before     After
Match:       97.8%     99.7%
Requeue:      2.2%      0.3%
Recalc:          -   <0.001%

That means two percent fewer lock/requeue/unlock operations in one of the
hot-path use cases of timers.
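
The resulting pattern, as a sketch (names as in the diff below):

	if (timer_pending(timer)) {
		if (timer->expires == expires)
			return 1;
		/* Lockless: snapshot the base clock, compute the bucket. */
		base = get_timer_base(timer->flags);
		clk = base->clk;
		idx = calc_wheel_index(expires, clk);
		/* Same bucket: update the expiry, no requeue needed. */
		if (idx == timer_get_idx(timer)) {
			timer->expires = expires;
			return 1;
		}
	}
	/*
	 * Otherwise take base->lock; if base->clk still equals the
	 * snapshot, reuse idx, else recalculate under the lock.
	 */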


Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Chris Mason <clm@fb.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: rt@linutronix.de
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Arjan van de Ven <arjan@infradead.org>

---
 kernel/time/timer.c |   51 +++++++++++++++++++++++++++++++++++----------------
 1 file changed, 35 insertions(+), 16 deletions(-)

--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -960,28 +960,36 @@ static inline int
 __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
 {
 	struct timer_base *base, *new_base;
-	unsigned long flags;
+	unsigned int idx = UINT_MAX;
+	unsigned long clk = 0, flags;
 	int ret = 0;
 
 	/*
-	 * TODO: Calculate the array bucket of the timer right here w/o
-	 * holding the base lock. This allows to check not only
-	 * timer->expires == expires below, but also whether the timer
-	 * ends up in the same bucket. If we really need to requeue
-	 * the timer then we check whether base->clk have
-	 * advanced between here and locking the timer base. If
-	 * jiffies advanced we have to recalc the array bucket with the
-	 * lock held.
-	 */
-
-	/*
-	 * This is a common optimization triggered by the
-	 * networking code - if the timer is re-modified
-	 * to be the same thing then just return:
+	 * This is a common optimization triggered by the networking code - if
+	 * the timer is re-modified to be the same thing or ends up in the
+	 * same array bucket then just return:
 	 */
 	if (timer_pending(timer)) {
 		if (timer->expires == expires)
 			return 1;
+		/*
+		 * Take the current timer_jiffies of base, but without holding
+		 * the lock!
+		 */
+		base = get_timer_base(timer->flags);
+		clk = base->clk;
+
+		idx = calc_wheel_index(expires, clk);
+
+		/*
+		 * Retrieve and compare the array index of the pending
+		 * timer. If it matches set the expiry to the new value so a
+		 * subsequent call will exit in the expires check above.
+		 */
+		if (idx == timer_get_idx(timer)) {
+			timer->expires = expires;
+			return 1;
+		}
 	}
 
 	timer_stats_timer_set_start_info(timer);
@@ -1018,7 +1026,18 @@ static inline int
 	}
 
 	timer->expires = expires;
-	internal_add_timer(base, timer);
+	/*
+	 * If idx was calculated above and the base time did not advance
+	 * between calculating idx and taking the lock, only enqueue_timer()
+	 * and trigger_dyntick_cpu() are required. Otherwise we need to
+	 * (re)calculate the wheel index via internal_add_timer().
+	 */
+	if (idx != UINT_MAX && clk == base->clk) {
+		enqueue_timer(base, timer, idx);
+		trigger_dyntick_cpu(base, timer);
+	} else {
+		internal_add_timer(base, timer);
+	}
 
 out_unlock:
 	spin_unlock_irqrestore(&base->lock, flags);

^ permalink raw reply	[flat|nested] 60+ messages in thread

* [tip:timers/core] timers: Make 'pinned' a timer property
  2016-07-04  9:50 ` [patch 4 01/22] timer: Make pinned a timer property Thomas Gleixner
@ 2016-07-07  8:39   ` tip-bot for Thomas Gleixner
  0 siblings, 0 replies; 60+ messages in thread
From: tip-bot for Thomas Gleixner @ 2016-07-07  8:39 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: lenb, arjan, linux-kernel, paulmck, clm, fweisbec, hpa, tglx,
	torvalds, peterz, josh, mingo, edumazet, linux, riel

Commit-ID:  e675447bda51c1ea72d1ac9132ce3bed974f1da3
Gitweb:     http://git.kernel.org/tip/e675447bda51c1ea72d1ac9132ce3bed974f1da3
Author:     Thomas Gleixner <tglx@linutronix.de>
AuthorDate: Mon, 4 Jul 2016 09:50:15 +0000
Committer:  Ingo Molnar <mingo@kernel.org>
CommitDate: Thu, 7 Jul 2016 10:25:13 +0200

timers: Make 'pinned' a timer property

We want to move the timer migration logic from a 'push' to a 'pull' model.

Under the current 'push' model pinned timers are handled via
a runtime API variant: mod_timer_pinned().

The 'pull' model requires us to store the pinned attribute of a timer
in the timer_list structure itself, as a new TIMER_PINNED bit in
timer->flags.

This flag must be set at initialization time and the timer APIs
recognize the flag.

This patch:

 - Implements the new flag and associated new-style initialization
   methods

 - makes mod_timer() recognize new-style pinned timers,

 - and adds some migration helper facility to allow
   step by step conversion of old-style to new-style
   pinned timers.
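
In new-style usage this looks as follows (a sketch; uv_heartbeat() and
SCIR_CPU_HB_INTERVAL are taken from a later patch in this series):

	setup_pinned_timer(timer, uv_heartbeat, cpu);
	timer->expires = jiffies + SCIR_CPU_HB_INTERVAL;
	add_timer_on(timer, cpu);
	...
	/* Rearm from the callback; TIMER_PINNED keeps it on this CPU: */
	mod_timer(timer, jiffies + SCIR_CPU_HB_INTERVAL);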

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Chris Mason <clm@fb.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: George Spelvin <linux@sciencehorizons.net>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Len Brown <lenb@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160704094341.049338558@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 include/linux/timer.h | 25 ++++++++++++++++++++++---
 kernel/time/timer.c   | 10 +++++-----
 2 files changed, 27 insertions(+), 8 deletions(-)

diff --git a/include/linux/timer.h b/include/linux/timer.h
index 20ac746..046d6cf 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -62,7 +62,8 @@ struct timer_list {
 #define TIMER_MIGRATING		0x00080000
 #define TIMER_BASEMASK		(TIMER_CPUMASK | TIMER_MIGRATING)
 #define TIMER_DEFERRABLE	0x00100000
-#define TIMER_IRQSAFE		0x00200000
+#define TIMER_PINNED		0x00200000
+#define TIMER_IRQSAFE		0x00400000
 
 #define __TIMER_INITIALIZER(_function, _expires, _data, _flags) { \
 		.entry = { .next = TIMER_ENTRY_STATIC },	\
@@ -78,9 +79,15 @@ struct timer_list {
 #define TIMER_INITIALIZER(_function, _expires, _data)		\
 	__TIMER_INITIALIZER((_function), (_expires), (_data), 0)
 
+#define TIMER_PINNED_INITIALIZER(_function, _expires, _data)	\
+	__TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_PINNED)
+
 #define TIMER_DEFERRED_INITIALIZER(_function, _expires, _data)	\
 	__TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_DEFERRABLE)
 
+#define TIMER_PINNED_DEFERRED_INITIALIZER(_function, _expires, _data)	\
+	__TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_DEFERRABLE | TIMER_PINNED)
+
 #define DEFINE_TIMER(_name, _function, _expires, _data)		\
 	struct timer_list _name =				\
 		TIMER_INITIALIZER(_function, _expires, _data)
@@ -124,8 +131,12 @@ static inline void init_timer_on_stack_key(struct timer_list *timer,
 
 #define init_timer(timer)						\
 	__init_timer((timer), 0)
+#define init_timer_pinned(timer)					\
+	__init_timer((timer), TIMER_PINNED)
 #define init_timer_deferrable(timer)					\
 	__init_timer((timer), TIMER_DEFERRABLE)
+#define init_timer_pinned_deferrable(timer)				\
+	__init_timer((timer), TIMER_DEFERRABLE | TIMER_PINNED)
 #define init_timer_on_stack(timer)					\
 	__init_timer_on_stack((timer), 0)
 
@@ -145,12 +156,20 @@ static inline void init_timer_on_stack_key(struct timer_list *timer,
 
 #define setup_timer(timer, fn, data)					\
 	__setup_timer((timer), (fn), (data), 0)
+#define setup_pinned_timer(timer, fn, data)				\
+	__setup_timer((timer), (fn), (data), TIMER_PINNED)
 #define setup_deferrable_timer(timer, fn, data)				\
 	__setup_timer((timer), (fn), (data), TIMER_DEFERRABLE)
+#define setup_pinned_deferrable_timer(timer, fn, data)			\
+	__setup_timer((timer), (fn), (data), TIMER_DEFERRABLE | TIMER_PINNED)
 #define setup_timer_on_stack(timer, fn, data)				\
 	__setup_timer_on_stack((timer), (fn), (data), 0)
+#define setup_pinned_timer_on_stack(timer, fn, data)			\
+	__setup_timer_on_stack((timer), (fn), (data), TIMER_PINNED)
 #define setup_deferrable_timer_on_stack(timer, fn, data)		\
 	__setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE)
+#define setup_pinned_deferrable_timer_on_stack(timer, fn, data)		\
+	__setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE | TIMER_PINNED)
 
 /**
  * timer_pending - is a timer pending?
@@ -175,8 +194,8 @@ extern int mod_timer_pinned(struct timer_list *timer, unsigned long expires);
 
 extern void set_timer_slack(struct timer_list *time, int slack_hz);
 
-#define TIMER_NOT_PINNED	0
-#define TIMER_PINNED		1
+#define MOD_TIMER_NOT_PINNED	0
+#define MOD_TIMER_PINNED	1
 /*
  * The jiffies value which is added to now, when there is no timer
  * in the timer wheel:
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 3a95f97..693f6d1 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -782,7 +782,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
 
 	debug_activate(timer, expires);
 
-	new_base = get_target_base(base, pinned);
+	new_base = get_target_base(base, pinned || timer->flags & TIMER_PINNED);
 
 	if (base != new_base) {
 		/*
@@ -825,7 +825,7 @@ out_unlock:
  */
 int mod_timer_pending(struct timer_list *timer, unsigned long expires)
 {
-	return __mod_timer(timer, expires, true, TIMER_NOT_PINNED);
+	return __mod_timer(timer, expires, true, MOD_TIMER_NOT_PINNED);
 }
 EXPORT_SYMBOL(mod_timer_pending);
 
@@ -900,7 +900,7 @@ int mod_timer(struct timer_list *timer, unsigned long expires)
 	if (timer_pending(timer) && timer->expires == expires)
 		return 1;
 
-	return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
+	return __mod_timer(timer, expires, false, MOD_TIMER_NOT_PINNED);
 }
 EXPORT_SYMBOL(mod_timer);
 
@@ -928,7 +928,7 @@ int mod_timer_pinned(struct timer_list *timer, unsigned long expires)
 	if (timer->expires == expires && timer_pending(timer))
 		return 1;
 
-	return __mod_timer(timer, expires, false, TIMER_PINNED);
+	return __mod_timer(timer, expires, false, MOD_TIMER_PINNED);
 }
 EXPORT_SYMBOL(mod_timer_pinned);
 
@@ -1512,7 +1512,7 @@ signed long __sched schedule_timeout(signed long timeout)
 	expire = timeout + jiffies;
 
 	setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
-	__mod_timer(&timer, expire, false, TIMER_NOT_PINNED);
+	__mod_timer(&timer, expire, false, MOD_TIMER_NOT_PINNED);
 	schedule();
 	del_singleshot_timer_sync(&timer);
 

^ permalink raw reply related	[flat|nested] 60+ messages in thread

* [tip:timers/core] timers, x86/apic/uv: Initialize the UV heartbeat timer as pinned
  2016-07-04  9:50 ` [patch 4 02/22] x86/apic/uv: Initialize timer as pinned Thomas Gleixner
@ 2016-07-07  8:40   ` tip-bot for Thomas Gleixner
  0 siblings, 0 replies; 60+ messages in thread
From: tip-bot for Thomas Gleixner @ 2016-07-07  8:40 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: linux-kernel, lenb, riel, josh, mingo, hpa, fweisbec, paulmck,
	torvalds, peterz, clm, tglx, arjan, linux, edumazet

Commit-ID:  920a4a70c55058a9997f2e35bf41503acf87c301
Gitweb:     http://git.kernel.org/tip/920a4a70c55058a9997f2e35bf41503acf87c301
Author:     Thomas Gleixner <tglx@linutronix.de>
AuthorDate: Mon, 4 Jul 2016 09:50:16 +0000
Committer:  Ingo Molnar <mingo@kernel.org>
CommitDate: Thu, 7 Jul 2016 10:25:14 +0200

timers, x86/apic/uv: Initialize the UV heartbeat timer as pinned

Pinned timers must carry the pinned attribute in the timer structure
itself, so convert the code to the new API.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Chris Mason <clm@fb.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: George Spelvin <linux@sciencehorizons.net>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Len Brown <lenb@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160704094341.133837204@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 arch/x86/kernel/apic/x2apic_uv_x.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 2900315..7a50519 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -919,7 +919,7 @@ static void uv_heartbeat(unsigned long ignored)
 	uv_set_scir_bits(bits);
 
 	/* enable next timer period */
-	mod_timer_pinned(timer, jiffies + SCIR_CPU_HB_INTERVAL);
+	mod_timer(timer, jiffies + SCIR_CPU_HB_INTERVAL);
 }
 
 static void uv_heartbeat_enable(int cpu)
@@ -928,7 +928,7 @@ static void uv_heartbeat_enable(int cpu)
 		struct timer_list *timer = &uv_cpu_scir_info(cpu)->timer;
 
 		uv_set_cpu_scir_bits(cpu, SCIR_CPU_HEARTBEAT|SCIR_CPU_ACTIVITY);
-		setup_timer(timer, uv_heartbeat, cpu);
+		setup_pinned_timer(timer, uv_heartbeat, cpu);
 		timer->expires = jiffies + SCIR_CPU_HB_INTERVAL;
 		add_timer_on(timer, cpu);
 		uv_cpu_scir_info(cpu)->enabled = 1;

^ permalink raw reply related	[flat|nested] 60+ messages in thread

* [tip:timers/core] timers, x86/mce: Initialize MCE restart timer as pinned
  2016-07-04  9:50 ` [patch 4 03/22] x86/mce: Initialize " Thomas Gleixner
@ 2016-07-07  8:40   ` tip-bot for Thomas Gleixner
  0 siblings, 0 replies; 60+ messages in thread
From: tip-bot for Thomas Gleixner @ 2016-07-07  8:40 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: torvalds, josh, hpa, fweisbec, peterz, linux, edumazet, paulmck,
	lenb, linux-kernel, tglx, arjan, clm, mingo, riel

Commit-ID:  f9c287ba3861714a1959accf14e815c44291bec4
Gitweb:     http://git.kernel.org/tip/f9c287ba3861714a1959accf14e815c44291bec4
Author:     Thomas Gleixner <tglx@linutronix.de>
AuthorDate: Mon, 4 Jul 2016 09:50:17 +0000
Committer:  Ingo Molnar <mingo@kernel.org>
CommitDate: Thu, 7 Jul 2016 10:25:14 +0200

timers, x86/mce: Initialize MCE restart timer as pinned

Pinned timers must carry the pinned attribute in the timer structure
itself, so convert the code to the new API.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Chris Mason <clm@fb.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: George Spelvin <linux@sciencehorizons.net>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Len Brown <lenb@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160704094341.215783439@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 arch/x86/kernel/cpu/mcheck/mce.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 92e5e37..b80a636 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1309,7 +1309,7 @@ static void __restart_timer(struct timer_list *t, unsigned long interval)
 
 	if (timer_pending(t)) {
 		if (time_before(when, t->expires))
-			mod_timer_pinned(t, when);
+			mod_timer(t, when);
 	} else {
 		t->expires = round_jiffies(when);
 		add_timer_on(t, smp_processor_id());
@@ -1735,7 +1735,7 @@ static void __mcheck_cpu_init_timer(void)
 	struct timer_list *t = this_cpu_ptr(&mce_timer);
 	unsigned int cpu = smp_processor_id();
 
-	setup_timer(t, mce_timer_fn, cpu);
+	setup_pinned_timer(t, mce_timer_fn, cpu);
 	mce_start_timer(cpu, t);
 }
 

^ permalink raw reply related	[flat|nested] 60+ messages in thread

* [tip:timers/core] timers, cpufreq/powernv: Initialize the gpstate timer as pinned
  2016-07-04  9:50 ` [patch 4 04/22] cpufreq/powernv: Initialize " Thomas Gleixner
@ 2016-07-07  8:41   ` tip-bot for Thomas Gleixner
  0 siblings, 0 replies; 60+ messages in thread
From: tip-bot for Thomas Gleixner @ 2016-07-07  8:41 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: peterz, riel, arjan, clm, mingo, torvalds, edumazet, fweisbec,
	linux, lenb, hpa, josh, paulmck, linux-kernel, tglx

Commit-ID:  7bc54b652f13119f64e87dd96bb792efbfc5a786
Gitweb:     http://git.kernel.org/tip/7bc54b652f13119f64e87dd96bb792efbfc5a786
Author:     Thomas Gleixner <tglx@linutronix.de>
AuthorDate: Mon, 4 Jul 2016 09:50:18 +0000
Committer:  Ingo Molnar <mingo@kernel.org>
CommitDate: Thu, 7 Jul 2016 10:25:14 +0200

timers, cpufreq/powernv: Initialize the gpstate timer as pinned

Pinned timers must carry the pinned attribute in the timer structure
itself, so convert the code to the new API.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Chris Mason <clm@fb.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: George Spelvin <linux@sciencehorizons.net>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Len Brown <lenb@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160704094341.297014487@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 drivers/cpufreq/powernv-cpufreq.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 54c4536..6bd715b 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -530,8 +530,7 @@ static inline void  queue_gpstate_timer(struct global_pstate_info *gpstates)
 	else
 		timer_interval = GPSTATE_TIMER_INTERVAL;
 
-	mod_timer_pinned(&gpstates->timer, jiffies +
-			msecs_to_jiffies(timer_interval));
+	mod_timer(&gpstates->timer, jiffies + msecs_to_jiffies(timer_interval));
 }
 
 /**
@@ -699,7 +698,7 @@ static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	policy->driver_data = gpstates;
 
 	/* initialize timer */
-	init_timer_deferrable(&gpstates->timer);
+	init_timer_pinned_deferrable(&gpstates->timer);
 	gpstates->timer.data = (unsigned long)policy;
 	gpstates->timer.function = gpstate_timer_handler;
 	gpstates->timer.expires = jiffies +

^ permalink raw reply related	[flat|nested] 60+ messages in thread

* [tip:timers/core] timers, driver/net/ethernet/tile: Initialize the egress timer as pinned
  2016-07-04  9:50 ` [patch 4 05/22] driver/net/ethernet/tile: Initialize " Thomas Gleixner
@ 2016-07-07  8:41   ` tip-bot for Thomas Gleixner
  0 siblings, 0 replies; 60+ messages in thread
From: tip-bot for Thomas Gleixner @ 2016-07-07  8:41 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: lenb, linux-kernel, edumazet, mingo, linux, hpa, torvalds, tglx,
	riel, fweisbec, paulmck, peterz, josh, clm, arjan

Commit-ID:  25df57605edbe92b6f4215226bd40e1a465ef6b4
Gitweb:     http://git.kernel.org/tip/25df57605edbe92b6f4215226bd40e1a465ef6b4
Author:     Thomas Gleixner <tglx@linutronix.de>
AuthorDate: Mon, 4 Jul 2016 09:50:19 +0000
Committer:  Ingo Molnar <mingo@kernel.org>
CommitDate: Thu, 7 Jul 2016 10:25:14 +0200

timers, driver/net/ethernet/tile: Initialize the egress timer as pinned

Pinned timers must carry the pinned attribute in the timer structure
itself, so convert the code to the new API.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Chris Mason <clm@fb.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: George Spelvin <linux@sciencehorizons.net>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Len Brown <lenb@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160704094341.376394205@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 drivers/net/ethernet/tile/tilepro.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 922a443..4ef605a 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -588,7 +588,7 @@ static bool tile_net_lepp_free_comps(struct net_device *dev, bool all)
 static void tile_net_schedule_egress_timer(struct tile_net_cpu *info)
 {
 	if (!info->egress_timer_scheduled) {
-		mod_timer_pinned(&info->egress_timer, jiffies + 1);
+		mod_timer(&info->egress_timer, jiffies + 1);
 		info->egress_timer_scheduled = true;
 	}
 }
@@ -1004,7 +1004,7 @@ static void tile_net_register(void *dev_ptr)
 		BUG();
 
 	/* Initialize the egress timer. */
-	init_timer(&info->egress_timer);
+	init_timer_pinned(&info->egress_timer);
 	info->egress_timer.data = (long)info;
 	info->egress_timer.function = tile_net_handle_egress_timer;
 

^ permalink raw reply related	[flat|nested] 60+ messages in thread

* [tip:timers/core] timers, drivers/tty/metag_da: Initialize the poll timer as pinned
  2016-07-04  9:50 ` [patch 4 06/22] drivers/tty/metag_da: Initialize " Thomas Gleixner
@ 2016-07-07  8:42   ` tip-bot for Thomas Gleixner
  0 siblings, 0 replies; 60+ messages in thread
From: tip-bot for Thomas Gleixner @ 2016-07-07  8:42 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: josh, lenb, clm, mingo, arjan, hpa, riel, paulmck, peterz, tglx,
	torvalds, fweisbec, edumazet, linux-kernel, linux

Commit-ID:  01bab536b1fe982fcba8144e2ee11ac889a22f0e
Gitweb:     http://git.kernel.org/tip/01bab536b1fe982fcba8144e2ee11ac889a22f0e
Author:     Thomas Gleixner <tglx@linutronix.de>
AuthorDate: Mon, 4 Jul 2016 09:50:21 +0000
Committer:  Ingo Molnar <mingo@kernel.org>
CommitDate: Thu, 7 Jul 2016 10:25:14 +0200

timers, drivers/tty/metag_da: Initialize the poll timer as pinned

Pinned timers must carry the pinned attribute in the timer structure
itself, so convert the code to the new API.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Chris Mason <clm@fb.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: George Spelvin <linux@sciencehorizons.net>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Len Brown <lenb@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160704094341.456452642@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 drivers/tty/metag_da.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/tty/metag_da.c b/drivers/tty/metag_da.c
index 9325262..25ccef2 100644
--- a/drivers/tty/metag_da.c
+++ b/drivers/tty/metag_da.c
@@ -323,12 +323,12 @@ static void dashtty_timer(unsigned long ignored)
 	if (channel >= 0)
 		fetch_data(channel);
 
-	mod_timer_pinned(&poll_timer, jiffies + DA_TTY_POLL);
+	mod_timer(&poll_timer, jiffies + DA_TTY_POLL);
 }
 
 static void add_poll_timer(struct timer_list *poll_timer)
 {
-	setup_timer(poll_timer, dashtty_timer, 0);
+	setup_pinned_timer(poll_timer, dashtty_timer, 0);
 	poll_timer->expires = jiffies + DA_TTY_POLL;
 
 	/*

^ permalink raw reply related	[flat|nested] 60+ messages in thread

* [tip:timers/core] timers, drivers/tty/mips_ejtag: Initialize the poll timer as pinned
  2016-07-04  9:50 ` [patch 4 07/22] drivers/tty/mips_ejtag: Initialize " Thomas Gleixner
@ 2016-07-07  8:42   ` tip-bot for Thomas Gleixner
  0 siblings, 0 replies; 60+ messages in thread
From: tip-bot for Thomas Gleixner @ 2016-07-07  8:42 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: peterz, fweisbec, riel, arjan, linux, edumazet, lenb, hpa, tglx,
	clm, paulmck, mingo, josh, linux-kernel, torvalds

Commit-ID:  853f90d49bbabcf4e01c615402c1bea1871d7646
Gitweb:     http://git.kernel.org/tip/853f90d49bbabcf4e01c615402c1bea1871d7646
Author:     Thomas Gleixner <tglx@linutronix.de>
AuthorDate: Mon, 4 Jul 2016 09:50:22 +0000
Committer:  Ingo Molnar <mingo@kernel.org>
CommitDate: Thu, 7 Jul 2016 10:34:59 +0200

timers, drivers/tty/mips_ejtag: Initialize the poll timer as pinned

Pinned timers must carry the pinned attribute in the timer structure
itself, so convert the code to the new API.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Chris Mason <clm@fb.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: George Spelvin <linux@sciencehorizons.net>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Len Brown <lenb@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160704094341.537448301@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 drivers/tty/mips_ejtag_fdc.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/tty/mips_ejtag_fdc.c b/drivers/tty/mips_ejtag_fdc.c
index a119176..234123b 100644
--- a/drivers/tty/mips_ejtag_fdc.c
+++ b/drivers/tty/mips_ejtag_fdc.c
@@ -689,7 +689,7 @@ static void mips_ejtag_fdc_tty_timer(unsigned long opaque)
 
 	mips_ejtag_fdc_handle(priv);
 	if (!priv->removing)
-		mod_timer_pinned(&priv->poll_timer, jiffies + FDC_TTY_POLL);
+		mod_timer(&priv->poll_timer, jiffies + FDC_TTY_POLL);
 }
 
 /* TTY Port operations */
@@ -1002,7 +1002,7 @@ static int mips_ejtag_fdc_tty_probe(struct mips_cdmm_device *dev)
 		raw_spin_unlock_irq(&priv->lock);
 	} else {
 		/* If we didn't get an usable IRQ, poll instead */
-		setup_timer(&priv->poll_timer, mips_ejtag_fdc_tty_timer,
+		setup_pinned_timer(&priv->poll_timer, mips_ejtag_fdc_tty_timer,
 			    (unsigned long)priv);
 		priv->poll_timer.expires = jiffies + FDC_TTY_POLL;
 		/*

^ permalink raw reply related	[flat|nested] 60+ messages in thread

* [tip:timers/core] timers, net/ipv4/inet: Initialize connection request timers as pinned
  2016-07-04  9:50 ` [patch 4 08/22] net/ipv4/inet: Initialize timers " Thomas Gleixner
@ 2016-07-07  8:43   ` tip-bot for Thomas Gleixner
  0 siblings, 0 replies; 60+ messages in thread
From: tip-bot for Thomas Gleixner @ 2016-07-07  8:43 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: tglx, arjan, edumazet, lenb, peterz, mingo, riel, torvalds,
	linux, clm, josh, paulmck, fweisbec, linux-kernel, hpa

Commit-ID:  f3438bc7813c439a06104ba074301c8bdd64ac8b
Gitweb:     http://git.kernel.org/tip/f3438bc7813c439a06104ba074301c8bdd64ac8b
Author:     Thomas Gleixner <tglx@linutronix.de>
AuthorDate: Mon, 4 Jul 2016 09:50:23 +0000
Committer:  Ingo Molnar <mingo@kernel.org>
CommitDate: Thu, 7 Jul 2016 10:35:06 +0200

timers, net/ipv4/inet: Initialize connection request timers as pinned

Pinned timers must carry the pinned attribute in the timer structure
itself, so convert the code to the new API.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Chris Mason <clm@fb.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: George Spelvin <linux@sciencehorizons.net>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Len Brown <lenb@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160704094341.617891430@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 net/ipv4/inet_connection_sock.c | 7 ++++---
 net/ipv4/inet_timewait_sock.c   | 5 +++--
 2 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index fa8c398..61a9dee 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -603,7 +603,7 @@ static void reqsk_timer_handler(unsigned long data)
 		if (req->num_timeout++ == 0)
 			atomic_dec(&queue->young);
 		timeo = min(TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
-		mod_timer_pinned(&req->rsk_timer, jiffies + timeo);
+		mod_timer(&req->rsk_timer, jiffies + timeo);
 		return;
 	}
 drop:
@@ -617,8 +617,9 @@ static void reqsk_queue_hash_req(struct request_sock *req,
 	req->num_timeout = 0;
 	req->sk = NULL;
 
-	setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req);
-	mod_timer_pinned(&req->rsk_timer, jiffies + timeout);
+	setup_pinned_timer(&req->rsk_timer, reqsk_timer_handler,
+			    (unsigned long)req);
+	mod_timer(&req->rsk_timer, jiffies + timeout);
 
 	inet_ehash_insert(req_to_sk(req), NULL);
 	/* before letting lookups find us, make sure all req fields
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 2065816..ddcd56c 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -188,7 +188,8 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
 		tw->tw_prot	    = sk->sk_prot_creator;
 		atomic64_set(&tw->tw_cookie, atomic64_read(&sk->sk_cookie));
 		twsk_net_set(tw, sock_net(sk));
-		setup_timer(&tw->tw_timer, tw_timer_handler, (unsigned long)tw);
+		setup_pinned_timer(&tw->tw_timer, tw_timer_handler,
+				   (unsigned long)tw);
 		/*
 		 * Because we use RCU lookups, we should not set tw_refcnt
 		 * to a non null value before everything is setup for this
@@ -248,7 +249,7 @@ void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, bool rearm)
 
 	tw->tw_kill = timeo <= 4*HZ;
 	if (!rearm) {
-		BUG_ON(mod_timer_pinned(&tw->tw_timer, jiffies + timeo));
+		BUG_ON(mod_timer(&tw->tw_timer, jiffies + timeo));
 		atomic_inc(&tw->tw_dr->tw_count);
 	} else {
 		mod_timer_pending(&tw->tw_timer, jiffies + timeo);

^ permalink raw reply related	[flat|nested] 60+ messages in thread

* [tip:timers/core] timers: Remove the deprecated mod_timer_pinned() API
  2016-07-04  9:50 ` [patch 4 09/22] timer: Remove mod_timer_pinned Thomas Gleixner
@ 2016-07-07  8:43   ` tip-bot for Thomas Gleixner
  0 siblings, 0 replies; 60+ messages in thread
From: tip-bot for Thomas Gleixner @ 2016-07-07  8:43 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: arjan, riel, josh, paulmck, peterz, fweisbec, torvalds, lenb,
	linux-kernel, clm, linux, mingo, hpa, tglx, edumazet

Commit-ID:  177ec0a0a531695210b277d734b2f92ee5796303
Gitweb:     http://git.kernel.org/tip/177ec0a0a531695210b277d734b2f92ee5796303
Author:     Thomas Gleixner <tglx@linutronix.de>
AuthorDate: Mon, 4 Jul 2016 09:50:24 +0000
Committer:  Ingo Molnar <mingo@kernel.org>
CommitDate: Thu, 7 Jul 2016 10:35:06 +0200

timers: Remove the deprecated mod_timer_pinned() API

We switched all users to initialize the timers as pinned and call
mod_timer(). Remove the now unused timer API function.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Chris Mason <clm@fb.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: George Spelvin <linux@sciencehorizons.net>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Len Brown <lenb@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160704094341.706205231@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 include/linux/timer.h |  3 ---
 kernel/time/timer.c   | 39 +++++----------------------------------
 2 files changed, 5 insertions(+), 37 deletions(-)

diff --git a/include/linux/timer.h b/include/linux/timer.h
index 046d6cf..a8f6c70 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -190,12 +190,9 @@ extern void add_timer_on(struct timer_list *timer, int cpu);
 extern int del_timer(struct timer_list * timer);
 extern int mod_timer(struct timer_list *timer, unsigned long expires);
 extern int mod_timer_pending(struct timer_list *timer, unsigned long expires);
-extern int mod_timer_pinned(struct timer_list *timer, unsigned long expires);
 
 extern void set_timer_slack(struct timer_list *time, int slack_hz);
 
-#define MOD_TIMER_NOT_PINNED	0
-#define MOD_TIMER_PINNED	1
 /*
  * The jiffies value which is added to now, when there is no timer
  * in the timer wheel:
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 693f6d1..ba49c1c 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -764,8 +764,7 @@ static struct tvec_base *lock_timer_base(struct timer_list *timer,
 }
 
 static inline int
-__mod_timer(struct timer_list *timer, unsigned long expires,
-	    bool pending_only, int pinned)
+__mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
 {
 	struct tvec_base *base, *new_base;
 	unsigned long flags;
@@ -782,7 +781,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
 
 	debug_activate(timer, expires);
 
-	new_base = get_target_base(base, pinned || timer->flags & TIMER_PINNED);
+	new_base = get_target_base(base, timer->flags & TIMER_PINNED);
 
 	if (base != new_base) {
 		/*
@@ -825,7 +824,7 @@ out_unlock:
  */
 int mod_timer_pending(struct timer_list *timer, unsigned long expires)
 {
-	return __mod_timer(timer, expires, true, MOD_TIMER_NOT_PINNED);
+	return __mod_timer(timer, expires, true);
 }
 EXPORT_SYMBOL(mod_timer_pending);
 
@@ -900,39 +899,11 @@ int mod_timer(struct timer_list *timer, unsigned long expires)
 	if (timer_pending(timer) && timer->expires == expires)
 		return 1;
 
-	return __mod_timer(timer, expires, false, MOD_TIMER_NOT_PINNED);
+	return __mod_timer(timer, expires, false);
 }
 EXPORT_SYMBOL(mod_timer);
 
 /**
- * mod_timer_pinned - modify a timer's timeout
- * @timer: the timer to be modified
- * @expires: new timeout in jiffies
- *
- * mod_timer_pinned() is a way to update the expire field of an
- * active timer (if the timer is inactive it will be activated)
- * and to ensure that the timer is scheduled on the current CPU.
- *
- * Note that this does not prevent the timer from being migrated
- * when the current CPU goes offline.  If this is a problem for
- * you, use CPU-hotplug notifiers to handle it correctly, for
- * example, cancelling the timer when the corresponding CPU goes
- * offline.
- *
- * mod_timer_pinned(timer, expires) is equivalent to:
- *
- *     del_timer(timer); timer->expires = expires; add_timer(timer);
- */
-int mod_timer_pinned(struct timer_list *timer, unsigned long expires)
-{
-	if (timer->expires == expires && timer_pending(timer))
-		return 1;
-
-	return __mod_timer(timer, expires, false, MOD_TIMER_PINNED);
-}
-EXPORT_SYMBOL(mod_timer_pinned);
-
-/**
  * add_timer - start a timer
  * @timer: the timer to be added
  *
@@ -1512,7 +1483,7 @@ signed long __sched schedule_timeout(signed long timeout)
 	expire = timeout + jiffies;
 
 	setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
-	__mod_timer(&timer, expire, false, MOD_TIMER_NOT_PINNED);
+	__mod_timer(&timer, expire, false);
 	schedule();
 	del_singleshot_timer_sync(&timer);
 

^ permalink raw reply related	[flat|nested] 60+ messages in thread

* [tip:timers/core] signals: Use hrtimer for sigtimedwait()
  2016-07-04  9:50 ` [patch 4 10/22] signal: Use hrtimer for sigtimedwait Thomas Gleixner
@ 2016-07-07  8:43   ` tip-bot for Thomas Gleixner
  0 siblings, 0 replies; 60+ messages in thread
From: tip-bot for Thomas Gleixner @ 2016-07-07  8:43 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: clm, mingo, chrubis, fweisbec, lenb, arjan, torvalds, tglx, hpa,
	viro, peterz, paulmck, linux-kernel, josh, linux, riel

Commit-ID:  2b1ecc3d1a6b10f8fbac7f83d80db30b5a2c2791
Gitweb:     http://git.kernel.org/tip/2b1ecc3d1a6b10f8fbac7f83d80db30b5a2c2791
Author:     Thomas Gleixner <tglx@linutronix.de>
AuthorDate: Mon, 4 Jul 2016 09:50:25 +0000
Committer:  Ingo Molnar <mingo@kernel.org>
CommitDate: Thu, 7 Jul 2016 10:35:07 +0200

signals: Use hrtimer for sigtimedwait()

We've converted most timeout-related syscalls to hrtimers, but
sigtimedwait() did not get this treatment.

Convert it so we get a reasonable accuracy and remove the
user space exposure to the timer wheel properties.
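
The core of the conversion, distilled from the diff below (a sketch):

	ktime_t *to = NULL, timeout = { .tv64 = KTIME_MAX };

	if (ts) {
		if (!timespec_valid(ts))
			return -EINVAL;
		/* Nanosecond resolution instead of jiffies rounding: */
		timeout = timespec_to_ktime(*ts);
		to = &timeout;
	}
	...
	__set_current_state(TASK_INTERRUPTIBLE);
	ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
						 HRTIMER_MODE_REL);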

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Chris Mason <clm@fb.com>
Cc: Cyril Hrubis <chrubis@suse.cz>
Cc: George Spelvin <linux@sciencehorizons.net>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Len Brown <lenb@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160704094341.787164909@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 kernel/signal.c | 24 ++++++++++--------------
 1 file changed, 10 insertions(+), 14 deletions(-)

diff --git a/kernel/signal.c b/kernel/signal.c
index 96e9bc4..af21afc 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2751,23 +2751,18 @@ int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
  *  @ts: upper bound on process time suspension
  */
 int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
-			const struct timespec *ts)
+		    const struct timespec *ts)
 {
+	ktime_t *to = NULL, timeout = { .tv64 = KTIME_MAX };
 	struct task_struct *tsk = current;
-	long timeout = MAX_SCHEDULE_TIMEOUT;
 	sigset_t mask = *which;
-	int sig;
+	int sig, ret = 0;
 
 	if (ts) {
 		if (!timespec_valid(ts))
 			return -EINVAL;
-		timeout = timespec_to_jiffies(ts);
-		/*
-		 * We can be close to the next tick, add another one
-		 * to ensure we will wait at least the time asked for.
-		 */
-		if (ts->tv_sec || ts->tv_nsec)
-			timeout++;
+		timeout = timespec_to_ktime(*ts);
+		to = &timeout;
 	}
 
 	/*
@@ -2778,7 +2773,7 @@ int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
 
 	spin_lock_irq(&tsk->sighand->siglock);
 	sig = dequeue_signal(tsk, &mask, info);
-	if (!sig && timeout) {
+	if (!sig && timeout.tv64) {
 		/*
 		 * None ready, temporarily unblock those we're interested
 		 * while we are sleeping in so that we'll be awakened when
@@ -2790,8 +2785,9 @@ int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
 		recalc_sigpending();
 		spin_unlock_irq(&tsk->sighand->siglock);
 
-		timeout = freezable_schedule_timeout_interruptible(timeout);
-
+		__set_current_state(TASK_INTERRUPTIBLE);
+		ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
+							 HRTIMER_MODE_REL);
 		spin_lock_irq(&tsk->sighand->siglock);
 		__set_task_blocked(tsk, &tsk->real_blocked);
 		sigemptyset(&tsk->real_blocked);
@@ -2801,7 +2797,7 @@ int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
 
 	if (sig)
 		return sig;
-	return timeout ? -EINTR : -EAGAIN;
+	return ret ? -EINTR : -EAGAIN;
 }
 
 /**

^ permalink raw reply related	[flat|nested] 60+ messages in thread

* [tip:timers/core] hlist: Add hlist_is_singular_node() helper
  2016-07-04  9:50 ` [patch 4 11/22] hlist: Add hlist_is_singular_node() helper Thomas Gleixner
@ 2016-07-07  8:44   ` tip-bot for Thomas Gleixner
  0 siblings, 0 replies; 60+ messages in thread
From: tip-bot for Thomas Gleixner @ 2016-07-07  8:44 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: linux, peterz, fweisbec, torvalds, edumazet, linux-kernel, josh,
	clm, riel, paulmck, arjan, tglx, hpa, mingo, lenb

Commit-ID:  15dba1e37b5cfd7fab9bc84e0f05f35c918f4eef
Gitweb:     http://git.kernel.org/tip/15dba1e37b5cfd7fab9bc84e0f05f35c918f4eef
Author:     Thomas Gleixner <tglx@linutronix.de>
AuthorDate: Mon, 4 Jul 2016 09:50:27 +0000
Committer:  Ingo Molnar <mingo@kernel.org>
CommitDate: Thu, 7 Jul 2016 10:35:07 +0200

hlist: Add hlist_is_singular_node() helper

Required to figure out whether the entry is the only one in the hlist.
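
A usage sketch (this is how the wheel replacement patch in this series,
not shown here, uses it when detaching a pending timer):

	unsigned idx = timer_get_idx(timer);

	/* Last entry in its bucket? Clear the bucket's pending bit. */
	if (hlist_is_singular_node(&timer->entry, base->vectors + idx))
		__clear_bit(idx, base->pending_map);
	detach_timer(timer, clear_pending);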

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Chris Mason <clm@fb.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: George Spelvin <linux@sciencehorizons.net>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Len Brown <lenb@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160704094341.867631372@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 include/linux/list.h | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/include/linux/list.h b/include/linux/list.h
index 5356f4d..5183138 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -679,6 +679,16 @@ static inline bool hlist_fake(struct hlist_node *h)
 }
 
 /*
+ * Check whether the node is the only node of the head without
+ * accessing head:
+ */
+static inline bool
+hlist_is_singular_node(struct hlist_node *n, struct hlist_head *h)
+{
+	return !n->next && n->pprev == &h->first;
+}
+
+/*
  * Move a list from one list head to another. Fixup the pprev
  * reference of the first entry if it exists.
  */

^ permalink raw reply related	[flat|nested] 60+ messages in thread

* [tip:timers/core] timers: Give a few structs and members proper names
  2016-07-04  9:50 ` [patch 4 12/22] timer: Give a few structs and members proper names Thomas Gleixner
@ 2016-07-07  8:44   ` tip-bot for Thomas Gleixner
  0 siblings, 0 replies; 60+ messages in thread
From: tip-bot for Thomas Gleixner @ 2016-07-07  8:44 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: edumazet, linux-kernel, tglx, paulmck, arjan, lenb, torvalds,
	mingo, fweisbec, josh, linux, clm, riel, peterz, hpa

Commit-ID:  494af3ed7848de08640d98ee5aff57a45c137c3c
Gitweb:     http://git.kernel.org/tip/494af3ed7848de08640d98ee5aff57a45c137c3c
Author:     Thomas Gleixner <tglx@linutronix.de>
AuthorDate: Mon, 4 Jul 2016 09:50:28 +0000
Committer:  Ingo Molnar <mingo@kernel.org>
CommitDate: Thu, 7 Jul 2016 10:35:08 +0200

timers: Give a few structs and members proper names

Some of the names in the internal implementation of the timer code
are no longer correct and others are simply too long to type.

Clean it up before we switch the wheel implementation over to
the new scheme.

No functional change.
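
In summary (a condensed view of the renames in the diff below):

	struct tvec_base	->	struct timer_base
	tvec_bases (per CPU)	->	timer_bases
	base->timer_jiffies	->	base->clk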

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Chris Mason <clm@fb.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: George Spelvin <linux@sciencehorizons.net>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Len Brown <lenb@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160704094341.948752516@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 kernel/time/timer.c | 118 ++++++++++++++++++++++++++--------------------------
 1 file changed, 59 insertions(+), 59 deletions(-)

diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index ba49c1c..f259a3e 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -77,10 +77,10 @@ struct tvec_root {
 	struct hlist_head vec[TVR_SIZE];
 };
 
-struct tvec_base {
+struct timer_base {
 	spinlock_t lock;
 	struct timer_list *running_timer;
-	unsigned long timer_jiffies;
+	unsigned long clk;
 	unsigned long next_timer;
 	unsigned long active_timers;
 	unsigned long all_timers;
@@ -95,7 +95,7 @@ struct tvec_base {
 } ____cacheline_aligned;
 
 
-static DEFINE_PER_CPU(struct tvec_base, tvec_bases);
+static DEFINE_PER_CPU(struct timer_base, timer_bases);
 
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
 unsigned int sysctl_timer_migration = 1;
@@ -106,15 +106,15 @@ void timers_update_migration(bool update_nohz)
 	unsigned int cpu;
 
 	/* Avoid the loop, if nothing to update */
-	if (this_cpu_read(tvec_bases.migration_enabled) == on)
+	if (this_cpu_read(timer_bases.migration_enabled) == on)
 		return;
 
 	for_each_possible_cpu(cpu) {
-		per_cpu(tvec_bases.migration_enabled, cpu) = on;
+		per_cpu(timer_bases.migration_enabled, cpu) = on;
 		per_cpu(hrtimer_bases.migration_enabled, cpu) = on;
 		if (!update_nohz)
 			continue;
-		per_cpu(tvec_bases.nohz_active, cpu) = true;
+		per_cpu(timer_bases.nohz_active, cpu) = true;
 		per_cpu(hrtimer_bases.nohz_active, cpu) = true;
 	}
 }
@@ -134,18 +134,18 @@ int timer_migration_handler(struct ctl_table *table, int write,
 	return ret;
 }
 
-static inline struct tvec_base *get_target_base(struct tvec_base *base,
+static inline struct timer_base *get_target_base(struct timer_base *base,
 						int pinned)
 {
 	if (pinned || !base->migration_enabled)
-		return this_cpu_ptr(&tvec_bases);
-	return per_cpu_ptr(&tvec_bases, get_nohz_timer_target());
+		return this_cpu_ptr(&timer_bases);
+	return per_cpu_ptr(&timer_bases, get_nohz_timer_target());
 }
 #else
-static inline struct tvec_base *get_target_base(struct tvec_base *base,
+static inline struct timer_base *get_target_base(struct timer_base *base,
 						int pinned)
 {
-	return this_cpu_ptr(&tvec_bases);
+	return this_cpu_ptr(&timer_bases);
 }
 #endif
 
@@ -371,10 +371,10 @@ void set_timer_slack(struct timer_list *timer, int slack_hz)
 EXPORT_SYMBOL_GPL(set_timer_slack);
 
 static void
-__internal_add_timer(struct tvec_base *base, struct timer_list *timer)
+__internal_add_timer(struct timer_base *base, struct timer_list *timer)
 {
 	unsigned long expires = timer->expires;
-	unsigned long idx = expires - base->timer_jiffies;
+	unsigned long idx = expires - base->clk;
 	struct hlist_head *vec;
 
 	if (idx < TVR_SIZE) {
@@ -394,7 +394,7 @@ __internal_add_timer(struct tvec_base *base, struct timer_list *timer)
 		 * Can happen if you add a timer with expires == jiffies,
 		 * or you set a timer to go off in the past
 		 */
-		vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
+		vec = base->tv1.vec + (base->clk & TVR_MASK);
 	} else {
 		int i;
 		/* If the timeout is larger than MAX_TVAL (on 64-bit
@@ -403,7 +403,7 @@ __internal_add_timer(struct tvec_base *base, struct timer_list *timer)
 		 */
 		if (idx > MAX_TVAL) {
 			idx = MAX_TVAL;
-			expires = idx + base->timer_jiffies;
+			expires = idx + base->clk;
 		}
 		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
 		vec = base->tv5.vec + i;
@@ -412,11 +412,11 @@ __internal_add_timer(struct tvec_base *base, struct timer_list *timer)
 	hlist_add_head(&timer->entry, vec);
 }
 
-static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
+static void internal_add_timer(struct timer_base *base, struct timer_list *timer)
 {
 	/* Advance base->jiffies, if the base is empty */
 	if (!base->all_timers++)
-		base->timer_jiffies = jiffies;
+		base->clk = jiffies;
 
 	__internal_add_timer(base, timer);
 	/*
@@ -707,7 +707,7 @@ static inline void detach_timer(struct timer_list *timer, bool clear_pending)
 }
 
 static inline void
-detach_expired_timer(struct timer_list *timer, struct tvec_base *base)
+detach_expired_timer(struct timer_list *timer, struct timer_base *base)
 {
 	detach_timer(timer, true);
 	if (!(timer->flags & TIMER_DEFERRABLE))
@@ -715,7 +715,7 @@ detach_expired_timer(struct timer_list *timer, struct tvec_base *base)
 	base->all_timers--;
 }
 
-static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
+static int detach_if_pending(struct timer_list *timer, struct timer_base *base,
 			     bool clear_pending)
 {
 	if (!timer_pending(timer))
@@ -725,16 +725,16 @@ static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
 	if (!(timer->flags & TIMER_DEFERRABLE)) {
 		base->active_timers--;
 		if (timer->expires == base->next_timer)
-			base->next_timer = base->timer_jiffies;
+			base->next_timer = base->clk;
 	}
 	/* If this was the last timer, advance base->jiffies */
 	if (!--base->all_timers)
-		base->timer_jiffies = jiffies;
+		base->clk = jiffies;
 	return 1;
 }
 
 /*
- * We are using hashed locking: holding per_cpu(tvec_bases).lock
+ * We are using hashed locking: holding per_cpu(timer_bases).lock
  * means that all timers which are tied to this base via timer->base are
  * locked, and the base itself is locked too.
  *
@@ -744,16 +744,16 @@ static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
  * When the timer's base is locked and removed from the list, the
  * TIMER_MIGRATING flag is set, FIXME
  */
-static struct tvec_base *lock_timer_base(struct timer_list *timer,
+static struct timer_base *lock_timer_base(struct timer_list *timer,
 					unsigned long *flags)
 	__acquires(timer->base->lock)
 {
 	for (;;) {
 		u32 tf = timer->flags;
-		struct tvec_base *base;
+		struct timer_base *base;
 
 		if (!(tf & TIMER_MIGRATING)) {
-			base = per_cpu_ptr(&tvec_bases, tf & TIMER_CPUMASK);
+			base = per_cpu_ptr(&timer_bases, tf & TIMER_CPUMASK);
 			spin_lock_irqsave(&base->lock, *flags);
 			if (timer->flags == tf)
 				return base;
@@ -766,7 +766,7 @@ static struct tvec_base *lock_timer_base(struct timer_list *timer,
 static inline int
 __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
 {
-	struct tvec_base *base, *new_base;
+	struct timer_base *base, *new_base;
 	unsigned long flags;
 	int ret = 0;
 
@@ -933,8 +933,8 @@ EXPORT_SYMBOL(add_timer);
  */
 void add_timer_on(struct timer_list *timer, int cpu)
 {
-	struct tvec_base *new_base = per_cpu_ptr(&tvec_bases, cpu);
-	struct tvec_base *base;
+	struct timer_base *new_base = per_cpu_ptr(&timer_bases, cpu);
+	struct timer_base *base;
 	unsigned long flags;
 
 	timer_stats_timer_set_start_info(timer);
@@ -975,7 +975,7 @@ EXPORT_SYMBOL_GPL(add_timer_on);
  */
 int del_timer(struct timer_list *timer)
 {
-	struct tvec_base *base;
+	struct timer_base *base;
 	unsigned long flags;
 	int ret = 0;
 
@@ -1001,7 +1001,7 @@ EXPORT_SYMBOL(del_timer);
  */
 int try_to_del_timer_sync(struct timer_list *timer)
 {
-	struct tvec_base *base;
+	struct timer_base *base;
 	unsigned long flags;
 	int ret = -1;
 
@@ -1085,7 +1085,7 @@ int del_timer_sync(struct timer_list *timer)
 EXPORT_SYMBOL(del_timer_sync);
 #endif
 
-static int cascade(struct tvec_base *base, struct tvec *tv, int index)
+static int cascade(struct timer_base *base, struct tvec *tv, int index)
 {
 	/* cascade all the timers from tv up one level */
 	struct timer_list *timer;
@@ -1149,7 +1149,7 @@ static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
 	}
 }
 
-#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
+#define INDEX(N) ((base->clk >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
 
 /**
  * __run_timers - run all expired timers (if any) on this CPU.
@@ -1158,23 +1158,23 @@ static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
  * This function cascades all vectors and executes all expired timer
  * vectors.
  */
-static inline void __run_timers(struct tvec_base *base)
+static inline void __run_timers(struct timer_base *base)
 {
 	struct timer_list *timer;
 
 	spin_lock_irq(&base->lock);
 
-	while (time_after_eq(jiffies, base->timer_jiffies)) {
+	while (time_after_eq(jiffies, base->clk)) {
 		struct hlist_head work_list;
 		struct hlist_head *head = &work_list;
 		int index;
 
 		if (!base->all_timers) {
-			base->timer_jiffies = jiffies;
+			base->clk = jiffies;
 			break;
 		}
 
-		index = base->timer_jiffies & TVR_MASK;
+		index = base->clk & TVR_MASK;
 
 		/*
 		 * Cascade timers:
@@ -1184,7 +1184,7 @@ static inline void __run_timers(struct tvec_base *base)
 				(!cascade(base, &base->tv3, INDEX(1))) &&
 					!cascade(base, &base->tv4, INDEX(2)))
 			cascade(base, &base->tv5, INDEX(3));
-		++base->timer_jiffies;
+		++base->clk;
 		hlist_move_list(base->tv1.vec + index, head);
 		while (!hlist_empty(head)) {
 			void (*fn)(unsigned long);
@@ -1222,16 +1222,16 @@ static inline void __run_timers(struct tvec_base *base)
  * is used on S/390 to stop all activity when a CPU is idle.
  * This function needs to be called with interrupts disabled.
  */
-static unsigned long __next_timer_interrupt(struct tvec_base *base)
+static unsigned long __next_timer_interrupt(struct timer_base *base)
 {
-	unsigned long timer_jiffies = base->timer_jiffies;
-	unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
+	unsigned long clk = base->clk;
+	unsigned long expires = clk + NEXT_TIMER_MAX_DELTA;
 	int index, slot, array, found = 0;
 	struct timer_list *nte;
 	struct tvec *varray[4];
 
 	/* Look for timer events in tv1. */
-	index = slot = timer_jiffies & TVR_MASK;
+	index = slot = clk & TVR_MASK;
 	do {
 		hlist_for_each_entry(nte, base->tv1.vec + slot, entry) {
 			if (nte->flags & TIMER_DEFERRABLE)
@@ -1250,8 +1250,8 @@ static unsigned long __next_timer_interrupt(struct tvec_base *base)
 cascade:
 	/* Calculate the next cascade event */
 	if (index)
-		timer_jiffies += TVR_SIZE - index;
-	timer_jiffies >>= TVR_BITS;
+		clk += TVR_SIZE - index;
+	clk >>= TVR_BITS;
 
 	/* Check tv2-tv5. */
 	varray[0] = &base->tv2;
@@ -1262,7 +1262,7 @@ cascade:
 	for (array = 0; array < 4; array++) {
 		struct tvec *varp = varray[array];
 
-		index = slot = timer_jiffies & TVN_MASK;
+		index = slot = clk & TVN_MASK;
 		do {
 			hlist_for_each_entry(nte, varp->vec + slot, entry) {
 				if (nte->flags & TIMER_DEFERRABLE)
@@ -1286,8 +1286,8 @@ cascade:
 		} while (slot != index);
 
 		if (index)
-			timer_jiffies += TVN_SIZE - index;
-		timer_jiffies >>= TVN_BITS;
+			clk += TVN_SIZE - index;
+		clk >>= TVN_BITS;
 	}
 	return expires;
 }
@@ -1335,7 +1335,7 @@ static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
  */
 u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
 {
-	struct tvec_base *base = this_cpu_ptr(&tvec_bases);
+	struct timer_base *base = this_cpu_ptr(&timer_bases);
 	u64 expires = KTIME_MAX;
 	unsigned long nextevt;
 
@@ -1348,7 +1348,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
 
 	spin_lock(&base->lock);
 	if (base->active_timers) {
-		if (time_before_eq(base->next_timer, base->timer_jiffies))
+		if (time_before_eq(base->next_timer, base->clk))
 			base->next_timer = __next_timer_interrupt(base);
 		nextevt = base->next_timer;
 		if (time_before_eq(nextevt, basej))
@@ -1387,9 +1387,9 @@ void update_process_times(int user_tick)
  */
 static void run_timer_softirq(struct softirq_action *h)
 {
-	struct tvec_base *base = this_cpu_ptr(&tvec_bases);
+	struct timer_base *base = this_cpu_ptr(&timer_bases);
 
-	if (time_after_eq(jiffies, base->timer_jiffies))
+	if (time_after_eq(jiffies, base->clk))
 		__run_timers(base);
 }
 
@@ -1534,7 +1534,7 @@ signed long __sched schedule_timeout_idle(signed long timeout)
 EXPORT_SYMBOL(schedule_timeout_idle);
 
 #ifdef CONFIG_HOTPLUG_CPU
-static void migrate_timer_list(struct tvec_base *new_base, struct hlist_head *head)
+static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *head)
 {
 	struct timer_list *timer;
 	int cpu = new_base->cpu;
@@ -1550,13 +1550,13 @@ static void migrate_timer_list(struct tvec_base *new_base, struct hlist_head *he
 
 static void migrate_timers(int cpu)
 {
-	struct tvec_base *old_base;
-	struct tvec_base *new_base;
+	struct timer_base *old_base;
+	struct timer_base *new_base;
 	int i;
 
 	BUG_ON(cpu_online(cpu));
-	old_base = per_cpu_ptr(&tvec_bases, cpu);
-	new_base = get_cpu_ptr(&tvec_bases);
+	old_base = per_cpu_ptr(&timer_bases, cpu);
+	new_base = get_cpu_ptr(&timer_bases);
 	/*
 	 * The caller is globally serialized and nobody else
 	 * takes two locks at once, deadlock is not possible.
@@ -1580,7 +1580,7 @@ static void migrate_timers(int cpu)
 
 	spin_unlock(&old_base->lock);
 	spin_unlock_irq(&new_base->lock);
-	put_cpu_ptr(&tvec_bases);
+	put_cpu_ptr(&timer_bases);
 }
 
 static int timer_cpu_notify(struct notifier_block *self,
@@ -1608,13 +1608,13 @@ static inline void timer_register_cpu_notifier(void) { }
 
 static void __init init_timer_cpu(int cpu)
 {
-	struct tvec_base *base = per_cpu_ptr(&tvec_bases, cpu);
+	struct timer_base *base = per_cpu_ptr(&timer_bases, cpu);
 
 	base->cpu = cpu;
 	spin_lock_init(&base->lock);
 
-	base->timer_jiffies = jiffies;
-	base->next_timer = base->timer_jiffies;
+	base->clk = jiffies;
+	base->next_timer = base->clk;
 }
 
 static void __init init_timer_cpus(void)

^ permalink raw reply related	[flat|nested] 60+ messages in thread

* [tip:timers/core] timers: Reduce the CPU index space to 256k
  2016-07-04  9:50 ` [patch 4 13/22] timer: Reduce the CPU index space to 256k Thomas Gleixner
@ 2016-07-07  8:45   ` tip-bot for Thomas Gleixner
  0 siblings, 0 replies; 60+ messages in thread
From: tip-bot for Thomas Gleixner @ 2016-07-07  8:45 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: linux, paulmck, josh, arjan, mingo, fweisbec, peterz, hpa, lenb,
	tglx, torvalds, riel, linux-kernel, clm

Commit-ID:  b0d6e2dcb284f1f4dcb4b92760f49eeaf5fc0bc7
Gitweb:     http://git.kernel.org/tip/b0d6e2dcb284f1f4dcb4b92760f49eeaf5fc0bc7
Author:     Thomas Gleixner <tglx@linutronix.de>
AuthorDate: Mon, 4 Jul 2016 09:50:29 +0000
Committer:  Ingo Molnar <mingo@kernel.org>
CommitDate: Thu, 7 Jul 2016 10:35:08 +0200

timers: Reduce the CPU index space to 256k

We want to store the array index in the flags space. 256k CPUs should be
enough for a while.
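
For illustration only, a minimal userspace sketch of the resulting bit
budget; the mask values are taken from the hunk below, while the
program around them is hypothetical:

  #include <stdio.h>

  #define TIMER_CPUMASK		0x0003FFFF	/* bits 0-17: CPU number */
  #define TIMER_MIGRATING	0x00040000	/* bit 18 */
  #define TIMER_DEFERRABLE	0x00080000	/* bit 19 */
  #define TIMER_PINNED		0x00100000	/* bit 20 */
  #define TIMER_IRQSAFE		0x00200000	/* bit 21, bits 22+ stay free */

  int main(void)
  {
  	/* 18 mask bits address 2^18 == 262144 == 256k CPUs */
  	printf("max CPUs: %d\n", TIMER_CPUMASK + 1);
  	return 0;
  }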

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Chris Mason <clm@fb.com>
Cc: George Spelvin <linux@sciencehorizons.net>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Len Brown <lenb@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160704094342.030144293@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 include/linux/timer.h | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/include/linux/timer.h b/include/linux/timer.h
index a8f6c70..989f33d 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -58,12 +58,12 @@ struct timer_list {
  * workqueue locking issues. It's not meant for executing random crap
  * with interrupts disabled. Abuse is monitored!
  */
-#define TIMER_CPUMASK		0x0007FFFF
-#define TIMER_MIGRATING		0x00080000
+#define TIMER_CPUMASK		0x0003FFFF
+#define TIMER_MIGRATING		0x00040000
 #define TIMER_BASEMASK		(TIMER_CPUMASK | TIMER_MIGRATING)
-#define TIMER_DEFERRABLE	0x00100000
-#define TIMER_PINNED		0x00200000
-#define TIMER_IRQSAFE		0x00400000
+#define TIMER_DEFERRABLE	0x00080000
+#define TIMER_PINNED		0x00100000
+#define TIMER_IRQSAFE		0x00200000
 
 #define __TIMER_INITIALIZER(_function, _expires, _data, _flags) { \
 		.entry = { .next = TIMER_ENTRY_STATIC },	\

^ permalink raw reply related	[flat|nested] 60+ messages in thread

* [tip:timers/core] timers: Switch to a non-cascading wheel
  2016-07-04  9:50 ` [patch 4 14/22] timer: Switch to a non cascading wheel Thomas Gleixner
@ 2016-07-07  8:45   ` tip-bot for Thomas Gleixner
  2016-08-11 15:21   ` [patch 4 14/22] timer: Switch to a non cascading wheel Jouni Malinen
  1 sibling, 0 replies; 60+ messages in thread
From: tip-bot for Thomas Gleixner @ 2016-07-07  8:45 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: edumazet, linux-kernel, mingo, fweisbec, torvalds, clm, riel,
	hpa, josh, arjan, linux, peterz, tglx, paulmck, lenb

Commit-ID:  500462a9de657f86edaa102f8ab6bff7f7e43fc2
Gitweb:     http://git.kernel.org/tip/500462a9de657f86edaa102f8ab6bff7f7e43fc2
Author:     Thomas Gleixner <tglx@linutronix.de>
AuthorDate: Mon, 4 Jul 2016 09:50:30 +0000
Committer:  Ingo Molnar <mingo@kernel.org>
CommitDate: Thu, 7 Jul 2016 10:35:09 +0200

timers: Switch to a non-cascading wheel

The current timer wheel has some drawbacks:

1) Cascading:

   Cascading can be an unbounded operation and is completely pointless in most
   cases because the vast majority of the timer wheel timers are canceled or
   rearmed before expiration. (They are used as timeout safeguards, not as
   real timers to measure time.)

2) No fast lookup of the next expiring timer:

   In NOHZ scenarios the first timer soft interrupt after a long NOHZ period
   must fast-forward the base time to the current value of jiffies. As we
   have no fast way to find the next expiring timer, the code loops linearly,
   incrementing the base time one step at a time and checking for expired
   timers at each step. This causes unbounded overhead spikes exactly at the
   moment when we should wake up as fast as possible (see the sketch after
   this list).
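
A simplified sketch of that pattern, distilled from the old
__run_timers() code further down in this series; the loop body is
elided and only illustrates the cost, it is not a literal quote:

  /*
   * After a long NOHZ sleep, base->clk lags jiffies by N ticks and
   * this catch-up loop runs N times, one iteration per elapsed
   * jiffy, even when no timer is pending in any of those ticks.
   */
  while (time_after_eq(jiffies, base->clk)) {
  	/* cascade the outer wheels, expire this jiffy's bucket ... */
  	base->clk++;
  }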

After a thorough analysis of real-world data gathered on laptops,
workstations, webservers and other machines (thanks Chris!) I came to the
conclusion that the current 'classic' timer wheel implementation can be
modified to address the above issues.

The vast majority of timer wheel timers are canceled or rearmed before
expiry. Most of them are timeouts for networking and other I/O tasks. The
nature of timeouts is to catch the exception from normal operation (TCP ack
timed out, disk does not respond, etc.). For these kinds of timeouts the
accuracy of the timeout is not really a concern. Timeouts are very often
approximate worst-case values and if the timeout fires, we have already
waited a long time and performance is down the drain anyway.

The few timers which actually expire can be split into two categories:

 1) Short expiry times which expect halfway accurate expiry

 2) Long-term expiry times which are already inaccurate today due to the
    batching done automatically for NOHZ and also via the
    set_timer_slack() API.

So for long-term expiry timers we can avoid the cascading property and just
leave them in the less granular outer wheels until expiry or
cancelation. Timers which are armed with a timeout larger than the wheel
capacity are no longer cascaded. We expire them with the longest possible
timeout (6+ days). We have not observed such timeouts in our data collection,
but at least we handle them, applying the rule of least surprise.

To avoid extending the wheel levels for HZ=1000, so that we can accommodate
the longest observed timeouts (5 days in the network conntrack code), we
reduce the first-level granularity for HZ=1000 to 4ms, which is effectively
the same as the HZ=250 behaviour. From our data analysis there is nothing
which relies on that 1ms granularity and as a side effect we get better
batching and timer locality for the networking code as well.

Contrary to the classic wheel, the granularity of a level is not the
capacity of the previous level. In the currently chosen setting, each
level's granularity is 8 times the granularity of the previous level.

So for HZ=250 we end up with the following granularity levels:

 Level Offset   Granularity                  Range
     0      0          4 ms                 0 ms -        252 ms
     1     64         32 ms               256 ms -       2044 ms (256ms - ~2s)
     2    128        256 ms              2048 ms -      16380 ms (~2s   - ~16s)
     3    192       2048 ms (~2s)       16384 ms -     131068 ms (~16s  - ~2m)
     4    256      16384 ms (~16s)     131072 ms -    1048572 ms (~2m   - ~17m)
     5    320     131072 ms (~2m)     1048576 ms -    8388604 ms (~17m  - ~2h)
     6    384    1048576 ms (~17m)    8388608 ms -   67108863 ms (~2h   - ~18h)
     7    448    8388608 ms (~2h)    67108864 ms -  536870911 ms (~18h  - ~6d)

That's a worst-case inaccuracy of 12.5% for the timers which are queued at the
beginning of a level.
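
The table falls out of the bucket math. For reference, this is the
index calculation exactly as it appears in the patch below: the expiry
is rounded up to the granularity of the target level, so the rounding
error is at most one granularity unit, i.e. 1/8 = 12.5% of the level
range:

  static inline unsigned calc_index(unsigned expires, unsigned lvl)
  {
  	/* Round the expiry up to the granularity of this level */
  	expires = (expires + LVL_GRAN(lvl)) >> LVL_SHIFT(lvl);
  	/* Offset of the level in the flat array plus bucket in level */
  	return LVL_OFFS(lvl) + (expires & LVL_MASK);
  }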

So the new wheel concept addresses the old issues:

1) Cascading is avoided completely

 2) By keeping the timers in the bucket until expiry/cancelation we can track
    the buckets which have timers enqueued in a bucket bitmap and can therefore
    look up the next expiring timer very fast, in O(1) (see the sketch below).
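
As a sketch of that lookup, this is next_pending_bucket() from the
patch below: one find_next_bit() over at most LVL_SIZE bits per level,
with a wrap-around search, instead of a per-jiffy linear scan:

  static int next_pending_bucket(struct timer_base *base, unsigned offset,
  			       unsigned clk)
  {
  	unsigned pos, start = offset + clk;
  	unsigned end = offset + LVL_SIZE;

  	/* Search from the current bucket to the end of the level */
  	pos = find_next_bit(base->pending_map, end, start);
  	if (pos < end)
  		return pos - start;

  	/* Wrap around: search from the level start up to @start */
  	pos = find_next_bit(base->pending_map, start, offset);
  	return pos < start ? pos + LVL_SIZE - start : -1;
  }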

A further benefit of the concept is that the slack calculation, which is done
on every timer start, is no longer necessary because the granularity levels
already provide natural batching.

Our extensive testing with various loads did not show any performance
degradation vs. the current wheel implementation.

This patch does not address the 'fast lookup' issue as we wanted to make sure
that there is no regression introduced by the wheel redesign. The
optimizations are in follow-up patches.

This patch contains fixes from Anna-Maria Gleixner and Richard Cochran.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Chris Mason <clm@fb.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: George Spelvin <linux@sciencehorizons.net>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Len Brown <lenb@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160704094342.108621834@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 include/linux/timer.h |   2 +
 kernel/time/timer.c   | 829 ++++++++++++++++++++++++++++----------------------
 2 files changed, 469 insertions(+), 362 deletions(-)

diff --git a/include/linux/timer.h b/include/linux/timer.h
index 989f33d..5869ab9 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -64,6 +64,8 @@ struct timer_list {
 #define TIMER_DEFERRABLE	0x00080000
 #define TIMER_PINNED		0x00100000
 #define TIMER_IRQSAFE		0x00200000
+#define TIMER_ARRAYSHIFT	22
+#define TIMER_ARRAYMASK		0xFFC00000
 
 #define __TIMER_INITIALIZER(_function, _expires, _data, _flags) { \
 		.entry = { .next = TIMER_ENTRY_STATIC },	\
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index f259a3e..86e95b7 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -59,43 +59,151 @@ __visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
 EXPORT_SYMBOL(jiffies_64);
 
 /*
- * per-CPU timer vector definitions:
+ * The timer wheel has LVL_DEPTH array levels. Each level provides an array of
+ * LVL_SIZE buckets. Each level is driven by its own clock and therefore each
+ * level has a different granularity.
+ *
+ * The level granularity is:		LVL_CLK_DIV ^ lvl
+ * The level clock frequency is:	HZ / (LVL_CLK_DIV ^ level)
+ *
+ * The array level of a newly armed timer depends on the relative expiry
+ * time. The farther away the expiry time is, the higher the array level
+ * and therefore the coarser the granularity becomes.
+ *
+ * Contrary to the original timer wheel implementation, which aims for 'exact'
+ * expiry of the timers, this implementation removes the need for recascading
+ * the timers into the lower array levels. The previous 'classic' timer wheel
+ * implementation of the kernel already violated the 'exact' expiry by adding
+ * slack to the expiry time to provide batched expiration. The granularity
+ * levels provide implicit batching.
+ *
+ * This is an optimization of the original timer wheel implementation for the
+ * majority of the timer wheel use cases: timeouts. The vast majority of
+ * timeout timers (networking, disk I/O ...) are canceled before expiry. If
+ * the timeout expires it indicates that normal operation is disturbed, so it
+ * does not matter much whether the timeout comes with a slight delay.
+ *
+ * The only exception to this are networking timers with a small expiry
+ * time. They rely on the granularity. Those fit into the first wheel level,
+ * which has HZ granularity.
+ *
+ * We don't have cascading anymore. Timers with an expiry time above the
+ * capacity of the last wheel level are force expired at the maximum timeout
+ * value of the last wheel level. From data sampling we know that the maximum
+ * value observed is 5 days (network connection tracking), so this should not
+ * be an issue.
+ *
+ * The currently chosen array constants are a good compromise between
+ * array size and granularity.
+ *
+ * This results in the following granularity and range levels:
+ *
+ * HZ 1000 steps
+ * Level Offset  Granularity            Range
+ *  0      0         1 ms                0 ms -         63 ms
+ *  1     64         8 ms               64 ms -        511 ms
+ *  2    128        64 ms              512 ms -       4095 ms (512ms - ~4s)
+ *  3    192       512 ms             4096 ms -      32767 ms (~4s - ~32s)
+ *  4    256      4096 ms (~4s)      32768 ms -     262143 ms (~32s - ~4m)
+ *  5    320     32768 ms (~32s)    262144 ms -    2097151 ms (~4m - ~34m)
+ *  6    384    262144 ms (~4m)    2097152 ms -   16777215 ms (~34m - ~4h)
+ *  7    448   2097152 ms (~34m)  16777216 ms -  134217727 ms (~4h - ~1d)
+ *  8    512  16777216 ms (~4h)  134217728 ms - 1073741822 ms (~1d - ~12d)
+ *
+ * HZ  300
+ * Level Offset  Granularity            Range
+ *  0	   0         3 ms                0 ms -        210 ms
+ *  1	  64        26 ms              213 ms -       1703 ms (213ms - ~1s)
+ *  2	 128       213 ms             1706 ms -      13650 ms (~1s - ~13s)
+ *  3	 192      1706 ms (~1s)      13653 ms -     109223 ms (~13s - ~1m)
+ *  4	 256     13653 ms (~13s)    109226 ms -     873810 ms (~1m - ~14m)
+ *  5	 320    109226 ms (~1m)     873813 ms -    6990503 ms (~14m - ~1h)
+ *  6	 384    873813 ms (~14m)   6990506 ms -   55924050 ms (~1h - ~15h)
+ *  7	 448   6990506 ms (~1h)   55924053 ms -  447392423 ms (~15h - ~5d)
+ *  8    512  55924053 ms (~15h) 447392426 ms - 3579139406 ms (~5d - ~41d)
+ *
+ * HZ  250
+ * Level Offset  Granularity            Range
+ *  0	   0         4 ms                0 ms -        255 ms
+ *  1	  64        32 ms              256 ms -       2047 ms (256ms - ~2s)
+ *  2	 128       256 ms             2048 ms -      16383 ms (~2s - ~16s)
+ *  3	 192      2048 ms (~2s)      16384 ms -     131071 ms (~16s - ~2m)
+ *  4	 256     16384 ms (~16s)    131072 ms -    1048575 ms (~2m - ~17m)
+ *  5	 320    131072 ms (~2m)    1048576 ms -    8388607 ms (~17m - ~2h)
+ *  6	 384   1048576 ms (~17m)   8388608 ms -   67108863 ms (~2h - ~18h)
+ *  7	 448   8388608 ms (~2h)   67108864 ms -  536870911 ms (~18h - ~6d)
+ *  8    512  67108864 ms (~18h) 536870912 ms - 4294967288 ms (~6d - ~49d)
+ *
+ * HZ  100
+ * Level Offset  Granularity            Range
+ *  0	   0         10 ms               0 ms -        630 ms
+ *  1	  64         80 ms             640 ms -       5110 ms (640ms - ~5s)
+ *  2	 128        640 ms            5120 ms -      40950 ms (~5s - ~40s)
+ *  3	 192       5120 ms (~5s)     40960 ms -     327670 ms (~40s - ~5m)
+ *  4	 256      40960 ms (~40s)   327680 ms -    2621430 ms (~5m - ~43m)
+ *  5	 320     327680 ms (~5m)   2621440 ms -   20971510 ms (~43m - ~5h)
+ *  6	 384    2621440 ms (~43m) 20971520 ms -  167772150 ms (~5h - ~1d)
+ *  7	 448   20971520 ms (~5h) 167772160 ms - 1342177270 ms (~1d - ~15d)
  */
-#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
-#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
-#define TVN_SIZE (1 << TVN_BITS)
-#define TVR_SIZE (1 << TVR_BITS)
-#define TVN_MASK (TVN_SIZE - 1)
-#define TVR_MASK (TVR_SIZE - 1)
-#define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1))
-
-struct tvec {
-	struct hlist_head vec[TVN_SIZE];
-};
 
-struct tvec_root {
-	struct hlist_head vec[TVR_SIZE];
-};
+/* Clock divisor for the next level */
+#define LVL_CLK_SHIFT	3
+#define LVL_CLK_DIV	(1UL << LVL_CLK_SHIFT)
+#define LVL_CLK_MASK	(LVL_CLK_DIV - 1)
+#define LVL_SHIFT(n)	((n) * LVL_CLK_SHIFT)
+#define LVL_GRAN(n)	(1UL << LVL_SHIFT(n))
+
+/*
+ * The time start value for each level to select the bucket at enqueue
+ * time.
+ */
+#define LVL_START(n)	((LVL_SIZE - 1) << (((n) - 1) * LVL_CLK_SHIFT))
+
+/* Size of each clock level */
+#define LVL_BITS	6
+#define LVL_SIZE	(1UL << LVL_BITS)
+#define LVL_MASK	(LVL_SIZE - 1)
+#define LVL_OFFS(n)	((n) * LVL_SIZE)
+
+/* Level depth */
+#if HZ > 100
+# define LVL_DEPTH	9
+# else
+# define LVL_DEPTH	8
+#endif
+
+/* The cutoff (max. capacity of the wheel) */
+#define WHEEL_TIMEOUT_CUTOFF	(LVL_START(LVL_DEPTH))
+#define WHEEL_TIMEOUT_MAX	(WHEEL_TIMEOUT_CUTOFF - LVL_GRAN(LVL_DEPTH - 1))
+
+/*
+ * The resulting wheel size. If NOHZ is configured we allocate two
+ * wheels so we have a separate storage for the deferrable timers.
+ */
+#define WHEEL_SIZE	(LVL_SIZE * LVL_DEPTH)
+
+#ifdef CONFIG_NO_HZ_COMMON
+# define NR_BASES	2
+# define BASE_STD	0
+# define BASE_DEF	1
+#else
+# define NR_BASES	1
+# define BASE_STD	0
+# define BASE_DEF	0
+#endif
 
 struct timer_base {
-	spinlock_t lock;
-	struct timer_list *running_timer;
-	unsigned long clk;
-	unsigned long next_timer;
-	unsigned long active_timers;
-	unsigned long all_timers;
-	int cpu;
-	bool migration_enabled;
-	bool nohz_active;
-	struct tvec_root tv1;
-	struct tvec tv2;
-	struct tvec tv3;
-	struct tvec tv4;
-	struct tvec tv5;
+	spinlock_t		lock;
+	struct timer_list	*running_timer;
+	unsigned long		clk;
+	unsigned int		cpu;
+	bool			migration_enabled;
+	bool			nohz_active;
+	DECLARE_BITMAP(pending_map, WHEEL_SIZE);
+	struct hlist_head	vectors[WHEEL_SIZE];
 } ____cacheline_aligned;
 
-
-static DEFINE_PER_CPU(struct timer_base, timer_bases);
+static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);
 
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
 unsigned int sysctl_timer_migration = 1;
@@ -106,15 +214,17 @@ void timers_update_migration(bool update_nohz)
 	unsigned int cpu;
 
 	/* Avoid the loop, if nothing to update */
-	if (this_cpu_read(timer_bases.migration_enabled) == on)
+	if (this_cpu_read(timer_bases[BASE_STD].migration_enabled) == on)
 		return;
 
 	for_each_possible_cpu(cpu) {
-		per_cpu(timer_bases.migration_enabled, cpu) = on;
+		per_cpu(timer_bases[BASE_STD].migration_enabled, cpu) = on;
+		per_cpu(timer_bases[BASE_DEF].migration_enabled, cpu) = on;
 		per_cpu(hrtimer_bases.migration_enabled, cpu) = on;
 		if (!update_nohz)
 			continue;
-		per_cpu(timer_bases.nohz_active, cpu) = true;
+		per_cpu(timer_bases[BASE_STD].nohz_active, cpu) = true;
+		per_cpu(timer_bases[BASE_DEF].nohz_active, cpu) = true;
 		per_cpu(hrtimer_bases.nohz_active, cpu) = true;
 	}
 }
@@ -133,20 +243,6 @@ int timer_migration_handler(struct ctl_table *table, int write,
 	mutex_unlock(&mutex);
 	return ret;
 }
-
-static inline struct timer_base *get_target_base(struct timer_base *base,
-						int pinned)
-{
-	if (pinned || !base->migration_enabled)
-		return this_cpu_ptr(&timer_bases);
-	return per_cpu_ptr(&timer_bases, get_nohz_timer_target());
-}
-#else
-static inline struct timer_base *get_target_base(struct timer_base *base,
-						int pinned)
-{
-	return this_cpu_ptr(&timer_bases);
-}
 #endif
 
 static unsigned long round_jiffies_common(unsigned long j, int cpu,
@@ -370,78 +466,91 @@ void set_timer_slack(struct timer_list *timer, int slack_hz)
 }
 EXPORT_SYMBOL_GPL(set_timer_slack);
 
+static inline unsigned int timer_get_idx(struct timer_list *timer)
+{
+	return (timer->flags & TIMER_ARRAYMASK) >> TIMER_ARRAYSHIFT;
+}
+
+static inline void timer_set_idx(struct timer_list *timer, unsigned int idx)
+{
+	timer->flags = (timer->flags & ~TIMER_ARRAYMASK) |
+			idx << TIMER_ARRAYSHIFT;
+}
+
+/*
+ * Helper function to calculate the array index for a given expiry
+ * time.
+ */
+static inline unsigned calc_index(unsigned expires, unsigned lvl)
+{
+	expires = (expires + LVL_GRAN(lvl)) >> LVL_SHIFT(lvl);
+	return LVL_OFFS(lvl) + (expires & LVL_MASK);
+}
+
 static void
 __internal_add_timer(struct timer_base *base, struct timer_list *timer)
 {
 	unsigned long expires = timer->expires;
-	unsigned long idx = expires - base->clk;
+	unsigned long delta = expires - base->clk;
 	struct hlist_head *vec;
-
-	if (idx < TVR_SIZE) {
-		int i = expires & TVR_MASK;
-		vec = base->tv1.vec + i;
-	} else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
-		int i = (expires >> TVR_BITS) & TVN_MASK;
-		vec = base->tv2.vec + i;
-	} else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
-		int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
-		vec = base->tv3.vec + i;
-	} else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
-		int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
-		vec = base->tv4.vec + i;
-	} else if ((signed long) idx < 0) {
-		/*
-		 * Can happen if you add a timer with expires == jiffies,
-		 * or you set a timer to go off in the past
-		 */
-		vec = base->tv1.vec + (base->clk & TVR_MASK);
+	unsigned int idx;
+
+	if (delta < LVL_START(1)) {
+		idx = calc_index(expires, 0);
+	} else if (delta < LVL_START(2)) {
+		idx = calc_index(expires, 1);
+	} else if (delta < LVL_START(3)) {
+		idx = calc_index(expires, 2);
+	} else if (delta < LVL_START(4)) {
+		idx = calc_index(expires, 3);
+	} else if (delta < LVL_START(5)) {
+		idx = calc_index(expires, 4);
+	} else if (delta < LVL_START(6)) {
+		idx = calc_index(expires, 5);
+	} else if (delta < LVL_START(7)) {
+		idx = calc_index(expires, 6);
+	} else if (LVL_DEPTH > 8 && delta < LVL_START(8)) {
+		idx = calc_index(expires, 7);
+	} else if ((long) delta < 0) {
+		idx = base->clk & LVL_MASK;
 	} else {
-		int i;
-		/* If the timeout is larger than MAX_TVAL (on 64-bit
-		 * architectures or with CONFIG_BASE_SMALL=1) then we
-		 * use the maximum timeout.
+		/*
+		 * Force obscenely large timeouts to expire at the
+		 * capacity limit of the wheel.
 		 */
-		if (idx > MAX_TVAL) {
-			idx = MAX_TVAL;
-			expires = idx + base->clk;
-		}
-		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
-		vec = base->tv5.vec + i;
-	}
+		if (expires >= WHEEL_TIMEOUT_CUTOFF)
+			expires = WHEEL_TIMEOUT_MAX;
 
+		idx = calc_index(expires, LVL_DEPTH - 1);
+	}
+	/*
+	 * Enqueue the timer into the array bucket, mark it pending in
+	 * the bitmap and store the index in the timer flags.
+	 */
+	vec = base->vectors + idx;
 	hlist_add_head(&timer->entry, vec);
+	__set_bit(idx, base->pending_map);
+	timer_set_idx(timer, idx);
 }
 
 static void internal_add_timer(struct timer_base *base, struct timer_list *timer)
 {
-	/* Advance base->jiffies, if the base is empty */
-	if (!base->all_timers++)
-		base->clk = jiffies;
-
 	__internal_add_timer(base, timer);
-	/*
-	 * Update base->active_timers and base->next_timer
-	 */
-	if (!(timer->flags & TIMER_DEFERRABLE)) {
-		if (!base->active_timers++ ||
-		    time_before(timer->expires, base->next_timer))
-			base->next_timer = timer->expires;
-	}
 
 	/*
 	 * Check whether the other CPU is in dynticks mode and needs
-	 * to be triggered to reevaluate the timer wheel.
-	 * We are protected against the other CPU fiddling
-	 * with the timer by holding the timer base lock. This also
-	 * makes sure that a CPU on the way to stop its tick can not
-	 * evaluate the timer wheel.
+	 * to be triggered to reevaluate the timer wheel.  We are
+	 * protected against the other CPU fiddling with the timer by
+	 * holding the timer base lock. This also makes sure that a
+	 * CPU on the way to stop its tick can not evaluate the timer
+	 * wheel.
 	 *
 	 * Spare the IPI for deferrable timers on idle targets though.
 	 * The next busy ticks will take care of it. Except full dynticks
 	 * require special care against races with idle_cpu(), lets deal
 	 * with that later.
 	 */
-	if (base->nohz_active) {
+	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active) {
 		if (!(timer->flags & TIMER_DEFERRABLE) ||
 		    tick_nohz_full_cpu(base->cpu))
 			wake_up_nohz_cpu(base->cpu);
@@ -706,54 +815,87 @@ static inline void detach_timer(struct timer_list *timer, bool clear_pending)
 	entry->next = LIST_POISON2;
 }
 
-static inline void
-detach_expired_timer(struct timer_list *timer, struct timer_base *base)
-{
-	detach_timer(timer, true);
-	if (!(timer->flags & TIMER_DEFERRABLE))
-		base->active_timers--;
-	base->all_timers--;
-}
-
 static int detach_if_pending(struct timer_list *timer, struct timer_base *base,
 			     bool clear_pending)
 {
+	unsigned idx = timer_get_idx(timer);
+
 	if (!timer_pending(timer))
 		return 0;
 
+	if (hlist_is_singular_node(&timer->entry, base->vectors + idx))
+		__clear_bit(idx, base->pending_map);
+
 	detach_timer(timer, clear_pending);
-	if (!(timer->flags & TIMER_DEFERRABLE)) {
-		base->active_timers--;
-		if (timer->expires == base->next_timer)
-			base->next_timer = base->clk;
-	}
-	/* If this was the last timer, advance base->jiffies */
-	if (!--base->all_timers)
-		base->clk = jiffies;
 	return 1;
 }
 
+static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu)
+{
+	struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_STD], cpu);
+
+	/*
+	 * If the timer is deferrable and nohz is active then we need to use
+	 * the deferrable base.
+	 */
+	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active &&
+	    (tflags & TIMER_DEFERRABLE))
+		base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu);
+	return base;
+}
+
+static inline struct timer_base *get_timer_this_cpu_base(u32 tflags)
+{
+	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
+
+	/*
+	 * If the timer is deferrable and nohz is active then we need to use
+	 * the deferrable base.
+	 */
+	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active &&
+	    (tflags & TIMER_DEFERRABLE))
+		base = this_cpu_ptr(&timer_bases[BASE_DEF]);
+	return base;
+}
+
+static inline struct timer_base *get_timer_base(u32 tflags)
+{
+	return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK);
+}
+
+static inline struct timer_base *get_target_base(struct timer_base *base,
+						 unsigned tflags)
+{
+#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
+	if ((tflags & TIMER_PINNED) || !base->migration_enabled)
+		return get_timer_this_cpu_base(tflags);
+	return get_timer_cpu_base(tflags, get_nohz_timer_target());
+#else
+	return get_timer_this_cpu_base(tflags);
+#endif
+}
+
 /*
- * We are using hashed locking: holding per_cpu(timer_bases).lock
- * means that all timers which are tied to this base via timer->base are
- * locked, and the base itself is locked too.
+ * We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means
+ * that all timers which are tied to this base are locked, and the base itself
+ * is locked too.
  *
  * So __run_timers/migrate_timers can safely modify all timers which could
- * be found on ->tvX lists.
+ * be found in the base->vectors array.
  *
- * When the timer's base is locked and removed from the list, the
- * TIMER_MIGRATING flag is set, FIXME
+ * When a timer is migrating then the TIMER_MIGRATING flag is set and we need
+ * to wait until the migration is done.
  */
 static struct timer_base *lock_timer_base(struct timer_list *timer,
-					unsigned long *flags)
+					  unsigned long *flags)
 	__acquires(timer->base->lock)
 {
 	for (;;) {
-		u32 tf = timer->flags;
 		struct timer_base *base;
+		u32 tf = timer->flags;
 
 		if (!(tf & TIMER_MIGRATING)) {
-			base = per_cpu_ptr(&timer_bases, tf & TIMER_CPUMASK);
+			base = get_timer_base(tf);
 			spin_lock_irqsave(&base->lock, *flags);
 			if (timer->flags == tf)
 				return base;
@@ -770,6 +912,27 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
 	unsigned long flags;
 	int ret = 0;
 
+	/*
+	 * TODO: Calculate the array bucket of the timer right here w/o
+	 * holding the base lock. This allows us to check not only
+	 * timer->expires == expires below, but also whether the timer
+	 * ends up in the same bucket. If we really need to requeue
+	 * the timer then we check whether base->clk has
+	 * advanced between here and locking the timer base. If
+	 * jiffies advanced we have to recalc the array bucket with the
+	 * lock held.
+	 */
+
+	/*
+	 * This is a common optimization triggered by the
+	 * networking code - if the timer is re-modified
+	 * to be the same thing then just return:
+	 */
+	if (timer_pending(timer)) {
+		if (timer->expires == expires)
+			return 1;
+	}
+
 	timer_stats_timer_set_start_info(timer);
 	BUG_ON(!timer->function);
 
@@ -781,15 +944,15 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
 
 	debug_activate(timer, expires);
 
-	new_base = get_target_base(base, timer->flags & TIMER_PINNED);
+	new_base = get_target_base(base, timer->flags);
 
 	if (base != new_base) {
 		/*
-		 * We are trying to schedule the timer on the local CPU.
+		 * We are trying to schedule the timer on the new base.
 		 * However we can't change timer's base while it is running,
 		 * otherwise del_timer_sync() can't detect that the timer's
-		 * handler yet has not finished. This also guarantees that
-		 * the timer is serialized wrt itself.
+		 * handler yet has not finished. This also guarantees that the
+		 * timer is serialized wrt itself.
 		 */
 		if (likely(base->running_timer != timer)) {
 			/* See the comment in lock_timer_base() */
@@ -828,45 +991,6 @@ int mod_timer_pending(struct timer_list *timer, unsigned long expires)
 }
 EXPORT_SYMBOL(mod_timer_pending);
 
-/*
- * Decide where to put the timer while taking the slack into account
- *
- * Algorithm:
- *   1) calculate the maximum (absolute) time
- *   2) calculate the highest bit where the expires and new max are different
- *   3) use this bit to make a mask
- *   4) use the bitmask to round down the maximum time, so that all last
- *      bits are zeros
- */
-static inline
-unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
-{
-	unsigned long expires_limit, mask;
-	int bit;
-
-	if (timer->slack >= 0) {
-		expires_limit = expires + timer->slack;
-	} else {
-		long delta = expires - jiffies;
-
-		if (delta < 256)
-			return expires;
-
-		expires_limit = expires + delta / 256;
-	}
-	mask = expires ^ expires_limit;
-	if (mask == 0)
-		return expires;
-
-	bit = __fls(mask);
-
-	mask = (1UL << bit) - 1;
-
-	expires_limit = expires_limit & ~(mask);
-
-	return expires_limit;
-}
-
 /**
  * mod_timer - modify a timer's timeout
  * @timer: the timer to be modified
@@ -889,16 +1013,6 @@ unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
  */
 int mod_timer(struct timer_list *timer, unsigned long expires)
 {
-	expires = apply_slack(timer, expires);
-
-	/*
-	 * This is a common optimization triggered by the
-	 * networking code - if the timer is re-modified
-	 * to be the same thing then just return:
-	 */
-	if (timer_pending(timer) && timer->expires == expires)
-		return 1;
-
 	return __mod_timer(timer, expires, false);
 }
 EXPORT_SYMBOL(mod_timer);
@@ -933,13 +1047,14 @@ EXPORT_SYMBOL(add_timer);
  */
 void add_timer_on(struct timer_list *timer, int cpu)
 {
-	struct timer_base *new_base = per_cpu_ptr(&timer_bases, cpu);
-	struct timer_base *base;
+	struct timer_base *new_base, *base;
 	unsigned long flags;
 
 	timer_stats_timer_set_start_info(timer);
 	BUG_ON(timer_pending(timer) || !timer->function);
 
+	new_base = get_timer_cpu_base(timer->flags, cpu);
+
 	/*
 	 * If @timer was on a different CPU, it should be migrated with the
 	 * old base locked to prevent other operations proceeding with the
@@ -1085,27 +1200,6 @@ int del_timer_sync(struct timer_list *timer)
 EXPORT_SYMBOL(del_timer_sync);
 #endif
 
-static int cascade(struct timer_base *base, struct tvec *tv, int index)
-{
-	/* cascade all the timers from tv up one level */
-	struct timer_list *timer;
-	struct hlist_node *tmp;
-	struct hlist_head tv_list;
-
-	hlist_move_list(tv->vec + index, &tv_list);
-
-	/*
-	 * We are removing _all_ timers from the list, so we
-	 * don't have to detach them individually.
-	 */
-	hlist_for_each_entry_safe(timer, tmp, &tv_list, entry) {
-		/* No accounting, while moving them */
-		__internal_add_timer(base, timer);
-	}
-
-	return index;
-}
-
 static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
 			  unsigned long data)
 {
@@ -1149,68 +1243,80 @@ static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
 	}
 }
 
-#define INDEX(N) ((base->clk >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
+static void expire_timers(struct timer_base *base, struct hlist_head *head)
+{
+	while (!hlist_empty(head)) {
+		struct timer_list *timer;
+		void (*fn)(unsigned long);
+		unsigned long data;
+
+		timer = hlist_entry(head->first, struct timer_list, entry);
+		timer_stats_account_timer(timer);
+
+		base->running_timer = timer;
+		detach_timer(timer, true);
+
+		fn = timer->function;
+		data = timer->data;
+
+		if (timer->flags & TIMER_IRQSAFE) {
+			spin_unlock(&base->lock);
+			call_timer_fn(timer, fn, data);
+			spin_lock(&base->lock);
+		} else {
+			spin_unlock_irq(&base->lock);
+			call_timer_fn(timer, fn, data);
+			spin_lock_irq(&base->lock);
+		}
+	}
+}
+
+static int collect_expired_timers(struct timer_base *base,
+				  struct hlist_head *heads)
+{
+	unsigned long clk = base->clk;
+	struct hlist_head *vec;
+	int i, levels = 0;
+	unsigned int idx;
+
+	for (i = 0; i < LVL_DEPTH; i++) {
+		idx = (clk & LVL_MASK) + i * LVL_SIZE;
+
+		if (__test_and_clear_bit(idx, base->pending_map)) {
+			vec = base->vectors + idx;
+			hlist_move_list(vec, heads++);
+			levels++;
+		}
+		/* Is it time to look at the next level? */
+		if (clk & LVL_CLK_MASK)
+			break;
+		/* Shift clock for the next level granularity */
+		clk >>= LVL_CLK_SHIFT;
+	}
+	return levels;
+}
 
 /**
  * __run_timers - run all expired timers (if any) on this CPU.
  * @base: the timer vector to be processed.
- *
- * This function cascades all vectors and executes all expired timer
- * vectors.
  */
 static inline void __run_timers(struct timer_base *base)
 {
-	struct timer_list *timer;
+	struct hlist_head heads[LVL_DEPTH];
+	int levels;
+
+	if (!time_after_eq(jiffies, base->clk))
+		return;
 
 	spin_lock_irq(&base->lock);
 
 	while (time_after_eq(jiffies, base->clk)) {
-		struct hlist_head work_list;
-		struct hlist_head *head = &work_list;
-		int index;
 
-		if (!base->all_timers) {
-			base->clk = jiffies;
-			break;
-		}
-
-		index = base->clk & TVR_MASK;
+		levels = collect_expired_timers(base, heads);
+		base->clk++;
 
-		/*
-		 * Cascade timers:
-		 */
-		if (!index &&
-			(!cascade(base, &base->tv2, INDEX(0))) &&
-				(!cascade(base, &base->tv3, INDEX(1))) &&
-					!cascade(base, &base->tv4, INDEX(2)))
-			cascade(base, &base->tv5, INDEX(3));
-		++base->clk;
-		hlist_move_list(base->tv1.vec + index, head);
-		while (!hlist_empty(head)) {
-			void (*fn)(unsigned long);
-			unsigned long data;
-			bool irqsafe;
-
-			timer = hlist_entry(head->first, struct timer_list, entry);
-			fn = timer->function;
-			data = timer->data;
-			irqsafe = timer->flags & TIMER_IRQSAFE;
-
-			timer_stats_account_timer(timer);
-
-			base->running_timer = timer;
-			detach_expired_timer(timer, base);
-
-			if (irqsafe) {
-				spin_unlock(&base->lock);
-				call_timer_fn(timer, fn, data);
-				spin_lock(&base->lock);
-			} else {
-				spin_unlock_irq(&base->lock);
-				call_timer_fn(timer, fn, data);
-				spin_lock_irq(&base->lock);
-			}
-		}
+		while (levels--)
+			expire_timers(base, heads + levels);
 	}
 	base->running_timer = NULL;
 	spin_unlock_irq(&base->lock);
@@ -1218,78 +1324,87 @@ static inline void __run_timers(struct timer_base *base)
 
 #ifdef CONFIG_NO_HZ_COMMON
 /*
- * Find out when the next timer event is due to happen. This
- * is used on S/390 to stop all activity when a CPU is idle.
- * This function needs to be called with interrupts disabled.
+ * Find the next pending bucket of a level. Search from @offset + @clk upwards
+ * and if nothing there, search from start of the level (@offset) up to
+ * @offset + clk.
+ */
+static int next_pending_bucket(struct timer_base *base, unsigned offset,
+			       unsigned clk)
+{
+	unsigned pos, start = offset + clk;
+	unsigned end = offset + LVL_SIZE;
+
+	pos = find_next_bit(base->pending_map, end, start);
+	if (pos < end)
+		return pos - start;
+
+	pos = find_next_bit(base->pending_map, start, offset);
+	return pos < start ? pos + LVL_SIZE - start : -1;
+}
+
+/*
+ * Search the first expiring timer in the various clock levels.
  */
 static unsigned long __next_timer_interrupt(struct timer_base *base)
 {
-	unsigned long clk = base->clk;
-	unsigned long expires = clk + NEXT_TIMER_MAX_DELTA;
-	int index, slot, array, found = 0;
-	struct timer_list *nte;
-	struct tvec *varray[4];
-
-	/* Look for timer events in tv1. */
-	index = slot = clk & TVR_MASK;
-	do {
-		hlist_for_each_entry(nte, base->tv1.vec + slot, entry) {
-			if (nte->flags & TIMER_DEFERRABLE)
-				continue;
-
-			found = 1;
-			expires = nte->expires;
-			/* Look at the cascade bucket(s)? */
-			if (!index || slot < index)
-				goto cascade;
-			return expires;
+	unsigned long clk, next, adj;
+	unsigned lvl, offset = 0;
+
+	spin_lock(&base->lock);
+	next = base->clk + NEXT_TIMER_MAX_DELTA;
+	clk = base->clk;
+	for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) {
+		int pos = next_pending_bucket(base, offset, clk & LVL_MASK);
+
+		if (pos >= 0) {
+			unsigned long tmp = clk + (unsigned long) pos;
+
+			tmp <<= LVL_SHIFT(lvl);
+			if (time_before(tmp, next))
+				next = tmp;
 		}
-		slot = (slot + 1) & TVR_MASK;
-	} while (slot != index);
-
-cascade:
-	/* Calculate the next cascade event */
-	if (index)
-		clk += TVR_SIZE - index;
-	clk >>= TVR_BITS;
-
-	/* Check tv2-tv5. */
-	varray[0] = &base->tv2;
-	varray[1] = &base->tv3;
-	varray[2] = &base->tv4;
-	varray[3] = &base->tv5;
-
-	for (array = 0; array < 4; array++) {
-		struct tvec *varp = varray[array];
-
-		index = slot = clk & TVN_MASK;
-		do {
-			hlist_for_each_entry(nte, varp->vec + slot, entry) {
-				if (nte->flags & TIMER_DEFERRABLE)
-					continue;
-
-				found = 1;
-				if (time_before(nte->expires, expires))
-					expires = nte->expires;
-			}
-			/*
-			 * Do we still search for the first timer or are
-			 * we looking up the cascade buckets ?
-			 */
-			if (found) {
-				/* Look at the cascade bucket(s)? */
-				if (!index || slot < index)
-					break;
-				return expires;
-			}
-			slot = (slot + 1) & TVN_MASK;
-		} while (slot != index);
-
-		if (index)
-			clk += TVN_SIZE - index;
-		clk >>= TVN_BITS;
+		/*
+		 * Clock for the next level. If the current level clock lower
+		 * bits are zero, we look at the next level as is. If not we
+		 * need to advance it by one because that's going to be the
+		 * next expiring bucket in that level. base->clk is the next
+		 * expiring jiffy. So in case of:
+		 *
+		 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
+		 *  0    0    0    0    0    0
+		 *
+		 * we have to look at all levels @index 0. With
+		 *
+		 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
+		 *  0    0    0    0    0    2
+		 *
+		 * LVL0 has the next expiring bucket @index 2. The upper
+		 * levels have the next expiring bucket @index 1.
+		 *
+		 * In case that the propagation wraps the next level the same
+		 * rules apply:
+		 *
+		 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
+		 *  0    0    0    0    F    2
+		 *
+		 * So after looking at LVL0 we get:
+		 *
+		 * LVL5 LVL4 LVL3 LVL2 LVL1
+		 *  0    0    0    1    0
+		 *
+		 * So no propagation from LVL1 to LVL2 because that happened
+		 * with the add already, but then we need to propagate further
+		 * from LVL2 to LVL3.
+		 *
+		 * So the simple check whether the lower bits of the current
+		 * level are 0 or not is sufficient for all cases.
+		 */
+		adj = clk & LVL_CLK_MASK ? 1 : 0;
+		clk >>= LVL_CLK_SHIFT;
+		clk += adj;
 	}
-	return expires;
+	spin_unlock(&base->lock);
+	return next;
 }
 
 /*
@@ -1335,7 +1450,7 @@ static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
  */
 u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
 {
-	struct timer_base *base = this_cpu_ptr(&timer_bases);
+	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
 	u64 expires = KTIME_MAX;
 	unsigned long nextevt;
 
@@ -1346,17 +1461,11 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
 	if (cpu_is_offline(smp_processor_id()))
 		return expires;
 
-	spin_lock(&base->lock);
-	if (base->active_timers) {
-		if (time_before_eq(base->next_timer, base->clk))
-			base->next_timer = __next_timer_interrupt(base);
-		nextevt = base->next_timer;
-		if (time_before_eq(nextevt, basej))
-			expires = basem;
-		else
-			expires = basem + (nextevt - basej) * TICK_NSEC;
-	}
-	spin_unlock(&base->lock);
+	nextevt = __next_timer_interrupt(base);
+	if (time_before_eq(nextevt, basej))
+		expires = basem;
+	else
+		expires = basem + (nextevt - basej) * TICK_NSEC;
 
 	return cmp_next_hrtimer_event(basem, expires);
 }
@@ -1387,10 +1496,11 @@ void update_process_times(int user_tick)
  */
 static void run_timer_softirq(struct softirq_action *h)
 {
-	struct timer_base *base = this_cpu_ptr(&timer_bases);
+	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
 
-	if (time_after_eq(jiffies, base->clk))
-		__run_timers(base);
+	__run_timers(base);
+	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active)
+		__run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
 }
 
 /*
@@ -1541,7 +1651,6 @@ static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *h
 
 	while (!hlist_empty(head)) {
 		timer = hlist_entry(head->first, struct timer_list, entry);
-		/* We ignore the accounting on the dying cpu */
 		detach_timer(timer, false);
 		timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu;
 		internal_add_timer(new_base, timer);
@@ -1552,35 +1661,29 @@ static void migrate_timers(int cpu)
 {
 	struct timer_base *old_base;
 	struct timer_base *new_base;
-	int i;
+	int b, i;
 
 	BUG_ON(cpu_online(cpu));
-	old_base = per_cpu_ptr(&timer_bases, cpu);
-	new_base = get_cpu_ptr(&timer_bases);
-	/*
-	 * The caller is globally serialized and nobody else
-	 * takes two locks at once, deadlock is not possible.
-	 */
-	spin_lock_irq(&new_base->lock);
-	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
-
-	BUG_ON(old_base->running_timer);
-
-	for (i = 0; i < TVR_SIZE; i++)
-		migrate_timer_list(new_base, old_base->tv1.vec + i);
-	for (i = 0; i < TVN_SIZE; i++) {
-		migrate_timer_list(new_base, old_base->tv2.vec + i);
-		migrate_timer_list(new_base, old_base->tv3.vec + i);
-		migrate_timer_list(new_base, old_base->tv4.vec + i);
-		migrate_timer_list(new_base, old_base->tv5.vec + i);
-	}
 
-	old_base->active_timers = 0;
-	old_base->all_timers = 0;
+	for (b = 0; b < NR_BASES; b++) {
+		old_base = per_cpu_ptr(&timer_bases[b], cpu);
+		new_base = get_cpu_ptr(&timer_bases[b]);
+		/*
+		 * The caller is globally serialized and nobody else
+		 * takes two locks at once, deadlock is not possible.
+		 */
+		spin_lock_irq(&new_base->lock);
+		spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
+
+		BUG_ON(old_base->running_timer);
+
+		for (i = 0; i < WHEEL_SIZE; i++)
+			migrate_timer_list(new_base, old_base->vectors + i);
 
-	spin_unlock(&old_base->lock);
-	spin_unlock_irq(&new_base->lock);
-	put_cpu_ptr(&timer_bases);
+		spin_unlock(&old_base->lock);
+		spin_unlock_irq(&new_base->lock);
+		put_cpu_ptr(&timer_bases);
+	}
 }
 
 static int timer_cpu_notify(struct notifier_block *self,
@@ -1608,13 +1711,15 @@ static inline void timer_register_cpu_notifier(void) { }
 
 static void __init init_timer_cpu(int cpu)
 {
-	struct timer_base *base = per_cpu_ptr(&timer_bases, cpu);
-
-	base->cpu = cpu;
-	spin_lock_init(&base->lock);
+	struct timer_base *base;
+	int i;
 
-	base->clk = jiffies;
-	base->next_timer = base->clk;
+	for (i = 0; i < NR_BASES; i++) {
+		base = per_cpu_ptr(&timer_bases[i], cpu);
+		base->cpu = cpu;
+		spin_lock_init(&base->lock);
+		base->clk = jiffies;
+	}
 }
 
 static void __init init_timer_cpus(void)

^ permalink raw reply related	[flat|nested] 60+ messages in thread

* [tip:timers/core] timers: Remove set_timer_slack() leftovers
  2016-07-04  9:50 ` [patch 4 15/22] timer: Remove slack leftovers Thomas Gleixner
@ 2016-07-07  8:46   ` tip-bot for Thomas Gleixner
  2016-07-22 11:31   ` [patch 4 15/22] timer: Remove slack leftovers Jason A. Donenfeld
  1 sibling, 0 replies; 60+ messages in thread
From: tip-bot for Thomas Gleixner @ 2016-07-07  8:46 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: john.stultz, dbaryshkov, edumazet, tglx, linux, mathias.nyman,
	josh, riel, linux-kernel, fweisbec, jh80.chung, axboe, peterz,
	lenb, afd, torvalds, sre, gregkh, ulf.hansson, arjan, davem,
	paulmck, pali.rohar, hpa, dwmw2, mingo, clm, stern

Commit-ID:  53bf837b78d155b8e1110b3c25b4d0d6391b8ff3
Gitweb:     http://git.kernel.org/tip/53bf837b78d155b8e1110b3c25b4d0d6391b8ff3
Author:     Thomas Gleixner <tglx@linutronix.de>
AuthorDate: Mon, 4 Jul 2016 09:50:31 +0000
Committer:  Ingo Molnar <mingo@kernel.org>
CommitDate: Thu, 7 Jul 2016 10:35:09 +0200

timers: Remove set_timer_slack() leftovers

We now have implicit batching in the timer wheel. The slack API is no longer
used, so remove it.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Alan Stern <stern@rowland.harvard.edu>
Cc: Andrew F. Davis <afd@ti.com>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Chris Mason <clm@fb.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: George Spelvin <linux@sciencehorizons.net>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Jaehoon Chung <jh80.chung@samsung.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: John Stultz <john.stultz@linaro.org>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Len Brown <lenb@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mathias Nyman <mathias.nyman@intel.com>
Cc: Pali Rohár <pali.rohar@gmail.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Sebastian Reichel <sre@kernel.org>
Cc: Ulf Hansson <ulf.hansson@linaro.org>
Cc: linux-block@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Cc: linux-mmc@vger.kernel.org
Cc: linux-pm@vger.kernel.org
Cc: linux-usb@vger.kernel.org
Cc: netdev@vger.kernel.org
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160704094342.189813118@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 block/genhd.c                   |  5 -----
 drivers/mmc/host/jz4740_mmc.c   |  2 --
 drivers/power/bq27xxx_battery.c |  5 +----
 drivers/usb/host/ohci-hcd.c     |  1 -
 drivers/usb/host/xhci.c         |  2 --
 include/linux/timer.h           |  4 ----
 kernel/time/timer.c             | 19 -------------------
 lib/random32.c                  |  1 -
 8 files changed, 1 insertion(+), 38 deletions(-)

diff --git a/block/genhd.c b/block/genhd.c
index 9f42526..f06d7f3 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1523,12 +1523,7 @@ static void __disk_unblock_events(struct gendisk *disk, bool check_now)
 	if (--ev->block)
 		goto out_unlock;
 
-	/*
-	 * Not exactly a latency critical operation, set poll timer
-	 * slack to 25% and kick event check.
-	 */
 	intv = disk_events_poll_jiffies(disk);
-	set_timer_slack(&ev->dwork.timer, intv / 4);
 	if (check_now)
 		queue_delayed_work(system_freezable_power_efficient_wq,
 				&ev->dwork, 0);
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 03ddf0e..684087d 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -1068,8 +1068,6 @@ static int jz4740_mmc_probe(struct platform_device* pdev)
 	jz4740_mmc_clock_disable(host);
 	setup_timer(&host->timeout_timer, jz4740_mmc_timeout,
 			(unsigned long)host);
-	/* It is not important when it times out, it just needs to timeout. */
-	set_timer_slack(&host->timeout_timer, HZ);
 
 	host->use_dma = true;
 	if (host->use_dma && jz4740_mmc_acquire_dma_channels(host) != 0)
diff --git a/drivers/power/bq27xxx_battery.c b/drivers/power/bq27xxx_battery.c
index 45f6ebf..e90b3f3 100644
--- a/drivers/power/bq27xxx_battery.c
+++ b/drivers/power/bq27xxx_battery.c
@@ -735,11 +735,8 @@ static void bq27xxx_battery_poll(struct work_struct *work)
 
 	bq27xxx_battery_update(di);
 
-	if (poll_interval > 0) {
-		/* The timer does not have to be accurate. */
-		set_timer_slack(&di->work.timer, poll_interval * HZ / 4);
+	if (poll_interval > 0)
 		schedule_delayed_work(&di->work, poll_interval * HZ);
-	}
 }
 
 /*
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 0449235..1700908 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -500,7 +500,6 @@ static int ohci_init (struct ohci_hcd *ohci)
 
 	setup_timer(&ohci->io_watchdog, io_watchdog_func,
 			(unsigned long) ohci);
-	set_timer_slack(&ohci->io_watchdog, msecs_to_jiffies(20));
 
 	ohci->hcca = dma_alloc_coherent (hcd->self.controller,
 			sizeof(*ohci->hcca), &ohci->hcca_dma, GFP_KERNEL);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index f2f9518..a986fe7 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -490,8 +490,6 @@ static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
 	xhci->comp_mode_recovery_timer.expires = jiffies +
 			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);
 
-	set_timer_slack(&xhci->comp_mode_recovery_timer,
-			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
 	add_timer(&xhci->comp_mode_recovery_timer);
 	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
 			"Compliance mode recovery timer initialized");
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 5869ab9..4419506 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -19,7 +19,6 @@ struct timer_list {
 	void			(*function)(unsigned long);
 	unsigned long		data;
 	u32			flags;
-	int			slack;
 
 #ifdef CONFIG_TIMER_STATS
 	int			start_pid;
@@ -73,7 +72,6 @@ struct timer_list {
 		.expires = (_expires),				\
 		.data = (_data),				\
 		.flags = (_flags),				\
-		.slack = -1,					\
 		__TIMER_LOCKDEP_MAP_INITIALIZER(		\
 			__FILE__ ":" __stringify(__LINE__))	\
 	}
@@ -193,8 +191,6 @@ extern int del_timer(struct timer_list * timer);
 extern int mod_timer(struct timer_list *timer, unsigned long expires);
 extern int mod_timer_pending(struct timer_list *timer, unsigned long expires);
 
-extern void set_timer_slack(struct timer_list *time, int slack_hz);
-
 /*
  * The jiffies value which is added to now, when there is no timer
  * in the timer wheel:
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 86e95b7..a83e23d 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -447,24 +447,6 @@ unsigned long round_jiffies_up_relative(unsigned long j)
 }
 EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
 
-/**
- * set_timer_slack - set the allowed slack for a timer
- * @timer: the timer to be modified
- * @slack_hz: the amount of time (in jiffies) allowed for rounding
- *
- * Set the amount of time, in jiffies, that a certain timer has
- * in terms of slack. By setting this value, the timer subsystem
- * will schedule the actual timer somewhere between
- * the time mod_timer() asks for, and that time plus the slack.
- *
- * By setting the slack to -1, a percentage of the delay is used
- * instead.
- */
-void set_timer_slack(struct timer_list *timer, int slack_hz)
-{
-	timer->slack = slack_hz;
-}
-EXPORT_SYMBOL_GPL(set_timer_slack);
 
 static inline unsigned int timer_get_idx(struct timer_list *timer)
 {
@@ -775,7 +757,6 @@ static void do_init_timer(struct timer_list *timer, unsigned int flags,
 {
 	timer->entry.pprev = NULL;
 	timer->flags = flags | raw_smp_processor_id();
-	timer->slack = -1;
 #ifdef CONFIG_TIMER_STATS
 	timer->start_site = NULL;
 	timer->start_pid = -1;
diff --git a/lib/random32.c b/lib/random32.c
index 510d1ce..69ed593 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -233,7 +233,6 @@ static void __prandom_timer(unsigned long dontcare)
 
 static void __init __prandom_start_seed_timer(void)
 {
-	set_timer_slack(&seed_timer, HZ);
 	seed_timer.expires = jiffies + msecs_to_jiffies(40 * MSEC_PER_SEC);
 	add_timer(&seed_timer);
 }

^ permalink raw reply related	[flat|nested] 60+ messages in thread

* [tip:timers/core] timers: Move __run_timers() function
  2016-07-04  9:50 ` [patch 4 16/22] timer: Move __run_timers() function Thomas Gleixner
@ 2016-07-07  8:46   ` tip-bot for Anna-Maria Gleixner
  0 siblings, 0 replies; 60+ messages in thread
From: tip-bot for Anna-Maria Gleixner @ 2016-07-07  8:46 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: lenb, anna-maria, linux-kernel, riel, linux, edumazet, arjan,
	torvalds, tglx, paulmck, mingo, peterz, hpa, fweisbec, clm, josh

Commit-ID:  73420fea80c6c376d91a69defe64013baa0d7e95
Gitweb:     http://git.kernel.org/tip/73420fea80c6c376d91a69defe64013baa0d7e95
Author:     Anna-Maria Gleixner <anna-maria@linutronix.de>
AuthorDate: Mon, 4 Jul 2016 09:50:33 +0000
Committer:  Ingo Molnar <mingo@kernel.org>
CommitDate: Thu, 7 Jul 2016 10:35:09 +0200

timers: Move __run_timers() function

Move __run_timers() below __next_timer_interrupt() and next_pending_bucket()
in preparation for __run_timers() NOHZ optimization.

No functional change.

Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Chris Mason <clm@fb.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: George Spelvin <linux@sciencehorizons.net>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Len Brown <lenb@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160704094342.271872665@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 kernel/time/timer.c | 52 ++++++++++++++++++++++++++--------------------------
 1 file changed, 26 insertions(+), 26 deletions(-)

diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index a83e23d..c16c48d 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1277,32 +1277,6 @@ static int collect_expired_timers(struct timer_base *base,
 	return levels;
 }
 
-/**
- * __run_timers - run all expired timers (if any) on this CPU.
- * @base: the timer vector to be processed.
- */
-static inline void __run_timers(struct timer_base *base)
-{
-	struct hlist_head heads[LVL_DEPTH];
-	int levels;
-
-	if (!time_after_eq(jiffies, base->clk))
-		return;
-
-	spin_lock_irq(&base->lock);
-
-	while (time_after_eq(jiffies, base->clk)) {
-
-		levels = collect_expired_timers(base, heads);
-		base->clk++;
-
-		while (levels--)
-			expire_timers(base, heads + levels);
-	}
-	base->running_timer = NULL;
-	spin_unlock_irq(&base->lock);
-}
-
 #ifdef CONFIG_NO_HZ_COMMON
 /*
  * Find the next pending bucket of a level. Search from @offset + @clk upwards
@@ -1472,6 +1446,32 @@ void update_process_times(int user_tick)
 	run_posix_cpu_timers(p);
 }
 
+/**
+ * __run_timers - run all expired timers (if any) on this CPU.
+ * @base: the timer vector to be processed.
+ */
+static inline void __run_timers(struct timer_base *base)
+{
+	struct hlist_head heads[LVL_DEPTH];
+	int levels;
+
+	if (!time_after_eq(jiffies, base->clk))
+		return;
+
+	spin_lock_irq(&base->lock);
+
+	while (time_after_eq(jiffies, base->clk)) {
+
+		levels = collect_expired_timers(base, heads);
+		base->clk++;
+
+		while (levels--)
+			expire_timers(base, heads + levels);
+	}
+	base->running_timer = NULL;
+	spin_unlock_irq(&base->lock);
+}
+
 /*
  * This function runs timers and the timer-tq in bottom half context.
  */

^ permalink raw reply related	[flat|nested] 60+ messages in thread

* [tip:timers/core] timers: Optimize collect_expired_timers() for NOHZ
  2016-07-04  9:50 ` [patch 4 17/22] timer: Optimize collect timers for NOHZ Thomas Gleixner
@ 2016-07-07  8:47   ` tip-bot for Anna-Maria Gleixner
  0 siblings, 0 replies; 60+ messages in thread
From: tip-bot for Anna-Maria Gleixner @ 2016-07-07  8:47 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: josh, arjan, tglx, fweisbec, linux, edumazet, lenb, hpa, mingo,
	paulmck, clm, linux-kernel, anna-maria, riel, torvalds, peterz

Commit-ID:  236968383cf5cd48835ff0d8a265e299e220d140
Gitweb:     http://git.kernel.org/tip/236968383cf5cd48835ff0d8a265e299e220d140
Author:     Anna-Maria Gleixner <anna-maria@linutronix.de>
AuthorDate: Mon, 4 Jul 2016 09:50:34 +0000
Committer:  Ingo Molnar <mingo@kernel.org>
CommitDate: Thu, 7 Jul 2016 10:35:10 +0200

timers: Optimize collect_expired_timers() for NOHZ

After a NOHZ idle sleep the timer wheel must be forwarded to current jiffies.
There might be expired timers so the current code loops and checks the expired
buckets for timers. This can take quite some time for long NOHZ idle periods.

The pending bitmask in the timer base allows us to do a quick search for the
next expiring timer and therefore a fast forward of the base time which
prevents pointless long lasting loops.

For a 3 seconds idle sleep this reduces the catchup time from ~1ms to 5us.
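
As a rough illustration (assuming HZ=1000 and the 9 level x 64 bucket
wheel geometry of this series): a 3 second sleep is ~3000 jiffies, so
the old code runs ~3000 iterations of "collect buckets, increment
base->clk", while the bitmap search inspects at most WHEEL_SIZE (576)
bits once to find the next pending bucket.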

Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Chris Mason <clm@fb.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: George Spelvin <linux@sciencehorizons.net>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Len Brown <lenb@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160704094342.351296290@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 kernel/time/timer.c | 49 +++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 41 insertions(+), 8 deletions(-)

diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index c16c48d..658051c 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1252,8 +1252,8 @@ static void expire_timers(struct timer_base *base, struct hlist_head *head)
 	}
 }
 
-static int collect_expired_timers(struct timer_base *base,
-				  struct hlist_head *heads)
+static int __collect_expired_timers(struct timer_base *base,
+				    struct hlist_head *heads)
 {
 	unsigned long clk = base->clk;
 	struct hlist_head *vec;
@@ -1279,9 +1279,9 @@ static int collect_expired_timers(struct timer_base *base,
 
 #ifdef CONFIG_NO_HZ_COMMON
 /*
- * Find the next pending bucket of a level. Search from @offset + @clk upwards
- * and if nothing there, search from start of the level (@offset) up to
- * @offset + clk.
+ * Find the next pending bucket of a level. Search from level start (@offset)
+ * + @clk upwards and if nothing there, search from start of the level
+ * (@offset) up to @offset + clk.
  */
 static int next_pending_bucket(struct timer_base *base, unsigned offset,
 			       unsigned clk)
@@ -1298,14 +1298,14 @@ static int next_pending_bucket(struct timer_base *base, unsigned offset,
 }
 
 /*
- * Search the first expiring timer in the various clock levels.
+ * Search the first expiring timer in the various clock levels. Caller must
+ * hold base->lock.
  */
 static unsigned long __next_timer_interrupt(struct timer_base *base)
 {
 	unsigned long clk, next, adj;
 	unsigned lvl, offset = 0;
 
-	spin_lock(&base->lock);
 	next = base->clk + NEXT_TIMER_MAX_DELTA;
 	clk = base->clk;
 	for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) {
@@ -1358,7 +1358,6 @@ static unsigned long __next_timer_interrupt(struct timer_base *base)
 		clk >>= LVL_CLK_SHIFT;
 		clk += adj;
 	}
-	spin_unlock(&base->lock);
 	return next;
 }
 
@@ -1416,7 +1415,10 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
 	if (cpu_is_offline(smp_processor_id()))
 		return expires;
 
+	spin_lock(&base->lock);
 	nextevt = __next_timer_interrupt(base);
+	spin_unlock(&base->lock);
+
 	if (time_before_eq(nextevt, basej))
 		expires = basem;
 	else
@@ -1424,6 +1426,37 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
 
 	return cmp_next_hrtimer_event(basem, expires);
 }
+
+static int collect_expired_timers(struct timer_base *base,
+				  struct hlist_head *heads)
+{
+	/*
+	 * NOHZ optimization. After a long idle sleep we need to forward the
+	 * base to current jiffies. Avoid a loop by searching the bitfield for
+	 * the next expiring timer.
+	 */
+	if ((long)(jiffies - base->clk) > 2) {
+		unsigned long next = __next_timer_interrupt(base);
+
+		/*
+		 * If the next timer is ahead of time forward to current
+		 * jiffies, otherwise forward to the next expiry time.
+		 */
+		if (time_after(next, jiffies)) {
+			/* The call site will increment clock! */
+			base->clk = jiffies - 1;
+			return 0;
+		}
+		base->clk = next;
+	}
+	return __collect_expired_timers(base, heads);
+}
+#else
+static inline int collect_expired_timers(struct timer_base *base,
+					 struct hlist_head *heads)
+{
+	return __collect_expired_timers(base, heads);
+}
 #endif
 
 /*

^ permalink raw reply related	[flat|nested] 60+ messages in thread

* [tip:timers/core] timers/nohz: Remove pointless tick_nohz_kick_tick() function
  2016-07-04  9:50 ` [patch 4 18/22] tick/sched: Remove pointless empty function Thomas Gleixner
@ 2016-07-07  8:47   ` tip-bot for Thomas Gleixner
  0 siblings, 0 replies; 60+ messages in thread
From: tip-bot for Thomas Gleixner @ 2016-07-07  8:47 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: mingo, tglx, paulmck, lenb, riel, torvalds, arjan, edumazet,
	peterz, linux, clm, josh, linux-kernel, hpa, fweisbec

Commit-ID:  ff00673292bd42a3688b33de47252a6a3c3f424c
Gitweb:     http://git.kernel.org/tip/ff00673292bd42a3688b33de47252a6a3c3f424c
Author:     Thomas Gleixner <tglx@linutronix.de>
AuthorDate: Mon, 4 Jul 2016 09:50:35 +0000
Committer:  Ingo Molnar <mingo@kernel.org>
CommitDate: Thu, 7 Jul 2016 10:35:10 +0200

timers/nohz: Remove pointless tick_nohz_kick_tick() function

This was a failed attempt to optimize the timer expiry in idle, which was
disabled and never revisited. Remove the cruft.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Chris Mason <clm@fb.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: George Spelvin <linux@sciencehorizons.net>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Len Brown <lenb@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160704094342.431073782@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 kernel/time/tick-sched.c | 33 +--------------------------------
 1 file changed, 1 insertion(+), 32 deletions(-)

diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 536ada8..69abc7b 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -1092,35 +1092,6 @@ static void tick_nohz_switch_to_nohz(void)
 	tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
 }
 
-/*
- * When NOHZ is enabled and the tick is stopped, we need to kick the
- * tick timer from irq_enter() so that the jiffies update is kept
- * alive during long running softirqs. That's ugly as hell, but
- * correctness is key even if we need to fix the offending softirq in
- * the first place.
- *
- * Note, this is different to tick_nohz_restart. We just kick the
- * timer and do not touch the other magic bits which need to be done
- * when idle is left.
- */
-static void tick_nohz_kick_tick(struct tick_sched *ts, ktime_t now)
-{
-#if 0
-	/* Switch back to 2.6.27 behaviour */
-	ktime_t delta;
-
-	/*
-	 * Do not touch the tick device, when the next expiry is either
-	 * already reached or less/equal than the tick period.
-	 */
-	delta =	ktime_sub(hrtimer_get_expires(&ts->sched_timer), now);
-	if (delta.tv64 <= tick_period.tv64)
-		return;
-
-	tick_nohz_restart(ts, now);
-#endif
-}
-
 static inline void tick_nohz_irq_enter(void)
 {
 	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
@@ -1131,10 +1102,8 @@ static inline void tick_nohz_irq_enter(void)
 	now = ktime_get();
 	if (ts->idle_active)
 		tick_nohz_stop_idle(ts, now);
-	if (ts->tick_stopped) {
+	if (ts->tick_stopped)
 		tick_nohz_update_jiffies(now);
-		tick_nohz_kick_tick(ts, now);
-	}
 }
 
 #else

^ permalink raw reply related	[flat|nested] 60+ messages in thread

* [tip:timers/core] timers: Forward the wheel clock whenever possible
  2016-07-04  9:50 ` [patch 4 19/22] timer: Forward wheel clock whenever possible Thomas Gleixner
@ 2016-07-07  8:48   ` tip-bot for Thomas Gleixner
  0 siblings, 0 replies; 60+ messages in thread
From: tip-bot for Thomas Gleixner @ 2016-07-07  8:48 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: josh, arjan, paulmck, tglx, riel, linux, torvalds, lenb, hpa,
	fweisbec, mingo, clm, linux-kernel, peterz, edumazet

Commit-ID:  a683f390b93f4d1292f849fc48d28e322046120f
Gitweb:     http://git.kernel.org/tip/a683f390b93f4d1292f849fc48d28e322046120f
Author:     Thomas Gleixner <tglx@linutronix.de>
AuthorDate: Mon, 4 Jul 2016 09:50:36 +0000
Committer:  Ingo Molnar <mingo@kernel.org>
CommitDate: Thu, 7 Jul 2016 10:35:11 +0200

timers: Forward the wheel clock whenever possible

The wheel clock is stale when a CPU goes into a long idle sleep. This has the
side effect that timers which are queued end up in the outer wheel levels.
That results in coarser granularity.

To solve this, we keep track of the idle state and forward the wheel clock
whenever possible.
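
For example (assuming HZ=1000 and the level boundaries of this series):
if base->clk is 20 seconds stale and a timer is queued 4ms out, the
computed delta is ~20004 jiffies, which selects a level with 512-jiffy
granularity instead of the 1-jiffy level the timer actually belongs in.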

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Chris Mason <clm@fb.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: George Spelvin <linux@sciencehorizons.net>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Len Brown <lenb@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160704094342.512039360@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 kernel/time/tick-internal.h |   1 +
 kernel/time/tick-sched.c    |  12 +++++
 kernel/time/timer.c         | 128 ++++++++++++++++++++++++++++++++++++--------
 3 files changed, 120 insertions(+), 21 deletions(-)

diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 966a5a6..f738251 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -164,3 +164,4 @@ static inline void timers_update_migration(bool update_nohz) { }
 DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
 
 extern u64 get_next_timer_interrupt(unsigned long basej, u64 basem);
+void timer_clear_idle(void);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 69abc7b..5d81f9a 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -700,6 +700,12 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 	delta = next_tick - basemono;
 	if (delta <= (u64)TICK_NSEC) {
 		tick.tv64 = 0;
+
+		/*
+		 * Tell the timer code that the base is not idle, i.e. undo
+		 * the effect of get_next_timer_interrupt():
+		 */
+		timer_clear_idle();
 		/*
 		 * We've not stopped the tick yet, and there's a timer in the
 		 * next period, so no point in stopping it either, bail.
@@ -809,6 +815,12 @@ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
 	tick_do_update_jiffies64(now);
 	cpu_load_update_nohz_stop();
 
+	/*
+	 * Clear the timer idle flag, so we avoid IPIs on remote queueing and
+	 * the clock forward checks in the enqueue path:
+	 */
+	timer_clear_idle();
+
 	calc_load_exit_idle();
 	touch_softlockup_watchdog_sched();
 	/*
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 658051c..9339d71 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -196,9 +196,11 @@ struct timer_base {
 	spinlock_t		lock;
 	struct timer_list	*running_timer;
 	unsigned long		clk;
+	unsigned long		next_expiry;
 	unsigned int		cpu;
 	bool			migration_enabled;
 	bool			nohz_active;
+	bool			is_idle;
 	DECLARE_BITMAP(pending_map, WHEEL_SIZE);
 	struct hlist_head	vectors[WHEEL_SIZE];
 } ____cacheline_aligned;
@@ -519,24 +521,37 @@ static void internal_add_timer(struct timer_base *base, struct timer_list *timer
 {
 	__internal_add_timer(base, timer);
 
+	if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active)
+		return;
+
 	/*
-	 * Check whether the other CPU is in dynticks mode and needs
-	 * to be triggered to reevaluate the timer wheel.  We are
-	 * protected against the other CPU fiddling with the timer by
-	 * holding the timer base lock. This also makes sure that a
-	 * CPU on the way to stop its tick can not evaluate the timer
-	 * wheel.
-	 *
-	 * Spare the IPI for deferrable timers on idle targets though.
-	 * The next busy ticks will take care of it. Except full dynticks
-	 * require special care against races with idle_cpu(), lets deal
-	 * with that later.
+	 * TODO: This wants some optimizing similar to the code below, but we
+	 * will do that when we switch from push to pull for deferrable timers.
 	 */
-	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active) {
-		if (!(timer->flags & TIMER_DEFERRABLE) ||
-		    tick_nohz_full_cpu(base->cpu))
+	if (timer->flags & TIMER_DEFERRABLE) {
+		if (tick_nohz_full_cpu(base->cpu))
 			wake_up_nohz_cpu(base->cpu);
+		return;
 	}
+
+	/*
+	 * We might have to IPI the remote CPU if the base is idle and the
+	 * timer is not deferrable. If the other CPU is on the way to idle
+	 * then it can't set base->is_idle as we hold the base lock:
+	 */
+	if (!base->is_idle)
+		return;
+
+	/* Check whether this is the new first expiring timer: */
+	if (time_after_eq(timer->expires, base->next_expiry))
+		return;
+
+	/*
+	 * Set the next expiry time and kick the CPU so it can reevaluate the
+	 * wheel:
+	 */
+	base->next_expiry = timer->expires;
+	wake_up_nohz_cpu(base->cpu);
 }
 
 #ifdef CONFIG_TIMER_STATS
@@ -844,10 +859,11 @@ static inline struct timer_base *get_timer_base(u32 tflags)
 	return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK);
 }
 
-static inline struct timer_base *get_target_base(struct timer_base *base,
-						 unsigned tflags)
+#ifdef CONFIG_NO_HZ_COMMON
+static inline struct timer_base *
+__get_target_base(struct timer_base *base, unsigned tflags)
 {
-#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
+#ifdef CONFIG_SMP
 	if ((tflags & TIMER_PINNED) || !base->migration_enabled)
 		return get_timer_this_cpu_base(tflags);
 	return get_timer_cpu_base(tflags, get_nohz_timer_target());
@@ -856,6 +872,43 @@ static inline struct timer_base *get_target_base(struct timer_base *base,
 #endif
 }
 
+static inline void forward_timer_base(struct timer_base *base)
+{
+	/*
+	 * We only forward the base when it's idle and we have a delta between
+	 * base clock and jiffies.
+	 */
+	if (!base->is_idle || (long) (jiffies - base->clk) < 2)
+		return;
+
+	/*
+	 * If the next expiry value is > jiffies, then we fast forward to
+	 * jiffies otherwise we forward to the next expiry value.
+	 */
+	if (time_after(base->next_expiry, jiffies))
+		base->clk = jiffies;
+	else
+		base->clk = base->next_expiry;
+}
+#else
+static inline struct timer_base *
+__get_target_base(struct timer_base *base, unsigned tflags)
+{
+	return get_timer_this_cpu_base(tflags);
+}
+
+static inline void forward_timer_base(struct timer_base *base) { }
+#endif
+
+static inline struct timer_base *
+get_target_base(struct timer_base *base, unsigned tflags)
+{
+	struct timer_base *target = __get_target_base(base, tflags);
+
+	forward_timer_base(target);
+	return target;
+}
+
 /*
  * We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means
  * that all timers which are tied to this base are locked, and the base itself
@@ -1417,16 +1470,49 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
 
 	spin_lock(&base->lock);
 	nextevt = __next_timer_interrupt(base);
-	spin_unlock(&base->lock);
+	base->next_expiry = nextevt;
+	/*
+	 * We have a fresh next event. Check whether we can forward the base:
+	 */
+	if (time_after(nextevt, jiffies))
+		base->clk = jiffies;
+	else if (time_after(nextevt, base->clk))
+		base->clk = nextevt;
 
-	if (time_before_eq(nextevt, basej))
+	if (time_before_eq(nextevt, basej)) {
 		expires = basem;
-	else
+		base->is_idle = false;
+	} else {
 		expires = basem + (nextevt - basej) * TICK_NSEC;
+		/*
+		 * If we expect to sleep more than a tick, mark the base idle:
+		 */
+		if ((expires - basem) > TICK_NSEC)
+			base->is_idle = true;
+	}
+	spin_unlock(&base->lock);
 
 	return cmp_next_hrtimer_event(basem, expires);
 }
 
+/**
+ * timer_clear_idle - Clear the idle state of the timer base
+ *
+ * Called with interrupts disabled
+ */
+void timer_clear_idle(void)
+{
+	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
+
+	/*
+	 * We do this unlocked. The worst outcome is a remote enqueue sending
+	 * a pointless IPI, but taking the lock would just make the window for
+	 * sending the IPI a few instructions smaller for the cost of taking
+	 * the lock in the exit from idle path.
+	 */
+	base->is_idle = false;
+}
+
 static int collect_expired_timers(struct timer_base *base,
 				  struct hlist_head *heads)
 {
@@ -1440,7 +1526,7 @@ static int collect_expired_timers(struct timer_base *base,
 
 		/*
 		 * If the next timer is ahead of time forward to current
-		 * jiffies, otherwise forward to the next expiry time.
+		 * jiffies, otherwise forward to the next expiry time:
 		 */
 		if (time_after(next, jiffies)) {
 			/* The call site will increment clock! */

^ permalink raw reply related	[flat|nested] 60+ messages in thread

* [tip:timers/core] timers: Only wake softirq if necessary
  2016-07-04  9:50 ` [patch 4 20/22] timer: Only wake softirq if necessary Thomas Gleixner
@ 2016-07-07  8:48   ` tip-bot for Thomas Gleixner
  0 siblings, 0 replies; 60+ messages in thread
From: tip-bot for Thomas Gleixner @ 2016-07-07  8:48 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: torvalds, riel, hpa, peterz, mingo, paulmck, linux, clm, lenb,
	tglx, josh, linux-kernel, arjan, fweisbec

Commit-ID:  4e85876a9d2a977b4a07389da8c07edf76d10825
Gitweb:     http://git.kernel.org/tip/4e85876a9d2a977b4a07389da8c07edf76d10825
Author:     Thomas Gleixner <tglx@linutronix.de>
AuthorDate: Mon, 4 Jul 2016 09:50:37 +0000
Committer:  Ingo Molnar <mingo@kernel.org>
CommitDate: Thu, 7 Jul 2016 10:35:11 +0200

timers: Only wake softirq if necessary

With the wheel forwarding in place and with the HZ=1000 4ms folding we can
avoid running the softirq at all.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Chris Mason <clm@fb.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: George Spelvin <linux@sciencehorizons.net>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Len Brown <lenb@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160704094342.607650550@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 kernel/time/timer.c | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 9339d71..8d830f1 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1608,7 +1608,18 @@ static void run_timer_softirq(struct softirq_action *h)
  */
 void run_local_timers(void)
 {
+	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
+
 	hrtimer_run_queues();
+	/* Raise the softirq only if required. */
+	if (time_before(jiffies, base->clk)) {
+		if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active)
+			return;
+		/* CPU is awake, so check the deferrable base. */
+		base++;
+		if (time_before(jiffies, base->clk))
+			return;
+	}
 	raise_softirq(TIMER_SOFTIRQ);
 }
 

^ permalink raw reply related	[flat|nested] 60+ messages in thread

* [tip:timers/core] timers: Split out index calculation
  2016-07-04  9:50 ` [patch 4 21/22] timer: Split out index calculation Thomas Gleixner
@ 2016-07-07  8:48   ` tip-bot for Anna-Maria Gleixner
  0 siblings, 0 replies; 60+ messages in thread
From: tip-bot for Anna-Maria Gleixner @ 2016-07-07  8:48 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: paulmck, fweisbec, hpa, mingo, arjan, peterz, linux-kernel, lenb,
	clm, tglx, torvalds, edumazet, riel, josh, linux, anna-maria

Commit-ID:  ffdf047728f8f93df896b58049c7513856027141
Gitweb:     http://git.kernel.org/tip/ffdf047728f8f93df896b58049c7513856027141
Author:     Anna-Maria Gleixner <anna-maria@linutronix.de>
AuthorDate: Mon, 4 Jul 2016 09:50:39 +0000
Committer:  Ingo Molnar <mingo@kernel.org>
CommitDate: Thu, 7 Jul 2016 10:35:12 +0200

timers: Split out index calculation

For further optimizations we need to separate index calculation
from queueing. No functional change.

Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Chris Mason <clm@fb.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: George Spelvin <linux@sciencehorizons.net>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Len Brown <lenb@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160704094342.691159619@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 kernel/time/timer.c | 47 ++++++++++++++++++++++++++++++++---------------
 1 file changed, 32 insertions(+), 15 deletions(-)

diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 8d830f1..8d7c23e 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -471,12 +471,9 @@ static inline unsigned calc_index(unsigned expires, unsigned lvl)
 	return LVL_OFFS(lvl) + (expires & LVL_MASK);
 }
 
-static void
-__internal_add_timer(struct timer_base *base, struct timer_list *timer)
+static int calc_wheel_index(unsigned long expires, unsigned long clk)
 {
-	unsigned long expires = timer->expires;
-	unsigned long delta = expires - base->clk;
-	struct hlist_head *vec;
+	unsigned long delta = expires - clk;
 	unsigned int idx;
 
 	if (delta < LVL_START(1)) {
@@ -496,7 +493,7 @@ __internal_add_timer(struct timer_base *base, struct timer_list *timer)
 	} else if (LVL_DEPTH > 8 && delta < LVL_START(8)) {
 		idx = calc_index(expires, 7);
 	} else if ((long) delta < 0) {
-		idx = base->clk & LVL_MASK;
+		idx = clk & LVL_MASK;
 	} else {
 		/*
 		 * Force expire obscene large timeouts to expire at the
@@ -507,20 +504,33 @@ __internal_add_timer(struct timer_base *base, struct timer_list *timer)
 
 		idx = calc_index(expires, LVL_DEPTH - 1);
 	}
-	/*
-	 * Enqueue the timer into the array bucket, mark it pending in
-	 * the bitmap and store the index in the timer flags.
-	 */
-	vec = base->vectors + idx;
-	hlist_add_head(&timer->entry, vec);
+	return idx;
+}
+
+/*
+ * Enqueue the timer into the hash bucket, mark it pending in
+ * the bitmap and store the index in the timer flags.
+ */
+static void enqueue_timer(struct timer_base *base, struct timer_list *timer,
+			  unsigned int idx)
+{
+	hlist_add_head(&timer->entry, base->vectors + idx);
 	__set_bit(idx, base->pending_map);
 	timer_set_idx(timer, idx);
 }
 
-static void internal_add_timer(struct timer_base *base, struct timer_list *timer)
+static void
+__internal_add_timer(struct timer_base *base, struct timer_list *timer)
 {
-	__internal_add_timer(base, timer);
+	unsigned int idx;
+
+	idx = calc_wheel_index(timer->expires, base->clk);
+	enqueue_timer(base, timer, idx);
+}
 
+static void
+trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
+{
 	if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active)
 		return;
 
@@ -551,7 +561,14 @@ static void internal_add_timer(struct timer_base *base, struct timer_list *timer
 	 * wheel:
 	 */
 	base->next_expiry = timer->expires;
-	wake_up_nohz_cpu(base->cpu);
+		wake_up_nohz_cpu(base->cpu);
+}
+
+static void
+internal_add_timer(struct timer_base *base, struct timer_list *timer)
+{
+	__internal_add_timer(base, timer);
+	trigger_dyntick_cpu(base, timer);
 }
 
 #ifdef CONFIG_TIMER_STATS

^ permalink raw reply related	[flat|nested] 60+ messages in thread

* [tip:timers/core] timers: Implement optimization for same expiry time in mod_timer()
  2016-07-04  9:50 ` [patch 4 22/22] timer: Optimization for same expiry time in mod_timer() Thomas Gleixner
@ 2016-07-07  8:49   ` tip-bot for Anna-Maria Gleixner
  0 siblings, 0 replies; 60+ messages in thread
From: tip-bot for Anna-Maria Gleixner @ 2016-07-07  8:49 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: linux, lenb, edumazet, linux-kernel, anna-maria, mingo, paulmck,
	arjan, clm, josh, peterz, hpa, riel, torvalds, tglx, fweisbec

Commit-ID:  f00c0afdfa625165a609513bc74164d56752ec3e
Gitweb:     http://git.kernel.org/tip/f00c0afdfa625165a609513bc74164d56752ec3e
Author:     Anna-Maria Gleixner <anna-maria@linutronix.de>
AuthorDate: Mon, 4 Jul 2016 09:50:40 +0000
Committer:  Ingo Molnar <mingo@kernel.org>
CommitDate: Thu, 7 Jul 2016 10:35:12 +0200

timers: Implement optimization for same expiry time in mod_timer()

The existing optimization for same expiry time in mod_timer() checks whether
the timer expiry time is the same as the new requested expiry time. In the old
timer wheel implementation this did not take the slack batching into account,
nor does the new implementation evaluate whether the new expiry time will
requeue the timer to the same bucket.

To optimize that, we can calculate the resulting bucket and check if the new
expiry time is different from the current expiry time. This calculation
happens outside the base lock held region. If the resulting bucket is the same
we can avoid taking the base lock and requeueing the timer.

If the timer needs to be requeued then we have to check under the base lock
whether the base time has changed between the lockless calculation and taking
the lock. If it has changed we need to recalculate under the lock.

This optimization takes effect for timers which are enqueued into the less
granular wheel levels (1 and above). With a simple test case the functionality
has been verified:

            Before        After
 Match:       5.5%        86.6%
 Requeue:    94.5%        13.4%
 Recalc:                  <0.01%

In the non optimized case the timer is requeued in 94.5% of the cases. With
the index optimization in place the requeue rate drops to 13.4%. The case
where the lockless index calculation has to be redone is less than 0.01%.

With a real world test case (networking) we observed the following changes:

            Before        After
 Match:      97.8%        99.7%
 Requeue:     2.2%         0.3%
 Recalc:                  <0.001%

That means two percent fewer lock/requeue/unlock operations done in one of
the hot path use cases of timers.
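
As a rough illustration (assuming HZ=1000): a networking timeout that is
re-armed every few milliseconds to "jiffies + 5 * HZ" maps to a wheel
level with 512-jiffy wide buckets, so successive expiry values stay in
the same bucket for about half a second at a time and only the
occasional re-arm that crosses a bucket boundary still takes the lock
and requeues the timer.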

Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Chris Mason <clm@fb.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: George Spelvin <linux@sciencehorizons.net>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Len Brown <lenb@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160704094342.778527749@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 kernel/time/timer.c | 51 +++++++++++++++++++++++++++++++++++----------------
 1 file changed, 35 insertions(+), 16 deletions(-)

diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 8d7c23e..8f29abe 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -960,28 +960,36 @@ static inline int
 __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
 {
 	struct timer_base *base, *new_base;
-	unsigned long flags;
+	unsigned int idx = UINT_MAX;
+	unsigned long clk = 0, flags;
 	int ret = 0;
 
 	/*
-	 * TODO: Calculate the array bucket of the timer right here w/o
-	 * holding the base lock. This allows to check not only
-	 * timer->expires == expires below, but also whether the timer
-	 * ends up in the same bucket. If we really need to requeue
-	 * the timer then we check whether base->clk have
-	 * advanced between here and locking the timer base. If
-	 * jiffies advanced we have to recalc the array bucket with the
-	 * lock held.
-	 */
-
-	/*
-	 * This is a common optimization triggered by the
-	 * networking code - if the timer is re-modified
-	 * to be the same thing then just return:
+	 * This is a common optimization triggered by the networking code - if
+	 * the timer is re-modified to have the same timeout or ends up in the
+	 * same array bucket then just return:
 	 */
 	if (timer_pending(timer)) {
 		if (timer->expires == expires)
 			return 1;
+		/*
+		 * Take the current timer_jiffies of base, but without holding
+		 * the lock!
+		 */
+		base = get_timer_base(timer->flags);
+		clk = base->clk;
+
+		idx = calc_wheel_index(expires, clk);
+
+		/*
+		 * Retrieve and compare the array index of the pending
+		 * timer. If it matches set the expiry to the new value so a
+		 * subsequent call will exit in the expires check above.
+		 */
+		if (idx == timer_get_idx(timer)) {
+			timer->expires = expires;
+			return 1;
+		}
 	}
 
 	timer_stats_timer_set_start_info(timer);
@@ -1018,7 +1026,18 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
 	}
 
 	timer->expires = expires;
-	internal_add_timer(base, timer);
+	/*
+	 * If 'idx' was calculated above and the base time did not advance
+	 * between calculating 'idx' and taking the lock, only enqueue_timer()
+	 * and trigger_dyntick_cpu() is required. Otherwise we need to
+	 * (re)calculate the wheel index via internal_add_timer().
+	 */
+	if (idx != UINT_MAX && clk == base->clk) {
+		enqueue_timer(base, timer, idx);
+		trigger_dyntick_cpu(base, timer);
+	} else {
+		internal_add_timer(base, timer);
+	}
 
 out_unlock:
 	spin_unlock_irqrestore(&base->lock, flags);

^ permalink raw reply related	[flat|nested] 60+ messages in thread

* Re: [patch 4 15/22] timer: Remove slack leftovers
  2016-07-04  9:50 ` [patch 4 15/22] timer: Remove slack leftovers Thomas Gleixner
  2016-07-07  8:46   ` [tip:timers/core] timers: Remove set_timer_slack() leftovers tip-bot for Thomas Gleixner
@ 2016-07-22 11:31   ` Jason A. Donenfeld
  2016-07-22 13:04     ` Thomas Gleixner
  1 sibling, 1 reply; 60+ messages in thread
From: Jason A. Donenfeld @ 2016-07-22 11:31 UTC (permalink / raw)
  To: Thomas Gleixner
  Cc: LKML, Ingo Molnar, Peter Zijlstra, Paul McKenney,
	Frederic Weisbecker, Chris Mason, Arjan van de Ven, rt,
	Rik van Riel, George Spelvin, Len Brown, Josh Triplett,
	Eric Dumazet

Hi Thomas,

Thomas Gleixner <tglx@linutronix.de> writes:
> We now have implicit batching in the timer wheel. The slack is no longer
> used. Remove it.
> - set_timer_slack(&ev->dwork.timer, intv / 4);
> - set_timer_slack(&host->timeout_timer, HZ);
> - set_timer_slack(&di->work.timer, poll_interval * HZ / 4);
> - set_timer_slack(&ohci->io_watchdog, msecs_to_jiffies(20));
> - set_timer_slack(&xhci->comp_mode_recovery_timer,
> etc...

The current mod_timer implementation has this code:

        /*
         * This is a common optimization triggered by the
         * networking code - if the timer is re-modified
         * to be the same thing then just return:
         */
        if (timer_pending(timer) && timer->expires == expires)
                return 1;

In a (currently out-of-tree, but hopefully mergeable soon) driver of
mine, I call mod_timer in the hot path. Every time some very frequent
event happens, I call mod_timer to move the timer back out to
"jiffies + TIMEOUT".

From a brief look at timer.c, it looked like __mod_timer was rather
expensive. So, as an optimization, I wanted the "timer_pending(timer)
&& timer->expires == expires" condition to be hit in most of the
cases. I accomplished this by doing:

    set_timer_slack(timer, HZ / 4);

This ensured that we'd only wind up calling __mod_timer 4 times per
second, at most.
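
A minimal sketch of that pattern (names are hypothetical; the old
mod_timer() applied the slack before the expires comparison, so the
rounded value usually matched the stored one):

    setup_timer(&peer->timer, my_expire_fn, (unsigned long)peer);
    set_timer_slack(&peer->timer, HZ / 4);

    /* hot path, called for every frequent event: */
    mod_timer(&peer->timer, jiffies + MY_TIMEOUT);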

With the removal of the slack concept, I no longer can do this. I
haven't reviewed this series in depth, but I'm wondering if you'd
recommend a different optimization instead. Or, have things been
reworked so much that calling mod_timer is now always inexpensive?

Thanks,
Jason

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [patch 4 15/22] timer: Remove slack leftovers
  2016-07-22 11:31   ` [patch 4 15/22] timer: Remove slack leftovers Jason A. Donenfeld
@ 2016-07-22 13:04     ` Thomas Gleixner
  2016-07-22 15:18       ` Jason A. Donenfeld
  0 siblings, 1 reply; 60+ messages in thread
From: Thomas Gleixner @ 2016-07-22 13:04 UTC (permalink / raw)
  To: Jason A. Donenfeld
  Cc: LKML, Ingo Molnar, Peter Zijlstra, Paul McKenney,
	Frederic Weisbecker, Chris Mason, Arjan van de Ven, rt,
	Rik van Riel, George Spelvin, Len Brown, Josh Triplett,
	Eric Dumazet

On Fri, 22 Jul 2016, Jason A. Donenfeld wrote:
> Thomas Gleixner <tglx@linutronix.de> writes:
> > We now have implicit batching in the timer wheel. The slack is no longer
> > used. Remove it.
> From a brief look at timer.c, it looked like __mod_timer was rather
> expensive. So, as an optimization, I wanted the "timer_pending(timer)
> && timer->expires == expires" condition to be hit in most of the
> cases. I accomplished this by doing:
> 
>     set_timer_slack(timer, HZ / 4);
> 
> This ensured that we'd only wind up calling __mod_timer 4 times per
> second, at most.
>
> With the removal of the slack concept, I no longer can do this. I
> haven't reviewed this series in depth, but I'm wondering if you'd
> recommend a different optimization instead. Or, have things been

Well, this really depends on the TIMEOUT value you have. The code now does
implicit batching for larger timeouts by queueing the timers into wheels with
coarse grained granularity. As long as your new TIMEOUT value ends up in the
same bucket then that's equivalent to the slack thing.
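
A minimal sketch of the level selection (mirroring the
"delta < LVL_START(n)" cascade in calc_wheel_index(), assuming HZ=1000
and LVL_START(n) = 63 << (3 * (n - 1)) as in this series):

    static unsigned int level_for(unsigned long delta)
    {
            unsigned int lvl;

            for (lvl = 0; lvl < 8; lvl++)
                    if (delta < (63UL << (3 * lvl)))
                            return lvl;
            return 8;       /* clamped to the outermost level */
    }

Two expiry values end up in the same bucket when they map to the same
level and index; the bucket width at level n is 1 << (3 * n) jiffies.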

Can you give me a ballpark of your TIMEOUT value?

> reworked so much that calling mod_timer is now always inexpensive?

When you take the slow (queueing) path, it's still expensive, not as bad as
the previous one, but not really cheap either.

Thanks,

	tglx

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [patch 4 15/22] timer: Remove slack leftovers
  2016-07-22 13:04     ` Thomas Gleixner
@ 2016-07-22 15:18       ` Jason A. Donenfeld
  2016-07-22 22:54         ` Jason A. Donenfeld
  0 siblings, 1 reply; 60+ messages in thread
From: Jason A. Donenfeld @ 2016-07-22 15:18 UTC (permalink / raw)
  To: Thomas Gleixner
  Cc: LKML, Ingo Molnar, Peter Zijlstra, Paul McKenney,
	Frederic Weisbecker, Chris Mason, Arjan van de Ven, rt,
	Rik van Riel, George Spelvin, Len Brown, Josh Triplett,
	Eric Dumazet

Hi Thomas,

On Fri, Jul 22, 2016 at 3:04 PM, Thomas Gleixner <tglx@linutronix.de> wrote:
>
> Well, this really depends on the TIMEOUT value you have. The code now does
> implicit batching for larger timeouts by queueing the timers into wheels with
> coarse grained granularity. As long as your new TIMEOUT value ends up in the
> same bucket then that's equivalent to the slack thing.
>
> Can you give me a ballpark of your TIMEOUT value?

Generally either 5 seconds, 10 seconds, or 25 seconds.

Are these okay? Is the 25 case substantially different from the 5 case?


> When you take the slow (queueing) path, it's still expensive, not as bad as
> the previous one, but not really cheap either.

Hmm, okay.

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [patch 4 15/22] timer: Remove slack leftovers
  2016-07-22 15:18       ` Jason A. Donenfeld
@ 2016-07-22 22:54         ` Jason A. Donenfeld
  0 siblings, 0 replies; 60+ messages in thread
From: Jason A. Donenfeld @ 2016-07-22 22:54 UTC (permalink / raw)
  To: Thomas Gleixner
  Cc: LKML, Ingo Molnar, Peter Zijlstra, Paul McKenney,
	Frederic Weisbecker, Chris Mason, Arjan van de Ven, rt,
	Rik van Riel, George Spelvin, Len Brown, Josh Triplett,
	Eric Dumazet

On Fri, Jul 22, 2016 at 5:18 PM, Jason A. Donenfeld <Jason@zx2c4.com> wrote:
> Hi Thomas,
>
> On Fri, Jul 22, 2016 at 3:04 PM, Thomas Gleixner <tglx@linutronix.de> wrote:
>>
>> Well, this really depends on the TIMEOUT value you have. The code now does
>> implicit batching for larger timeouts by queueing the timers into wheels with
>> coarse grained granularity. As long as your new TIMEOUT value ends up in the
>> same bucket then that's equivalent to the slack thing.
>>
>> Can you give me a ballpark of your TIMEOUT value?
>
> Generally either 5 seconds, 10 seconds, or 25 seconds.
>
> Are these okay? Is the 25 case substantially different from the 5 case?

I suppose, anyway, it's easy enough just to provide my own coalescing
function. Actually, I'd rather have it batch timers to fire earlier
rather than later, in my case. So, I can do something like this to
round down to the nearest ~quarter of a second:

    static inline unsigned long slack_time(unsigned long time)
    {
       return time & ~(BIT_MASK(ilog2(HZ / 4) + 1) - 1);
    }

    mod_timer(&timer, slack_time(jiffies + TIMEOUT));

This seems to do roughly what I want.
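
(For HZ=1000 that works out as ilog2(250) == 7, so the mask clears the
low 8 bits and expiry values round down to 256-jiffy, i.e. roughly
quarter-second, steps.)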

Jason

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [patch 4 14/22] timer: Switch to a non cascading wheel
  2016-07-04  9:50 ` [patch 4 14/22] timer: Switch to a non cascading wheel Thomas Gleixner
  2016-07-07  8:45   ` [tip:timers/core] timers: Switch to a non-cascading wheel tip-bot for Thomas Gleixner
@ 2016-08-11 15:21   ` Jouni Malinen
  2016-08-11 20:25     ` [PREEMPT-RT] " rcochran
  2016-08-12 17:50     ` Rik van Riel
  1 sibling, 2 replies; 60+ messages in thread
From: Jouni Malinen @ 2016-08-11 15:21 UTC (permalink / raw)
  To: Thomas Gleixner
  Cc: LKML, Ingo Molnar, Peter Zijlstra, Paul McKenney,
	Frederic Weisbecker, Chris Mason, Arjan van de Ven, rt,
	Rik van Riel, George Spelvin, Len Brown, Josh Triplett,
	Eric Dumazet

On Mon, Jul 4, 2016 at 12:50 PM, Thomas Gleixner <tglx@linutronix.de> wrote:
> The current timer wheel has some drawbacks:
...

It looks like this change (commit
500462a9de657f86edaa102f8ab6bff7f7e43fc2 in linux.git) breaks one of
the automated test cases I'm using to test hostapd and wpa_supplicant
with mac80211_hwsim from the kernel. I'm not sure what exactly causes
this (did not really expect git bisect to point to timers..), but this
seems to be very reproducible for me under kvm (though, this
apparently did not happen on another device, so I'm not completely
sure what it is needed to reproduce) with the ap_wps_er_http_proto
test cases failing to connect 20 TCP stream sockets to a server on the
localhost. The client side is a python test script and the server is
hostapd. The failure shows up with about the 13th of those socket
connects failing while all others (both before and after this failed
one) going through.

Would you happen to have any idea why this commit has such a
difference in behavior? I'm currently working around this in my test
script with the following change, but it might be worthwhile to
confirm whether there is something in the kernel change that resulted
in unexpected behavior.

http://w1.fi/cgit/hostap/commit/?id=2d6a526ac3885605f34df4037fc79ad330565b23

The test code looked like this in python:

    addr = (url.hostname, url.port)
    socks = {}
    for i in range(20):
        socks[i] = socket.socket(socket.AF_INET, socket.SOCK_STREAM,
                                 socket.IPPROTO_TCP)
        socks[i].connect(addr)

With that connect() call being the failing (time out) operation and it
seemed to happen for i == 13 most of the time. This shows up only with
commit 500462a9de657f86edaa102f8ab6bff7f7e43fc2 included in the kernel
(i.e., test with commit b0d6e2dcb284f1f4dcb4b92760f49eeaf5fc0bc7 as
the kernel snapshot does not show this behavior). Changes in 500462a9
were not trivial to revert on top of the current master, so I have not
checked whether the current master branch would get rid of the failure
if only this one commit were reverted.

I can reproduce this easily, so if someone wants to get more details
of the issue, just let me know how to collect whatever would be
useful.

- Jouni

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [PREEMPT-RT] [patch 4 14/22] timer: Switch to a non cascading wheel
  2016-08-11 15:21   ` [patch 4 14/22] timer: Switch to a non cascading wheel Jouni Malinen
@ 2016-08-11 20:25     ` rcochran
  2016-08-13  9:12       ` Jouni Malinen
  2016-08-12 17:50     ` Rik van Riel
  1 sibling, 1 reply; 60+ messages in thread
From: rcochran @ 2016-08-11 20:25 UTC (permalink / raw)
  To: Jouni Malinen
  Cc: Thomas Gleixner, Rik van Riel, Len Brown, Peter Zijlstra,
	Frederic Weisbecker, LKML, George Spelvin, Josh Triplett,
	Chris Mason, Eric Dumazet, rt, Paul McKenney, Ingo Molnar,
	Arjan van de Ven

On Thu, Aug 11, 2016 at 06:21:26PM +0300, Jouni Malinen wrote:
> The test code looked like this in python:
> 
>     addr = (url.hostname, url.port)
>     socks = {}
>     for i in range(20):
>         socks[i] = socket.socket(socket.AF_INET, socket.SOCK_STREAM,
>                                  socket.IPPROTO_TCP)
>         socks[i].connect(addr)

You're getting a timeout on TCP connect()?  Isn't that timeout really
long, like 75 seconds or something?
 
> I can reproduce this easily, so if someone wants to get more details
> of the issue, just let me know how to collect whatever would be
> useful.

Can you provide a simple test case or explain in more detail how you
run your test?  I would like to reproduce the issue here.

Thanks,
Richard

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [patch 4 14/22] timer: Switch to a non cascading wheel
  2016-08-11 15:21   ` [patch 4 14/22] timer: Switch to a non cascading wheel Jouni Malinen
  2016-08-11 20:25     ` [PREEMPT-RT] " rcochran
@ 2016-08-12 17:50     ` Rik van Riel
  2016-08-12 19:14       ` Paul E. McKenney
  2016-08-16  7:57       ` Richard Cochran
  1 sibling, 2 replies; 60+ messages in thread
From: Rik van Riel @ 2016-08-12 17:50 UTC (permalink / raw)
  To: Jouni Malinen, Thomas Gleixner
  Cc: LKML, Ingo Molnar, Peter Zijlstra, Paul McKenney,
	Frederic Weisbecker, Chris Mason, Arjan van de Ven, rt,
	George Spelvin, Len Brown, Josh Triplett, Eric Dumazet

[-- Attachment #1: Type: text/plain, Size: 1647 bytes --]

On Thu, 2016-08-11 at 18:21 +0300, Jouni Malinen wrote:
> On Mon, Jul 4, 2016 at 12:50 PM, Thomas Gleixner <tglx@linutronix.de>
> wrote:
> > The current timer wheel has some drawbacks:
> ...
> 
> It looks like this change (commit
> 500462a9de657f86edaa102f8ab6bff7f7e43fc2 in linux.git) breaks one of
> the automated test cases I'm using to test hostapd and wpa_supplicant
> with mac80211_hwsim from the kernel. I'm not sure what exactly causes
> this (did not really expect git bisect to point to timers..), but
> this
> seems to be very reproducible for me under kvm (though, this
> apparently did not happen on another device, so I'm not completely
> sure what it is needed to reproduce) with the ap_wps_er_http_proto
> test cases failing to connect 20 TCP stream sockets to a server on
> the
> localhost. The client side is a python test script and the server is
> hostapd. The failure shows up with about the 13th of those socket
> connects failing while all others (both before and after this failed
> one) going through.
> 
> Would you happen to have any idea why this commit has such a
> difference in behavior? 

I have a vague hypothesis, more of a question actually.

How does the new timer wheel code handle lost timer ticks?

If a KVM guest does not run for a while, because the host
is scheduling something else, the guest generally only gets
one timer tick after the guest is scheduled back in.

If there are multiple lost ticks, they will remain lost.

Could that cause the new timer wheel code to skip over
timer buckets occasionally, or is this hypothesis bunk?

-- 

All Rights Reversed.

[-- Attachment #2: This is a digitally signed message part --]
[-- Type: application/pgp-signature, Size: 473 bytes --]

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [patch 4 14/22] timer: Switch to a non cascading wheel
  2016-08-12 17:50     ` Rik van Riel
@ 2016-08-12 19:14       ` Paul E. McKenney
  2016-08-16  8:55         ` Richard Cochran
  2016-08-16  7:57       ` Richard Cochran
  1 sibling, 1 reply; 60+ messages in thread
From: Paul E. McKenney @ 2016-08-12 19:14 UTC (permalink / raw)
  To: Rik van Riel
  Cc: Jouni Malinen, Thomas Gleixner, LKML, Ingo Molnar,
	Peter Zijlstra, Frederic Weisbecker, Chris Mason,
	Arjan van de Ven, rt, George Spelvin, Len Brown, Josh Triplett,
	Eric Dumazet

On Fri, Aug 12, 2016 at 01:50:16PM -0400, Rik van Riel wrote:
> On Thu, 2016-08-11 at 18:21 +0300, Jouni Malinen wrote:
> > On Mon, Jul 4, 2016 at 12:50 PM, Thomas Gleixner <tglx@linutronix.de>
> > wrote:
> > > The current timer wheel has some drawbacks:
> > ...
> > 
> > It looks like this change (commit
> > 500462a9de657f86edaa102f8ab6bff7f7e43fc2 in linux.git) breaks one of
> > the automated test cases I'm using to test hostapd and wpa_supplicant
> > with mac80211_hwsim from the kernel. I'm not sure what exactly causes
> > this (did not really expect git bisect to point to timers..), but
> > this
> > seems to be very reproducible for me under kvm (though, this
> > apparently did not happen on another device, so I'm not completely
> > sure what it is needed to reproduce) with the ap_wps_er_http_proto
> > test cases failing to connect 20 TCP stream sockets to a server on
> > the
> > localhost. The client side is a python test script and the server is
> > hostapd. The failure shows up with about the 13th of those socket
> > connects failing while all others (both before and after this failed
> > one) going through.
> > 
> > Would you happen to have any idea why this commit has such a
> > difference in behavior? 
> 
> I have a vague hypothesis, more of a question actually.
> 
> How does the new timer wheel code handle lost timer ticks?
> 
> If a KVM guest does not run for a while, because the host
> is scheduling something else, the guest generally only gets
> one timer tick after the guest is scheduled back in.
> 
> If there are multiple lost ticks, they will remain lost.
> 
> Could that cause the new timer wheel code to skip over
> timer buckets occasionally, or is this hypothesis bunk?

FWIW, I do appear to be seeing more lost wakeups on current mainline
than on v4.7, but not enough of a difference to get a reliable
bisection in reasonable time.

							Thanx, Paul

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [PREEMPT-RT] [patch 4 14/22] timer: Switch to a non cascading wheel
  2016-08-11 20:25     ` [PREEMPT-RT] " rcochran
@ 2016-08-13  9:12       ` Jouni Malinen
  2016-08-16  9:46         ` Richard Cochran
  0 siblings, 1 reply; 60+ messages in thread
From: Jouni Malinen @ 2016-08-13  9:12 UTC (permalink / raw)
  To: rcochran
  Cc: Thomas Gleixner, Rik van Riel, Len Brown, Peter Zijlstra,
	Frederic Weisbecker, LKML, George Spelvin, Josh Triplett,
	Chris Mason, Eric Dumazet, rt, Paul McKenney, Ingo Molnar,
	Arjan van de Ven, j

On Thu, Aug 11, 2016 at 11:25 PM,  <rcochran@linutronix.de> wrote:
> On Thu, Aug 11, 2016 at 06:21:26PM +0300, Jouni Malinen wrote:
>> The test code looked like this in python:
>>
>>     addr = (url.hostname, url.port)
>>     socks = {}
>>     for i in range(20):
>>         socks[i] = socket.socket(socket.AF_INET, socket.SOCK_STREAM,
>>                                  socket.IPPROTO_TCP)
>>         socks[i].connect(addr)
>
> Are you getting a timeout on TCP connect()?  Isn't that timeout really
> long, like 75 seconds or something?

Yes, it looks like a TCP connect() timeout. I use a significantly
reduced timeout in the test scripts since they are run unattended and
are supposed to terminate in a reasonable amount of time. That said,
this specific test case should not really have used as short a timeout
as it did, i.e., just one second. Interestingly enough, increasing
that to just 1.1 seconds was enough to make the test case pass.
Looking at the time it takes to execute connect(), it is 1.02 - 1.08
seconds for the timing-out case (with timeout=1 sec), which is now at
i=14, while all the other 19 calls take 0.0 seconds.

If I increase that 20 to 50, I get more of those roughly 1.03 second
results, at i=17, i=34, i=48.

Looking more at what exactly is happening at the TCP layer, this is
likely related to the server behavior: the listen() backlog is set to
10, and if there are 10 parallel connections, the last one is
immediately closed before reading anything. Somehow this combination,
with this kernel patch applied, makes one of the connect() calls take
that surprisingly long 1.02 or so seconds.

Looking at a sniffer capture (*), the three-way TCP handshake goes
through fine for the first 14 connect() calls, but the 15th one does
not get a response to its SYN. This SYN is frame 47 in the capture
file, with srcport == 60802. There is no SYN,ACK for it. The roughly
one second of unexpected connect() time comes from this, i.e., the
connection is completed only after the client side retransmits the
SYN (frame #77) a second later and the server side replies with
RST,ACK (frame #78).

  2   0.039135    127.0.0.1 -> 127.0.0.1    TCP 74 60772 > 49152 [SYN] Seq=0 Win=43690 Len=0 MSS=65495 SACK_PERM=1 TSval=4294937755 TSecr=0 WS=64
  3   0.039146    127.0.0.1 -> 127.0.0.1    TCP 74 49152 > 60772 [SYN, ACK] Seq=0 Ack=1 Win=43690 Len=0 MSS=65495 SACK_PERM=1 TSval=4294937755 TSecr=4294937755 WS=64
  4   0.039156    127.0.0.1 -> 127.0.0.1    TCP 66 60772 > 49152 [ACK] Seq=1 Ack=1 Win=43712 Len=0 TSval=4294937755 TSecr=4294937755
...
 47   0.042559    127.0.0.1 -> 127.0.0.1    TCP 74 60802 > 49152 [SYN] Seq=0 Win=43690 Len=0 MSS=65495 SACK_PERM=1 TSval=4294937756 TSecr=0 WS=64
 77   1.119943    127.0.0.1 -> 127.0.0.1    TCP 74 [TCP Retransmission] 60802 > 49152 [SYN] Seq=0 Win=43690 Len=0 MSS=65495 SACK_PERM=1 TSval=4294937864 TSecr=0 WS=64
 78   1.119953    127.0.0.1 -> 127.0.0.1    TCP 54 49152 > 60802 [RST, ACK] Seq=1 Ack=1 Win=0 Len=0

So it looks like the issue is one of the SYN,ACK frames getting
completely lost.

(*) http://w1.fi/p/tcp-lo.pcap

> Can you provide a simple test case or explain in more detail how you
> run your test?  I would like to reproduce the issue it here.

I'm not sure how to make a simple test case for this, taking into
account that it seems to have some unknown timing dependencies. What
the test case ends up doing is a quick loop of 20 TCP socket() +
connect() calls against a server side that does listen() with a
backlog of 10 and non-blocking operations, with the 10th and following
incoming sockets getting close()d immediately. Whether a simple
program doing that, without all of the python and wpa_supplicant
processing, would give suitable timing is unclear.
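
Something along these lines might be a starting point (a minimal,
hypothetical sketch, not the actual hwsim test code; the dummy server
below never accept()s at all, so it only demonstrates saturating the
listen() backlog, not the accept-and-close behavior of http_server.c):

    import socket, time

    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    srv.bind(("127.0.0.1", 0))
    srv.listen(10)                 # same backlog as http_server.c
    addr = srv.getsockname()

    for i in range(20):
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(1.0)          # the short per-test connect() timeout
        t0 = time.time()
        try:
            # once the backlog is full, the SYN is dropped and connect()
            # races against the ~1 s SYN retransmission
            s.connect(addr)
            print(i, "connected in %.2f sec" % (time.time() - t0))
        except socket.timeout:
            print(i, "timed out after %.2f sec" % (time.time() - t0))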

These files describe the test setup that I'm using to run this:
http://w1.fi/cgit/hostap/plain/tests/hwsim/README
http://w1.fi/cgit/hostap/plain/tests/hwsim/vm/README

with the actual test case being executed with
tests/hwsim/vm$ ./vm-run.sh ap_wps_er_http_proto

- Jouni

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [patch 4 14/22] timer: Switch to a non cascading wheel
  2016-08-12 17:50     ` Rik van Riel
  2016-08-12 19:14       ` Paul E. McKenney
@ 2016-08-16  7:57       ` Richard Cochran
  1 sibling, 0 replies; 60+ messages in thread
From: Richard Cochran @ 2016-08-16  7:57 UTC (permalink / raw)
  To: Rik van Riel
  Cc: Jouni Malinen, Thomas Gleixner, LKML, Ingo Molnar,
	Peter Zijlstra, Paul McKenney, Frederic Weisbecker, Chris Mason,
	Arjan van de Ven, rt, George Spelvin, Len Brown, Josh Triplett,
	Eric Dumazet

On Fri, Aug 12, 2016 at 01:50:16PM -0400, Rik van Riel wrote:
> Could that cause the new timer wheel code to skip over
> timer buckets occasionally, or is this hypothesis bunk?

The new wheel is not different from the old one in this respect.  Each
base keeps its own jiffies counter.  When returning from a long sleep,
the base counter can be behind the current jiffies value by more than
one.  In __run_timers() we loop through all the missed jiffies:

	while (time_after_eq(jiffies, base->clk)) {

		levels = collect_expired_timers(base, heads);
		base->clk++;

		while (levels--)
			expire_timers(base, heads + levels);
	}
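
In toy form, the effect of that loop is that even when jiffies has
advanced by several ticks while the base was idle, every intermediate
bucket is still collected; none are skipped.  A minimal python sketch
of the same idea (one bucket per jiffy, hypothetical timer names, not
the kernel code):

    jiffies = 105
    base_clk = 100                        # base was idle for 5 ticks
    buckets = {101: ["t1"], 103: ["t2"]}  # timers queued while idle

    while base_clk <= jiffies:            # time_after_eq(jiffies, base->clk)
        for t in buckets.pop(base_clk, []):
            print("expire", t, "at", base_clk)
        base_clk += 1                     # base->clk++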

So the hypothesis is incorrect.

Thanks,
Richard

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [patch 4 14/22] timer: Switch to a non cascading wheel
  2016-08-12 19:14       ` Paul E. McKenney
@ 2016-08-16  8:55         ` Richard Cochran
  0 siblings, 0 replies; 60+ messages in thread
From: Richard Cochran @ 2016-08-16  8:55 UTC (permalink / raw)
  To: Paul E. McKenney
  Cc: Rik van Riel, Jouni Malinen, Thomas Gleixner, LKML, Ingo Molnar,
	Peter Zijlstra, Frederic Weisbecker, Chris Mason,
	Arjan van de Ven, rt, George Spelvin, Len Brown, Josh Triplett,
	Eric Dumazet

On Fri, Aug 12, 2016 at 12:14:11PM -0700, Paul E. McKenney wrote:
> FWIW, I do appear to be seeing more lost wakeups on current mainline
> than on v4.7, but not enough of a difference to get a reliable bisection
> in a reasonable amount of time.

We are seeing reproducible hangs* when running Steven's
stress-cpu-hotplug under kvm.  This hang goes back to v4.7 at least.
I couldn't reproduce it on bare metal.  Thomas thought it was a
virtualization issue, not related to the hotplug rework.  He is away
until next week; maybe he will have more to say then...

To reproduce, run 

   while [ 1 ]; do ./stress-cpu-hotplug ; done

and wait about 20 minutes (or longer).

Thanks,
Richard

* INFO: task cpuhp/7:49 blocked for more than 120 seconds

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [PREEMPT-RT] [patch 4 14/22] timer: Switch to a non cascading wheel
  2016-08-13  9:12       ` Jouni Malinen
@ 2016-08-16  9:46         ` Richard Cochran
  2016-08-16 14:35           ` Eric Dumazet
  2016-08-17  9:05           ` Jouni Malinen
  0 siblings, 2 replies; 60+ messages in thread
From: Richard Cochran @ 2016-08-16  9:46 UTC (permalink / raw)
  To: Jouni Malinen
  Cc: rcochran, Thomas Gleixner, Rik van Riel, Len Brown,
	Peter Zijlstra, Frederic Weisbecker, LKML, George Spelvin,
	Josh Triplett, Chris Mason, Eric Dumazet, rt, Paul McKenney,
	Ingo Molnar, Arjan van de Ven, j

Jouni,

If I understand the test correctly, then the slightly different kernel
timer behavior is ok, but the test isn't quite right.  Let me explain
what I mean.

First off, reading test_ap_wps.py, the point of the test is to see if
ten simultaneous connections are possible.  I guess the server
implements a hard-coded limit on the number of clients.  (BTW where is
the server loop?)

You said that the server also sets 'backlog' to ten.  The backlog
controls the size of the queue holding incoming connections that are
in the SYN_RCVD or ESTABLISHED state but have not yet been
accept(2)-ed by the server.  This is *not* the same as the number of
possible simultaneous connections.

On Sat, Aug 13, 2016 at 12:12:26PM +0300, Jouni Malinen wrote:
> Yes, it looks like a TCP connect() timeout. I use a significantly
> reduced timeout in the test scripts since they are run unattended and
> are supposed to terminate in a reasonable amount of time. That said,

I did not find where the client sets the one second timeout.  Where
does this happen?

> If I increase that 20 to 50, I get more of those roughly 1.03 second
> results, at i=17, i=34, i=48.

Can you provide the timings when the test runs on the older kernel?
 
> Looking more at what exactly is happening at the TCP layer, this is
> likely related to the server behavior: the listen() backlog is set to
> 10, and if there are 10 parallel connections, the last one is
> immediately closed before reading anything.

To clarify, when the backlog is exceeded, the new connection is not
closed.  Instead, the SYN is simply ignored, and the client is expected
to re-transmit the SYN in the normal TCP fashion.

> Looking at a sniffer capture (*), the three-way TCP handshake goes
> through fine for the first 14 connect() calls, but the 15th one does
> not get a response to its SYN. This SYN is frame 47 in the capture
> file, with srcport == 60802. There is no SYN,ACK for it. The roughly
> one second of unexpected connect() time comes from this, i.e., the
> connection is completed only after the client side retransmits the
> SYN (frame #77) a second later and the server side replies with
> RST,ACK (frame #78).

This is the expected behavior.

> So it looks like the issue is one of the SYN,ACK frames getting
> completely lost.

No, the frame is not missing.  It was never sent because the backlog
was exceeded.

Here is what I suspect is happening.  By sending 20 SYN frames to a
port with a backlog of 10, it saturates the queue.  One SYN is ignored
by the kernel, and a race begins between the connect() timeout and the
SYN re-transmission.  If the client's re-transmitted SYN goes out and
the server's SYN,ACK returns before the connect() timeout, then the
call to connect() succeeds.  With the new timer wheel, the result of
the race is different.

There are a couple of ways to deal with this.  One is to increase the
backlog on the server side.  Another is to increase the connect()
timeout to a multiple of the re-transmission interval.
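
For the second option, the change in the python test would be a single
line, something like this (a hypothetical sketch, assuming the timeout
is set through the socket module's process-wide default):

    # comfortably above one ~1 s SYN retransmission interval, so the
    # race described above cannot hit the connect() timeout first
    socket.setdefaulttimeout(3.5)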

Thoughts?

Thanks,
Richard

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [PREEMPT-RT] [patch 4 14/22] timer: Switch to a non cascading wheel
  2016-08-16  9:46         ` Richard Cochran
@ 2016-08-16 14:35           ` Eric Dumazet
  2016-08-17  9:05           ` Jouni Malinen
  1 sibling, 0 replies; 60+ messages in thread
From: Eric Dumazet @ 2016-08-16 14:35 UTC (permalink / raw)
  To: Richard Cochran
  Cc: Jouni Malinen, rcochran, Thomas Gleixner, Rik van Riel,
	Len Brown, Peter Zijlstra, Frederic Weisbecker, LKML,
	George Spelvin, Josh Triplett, Chris Mason, rt, Paul McKenney,
	Ingo Molnar, Arjan van de Ven, j

On Tue, Aug 16, 2016 at 5:46 AM, Richard Cochran
<richardcochran@gmail.com> wrote:
> [...]
>
> Here is what I suspect is happening.  By sending 20 SYN frames to a
> port with a backlog of 10, it saturates the queue.  One SYN is ignored
> by the kernel, and a race begins between the connect() timeout and the
> SYN re-transmission.  If the client's re-transmitted SYN goes out and
> the server's SYN,ACK returns before the connect() timeout, then the
> call to connect() succeeds.  With the new timer wheel, the result of
> the race is different.
>
> There are a couple of ways to deal with this.  One is to increase the
> backlog on the server side.  Another is to increase the connect()
> timeout to a multiple of the re-transmission interval.
>
> Thoughts?
>

I am coming late to the party, but yes, the test looks flaky.

(It relies on having very precisely timed SYN retransmits when the
listen() backlog on the server side is full.)

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [PREEMPT-RT] [patch 4 14/22] timer: Switch to a non cascading wheel
  2016-08-16  9:46         ` Richard Cochran
  2016-08-16 14:35           ` Eric Dumazet
@ 2016-08-17  9:05           ` Jouni Malinen
  2016-08-17  9:23             ` rcochran
  1 sibling, 1 reply; 60+ messages in thread
From: Jouni Malinen @ 2016-08-17  9:05 UTC (permalink / raw)
  To: Richard Cochran
  Cc: Jouni Malinen, rcochran, Thomas Gleixner, Rik van Riel,
	Len Brown, Peter Zijlstra, Frederic Weisbecker, LKML,
	George Spelvin, Josh Triplett, Chris Mason, Eric Dumazet, rt,
	Paul McKenney, Ingo Molnar, Arjan van de Ven

On Tue, Aug 16, 2016 at 11:46:00AM +0200, Richard Cochran wrote:
> If I understand the test correctly, then the slightly different kernel
> timer behavior is ok, but the test isn't quite right.  Let me explain
> what I mean.
> 
> First off, reading test_ap_wps.py, the point of the test is to see if
> ten simultaneous connections are possible.  I guess the server
> implements a hard-coded limit on the number of clients.  (BTW where is
> the server loop?)

Yes, I think I wrote that test case to hit a specific code path on the
server side. The server implementation is here:
http://w1.fi/cgit/hostap/plain/src/wps/http_server.c

> You said that the server also sets 'backlog' to ten.  The backlog
> controls the size of the queue holding incoming connections that are
> in the SYN_RCVD or ESTABLISHED state but have not yet been
> accept(2)-ed by the server.  This is *not* the same as the number of
> possible simultaneous connections.

This is indeed what seems to be the key area that had a small change in
timing. After some more experimentation with various timings, it is
starting to look like the TCP initiator side retransmission timing is
different after this kernel commit, and that case is hit when the
server process does not have enough time to accept() the incoming
connections and the listen() backlog ends up dropping them instead.

> On Sat, Aug 13, 2016 at 12:12:26PM +0300, Jouni Malinen wrote:
> > Yes, it looks like a TCP connect() timeout. I use a significantly
> > reduced timeout in the test scripts since they are run unattended and
> > are supposed to terminate in a reasonable amount of time. That said,
> 
> I did not find where the client sets the one second timeout.  Where
> does this happen?

There is a socket.setdefaulttimeout(1) call in wps_er_start().

> > If I increase that 20 to 50, I get more of those roughly 1.03 second
> > results, at i=17, i=34, i=48.
> 
> Can you provide the timings when the test runs on the older kernel?

I had not realized this previously because the test case was passing,
but the same SYN retransmit case was happening with older kernels too;
it just completed a tiny bit faster and escaped that 1.0 second timeout
limit. The roughly 1.03 sec value after this kernel commit was 1.0 sec
before it. In other words, something in this specific kernel commit
seems to add about 0.03 sec of delay to the TCP SYN retransmission.
That said, I realize that this is quite an unlikely timeout to use for
connect() in the real world, and as such, it looks simply like a side
effect of a test case that was using far too tight a timing requirement
in the first place.

> > Looking more at what exactly is happening at the TCP layer, this is
> > likely related to the server behavior: the listen() backlog is set to
> > 10, and if there are 10 parallel connections, the last one is
> > immediately closed before reading anything.
> 
> To clarify, when the backlog is exceeded, the new connection is not
> closed.  Instead, the SYN is simply ignored, and the client is expected
> to re-transmit the SYN in the normal TCP fashion.

That closing was referring to what the server code in http_server.c
does, i.e., it does call close() after accept()ing the 10th parallel
connection. I had not realized that this was actually hitting the
listen() backlog in this specific case around the 15th socket, but yes,
that does indeed explain the behavior shown in the sniffer capture.

I was also able to confirm that if I add a short wait in the test loop
between each connect() call, things work fine with this kernel commit
included, i.e., the "issue" was triggered only when this listen()
backlog case was hit.

> Here is what I suspect is happening.  By sending 20 SYN frames to a
> port with a backlog of 10, it saturates the queue.  One SYN is ignored
> by the kernel, and a race begins between the connect() timeout and the
> SYN re-transmission.  If the client's re-transmitted SYN goes out and
> the server's SYN,ACK returns before the connect() timeout, then the
> call to connect() succeeds.  With the new timer wheel, the result of
> the race is different.

Yes, that does indeed look exactly like what is happening.

> There are a couple of ways to deal with this.  One is to increase the
> backlog on the server side.  Another is to increase the connect()
> timeout to a multiple of the re-transmission interval.

Taking into account that this is a test case for exercising a specific
code path in http_server.c that is very unlikely to be hit in the real
world, and that it used far too short a timeout for connect(), it is
clear to me that the proper fix here is to change the test case to use
a longer connect() timeout, especially now that I have seen the 1.0 sec
time with the older kernel as well.

-- 
Jouni Malinen                                            PGP id EFC895FA

^ permalink raw reply	[flat|nested] 60+ messages in thread

* Re: [PREEMPT-RT] [patch 4 14/22] timer: Switch to a non cascading wheel
  2016-08-17  9:05           ` Jouni Malinen
@ 2016-08-17  9:23             ` rcochran
  0 siblings, 0 replies; 60+ messages in thread
From: rcochran @ 2016-08-17  9:23 UTC (permalink / raw)
  To: Jouni Malinen
  Cc: Richard Cochran, Jouni Malinen, Thomas Gleixner, Rik van Riel,
	Len Brown, Peter Zijlstra, Frederic Weisbecker, LKML,
	George Spelvin, Josh Triplett, Chris Mason, Eric Dumazet, rt,
	Paul McKenney, Ingo Molnar, Arjan van de Ven

On Wed, Aug 17, 2016 at 12:05:22PM +0300, Jouni Malinen wrote:
> I had not realized this previously because the test case was passing,
> but the same SYN retransmit case was happening with older kernels too;
> it just completed a tiny bit faster and escaped that 1.0 second timeout
> limit. The roughly 1.03 sec value after this kernel commit was 1.0 sec
> before it. In other words, something in this specific kernel commit
> seems to add about 0.03 sec of delay to the TCP SYN retransmission.

Yes, and this 3% increase in the timeout expiration interval is an
expected result of the new timer wheel.  See the large comment at
the top of kernel/time/timer.c and the lkml discussion about this
series.
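
As a rough back-of-the-envelope check, here is a python sketch of the
wheel's level/granularity arithmetic as described in that comment (64
buckets per level, each level 8x coarser than the one below; this is a
simplification, not the kernel's actual calc_index() code):

    LVL_CLK_SHIFT = 3                  # each level is 8x coarser
    LVL_BITS = 6                       # 64 buckets per level

    def granularity(delta_jiffies):
        """Bucket granularity in jiffies for a relative expiry time."""
        level = 0
        while delta_jiffies >= (1 << LVL_BITS) << (level * LVL_CLK_SHIFT):
            level += 1
        return 1 << (level * LVL_CLK_SHIFT)

    # Assuming HZ=250, a 1 s timer is 250 jiffies: level 1, granularity
    # 8 jiffies = 32 ms, i.e. up to ~3% extra delay -- in line with the
    # observed ~1.03 s SYN retransmission.
    print(granularity(250))            # -> 8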

> That said, I realize that this is quite an unlikely timeout to use for
> connect() in the real world, and as such, it looks simply like a side
> effect of a test case that was using far too tight a timing requirement
> in the first place.

Right, and user space should not rely on the exact timing of TCP SYN
re-transmission!

Thanks,
Richard

^ permalink raw reply	[flat|nested] 60+ messages in thread

end of thread, other threads:[~2016-08-17  9:23 UTC | newest]

Thread overview: 60+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-07-04  9:50 [patch 4 00/22] timer: Refactor the timer wheel Thomas Gleixner
2016-07-04  9:50 ` [patch 4 01/22] timer: Make pinned a timer property Thomas Gleixner
2016-07-07  8:39   ` [tip:timers/core] timers: Make 'pinned' " tip-bot for Thomas Gleixner
2016-07-04  9:50 ` [patch 4 02/22] x86/apic/uv: Initialize timer as pinned Thomas Gleixner
2016-07-07  8:40   ` [tip:timers/core] timers, x86/apic/uv: Initialize the UV heartbeat " tip-bot for Thomas Gleixner
2016-07-04  9:50 ` [patch 4 03/22] x86/mce: Initialize " Thomas Gleixner
2016-07-07  8:40   ` [tip:timers/core] timers, x86/mce: Initialize MCE restart " tip-bot for Thomas Gleixner
2016-07-04  9:50 ` [patch 4 04/22] cpufreq/powernv: Initialize " Thomas Gleixner
2016-07-07  8:41   ` [tip:timers/core] timers, cpufreq/powernv: Initialize the gpstate " tip-bot for Thomas Gleixner
2016-07-04  9:50 ` [patch 4 05/22] driver/net/ethernet/tile: Initialize " Thomas Gleixner
2016-07-07  8:41   ` [tip:timers/core] timers, driver/net/ethernet/tile: Initialize the egress " tip-bot for Thomas Gleixner
2016-07-04  9:50 ` [patch 4 06/22] drivers/tty/metag_da: Initialize " Thomas Gleixner
2016-07-07  8:42   ` [tip:timers/core] timers, drivers/tty/metag_da: Initialize the poll " tip-bot for Thomas Gleixner
2016-07-04  9:50 ` [patch 4 07/22] drivers/tty/mips_ejtag: Initialize " Thomas Gleixner
2016-07-07  8:42   ` [tip:timers/core] timers, drivers/tty/mips_ejtag: Initialize the poll " tip-bot for Thomas Gleixner
2016-07-04  9:50 ` [patch 4 08/22] net/ipv4/inet: Initialize timers " Thomas Gleixner
2016-07-07  8:43   ` [tip:timers/core] timers, net/ipv4/inet: Initialize connection request " tip-bot for Thomas Gleixner
2016-07-04  9:50 ` [patch 4 09/22] timer: Remove mod_timer_pinned Thomas Gleixner
2016-07-07  8:43   ` [tip:timers/core] timers: Remove the deprecated mod_timer_pinned() API tip-bot for Thomas Gleixner
2016-07-04  9:50 ` [patch 4 10/22] signal: Use hrtimer for sigtimedwait Thomas Gleixner
2016-07-07  8:43   ` [tip:timers/core] signals: Use hrtimer for sigtimedwait() tip-bot for Thomas Gleixner
2016-07-04  9:50 ` [patch 4 11/22] hlist: Add hlist_is_singular_node() helper Thomas Gleixner
2016-07-07  8:44   ` [tip:timers/core] " tip-bot for Thomas Gleixner
2016-07-04  9:50 ` [patch 4 12/22] timer: Give a few structs and members proper names Thomas Gleixner
2016-07-07  8:44   ` [tip:timers/core] timers: " tip-bot for Thomas Gleixner
2016-07-04  9:50 ` [patch 4 13/22] timer: Reduce the CPU index space to 256k Thomas Gleixner
2016-07-07  8:45   ` [tip:timers/core] timers: " tip-bot for Thomas Gleixner
2016-07-04  9:50 ` [patch 4 14/22] timer: Switch to a non cascading wheel Thomas Gleixner
2016-07-07  8:45   ` [tip:timers/core] timers: Switch to a non-cascading wheel tip-bot for Thomas Gleixner
2016-08-11 15:21   ` [patch 4 14/22] timer: Switch to a non cascading wheel Jouni Malinen
2016-08-11 20:25     ` [PREEMPT-RT] " rcochran
2016-08-13  9:12       ` Jouni Malinen
2016-08-16  9:46         ` Richard Cochran
2016-08-16 14:35           ` Eric Dumazet
2016-08-17  9:05           ` Jouni Malinen
2016-08-17  9:23             ` rcochran
2016-08-12 17:50     ` Rik van Riel
2016-08-12 19:14       ` Paul E. McKenney
2016-08-16  8:55         ` Richard Cochran
2016-08-16  7:57       ` Richard Cochran
2016-07-04  9:50 ` [patch 4 15/22] timer: Remove slack leftovers Thomas Gleixner
2016-07-07  8:46   ` [tip:timers/core] timers: Remove set_timer_slack() leftovers tip-bot for Thomas Gleixner
2016-07-22 11:31   ` [patch 4 15/22] timer: Remove slack leftovers Jason A. Donenfeld
2016-07-22 13:04     ` Thomas Gleixner
2016-07-22 15:18       ` Jason A. Donenfeld
2016-07-22 22:54         ` Jason A. Donenfeld
2016-07-04  9:50 ` [patch 4 16/22] timer: Move __run_timers() function Thomas Gleixner
2016-07-07  8:46   ` [tip:timers/core] timers: " tip-bot for Anna-Maria Gleixner
2016-07-04  9:50 ` [patch 4 17/22] timer: Optimize collect timers for NOHZ Thomas Gleixner
2016-07-07  8:47   ` [tip:timers/core] timers: Optimize collect_expired_timers() " tip-bot for Anna-Maria Gleixner
2016-07-04  9:50 ` [patch 4 18/22] tick/sched: Remove pointless empty function Thomas Gleixner
2016-07-07  8:47   ` [tip:timers/core] timers/nohz: Remove pointless tick_nohz_kick_tick() function tip-bot for Thomas Gleixner
2016-07-04  9:50 ` [patch 4 19/22] timer: Forward wheel clock whenever possible Thomas Gleixner
2016-07-07  8:48   ` [tip:timers/core] timers: Forward the " tip-bot for Thomas Gleixner
2016-07-04  9:50 ` [patch 4 20/22] timer: Only wake softirq if necessary Thomas Gleixner
2016-07-07  8:48   ` [tip:timers/core] timers: " tip-bot for Thomas Gleixner
2016-07-04  9:50 ` [patch 4 21/22] timer: Split out index calculation Thomas Gleixner
2016-07-07  8:48   ` [tip:timers/core] timers: " tip-bot for Anna-Maria Gleixner
2016-07-04  9:50 ` [patch 4 22/22] timer: Optimization for same expiry time in mod_timer() Thomas Gleixner
2016-07-07  8:49   ` [tip:timers/core] timers: Implement optimization " tip-bot for Anna-Maria Gleixner

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).