From: Anna-Maria Gleixner
To: LKML
Cc: Thomas Gleixner, Peter Zijlstra, Ingo Molnar, keescook@chromium.org,
    Christoph Hellwig, John Stultz, Anna-Maria Gleixner
Subject: [PATCH v4 25/36] hrtimer: Use irqsave/irqrestore around __run_hrtimer()
Date: Thu, 21 Dec 2017 11:41:54 +0100
Message-Id: <20171221104205.7269-26-anna-maria@linutronix.de>
X-Mailer: git-send-email 2.11.0
In-Reply-To: <20171221104205.7269-1-anna-maria@linutronix.de>
References: <20171221104205.7269-1-anna-maria@linutronix.de>

__run_hrtimer() is called with the hrtimer_cpu_base.lock held and
interrupts disabled. Before invoking the timer callback, the base lock is
dropped, but interrupts stay disabled.

The upcoming support for softirq-based hrtimers requires that interrupts
be enabled before the timer callback is invoked.

To avoid code duplication, take hrtimer_cpu_base.lock with
raw_spin_lock_irqsave(flags) at the call site and hand the flags in as an
argument. That way, the raw_spin_unlock_irqrestore() before the callback
invocation either keeps interrupts disabled when running in hard interrupt
context or restores the interrupt-enabled state when called from softirq
context.

Suggested-by: Peter Zijlstra
Signed-off-by: Anna-Maria Gleixner
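
To illustrate the resulting scheme outside the kernel, here is a minimal
userspace model of the save/hand-down/restore pattern. All names in it
(toy_lock_irqsave(), toy_unlock_irqrestore(), irq_disabled, run_timer())
are invented for this sketch and only mimic the semantics of
raw_spin_lock_irqsave() and raw_spin_unlock_irqrestore(); this is not
kernel code.

#include <stdbool.h>
#include <stdio.h>

static bool irq_disabled;	/* models the CPU interrupt-disable state */
static bool lock_held;		/* models cpu_base->lock */

/* Save the current "irq" state, then disable "irqs" and take the lock. */
static unsigned long toy_lock_irqsave(void)
{
	unsigned long flags = irq_disabled;

	irq_disabled = true;
	lock_held = true;
	return flags;
}

/* Drop the lock and restore the caller's saved "irq" state. */
static void toy_unlock_irqrestore(unsigned long flags)
{
	lock_held = false;
	irq_disabled = flags;
}

/* Retake the lock with "irqs" unconditionally disabled. */
static void toy_lock_irq(void)
{
	irq_disabled = true;
	lock_held = true;
}

/* Models __run_hrtimer(): drop the lock around the callback, using the
 * flags that the call site saved when it took the lock. */
static void run_timer(unsigned long flags)
{
	toy_unlock_irqrestore(flags);
	printf("callback runs with interrupts %s\n",
	       irq_disabled ? "disabled" : "enabled");
	toy_lock_irq();
}

int main(void)
{
	unsigned long flags;

	/* Hard interrupt context: irqs were off, so they stay off. */
	irq_disabled = true;
	flags = toy_lock_irqsave();
	run_timer(flags);
	toy_unlock_irqrestore(flags);

	/* Softirq context: irqs were on, callback sees them enabled. */
	irq_disabled = false;
	flags = toy_lock_irqsave();
	run_timer(flags);
	toy_unlock_irqrestore(flags);
	return 0;
}

Because the saved flags travel with the call, the single
raw_spin_unlock_irqrestore() in __run_hrtimer() serves both callers
without duplicating the callback loop.
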
---
 kernel/time/hrtimer.c | 31 ++++++++++++++++++-------------
 1 file changed, 18 insertions(+), 13 deletions(-)

diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 914e91a59a51..1966867f7232 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1161,7 +1161,8 @@ EXPORT_SYMBOL_GPL(hrtimer_active);
 
 static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
 			  struct hrtimer_clock_base *base,
-			  struct hrtimer *timer, ktime_t *now)
+			  struct hrtimer *timer, ktime_t *now,
+			  unsigned long flags)
 {
 	enum hrtimer_restart (*fn)(struct hrtimer *);
 	int restart;
@@ -1196,11 +1197,11 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
 	 * protected against migration to a different CPU even if the lock
 	 * is dropped.
 	 */
-	raw_spin_unlock(&cpu_base->lock);
+	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
 	trace_hrtimer_expire_entry(timer, now);
 	restart = fn(timer);
 	trace_hrtimer_expire_exit(timer);
-	raw_spin_lock(&cpu_base->lock);
+	raw_spin_lock_irq(&cpu_base->lock);
 
 	/*
 	 * Note: We clear the running state after enqueue_hrtimer and
@@ -1228,7 +1229,8 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
 	base->running = NULL;
 }
 
-static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
+static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
+				 unsigned long flags)
 {
 	struct hrtimer_clock_base *base;
 	unsigned int active = cpu_base->active_bases;
@@ -1259,7 +1261,7 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
 			if (basenow < hrtimer_get_softexpires_tv64(timer))
 				break;
 
-			__run_hrtimer(cpu_base, base, timer, &basenow);
+			__run_hrtimer(cpu_base, base, timer, &basenow, flags);
 		}
 	}
 }
@@ -1274,13 +1276,14 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 {
 	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
 	ktime_t expires_next, now, entry_time, delta;
+	unsigned long flags;
 	int retries = 0;
 
 	BUG_ON(!cpu_base->hres_active);
 	cpu_base->nr_events++;
 	dev->next_event = KTIME_MAX;
 
-	raw_spin_lock(&cpu_base->lock);
+	raw_spin_lock_irqsave(&cpu_base->lock, flags);
 	entry_time = now = hrtimer_update_base(cpu_base);
 retry:
 	cpu_base->in_hrtirq = 1;
@@ -1293,7 +1296,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 	 */
 	cpu_base->expires_next = KTIME_MAX;
 
-	__hrtimer_run_queues(cpu_base, now);
+	__hrtimer_run_queues(cpu_base, now, flags);
 
 	/* Reevaluate the clock bases for the next expiry */
 	expires_next = __hrtimer_get_next_event(cpu_base);
@@ -1303,7 +1306,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 	 */
 	cpu_base->expires_next = expires_next;
 	cpu_base->in_hrtirq = 0;
-	raw_spin_unlock(&cpu_base->lock);
+	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
 
 	/* Reprogramming necessary ? */
 	if (!tick_program_event(expires_next, 0)) {
@@ -1324,7 +1327,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 	 * Acquire base lock for updating the offsets and retrieving
 	 * the current time.
 	 */
-	raw_spin_lock(&cpu_base->lock);
+	raw_spin_lock_irqsave(&cpu_base->lock, flags);
 	now = hrtimer_update_base(cpu_base);
 	cpu_base->nr_retries++;
 	if (++retries < 3)
@@ -1337,7 +1340,8 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 	 */
 	cpu_base->nr_hangs++;
 	cpu_base->hang_detected = 1;
-	raw_spin_unlock(&cpu_base->lock);
+	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
+
 	delta = ktime_sub(now, entry_time);
 	if ((unsigned int)delta > cpu_base->max_hang_time)
 		cpu_base->max_hang_time = (unsigned int) delta;
@@ -1379,6 +1383,7 @@ static inline void __hrtimer_peek_ahead_timers(void) { }
 void hrtimer_run_queues(void)
 {
 	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
+	unsigned long flags;
 	ktime_t now;
 
 	if (__hrtimer_hres_active(cpu_base))
@@ -1396,10 +1401,10 @@ void hrtimer_run_queues(void)
 		return;
 	}
 
-	raw_spin_lock(&cpu_base->lock);
+	raw_spin_lock_irqsave(&cpu_base->lock, flags);
 	now = hrtimer_update_base(cpu_base);
-	__hrtimer_run_queues(cpu_base, now);
-	raw_spin_unlock(&cpu_base->lock);
+	__hrtimer_run_queues(cpu_base, now, flags);
+	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
 }
 
 /*
-- 
2.11.0