From: Thomas Gleixner <tglx@linutronix.de>
To: Ulrich Obergfell <uobergfe@redhat.com>
Cc: LKML <linux-kernel@vger.kernel.org>,
	Peter Zijlstra <peterz@infradead.org>,
	Ingo Molnar <mingo@kernel.org>,
	Andrew Morton <akpm@linux-foundation.org>,
	Borislav Petkov <bp@alien8.de>,
	Sebastian Siewior <bigeasy@linutronix.de>,
	Nicholas Piggin <npiggin@gmail.com>,
	Don Zickus <dzickus@redhat.com>,
	Chris Metcalf <cmetcalf@mellanox.com>
Subject: [patch V2 11/29] lockup_detector: Remove park_in_progress obfuscation
Date: Tue, 5 Sep 2017 21:19:08 +0200 (CEST)
Message-ID: <alpine.DEB.2.20.1709052038270.2393@nanos>
In-Reply-To: <alpine.DEB.2.20.1709051540310.1900@nanos>

b94f51183b06 ("kernel/watchdog: prevent false hardlockup on overloaded
system") tries to fix the following issue:

proc_write()
   set_sample_period()    <--- New sample period becomes visible
 			  <----- Broken starts
   proc_watchdog_update()
     watchdog_enable_all_cpus() 	watchdog_hrtimer_fn()
     update_watchdog_all_cpus()		   restart_timer(sample_period)
        watchdog_park_threads()

					thread->park()
					  disable_nmi()
			  <----- Broken ends

The reason why this is broken is that the update of the watchdog threshold
becomes immediately effective and visible to the hrtimer function, which
uses that value to rearm the timer, while the NMI/perf side still uses the
old value up to the point where it is disabled. If the rate has been
lowered, then the NMI can fire fast enough to 'detect' a hard lockup
because the timer has not fired yet due to the longer period.
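
For reference, the hardlockup decision in kernel/watchdog_hld.c boils down
to comparing the per CPU hrtimer interrupt count against the snapshot taken
at the previous NMI. A simplified sketch of that logic (per CPU accessors
omitted, not the literal kernel code):

	/* Incremented by the watchdog hrtimer callback on every expiry */
	static unsigned long hrtimer_interrupts;
	/* Snapshot taken at the previous perf NMI */
	static unsigned long hrtimer_interrupts_saved;

	/* Called from the perf NMI. No timer progress since the last
	 * check means the CPU looks hard locked up. */
	static bool is_hardlockup(void)
	{
		unsigned long hrint = hrtimer_interrupts;

		if (hrtimer_interrupts_saved == hrint)
			return true;

		hrtimer_interrupts_saved = hrint;
		return false;
	}

If the perf event is still programmed with the old, shorter period while
the hrtimer has already been rearmed with the new, longer one, consecutive
NMIs can observe an unchanged count and the check above declares a bogus
hardlockup.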

The patch 'fixed' this by adding a variable:

proc_write()
   set_sample_period()
 					<----- Broken starts
   proc_watchdog_update()
     watchdog_enable_all_cpus()		watchdog_hrtimer_fn()
     update_watchdog_all_cpus()		   restart_timer(sample_period)
         watchdog_park_threads()
 	  park_in_progress = 1
 					<----- Broken ends
	  			        nmi_watchdog()
					  if (park_in_progress)
					     return;

The only effect of this variable was to make the window in which the
breakage can hit small enough that it was no longer observable in testing.
From a correctness point of view it is a pointless band-aid which merely
papers over the root cause: the unsynchronized update of the sample period.

Looking deeper into the related code paths unearthed similar problems in
the watchdog_start()/stop() functions.

 watchdog_start()
	perf_nmi_event_start()
	hrtimer_start()

 watchdog_stop()
	hrtimer_cancel()
	perf_nmi_event_stop()

In both cases the call order is wrong because if the task gets preempted
or the VM gets scheduled out long enough after the first call, then there
is a chance that the next NMI will see a stale hrtimer interrupt count and
trigger a false positive hard lockup splat.
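
With the call order reversed, the sequences become (mirroring the diff
below):

 watchdog_start()
	hrtimer_start()
	perf_nmi_event_start()

 watchdog_stop()
	perf_nmi_event_stop()
	hrtimer_cancel()

i.e. the hrtimer is guaranteed to be active whenever the perf event is
armed.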

Get rid of park_in_progress so the code can be gradually deobfuscated and
pruned from several layers of duct tape papering over the root cause,
which has been either ignored or not understood at all.

Once this is removed the underlying problem will be fixed by rewriting the
proc interface to do a proper synchronized update.
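
The shape of such an update is to stop the consumers before changing the
period and to restart them afterwards, serialized against concurrent
updates. Purely illustrative pseudo code (the lock and the stop/start
helpers are placeholder names, not the actual later patches):

	static void watchdog_reconfigure(void)
	{
		/* watchdog_mutex, lockup_detectors_*() are placeholders */
		mutex_lock(&watchdog_mutex);
		/* Stop the soft/hard lockup detectors on all CPUs */
		lockup_detectors_stop();
		/* Nothing consumes the old value anymore */
		set_sample_period();
		/* Restart with the new period on both sides */
		lockup_detectors_start();
		mutex_unlock(&watchdog_mutex);
	}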

Address the start/stop() ordering problem as well by reversing the call
order, so this part is at least correct now.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---

V2: Make the changelog technically and politically correct.

 include/linux/nmi.h   |    1 -
 kernel/watchdog.c     |   39 ++++++++++++++++++---------------------
 kernel/watchdog_hld.c |    7 ++-----
 3 files changed, 20 insertions(+), 27 deletions(-)

--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -27,7 +27,6 @@ extern void touch_softlockup_watchdog_sy
 extern void touch_all_softlockup_watchdogs(void);
 extern unsigned int  softlockup_panic;
 extern int soft_watchdog_enabled;
-extern atomic_t watchdog_park_in_progress;
 #else
 static inline void touch_softlockup_watchdog_sched(void)
 {
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -134,8 +134,6 @@ void __weak watchdog_nmi_reconfigure(voi
 #define for_each_watchdog_cpu(cpu) \
 	for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)
 
-atomic_t watchdog_park_in_progress = ATOMIC_INIT(0);
-
 static u64 __read_mostly sample_period;
 
 static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
@@ -320,8 +318,7 @@ static enum hrtimer_restart watchdog_tim
 	int duration;
 	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
 
-	if (!watchdog_enabled ||
-	    atomic_read(&watchdog_park_in_progress) != 0)
+	if (!watchdog_enabled)
 		return HRTIMER_NORESTART;
 
 	/* kick the hardlockup detector */
@@ -435,33 +432,38 @@ static void watchdog_set_prio(unsigned i
 
 static void watchdog_enable(unsigned int cpu)
 {
-	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);
+	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
 
-	/* kick off the timer for the hardlockup detector */
+	/*
+	 * Start the timer first to prevent the NMI watchdog triggering
+	 * before the timer has a chance to fire.
+	 */
 	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	hrtimer->function = watchdog_timer_fn;
+	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
+		      HRTIMER_MODE_REL_PINNED);
 
+	/* Initialize timestamp */
+	__touch_watchdog();
 	/* Enable the perf event */
 	watchdog_nmi_enable(cpu);
 
-	/* done here because hrtimer_start can only pin to smp_processor_id() */
-	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
-		      HRTIMER_MODE_REL_PINNED);
-
-	/* initialize timestamp */
 	watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
-	__touch_watchdog();
 }
 
 static void watchdog_disable(unsigned int cpu)
 {
-	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);
+	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
 
 	watchdog_set_prio(SCHED_NORMAL, 0);
-	hrtimer_cancel(hrtimer);
-	/* disable the perf event */
-	watchdog_nmi_disable(cpu);
+	/*
+	 * Disable the perf event first. That prevents that a large delay
+	 * between disabling the timer and disabling the perf event causes
+	 * the perf NMI to detect a false positive.
+	 */
 	hardlockup_detector_perf_disable();
+	watchdog_nmi_disable(cpu);
+	hrtimer_cancel(hrtimer);
 }
 
 static void watchdog_cleanup(unsigned int cpu, bool online)
@@ -517,16 +519,11 @@ static int watchdog_park_threads(void)
 {
 	int cpu, ret = 0;
 
-	atomic_set(&watchdog_park_in_progress, 1);
-
 	for_each_watchdog_cpu(cpu) {
 		ret = kthread_park(per_cpu(softlockup_watchdog, cpu));
 		if (ret)
 			break;
 	}
-
-	atomic_set(&watchdog_park_in_progress, 0);
-
 	return ret;
 }
 
--- a/kernel/watchdog_hld.c
+++ b/kernel/watchdog_hld.c
@@ -106,15 +106,12 @@ static struct perf_event_attr wd_hw_attr
 
 /* Callback function for perf event subsystem */
 static void watchdog_overflow_callback(struct perf_event *event,
-		 struct perf_sample_data *data,
-		 struct pt_regs *regs)
+				       struct perf_sample_data *data,
+				       struct pt_regs *regs)
 {
 	/* Ensure the watchdog never gets throttled */
 	event->hw.interrupts = 0;
 
-	if (atomic_read(&watchdog_park_in_progress) != 0)
-		return;
-
 	if (__this_cpu_read(watchdog_nmi_touch) == true) {
 		__this_cpu_write(watchdog_nmi_touch, false);
 		return;
