* [PATCH v7 1/2] Track hard and soft "short lockups" or "stalls."
From: Alex Neronskiy @ 2011-08-11 23:35 UTC
  To: linux-kernel
  Cc: peterz, Ingo Molnar, Don Zickus, Mandeep Singh Baines, Alex Neronskiy

Enable the lockup watchdog not only to respond to lockups beyond a
certain threshold, but also to keep track of the worst sub-threshold
lockup (called a "stall") seen so far. Stalls are reported to
userspace via the kernel log, along with a stack trace. Hard lockups
are now checked for more frequently so that more data can be
collected. A soft stall is measured as the time elapsed between
successive schedulings of the watchdog thread; a hard stall is
measured as the number of consecutive hard lockup checks which found
that no new timer interrupts had been recorded. Minimum thresholds
are applied to both so that the log is not flooded with
uninteresting reports.

Signed-off-by: Alex Neronskiy <zakmagnus@chromium.org>
---
The motivation here is to gather more data about just how "extreme"
latency conditions get inside the kernel, without necessarily
crossing a lockup threshold and potentially setting off a panic.
The hard lockup detector has been reworked to suit this purpose.
Previously it was hard to collect any data from it other than
"lockup" or "no lockup." It now checks five times per detection
window and only treats 5 consecutive failed checks as a lockup,
somewhat similarly to how soft lockups work. The default numbers
are worked through in the sketch below.
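
For concreteness, here is a small standalone sketch (illustration
only, not part of the patch) of how the default numbers relate. It
assumes the stock watchdog_thresh of 10 and that
hw_nmi_get_sample_period() still corresponds to roughly
watchdog_thresh seconds; the variable names are local to the sketch.

#include <stdio.h>

int main(void)
{
        int watchdog_thresh = 10;              /* seconds; hard lockup window */
        int soft_thresh = watchdog_thresh * 2; /* get_softlockup_thresh(): 20 s */

        /* get_softstall_thresh(): the watchdog hrtimer fires 5 times per
         * soft window, so delays up to one period (4000 ms) are normal */
        int softstall_min_ms = 1000 / 5 * soft_thresh;

        /* the NMI sample period is divided by hardlockup_thresh (5),
         * so a hard lockup check runs roughly every 2 seconds */
        int hard_check_s = watchdog_thresh / 5;

        printf("soft lockup declared after %d ms without rescheduling\n",
               soft_thresh * 1000);
        printf("soft stalls recorded once the delay exceeds %d ms\n",
               softstall_min_ms);
        printf("hard check every ~%d s: more than 2 misses is a stall, "
               "5 misses (~%d s) is a lockup\n",
               hard_check_s, 5 * hard_check_s);
        return 0;
}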

 include/linux/nmi.h |    6 ++-
 kernel/sysctl.c     |    2 +-
 kernel/watchdog.c   |  124 ++++++++++++++++++++++++++++++++++++++++++++-------
 3 files changed, 113 insertions(+), 19 deletions(-)

diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 2d304ef..57cda21 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -48,9 +48,13 @@ int hw_nmi_is_cpu_stuck(struct pt_regs *);
 u64 hw_nmi_get_sample_period(int watchdog_thresh);
 extern int watchdog_enabled;
 extern int watchdog_thresh;
+extern unsigned long worst_softstall;
+#ifdef CONFIG_HARDLOCKUP_DETECTOR
+extern unsigned long worst_hardstall;
+#endif /* CONFIG_HARDLOCKUP_DETECTOR */
 struct ctl_table;
 extern int proc_dowatchdog(struct ctl_table *, int ,
 			   void __user *, size_t *, loff_t *);
-#endif
+#endif /* CONFIG_LOCKUP_DETECTOR */
 
 #endif
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 11d65b5..b8620a2 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -767,7 +767,7 @@ static struct ctl_table kern_table[] = {
 		.extra1		= &zero,
 		.extra2		= &one,
 	},
-#endif
+#endif /* CONFIG_LOCKUP_DETECTOR */
 #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86)
 	{
 		.procname       = "unknown_nmi_panic",
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 36491cd..8dd9ed4 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -29,6 +29,8 @@
 
 int watchdog_enabled = 1;
 int __read_mostly watchdog_thresh = 10;
+unsigned long worst_softstall;
+static DEFINE_SPINLOCK(softstall_lock);
 
 static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
 static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
@@ -40,8 +42,29 @@ static DEFINE_PER_CPU(bool, hard_watchdog_warn);
 static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
 static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
 static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
+static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_missed);
 static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
-#endif
+
+unsigned long worst_hardstall;
+static DEFINE_SPINLOCK(hardstall_lock);
+
+/* The number of consecutive hard check failures before it's a lockup */
+#define hardlockup_thresh (5)
+
+/*
+ * The minimum expected number of missed interrupts in a normal hard
+ * lockup check.
+ */
+#define hardstall_thresh (2)
+
+#endif /* CONFIG_HARDLOCKUP_DETECTOR */
+
+/*
+ * The number of milliseconds by which a stall must be worse than the
+ * current worst in order to be recorded. This prevents bursts of near-
+ * identical stalls.
+ */
+#define softstall_diff_thresh (100)
 
 /* boot commands */
 /*
@@ -98,19 +121,28 @@ __setup("nosoftlockup", nosoftlockup_setup);
  * the thresholds with a factor: we make the soft threshold twice the amount of
  * time the hard threshold is.
  */
-static int get_softlockup_thresh(void)
+static inline int get_softlockup_thresh(void)
 {
 	return watchdog_thresh * 2;
 }
 
 /*
- * Returns seconds, approximately.  We don't need nanosecond
+ * This is just the period of the timer function. A "stall" shorter than
+ * this is the expected behavior and is therefore not noteworthy.
+ */
+static inline int get_softstall_thresh(void)
+{
+	return 1000 / 5 * get_softlockup_thresh();
+}
+
+/*
+ * Returns milliseconds, approximately.  We don't need nanosecond
  * resolution, and we don't need to waste time with a big divide when
- * 2^30ns == 1.074s.
+ * 2^20ns == 1.049ms.
  */
 static unsigned long get_timestamp(int this_cpu)
 {
-	return cpu_clock(this_cpu) >> 30LL;  /* 2^30 ~= 10^9 */
+	return cpu_clock(this_cpu) >> 20LL;  /* 2^20 ~= 10^6 */
 }
 
 static unsigned long get_sample_period(void)
@@ -176,25 +208,79 @@ void touch_softlockup_watchdog_sync(void)
 
 #ifdef CONFIG_HARDLOCKUP_DETECTOR
 /* watchdog detector functions */
-static int is_hardlockup(void)
+static void update_hardstall(unsigned long stall, int this_cpu)
+{
+	int update_stall = 0;
+
+	if (stall > hardstall_thresh && stall > worst_hardstall) {
+		unsigned long flags;
+		spin_lock_irqsave(&hardstall_lock, flags);
+		update_stall = stall > worst_hardstall;
+		if (update_stall)
+			worst_hardstall = stall;
+		spin_unlock_irqrestore(&hardstall_lock, flags);
+	}
+
+	if (update_stall) {
+		printk(KERN_WARNING "LOCKUP may be in progress!"
+			"Worst hard stall seen on CPU#%d: %lu interrupts missed\n",
+			this_cpu, stall);
+		dump_stack();
+	}
+}
+
+static int is_hardlockup(int this_cpu)
 {
 	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);
+	unsigned long hrint_saved = __this_cpu_read(hrtimer_interrupts_saved);
+	unsigned long ints_missed = 0;
+
+	__this_cpu_write(hrtimer_interrupts_saved, hrint);
 
-	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
+	if (hrint_saved == hrint)
+		ints_missed = per_cpu(hrtimer_interrupts_missed, this_cpu)++;
+	else
+		__this_cpu_write(hrtimer_interrupts_missed, 0);
+
+	if (ints_missed >= hardlockup_thresh)
 		return 1;
 
-	__this_cpu_write(hrtimer_interrupts_saved, hrint);
+	update_hardstall(ints_missed, this_cpu);
 	return 0;
 }
 #endif
 
-static int is_softlockup(unsigned long touch_ts)
+static void update_softstall(unsigned long stall, int this_cpu)
 {
-	unsigned long now = get_timestamp(smp_processor_id());
+	int update_stall = 0;
+	if (stall > get_softstall_thresh() &&
+			stall > worst_softstall + softstall_diff_thresh) {
+		unsigned long flags;
+		spin_lock_irqsave(&softstall_lock, flags);
+		update_stall = stall > worst_softstall + softstall_diff_thresh;
+		if (update_stall)
+			worst_softstall = stall;
+		spin_unlock_irqrestore(&softstall_lock, flags);
+	}
+
+	if (update_stall) {
+		printk(KERN_WARNING "LOCKUP may be in progress!"
+				"Worst soft stall seen on CPU#%d: %lums\n",
+				this_cpu, stall);
+		dump_stack();
+	}
+}
+
+static int is_softlockup(unsigned long touch_ts, int this_cpu)
+{
+	unsigned long now = get_timestamp(this_cpu);
+	unsigned long stall = now - touch_ts;
 
 	/* Warn about unreasonable delays: */
-	if (time_after(now, touch_ts + get_softlockup_thresh()))
-		return now - touch_ts;
+	if (time_after(now, touch_ts + 1000 * get_softlockup_thresh()))
+		return stall;
+
+	update_softstall(stall, this_cpu);
 
 	return 0;
 }
@@ -214,6 +300,9 @@ static void watchdog_overflow_callback(struct perf_event *event,
 		 struct perf_sample_data *data,
 		 struct pt_regs *regs)
 {
+	int lockup;
+	int this_cpu = smp_processor_id();
+
 	/* Ensure the watchdog never gets throttled */
 	event->hw.interrupts = 0;
 
@@ -228,9 +317,8 @@ static void watchdog_overflow_callback(struct perf_event *event,
 	 * fired multiple times before we overflow'd.  If it hasn't
 	 * then this is a good indication the cpu is stuck
 	 */
-	if (is_hardlockup()) {
-		int this_cpu = smp_processor_id();
-
+	lockup = is_hardlockup(this_cpu);
+	if (lockup) {
 		/* only print hardlockups once */
 		if (__this_cpu_read(hard_watchdog_warn) == true)
 			return;
@@ -261,6 +349,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
 	struct pt_regs *regs = get_irq_regs();
 	int duration;
+	int this_cpu = smp_processor_id();
 
 	/* kick the hardlockup detector */
 	watchdog_interrupt_count();
@@ -290,14 +379,14 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 	 * indicate it is getting cpu time.  If it hasn't then
 	 * this is a good indication some task is hogging the cpu
 	 */
-	duration = is_softlockup(touch_ts);
+	duration = is_softlockup(touch_ts, this_cpu);
 	if (unlikely(duration)) {
 		/* only warn once */
 		if (__this_cpu_read(soft_watchdog_warn) == true)
 			return HRTIMER_RESTART;
 
 		printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
-			smp_processor_id(), duration,
+			this_cpu, duration >> 10L,
 			current->comm, task_pid_nr(current));
 		print_modules();
 		print_irqtrace_events(current);
@@ -371,6 +460,7 @@ static int watchdog_nmi_enable(int cpu)
 
 	wd_attr = &wd_hw_attr;
 	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);
+	do_div(wd_attr->sample_period, hardlockup_thresh);
 
 	/* Try to register using hardware perf events */
 	event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);
-- 
1.7.3.1


* [PATCH v7 2/2] Output stall data in debugfs
From: Alex Neronskiy @ 2011-08-11 23:35 UTC
  To: linux-kernel
  Cc: peterz, Ingo Molnar, Don Zickus, Mandeep Singh Baines, Alex Neronskiy

Instead of the kernel log, use debugfs to report both stall
lengths and stack traces. Printing to the log can itself touch
the watchdog and so distort the very events being measured, and
keeping stall reports out of the log also means they cannot
distract from genuine lockup messages when users read it.

A two-buffer scheme ensures that trace information can always be
recorded without the writer ever having to wait for a reader.

Signed-off-by: Alex Neronskiy <zakmagnus@chromium.org>
---
Changes from last version per Peter's suggestions: different
locks, refactoring, ACCESS_ONCE.
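
As an illustration of the handoff described above (not part of the
patch), here is a minimal userspace model of the two-buffer scheme.
It models the locking structure only, not memory-ordering details;
pthread primitives stand in for the kernel's raw spinlocks and
mutex, a plain int stands in for the saved stack trace, and the
function names (record_worst, read_worst) are invented for the
sketch. Build with -pthread.

#include <pthread.h>
#include <stdio.h>

static int bufs[2];                   /* stand-ins for the two stack traces */
static pthread_spinlock_t buf_locks[2];
static pthread_spinlock_t write_lock; /* serializes writers */
static pthread_mutex_t read_lock = PTHREAD_MUTEX_INITIALIZER; /* serializes readers */
static int read_ind;                  /* which buffer readers should use */

static void record_worst(int value)
{
        pthread_spin_lock(&write_lock);
        int ind = read_ind;
        /* never wait for a reader: if the published buffer is busy,
         * write into the spare one, which no reader can be holding */
        int locked = (pthread_spin_trylock(&buf_locks[ind]) == 0);

        if (!locked)
                ind = !ind;
        bufs[ind] = value;
        read_ind = ind;               /* publish the freshly written buffer */
        if (locked)
                pthread_spin_unlock(&buf_locks[ind]);
        pthread_spin_unlock(&write_lock);
}

static int read_worst(void)
{
        int ind, value;

        pthread_mutex_lock(&read_lock); /* at most one buffer is reader-locked */
        ind = read_ind;                 /* snapshot; stale is fine, torn is not */
        pthread_spin_lock(&buf_locks[ind]);
        value = bufs[ind];
        pthread_spin_unlock(&buf_locks[ind]);
        pthread_mutex_unlock(&read_lock);
        return value;
}

int main(void)
{
        pthread_spin_init(&buf_locks[0], PTHREAD_PROCESS_PRIVATE);
        pthread_spin_init(&buf_locks[1], PTHREAD_PROCESS_PRIVATE);
        pthread_spin_init(&write_lock, PTHREAD_PROCESS_PRIVATE);

        record_worst(42);
        printf("latest recorded value: %d\n", read_worst());
        return 0;
}

With debugfs mounted in the usual place, the real data would then be
read from /sys/kernel/debug/lockup_watchdog/softstall_worst and
softstall_trace (plus the corresponding hardstall files when
CONFIG_HARDLOCKUP_DETECTOR is set); the path assumes the standard
debugfs mount point.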

 kernel/watchdog.c |  228 ++++++++++++++++++++++++++++++++++++++++++++++-------
 1 files changed, 199 insertions(+), 29 deletions(-)

diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 8dd9ed4..cd95886 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -23,14 +23,39 @@
 #include <linux/notifier.h>
 #include <linux/module.h>
 #include <linux/sysctl.h>
+#include <linux/stacktrace.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
 
 #include <asm/irq_regs.h>
 #include <linux/perf_event.h>
 
+#define STALL_SOFT (0)
+#define STALL_HARD (1)
+
+#define STALL_MAX_TRACE_DEPTH (50)
 int watchdog_enabled = 1;
 int __read_mostly watchdog_thresh = 10;
 unsigned long worst_softstall;
-static DEFINE_SPINLOCK(softstall_lock);
+
+/* For output to debugfs, there are two buffers, each with their own
+ * lock. Only one can ever be locked at a time. The buffers are written
+ * to in interrupt context, but read in a system call, so the writers
+ * can't wait for the readers. Thus, if a writer is excluded from using
+ * a buffer, it will instead switch to the other, uncontended one.
+ * After it's done, it will indicate that the two buffers have switched
+ * roles.
+ */
+struct stall_data {
+	unsigned long trace_entries[STALL_MAX_TRACE_DEPTH * 2];
+	struct stack_trace traces[2];
+	raw_spinlock_t locks[2];
+	struct mutex read_lock;
+	raw_spinlock_t write_lock;
+	int read_ind;
+};
+
+static struct stall_data softstall_data;
 
 static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
 static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
@@ -46,7 +71,7 @@ static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_missed);
 static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
 
 unsigned long worst_hardstall;
-static DEFINE_SPINLOCK(hardstall_lock);
+static struct stall_data hardstall_data;
 
 /* The number of consecutive hard check failures before it's a lockup */
 #define hardlockup_thresh (5)
@@ -210,22 +235,28 @@ void touch_softlockup_watchdog_sync(void)
 /* watchdog detector functions */
 static void update_hardstall(unsigned long stall, int this_cpu)
 {
-	int update_stall = 0;
-
 	if (stall > hardstall_thresh && stall > worst_hardstall) {
-		unsigned long flags;
-		spin_lock_irqsave(&hardstall_lock, flags);
-		update_stall = stall > worst_hardstall;
-		if (update_stall)
-			worst_hardstall = stall;
-		spin_unlock_irqrestore(&hardstall_lock, flags);
-	}
+		raw_spin_lock(&hardstall_data.write_lock);
+		if (stall > worst_hardstall) {
+			int write_ind = hardstall_data.read_ind;
+			int locked = raw_spin_trylock(&hardstall_data.
+					locks[write_ind]);
+			/* cannot wait, so if there's contention,
+			 * switch buffers */
+			if (!locked)
+				write_ind = !write_ind;
 
-	if (update_stall) {
-		printk(KERN_WARNING "LOCKUP may be in progress!"
-			"Worst hard stall seen on CPU#%d: %lu interrupts missed\n",
-			this_cpu, stall);
-		dump_stack();
+			worst_hardstall = stall;
+			hardstall_data.traces[write_ind].nr_entries = 0;
+			save_stack_trace(&hardstall_data.traces[write_ind]);
+
+			/* tell readers to use the new buffer from now on */
+			hardstall_data.read_ind = write_ind;
+			if (locked)
+				raw_spin_unlock(&hardstall_data.
+						locks[write_ind]);
+		}
+		raw_spin_unlock(&hardstall_data.write_lock);
 	}
 }
 
@@ -252,22 +283,29 @@ static int is_hardlockup(int this_cpu)
 
 static void update_softstall(unsigned long stall, int this_cpu)
 {
-	int update_stall = 0;
 	if (stall > get_softstall_thresh() &&
 			stall > worst_softstall + softstall_diff_thresh) {
-		unsigned long flags;
-		spin_lock_irqsave(&softstall_lock, flags);
-		update_stall = stall > worst_softstall + softstall_diff_thresh;
-		if (update_stall)
-			worst_softstall = stall;
-		spin_unlock_irqrestore(&softstall_lock, flags);
-	}
+		raw_spin_lock(&softstall_data.write_lock);
+		if (stall > worst_softstall + softstall_diff_thresh) {
+			int write_ind = softstall_data.read_ind;
+			int locked = raw_spin_trylock(&softstall_data.
+					locks[write_ind]);
+			/* cannot wait, so if there's contention,
+			 * switch buffers */
+			if (!locked)
+				write_ind = !write_ind;
 
-	if (update_stall) {
-		printk(KERN_WARNING "LOCKUP may be in progress!"
-				"Worst soft stall seen on CPU#%d: %lums\n",
-				this_cpu, stall);
-		dump_stack();
+			worst_softstall = stall;
+			softstall_data.traces[write_ind].nr_entries = 0;
+			save_stack_trace(&softstall_data.traces[write_ind]);
+
+			/* tell readers to use the new buffer from now on */
+			softstall_data.read_ind = write_ind;
+			if (locked)
+				raw_spin_unlock(&softstall_data.
+						locks[write_ind]);
+		}
+		raw_spin_unlock(&softstall_data.write_lock);
 	}
 }
 
@@ -667,6 +705,102 @@ static struct notifier_block __cpuinitdata cpu_nfb = {
 	.notifier_call = cpu_callback
 };
 
+static int show_stall_trace(struct seq_file *f, void *v)
+{
+	int i, end, buf_ind;
+	int type = (int) f->private;
+	raw_spinlock_t *lock;
+	struct stack_trace *trace;
+
+	/* The buf_ind is saved locally because the global read_ind
+	 * variable can be changed at any time. Using a stale value
+	 * is okay but reading inconsistent values is not, as it can
+	 * result in locking one buffer and reading the other.
+	 * For this reason, reads are also serialized (via the
+	 * read_lock); otherwise two readers can lock both buffers
+	 * at once, and then a writer will not be able to write
+	 * without contention.
+	 */
+	if (type == STALL_SOFT) {
+		mutex_lock(&softstall_data.read_lock);
+
+		buf_ind = ACCESS_ONCE(softstall_data.read_ind);
+		lock = &softstall_data.locks[buf_ind];
+		trace = &softstall_data.traces[buf_ind];
+	} else {
+#ifdef CONFIG_HARDLOCKUP_DETECTOR
+		mutex_lock(&hardstall_data.read_lock);
+
+		buf_ind = ACCESS_ONCE(hardstall_data.read_ind);
+		lock = &hardstall_data.locks[buf_ind];
+		trace = &hardstall_data.traces[buf_ind];
+#endif
+	}
+
+	end = trace->nr_entries;
+	/* don't bother printing the trailing value */
+	if (end < trace->max_entries)
+		end--;
+
+	raw_spin_lock(lock); /* excludes writers */
+	for (i = 0; i < end; i++) {
+		seq_printf(f, "[<%pK>] %pS\n", (void *)trace->entries[i],
+				(void *)trace->entries[i]);
+	}
+	raw_spin_unlock(lock);
+
+	if (type == STALL_SOFT)
+		mutex_unlock(&softstall_data.read_lock);
+#ifdef CONFIG_HARDLOCKUP_DETECTOR
+	else
+		mutex_unlock(&hardstall_data.read_lock);
+#endif
+
+	return 0;
+}
+
+static int softstall_trace_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, show_stall_trace, (void *)STALL_SOFT);
+}
+
+static int hardstall_trace_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, show_stall_trace, (void *)STALL_HARD);
+}
+
+static const struct file_operations softstall_trace_ops = {
+	.open = softstall_trace_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release
+};
+static const struct file_operations hardstall_trace_ops = {
+	.open = hardstall_trace_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release
+};
+
+void init_stall_data(struct stall_data *data)
+{
+	data->traces[0].nr_entries = 0;
+	data->traces[0].max_entries = STALL_MAX_TRACE_DEPTH;
+	data->traces[0].skip = 0;
+	data->traces[0].entries = data->trace_entries;
+	data->traces[1].nr_entries = 0;
+	data->traces[1].max_entries = STALL_MAX_TRACE_DEPTH;
+	data->traces[1].skip = 0;
+	data->traces[1].entries = data->trace_entries
+		+ STALL_MAX_TRACE_DEPTH;
+
+	raw_spin_lock_init(&data->locks[0]);
+	raw_spin_lock_init(&data->locks[1]);
+	raw_spin_lock_init(&data->write_lock);
+	mutex_init(&data->read_lock);
+	data->read_ind = 0;
+}
+
 void __init lockup_detector_init(void)
 {
 	void *cpu = (void *)(long)smp_processor_id();
@@ -678,5 +812,41 @@ void __init lockup_detector_init(void)
 	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
 	register_cpu_notifier(&cpu_nfb);
 
+	init_stall_data(&softstall_data);
+
+#ifdef CONFIG_HARDLOCKUP_DETECTOR
+	init_stall_data(&hardstall_data);
+#endif
+
 	return;
 }
+
+/* Must be called after debugfs_init() */
+static int __init lockup_detector_debugfs_init(void)
+{
+	struct dentry *parent = debugfs_create_dir("lockup_watchdog", NULL);
+	if (parent) {
+		debugfs_create_file("softstall_trace", 0444, parent, NULL,
+				&softstall_trace_ops);
+		if (sizeof(worst_softstall) == sizeof(u32))
+			debugfs_create_u32("softstall_worst", 0644, parent,
+					(u32 *) &worst_softstall);
+		else if (sizeof(worst_softstall) == sizeof(u64))
+			debugfs_create_u64("softstall_worst", 0644, parent,
+					(u64 *) &worst_softstall);
+
+#ifdef CONFIG_HARDLOCKUP_DETECTOR
+		debugfs_create_file("hardstall_trace", 0444, parent, NULL,
+				&hardstall_trace_ops);
+		if (sizeof(worst_hardstall) == sizeof(u32))
+			debugfs_create_u32("hardstall_worst", 0644, parent,
+					(u32 *) &worst_hardstall);
+		else if (sizeof(worst_hardstall) == sizeof(u64))
+			debugfs_create_u64("hardstall_worst", 0644, parent,
+					(u64 *) &worst_hardstall);
+#endif
+	}
+
+	return 0;
+}
+postcore_initcall(lockup_detector_debugfs_init);
-- 
1.7.3.1


* Re: [PATCH v7 2/2] Output stall data in debugfs
From: Don Zickus @ 2011-08-16 15:16 UTC
  To: Alex Neronskiy; +Cc: linux-kernel, peterz, Ingo Molnar, Mandeep Singh Baines

On Thu, Aug 11, 2011 at 04:35:08PM -0700, Alex Neronskiy wrote:
> Instead of the kernel log, use debugfs to report both stall
> lengths and stack traces. Printing to the log can itself touch
> the watchdog and so distort the very events being measured, and
> keeping stall reports out of the log also means they cannot
> distract from genuine lockup messages when users read it.
> 
> A two-buffer scheme ensures that trace information can always be
> recorded without the writer ever having to wait for a reader.
> 
> Signed-off-by: Alex Neronskiy <zakmagnus@chromium.org>
> ---
> Changes from last version per Peter's suggestions: different
> locks, refactoring, ACCESS_ONCE.

I think I am going to ignore these patches for now until I see if you can
solve things using Ingo's idea.

Cheers,
Don
