linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Mike Galbraith <umgwanakikbuti@gmail.com>
To: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: linux-rt-users <linux-rt-users@vger.kernel.org>,
	LKML <linux-kernel@vger.kernel.org>,
	Thomas Gleixner <tglx@linutronix.de>,
	rostedt@goodmis.org, John Kacur <jkacur@redhat.com>
Subject: [patch] rt/sched: fix recursion when CONTEXT_TRACKING and PREEMPT_LAZY are enabled
Date: Fri, 16 May 2014 15:53:31 +0200	[thread overview]
Message-ID: <1400248411.5877.10.camel@marge.simpson.net> (raw)
In-Reply-To: <20140513154002.GD15049@linutronix.de>

On Tue, 2014-05-13 at 17:40 +0200, Sebastian Andrzej Siewior wrote: 
> * Mike Galbraith | 2014-05-10 06:15:03 [+0200]:
> 
> >On Fri, 2014-05-09 at 20:12 +0200, Sebastian Andrzej Siewior wrote:
> >
> >> Known issues:
> >> 
> >>       - bcache is disabled.
> >> 
> >>       - lazy preempt on x86_64 leads to a crash with some load.
> >
> >That is only with NO_HZ_FULL enabled here.  Box blows the stack during
> >task exit, eyeballing hasn't spotted the why.
> 
> Even if I disable NO_HZ_FULL it explodes as soon as hackbench starts.

Ah, you didn't turn CONTEXT_TRACKING off too.  The below made the dirty
little SOB die here.

If context tracking is enabled, we can recurse, and explode violently.
Add missing checks to preempt_schedule_context().

Fix other inconsistencies spotted while searching.

Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
---
 arch/x86/include/asm/thread_info.h |    1 +
 include/linux/preempt.h            |    2 +-
 include/linux/preempt_mask.h       |   10 ++++++++--
 kernel/context_tracking.c          |    2 +-
 kernel/fork.c                      |    1 +
 kernel/sched/core.c                |   16 +++++-----------
 kernel/sched/fair.c                |    2 +-
 7 files changed, 18 insertions(+), 16 deletions(-)

--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -51,6 +51,7 @@ struct thread_info {
 	.flags		= 0,			\
 	.cpu		= 0,			\
 	.saved_preempt_count = INIT_PREEMPT_COUNT,	\
+	.preempt_lazy_count = 0,		\
 	.addr_limit	= KERNEL_DS,		\
 	.restart_block = {			\
 		.fn = do_no_restart_syscall,	\
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -91,8 +91,8 @@ do { \
 
 #define preempt_lazy_enable() \
 do { \
-	dec_preempt_lazy_count(); \
 	barrier(); \
+	dec_preempt_lazy_count(); \
 	preempt_check_resched(); \
 } while (0)
 
--- a/include/linux/preempt_mask.h
+++ b/include/linux/preempt_mask.h
@@ -118,9 +118,15 @@ extern int in_serving_softirq(void);
 		((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)
 
 #ifdef CONFIG_PREEMPT_COUNT
-# define preemptible()	(preempt_count() == 0 && !irqs_disabled())
+# define preemptible()		(preempt_count() == 0 && !irqs_disabled())
+#ifdef CONFIG_PREEMPT_LAZY
+# define preemptible_lazy()	(preempt_lazy_count() !=0 && !need_resched_now())
 #else
-# define preemptible()	0
+# define preemptible_lazy()	1
+#endif
+#else
+# define preemptible()		0
+# define preemptible_lazy()	0
 #endif
 
 #endif /* LINUX_PREEMPT_MASK_H */
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -124,7 +124,7 @@ asmlinkage void __sched notrace preempt_
 {
 	enum ctx_state prev_ctx;
 
-	if (likely(!preemptible()))
+	if (likely(!preemptible() || !preemptible_lazy()))
 		return;
 
 	/*
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -329,6 +329,7 @@ static struct task_struct *dup_task_stru
 	setup_thread_stack(tsk, orig);
 	clear_user_return_notifier(tsk);
 	clear_tsk_need_resched(tsk);
+	clear_tsk_need_resched_lazy(tsk);
 	stackend = end_of_stack(tsk);
 	*stackend = STACK_END_MAGIC;	/* for overflow detection */
 
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2861,8 +2861,8 @@ void migrate_enable(void)
 		p->migrate_disable = 0;
 
 	unpin_current_cpu();
-	preempt_enable();
 	preempt_lazy_enable();
+	preempt_enable();
 }
 EXPORT_SYMBOL(migrate_enable);
 #else
@@ -3096,19 +3096,13 @@ asmlinkage void __sched notrace preempt_
 {
 	/*
 	 * If there is a non-zero preempt_count or interrupts are disabled,
-	 * we do not want to preempt the current task. Just return..
+	 * we do not want to preempt the current task. Just return.  For
+	 * lazy preemption we also check for non-zero preempt_count_lazy,
+	 * and bail if no immediate preemption is required.
 	 */
-	if (likely(!preemptible()))
+	if (likely(!preemptible() || !preemptible_lazy()))
 		return;
 
-#ifdef CONFIG_PREEMPT_LAZY
-	/*
-	 * Check for lazy preemption
-	 */
-	if (current_thread_info()->preempt_lazy_count &&
-			!test_thread_flag(TIF_NEED_RESCHED))
-		return;
-#endif
 	do {
 		__preempt_count_add(PREEMPT_ACTIVE);
 		/*
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4446,7 +4446,7 @@ static void check_preempt_wakeup(struct
 	 * prevents us from potentially nominating it as a false LAST_BUDDY
 	 * below.
 	 */
-	if (test_tsk_need_resched(curr))
+	if (test_tsk_need_resched(curr) || test_tsk_need_resched_lazy(curr))
 		return;
 
 	/* Idle tasks are by definition preempted by non-idle tasks. */



  parent reply	other threads:[~2014-05-16 13:53 UTC|newest]

Thread overview: 16+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2014-05-09 18:12 [ANNOUNCE] 3.14.3-rt5 Sebastian Andrzej Siewior
2014-05-09 22:54 ` Pavel Vasilyev
2014-05-13 15:33   ` Sebastian Andrzej Siewior
2014-05-10  4:15 ` Mike Galbraith
2014-05-13 15:40   ` Sebastian Andrzej Siewior
2014-05-14  3:10     ` Mike Galbraith
2014-05-16 13:53     ` Mike Galbraith [this message]
2014-05-25  8:16       ` [patch v2] rt/sched: fix recursion when CONTEXT_TRACKING and PREEMPT_LAZY are enabled Mike Galbraith
2014-05-13 13:30 ` [ANNOUNCE] 3.14.3-rt5 Juri Lelli
2015-02-16 11:29   ` Sebastian Andrzej Siewior
2015-02-16 12:34     ` Juri Lelli
     [not found] ` <1400297819.9493.11.camel@marge.simpson.net>
2014-05-27 18:18   ` [PATCH 3.14-rt] sched/numa: Fix task_numa_free() lockdep splat Steven Rostedt
2014-05-27 18:25     ` Peter Zijlstra
2014-05-27 18:52       ` Steven Rostedt
2014-05-27 18:53         ` Steven Rostedt
2014-06-05 14:33       ` [tip:sched/urgent] sched/numa: Fix use of spin_{un}lock_irq() when interrupts are disabled tip-bot for Steven Rostedt

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1400248411.5877.10.camel@marge.simpson.net \
    --to=umgwanakikbuti@gmail.com \
    --cc=bigeasy@linutronix.de \
    --cc=jkacur@redhat.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-rt-users@vger.kernel.org \
    --cc=rostedt@goodmis.org \
    --cc=tglx@linutronix.de \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).