* [PATCH 2.6.10-mm2 Resend] Fix preemption race [1/3] (Core/i386)
@ 2005-01-12 11:49 tglx
2005-01-12 11:57 ` [PATCH 2.6.10-mm2 Resend] Make use of preempt_schedule_irq [2/3] (PPC) Thomas Gleixner
` (2 more replies)
0 siblings, 3 replies; 4+ messages in thread
From: tglx @ 2005-01-12 11:49 UTC (permalink / raw)
To: akpm; +Cc: mingo, linux-kernel
The idle-thread-preemption-fix.patch introduced a race, which is not
critical, but might give us an extra turn through the scheduler. When
interrupts are reenabled in entry.c and an interrupt occurs before we
reach the add_preempt_schedule() in preempt_schedule we get rescheduled
again in the return from interrupt path.
The patch prevents this by leaving interrupts disabled and calling
a separate function preempt_schedule_irq().
This split adds different plausibility checks for irq context calls
and kernel calls.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/i386/kernel/entry.S | 6 ++----
kernel/sched.c | 42 ++++++++++++++++++++++++++++++++++++++++++
2 files changed, 44 insertions(+), 4 deletions(-)
---
Index: 2.6.10-mm2/kernel/sched.c
===================================================================
--- 2.6.10-mm2/kernel/sched.c (revision 148)
+++ 2.6.10-mm2/kernel/sched.c (working copy)
@@ -2870,6 +2870,48 @@
}
EXPORT_SYMBOL(preempt_schedule);
+
+/*
+ * this is is the entry point to schedule() from kernel preemption
+ * off of irq context.
+ * Note, that this is called and return with irqs disabled. This will
+ * protect us against recursive calling from irq.
+ */
+asmlinkage void __sched preempt_schedule_irq(void)
+{
+ struct thread_info *ti = current_thread_info();
+#ifdef CONFIG_PREEMPT_BKL
+ struct task_struct *task = current;
+ int saved_lock_depth;
+#endif
+ /* Catch callers which need to be fixed*/
+ BUG_ON(ti->preempt_count || !irqs_disabled());
+
+need_resched:
+ add_preempt_count(PREEMPT_ACTIVE);
+ /*
+ * We keep the big kernel semaphore locked, but we
+ * clear ->lock_depth so that schedule() doesnt
+ * auto-release the semaphore:
+ */
+#ifdef CONFIG_PREEMPT_BKL
+ saved_lock_depth = task->lock_depth;
+ task->lock_depth = -1;
+#endif
+ local_irq_enable();
+ schedule();
+ local_irq_disable();
+#ifdef CONFIG_PREEMPT_BKL
+ task->lock_depth = saved_lock_depth;
+#endif
+ sub_preempt_count(PREEMPT_ACTIVE);
+
+ /* we could miss a preemption opportunity between schedule and now */
+ barrier();
+ if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
+ goto need_resched;
+}
+
#endif /* CONFIG_PREEMPT */
int default_wake_function(wait_queue_t *curr, unsigned mode, int sync, void *key)
Index: 2.6.10-mm2/arch/i386/kernel/entry.S
===================================================================
--- 2.6.10-mm2/arch/i386/kernel/entry.S (revision 148)
+++ 2.6.10-mm2/arch/i386/kernel/entry.S (working copy)
@@ -189,6 +189,7 @@
#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
+ cli
cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ?
jnz restore_all
need_resched:
@@ -197,10 +198,7 @@
jz restore_all
testl $IF_MASK,EFLAGS(%esp) # interrupts off (exception path) ?
jz restore_all
- sti
- call preempt_schedule
- cli
- movl $0,TI_preempt_count(%ebp)
+ call preempt_schedule_irq();
jmp need_resched
#endif
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH 2.6.10-mm2 Resend] Make use of preempt_schedule_irq [2/3] (PPC)
2005-01-12 11:49 [PATCH 2.6.10-mm2 Resend] Fix preemption race [1/3] (Core/i386) tglx
@ 2005-01-12 11:57 ` Thomas Gleixner
2005-01-12 12:08 ` [PATCH 2.6.10-mm2 Resend] Make use of preempt_schedule_irq [3/3] (ARM) Thomas Gleixner
2005-01-13 0:05 ` [PATCH 2.6.10-mm2 Resend] Fix preemption race [1/3] (Core/i386) Andrew Morton
2 siblings, 0 replies; 4+ messages in thread
From: Thomas Gleixner @ 2005-01-12 11:57 UTC (permalink / raw)
To: Tom Rini; +Cc: Ingo Molnar, LKML, Andrew Morton
Make use of the new preempt_schedule_irq function.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
entry.S | 12 +-----------
1 files changed, 1 insertion(+), 11 deletions(-)
---
Index: 2.6.10-mm2/arch/ppc/kernel/entry.S
===================================================================
--- 2.6.10-mm2/arch/ppc/kernel/entry.S (revision 148)
+++ 2.6.10-mm2/arch/ppc/kernel/entry.S (working copy)
@@ -624,18 +624,8 @@
beq+ restore
andi. r0,r3,MSR_EE /* interrupts off? */
beq restore /* don't schedule if so */
-1: lis r0,PREEMPT_ACTIVE@h
- stw r0,TI_PREEMPT(r9)
- ori r10,r10,MSR_EE
- SYNC
- MTMSRD(r10) /* hard-enable interrupts */
- bl schedule
- LOAD_MSR_KERNEL(r10,MSR_KERNEL)
- SYNC
- MTMSRD(r10) /* disable interrupts */
+1: bl preempt_schedule_irq
rlwinm r9,r1,0,0,18
- li r0,0
- stw r0,TI_PREEMPT(r9)
lwz r3,TI_FLAGS(r9)
andi. r0,r3,_TIF_NEED_RESCHED
bne- 1b
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH 2.6.10-mm2 Resend] Make use of preempt_schedule_irq [3/3] (ARM)
2005-01-12 11:49 [PATCH 2.6.10-mm2 Resend] Fix preemption race [1/3] (Core/i386) tglx
2005-01-12 11:57 ` [PATCH 2.6.10-mm2 Resend] Make use of preempt_schedule_irq [2/3] (PPC) Thomas Gleixner
@ 2005-01-12 12:08 ` Thomas Gleixner
2005-01-13 0:05 ` [PATCH 2.6.10-mm2 Resend] Fix preemption race [1/3] (Core/i386) Andrew Morton
2 siblings, 0 replies; 4+ messages in thread
From: Thomas Gleixner @ 2005-01-12 12:08 UTC (permalink / raw)
To: Russell King - ARM Linux; +Cc: Ingo Molnar, LKML, Andrew Morton
[-- Attachment #1: Type: text/plain, Size: 106 bytes --]
Make use of the new preempt_schedule_irq function.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
[-- Attachment #2: entry-armv.S.diff --]
[-- Type: text/x-patch, Size: 800 bytes --]
Index: 2.6.10-mm2-lockinit/arch/arm/kernel/entry-armv.S
===================================================================
--- 2.6.10-mm2-lockinit/arch/arm/kernel/entry-armv.S (revision 148)
+++ 2.6.10-mm2-lockinit/arch/arm/kernel/entry-armv.S (working copy)
@@ -136,11 +136,9 @@
ldr r1, [r6, #8] @ local_bh_count
adds r0, r0, r1
movne pc, lr
- mov r7, #PREEMPT_ACTIVE
- str r7, [r8, #TI_PREEMPT] @ set PREEMPT_ACTIVE
-1: enable_irq r2 @ enable IRQs
- bl schedule
- disable_irq r0 @ disable IRQs
+ mov r7, #0 @ preempt_schedule_irq
+ str r7, [r8, #TI_PREEMPT] @ expects preempt_count == 0
+1: bl preempt_schedule_irq @ irq en/disable is done inside
ldr r0, [r8, #TI_FLAGS] @ get new tasks TI_FLAGS
tst r0, #_TIF_NEED_RESCHED
beq preempt_return @ go again
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH 2.6.10-mm2 Resend] Fix preemption race [1/3] (Core/i386)
2005-01-12 11:49 [PATCH 2.6.10-mm2 Resend] Fix preemption race [1/3] (Core/i386) tglx
2005-01-12 11:57 ` [PATCH 2.6.10-mm2 Resend] Make use of preempt_schedule_irq [2/3] (PPC) Thomas Gleixner
2005-01-12 12:08 ` [PATCH 2.6.10-mm2 Resend] Make use of preempt_schedule_irq [3/3] (ARM) Thomas Gleixner
@ 2005-01-13 0:05 ` Andrew Morton
2 siblings, 0 replies; 4+ messages in thread
From: Andrew Morton @ 2005-01-13 0:05 UTC (permalink / raw)
To: tglx; +Cc: mingo, linux-kernel
tglx@linutronix.de wrote:
>
> --- 2.6.10-mm2/arch/i386/kernel/entry.S (revision 148)
> +++ 2.6.10-mm2/arch/i386/kernel/entry.S (working copy)
> @@ -189,6 +189,7 @@
>
> #ifdef CONFIG_PREEMPT
> ENTRY(resume_kernel)
> + cli
> cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ?
> jnz restore_all
> need_resched:
> @@ -197,10 +198,7 @@
> jz restore_all
> testl $IF_MASK,EFLAGS(%esp) # interrupts off (exception path) ?
> jz restore_all
> - sti
> - call preempt_schedule
> - cli
> - movl $0,TI_preempt_count(%ebp)
> + call preempt_schedule_irq();
whee, who needs a C compiler anyway? Did that actually assemble?
I'll fix that one up ;)
^ permalink raw reply [flat|nested] 4+ messages in thread
end of thread, other threads:[~2005-01-13 0:06 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2005-01-12 11:49 [PATCH 2.6.10-mm2 Resend] Fix preemption race [1/3] (Core/i386) tglx
2005-01-12 11:57 ` [PATCH 2.6.10-mm2 Resend] Make use of preempt_schedule_irq [2/3] (PPC) Thomas Gleixner
2005-01-12 12:08 ` [PATCH 2.6.10-mm2 Resend] Make use of preempt_schedule_irq [3/3] (ARM) Thomas Gleixner
2005-01-13 0:05 ` [PATCH 2.6.10-mm2 Resend] Fix preemption race [1/3] (Core/i386) Andrew Morton
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).