git.neil.brown.name Git - history.git/commitdiff
[PATCH] sched: fix preemption race (Core/i386)
authorThomas Gleixner <tglx@linutronix.de>
Wed, 2 Feb 2005 00:52:55 +0000 (16:52 -0800)
committerLinus Torvalds <torvalds@ppc970.osdl.org>
Wed, 2 Feb 2005 00:52:55 +0000 (16:52 -0800)
The idle-thread-preemption-fix.patch introduced a race, which is not
critical, but might give us an extra turn through the scheduler.  When
interrupts are re-enabled in entry.c and an interrupt occurs before we
reach the add_preempt_schedule() in preempt_schedule we get rescheduled
again in the return from interrupt path.

The patch prevents this by leaving interrupts disabled and calling a
separate function preempt_schedule_irq().

This split adds different plausibility checks for irq context calls and
kernel calls.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
arch/i386/kernel/entry.S
kernel/sched.c

index 8bda886f21db6c4a9e0fc14a70c72c60e3d9a349..f15856e405341c04ec1dfd63e8fb3e237f03ab65 100644 (file)
@@ -176,6 +176,7 @@ ENTRY(resume_userspace)
 
 #ifdef CONFIG_PREEMPT
 ENTRY(resume_kernel)
+       cli
        cmpl $0,TI_preempt_count(%ebp)  # non-zero preempt_count ?
        jnz restore_all
 need_resched:
@@ -184,10 +185,7 @@ need_resched:
        jz restore_all
        testl $IF_MASK,EFLAGS(%esp)     # interrupts off (exception path) ?
        jz restore_all
-       sti
-       call preempt_schedule
-       cli
-       movl $0,TI_preempt_count(%ebp)
+       call preempt_schedule_irq
        jmp need_resched
 #endif
 
index 3cb1b2a6b5212f3a6c30c6ca95dd76cadc84989a..f708d10e775033a3a2c5b366138078bc68d3890f 100644 (file)
@@ -2867,6 +2867,48 @@ need_resched:
 }
 
 EXPORT_SYMBOL(preempt_schedule);
+
+/*
+ * this is is the entry point to schedule() from kernel preemption
+ * off of irq context.
+ * Note, that this is called and return with irqs disabled. This will
+ * protect us against recursive calling from irq.
+ */
+asmlinkage void __sched preempt_schedule_irq(void)
+{
+       struct thread_info *ti = current_thread_info();
+#ifdef CONFIG_PREEMPT_BKL
+       struct task_struct *task = current;
+       int saved_lock_depth;
+#endif
+       /* Catch callers which need to be fixed*/
+       BUG_ON(ti->preempt_count || !irqs_disabled());
+
+need_resched:
+       add_preempt_count(PREEMPT_ACTIVE);
+       /*
+        * We keep the big kernel semaphore locked, but we
+        * clear ->lock_depth so that schedule() doesnt
+        * auto-release the semaphore:
+        */
+#ifdef CONFIG_PREEMPT_BKL
+       saved_lock_depth = task->lock_depth;
+       task->lock_depth = -1;
+#endif
+       local_irq_enable();
+       schedule();
+       local_irq_disable();
+#ifdef CONFIG_PREEMPT_BKL
+       task->lock_depth = saved_lock_depth;
+#endif
+       sub_preempt_count(PREEMPT_ACTIVE);
+
+       /* we could miss a preemption opportunity between schedule and now */
+       barrier();
+       if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
+               goto need_resched;
+}
+
 #endif /* CONFIG_PREEMPT */
 
 int default_wake_function(wait_queue_t *curr, unsigned mode, int sync, void *key)