*/
struct runqueue {
spinlock_t lock;
+ spinlock_t frozen;
unsigned long nr_running, nr_switches, expired_timestamp;
task_t *curr, *idle;
prio_array_t *active, *expired, arrays[2];
#if CONFIG_SMP || CONFIG_PREEMPT
asmlinkage void schedule_tail(void)
{
+ /*
+  * schedule() now hands the CPU to the next task while holding
+  * rq->frozen (it drops rq->lock before context_switch() — see the
+  * spin_lock(&rq->frozen)/spin_unlock(&rq->lock) pairing there), so
+  * the first unlock executed in the new task's context must release
+  * ->frozen, not ->lock, to match.
+  */
- spin_unlock_irq(&this_rq()->lock);
+ spin_unlock_irq(&this_rq()->frozen);
}
#endif
if (likely(prev != next)) {
rq->nr_switches++;
rq->curr = next;
+ spin_lock(&rq->frozen);
+ spin_unlock(&rq->lock);
+
context_switch(prev, next);
+
/*
* The runqueue pointer might be from another CPU
* if the new task was last running on a different
* CPU - thus re-load it.
*/
- barrier();
+ mb();
rq = this_rq();
+ spin_unlock_irq(&rq->frozen);
+ } else {
+ spin_unlock_irq(&rq->lock);
}
- spin_unlock_irq(&rq->lock);
reacquire_kernel_lock(current);
preempt_enable_no_resched();
rq->active = rq->arrays;
rq->expired = rq->arrays + 1;
spin_lock_init(&rq->lock);
+ spin_lock_init(&rq->frozen);
INIT_LIST_HEAD(&rq->migration_queue);
for (j = 0; j < 2; j++) {