- init thread needs to have preempt_count of 1 until sched_init().
(William Lee Irwin III)
- clean up the irq-mask macros. (Linus)
- add barrier() to irq_enter() and irq_exit(). (based on Oleg Nesterov's
comment.)
- move the irqs-off check into preempt_schedule() and remove
CONFIG_DEBUG_IRQ_SCHEDULE.
- remove spin_unlock_no_resched() and comment the affected places more
  aggressively.
- slab.c needs to spin_unlock_no_resched(), instead of spin_unlock(). (It
also has to check for preemption in the right spot.) This should fix
the memory corruption.
- irq_exit() needs to run softirqs if interrupts not active - in the
previous patch it ran them when preempt_count() was 0, which is
incorrect.
- spinlock macros are updated to enable preemption after enabling
interrupts. Besides avoiding false positive warnings, this also
- fork.c has to call scheduler_tick() with preemption disabled -
otherwise scheduler_tick()'s spin_unlock can preempt!
- irqs_disabled() macro introduced.
- [ all other local_irq_enable() or sti instances conditional on
CONFIG_DEBUG_IRQ_SCHEDULE are to fix false positive warnings. ]
- fix buggy in_softirq(). Fortunately the bug made the test broader,
  which didn't result in algorithmic breakage, just suboptimal
performance.
- move do_softirq() processing into irq_exit() => this also fixes the
softirq processing bugs present in apic.c IRQ handlers that did not
test for softirqs after irq_exit().
- simplify local_bh_enable().
apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
- local_save_flags(flags);
- local_irq_disable();
+ local_irq_save(flags);
disable_local_APIC();
rdmsr(MSR_IA32_APICBASE, l, h);
l &= ~MSR_IA32_APICBASE_ENABLE;
unsigned int l, h;
unsigned long flags;
- local_save_flags(flags);
- local_irq_disable();
+ local_irq_save(flags);
rdmsr(MSR_IA32_APICBASE, l, h);
l &= ~MSR_IA32_APICBASE_BASE;
l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
irq_enter();
smp_local_timer_interrupt(®s);
irq_exit();
-
- if (softirq_pending(cpu))
- do_softirq();
}
/*
# userspace resumption stub bypassing syscall exit tracing
ALIGN
-ret_from_intr:
ret_from_exception:
+ preempt_stop
+ret_from_intr:
+ GET_THREAD_INFO(%ebx)
movl EFLAGS(%esp), %eax # mix EFLAGS and CS
movb CS(%esp), %al
testl $(VM_MASK | 3), %eax
work_notifysig: # deal with pending signals and
# notify-resume requests
- testl $(VM_MASK),EFLAGS(%esp)
+ testl $VM_MASK, EFLAGS(%esp)
movl %esp, %eax
jne work_notifysig_v86 # returning to kernel-space or
# vm86-space
common_interrupt:
SAVE_ALL
call do_IRQ
- GET_THREAD_INFO(%ebx)
jmp ret_from_intr
#define BUILD_INTERRUPT(name, nr) \
ENTRY(name) \
pushl $nr-256; \
SAVE_ALL \
- GET_THREAD_INFO(%ebx); \
call smp_/**/name; \
jmp ret_from_intr;
movl $(__KERNEL_DS), %edx
movl %edx, %ds
movl %edx, %es
- GET_THREAD_INFO(%ebx)
call *%edi
addl $8, %esp
- preempt_stop
jmp ret_from_exception
ENTRY(coprocessor_error)
pushl $0 # temporary storage for ORIG_EIP
call math_emulate
addl $4, %esp
- preempt_stop
jmp ret_from_exception
ENTRY(debug)
#if CONFIG_SMP
inline void synchronize_irq(unsigned int irq)
{
+ /* is there anything to synchronize with? */
+ if (!irq_desc[irq].action)
+ return;
+
while (irq_desc[irq].status & IRQ_INPROGRESS)
cpu_relax();
}
irq_exit();
- if (softirq_pending(cpu))
- do_softirq();
return 1;
}
static void set_mtrr_prepare_save (struct set_mtrr_context *ctxt)
{
/* Disable interrupts locally */
- local_save_flags (ctxt->flags); local_irq_disable ();
+ local_irq_save(ctxt->flags);
if ( mtrr_if != MTRR_IF_INTEL && mtrr_if != MTRR_IF_CYRIX_ARR )
return;
arr = CX86_ARR_BASE + (reg << 1) + reg; /* avoid multiplication by 3 */
/* Save flags and disable interrupts */
- local_save_flags (flags); local_irq_disable ();
+ local_irq_save(flags);
ccr3 = getCx86 (CX86_CCR3);
setCx86 (CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
unsigned long cfg;
unsigned long flags;
- local_save_flags(flags);
- local_irq_disable();
-
+ local_irq_save(flags);
/*
* Wait for idle.
* should be modified to do 1 message per cluster ID - mbligh
*/
- local_save_flags(flags);
- local_irq_disable();
+ local_irq_save(flags);
for (query_cpu = 0; query_cpu < NR_CPUS; ++query_cpu) {
query_mask = 1 << query_cpu;
unsigned int tmp;
unsigned long flags;
- local_save_flags(flags); local_irq_disable();
+ local_irq_save(flags);
/*
* Check if configuration type 1 works.
unsigned long entry; /* %edx */
unsigned long flags;
- local_save_flags(flags); local_irq_disable();
+ local_irq_save(flags);
__asm__("lcall *(%%edi); cld"
: "=a" (return_code),
"=b" (address),
if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
pci_indirect.address = pcibios_entry + PAGE_OFFSET;
- local_save_flags(flags); local_irq_disable();
+ local_irq_save(flags);
__asm__(
"lcall *(%%edi); cld\n\t"
"jc 1f\n\t"
{
unsigned long flags;
- local_save_flags(flags); local_irq_disable(); // FIXME: is this safe?
+ local_irq_save(flags); // FIXME: is this safe?
if (tty->ldisc.flush_buffer)
tty->ldisc.flush_buffer(tty);
if (tty->driver.flush_buffer)
fp = tty->flip.flag_buf + TTY_FLIPBUF_SIZE;
tty->flip.buf_num = 0;
- local_save_flags(flags); local_irq_disable(); // FIXME: is this safe?
+ local_irq_save(flags); // FIXME: is this safe?
tty->flip.char_buf_ptr = tty->flip.char_buf;
tty->flip.flag_buf_ptr = tty->flip.flag_buf;
} else {
fp = tty->flip.flag_buf;
tty->flip.buf_num = 1;
- local_save_flags(flags); local_irq_disable(); // FIXME: is this safe?
+ local_irq_save(flags); // FIXME: is this safe?
tty->flip.char_buf_ptr = tty->flip.char_buf + TTY_FLIPBUF_SIZE;
tty->flip.flag_buf_ptr = tty->flip.flag_buf + TTY_FLIPBUF_SIZE;
}
if (hz > 20 && hz < 32767)
count = 1193180 / hz;
- local_save_flags(flags); // FIXME: is this safe?
- local_irq_disable();
+ local_irq_save(flags); // FIXME: is this safe?
del_timer(&sound_timer);
if (count) {
/* enable counter 2 */
{
unsigned long flags;
- local_save_flags(flags); // FIXME: is this safe?
- local_irq_disable();
-
#if 0
- if (__MOD_IN_USE(ata_ops(drive)->owner)) {
- local_irq_restore(flags); // FIXME: is this safe?
+ if (__MOD_IN_USE(ata_ops(drive)->owner))
return 1;
- }
#endif
- if (drive->usage || drive->busy || !ata_ops(drive)) {
- local_irq_restore(flags); // FIXME: is this safe?
+ if (drive->usage || drive->busy || !ata_ops(drive))
return 1;
- }
#if defined(CONFIG_BLK_DEV_ISAPNP) && defined(CONFIG_ISAPNP) && defined(MODULE)
pnpide_init(0);
#endif
drive->driver = NULL;
- local_irq_restore(flags); // FIXME: is this safe?
-
return 0;
}
t = gameport_time(gameport, A3D_MAX_START);
s = gameport_time(gameport, A3D_MAX_STROBE);
- local_save_flags(flags);
- local_irq_disable();
+ local_irq_save(flags);
gameport_trigger(gameport);
v = gameport_read(gameport);
s[i] = 0;
}
- local_save_flags(flags);
- local_irq_disable();
+ local_irq_save(flags);
gameport_trigger(gameport);
v = z = gameport_read(gameport);
loopout = (ANALOG_LOOP_TIME * port->loop) / 1000;
timeout = ANALOG_MAX_TIME * port->speed;
- local_save_flags(flags);
- local_irq_disable();
+ local_irq_save(flags);
gameport_trigger(gameport);
GET_TIME(now);
local_irq_restore(flags);
t[i] = COBRA_MAX_STROBE;
}
- local_save_flags(flags);
- local_irq_disable();
+ local_irq_save(flags);
u = gameport_read(gameport);
* Request the pad to transmit data
*/
- local_save_flags(flags);
- local_irq_disable();
+ local_irq_save(flags);
for (i = 0; i < GC_N64_REQUEST_LENGTH; i++) {
parport_write_data(gc->pd->port, GC_N64_POWER_W | ((GC_N64_REQUEST >> i) & 1 ? GC_N64_OUT : 0));
udelay(GC_N64_DWS);
parport_write_data(gc->pd->port, GC_PSX_CLOCK | GC_PSX_POWER); /* Deselect, begin command */
udelay(GC_PSX_DELAY * 2);
- local_save_flags(flags);
- local_irq_disable();
+ local_irq_save(flags);
gc_psx_command(gc, 0x01); /* Access pad */
id = gc_psx_command(gc, 0x42); /* Get device id */
i = 0;
- local_save_flags(flags);
- local_irq_disable();
+ local_irq_save(flags);
gameport_trigger(gameport);
v = gameport_read(gameport);;
unsigned long flags;
int i, t;
- local_save_flags(flags);
- local_irq_disable();
+ local_irq_save(flags);
i = 0;
do {
t = strobe;
i = 0;
- local_save_flags(flags);
- local_irq_disable();
+ local_irq_save(flags);
v = gameport_read(gameport) >> shift;
status = buf = i = j = 0;
t = strobe;
- local_save_flags(flags);
- local_irq_disable();
+ local_irq_save(flags);
v = w = (gameport_read(gameport) >> shift) & 3;
t = gameport_time(gameport, GUILLEMOT_MAX_START);
s = gameport_time(gameport, GUILLEMOT_MAX_STROBE);
- local_save_flags(flags);
- local_irq_disable();
+ local_irq_save(flags);
gameport_trigger(gameport);
v = gameport_read(gameport);
t = gameport_time(gameport, INTERACT_MAX_START);
s = gameport_time(gameport, INTERACT_MAX_STROBE);
- local_save_flags(flags);
- local_irq_disable();
+ local_irq_save(flags);
gameport_trigger(gameport);
v = gameport_read(gameport);
pending = 0;
sched = 0;
- local_save_flags(flags); /* Quiet, please */
- local_irq_disable();
+ local_irq_save(flags); /* Quiet, please */
gameport_trigger(gameport); /* Trigger */
v = gameport_read(gameport);
unsigned long flags;
int i, t;
- local_save_flags(flags);
- local_irq_disable();
+ local_irq_save(flags);
i = 0;
do {
i[k] = j[k] = 0;
}
- local_save_flags(flags);
- local_irq_disable();
+ local_irq_save(flags);
gameport_trigger(gameport);
w = gameport_read(gameport) >> 4;
* Don't take interrupts on this CPU will bit banging
* the %#%#@$ I2C device
*/
- local_save_flags(flags);
- local_irq_disable();
+ local_irq_save(flags);
eeprom_start(regs);
#define spin_trylock_irqsave(lock, flags) \
({ \
int success; \
- local_save_flags(flags); \
- local_irq_disable(); \
+ local_irq_save(flags); \
success = spin_trylock(lock); \
if (success == 0) \
local_irq_restore(flags); \
* function we resestablish the old environment.
*
* Note : as we don't need a system wide lock, therefore
- * we shouldn't use cli(), but local_irq_disable() as this
+ * we shouldn't use cli(), but local_irq_save() as this
* affects the current CPU only.
*/
- local_save_flags (flags);
- local_irq_disable ();
+ local_irq_save(flags);
/*
* disable all interrupts
* also require to run disabled.
*
* Note : as no global lock is required, we must not use
- * cli(), but local_irq_disable() instead.
+ * cli(), but local_irq_save() instead.
*/
- local_save_flags (flags);
- local_irq_disable ();
+ local_irq_save(flags);
rdc_ccw = &ioinfo[irq]->senseccw;
int emulated = 0; /* no i/O handler installed */
int retry = 5; /* retry count */
- local_save_flags (flags);
- local_irq_disable ();
+ local_irq_save(flags);
if (!ioinfo[irq]->ui.flags.ready) {
pdevstat = &devstat;
found = 0; /* init ... */
- local_save_flags(flags);
- local_irq_disable();
+ local_irq_save(flags);
do {
printk("buffer layer error at %s:%d\n", file, line);
#ifdef CONFIG_X86
printk("Pass this trace through ksymoops for reporting\n");
- {
- extern void show_stack(long *esp);
- show_stack(0);
- }
+ show_stack(0);
#endif
}
EXPORT_SYMBOL(__buffer_error);
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
-#define IRQ_OFFSET 64
+/*
+ * We put the hardirq and softirq counter into the preemption
+ * counter. The bitmask has the following meaning:
+ *
+ * - bits 0-7 are the preemption count (max preemption depth: 256)
+ * - bits 8-15 are the softirq count (max # of softirqs: 256)
+ * - bits 16-23 are the hardirq count (max # of hardirqs: 256)
+ *
+ * - ( bit 26 is the PREEMPT_ACTIVE flag. )
+ *
+ * PREEMPT_MASK: 0x000000ff
+ * HARDIRQ_MASK: 0x0000ff00
+ * SOFTIRQ_MASK: 0x00ff0000
+ * IRQ_MASK: 0x00ffff00
+ */
+
+#define PREEMPT_BITS 8
+#define SOFTIRQ_BITS 8
+#define HARDIRQ_BITS 8
+
+#define PREEMPT_SHIFT 0
+#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
+#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
+
+#define __MASK(x) ((1UL << (x))-1)
+
+#define PREEMPT_MASK (__MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
+#define HARDIRQ_MASK (__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
+#define SOFTIRQ_MASK (__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
+
+#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
+#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
+#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))
+
+#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
+#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
+#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
+
+#define IRQ_MASK (HARDIRQ_MASK | SOFTIRQ_MASK)
+
+/*
+ * The hardirq mask has to be large enough to have
+ * space for potentially all IRQ sources in the system
+ * nesting on a single CPU:
+ */
+#if (1 << HARDIRQ_BITS) < NR_IRQS
+# error HARDIRQ_BITS is too low!
+#endif
/*
- * Are we in an interrupt context? Either doing bottom half
- * or hardware interrupt processing?
+ * Are we doing bottom half or hardware interrupt processing?
+ * Are we in a softirq context? Interrupt context?
*/
-#define in_interrupt() \
- ((preempt_count() & ~PREEMPT_ACTIVE) >= IRQ_OFFSET)
+#define in_irq() (hardirq_count())
+#define in_softirq() (softirq_count())
+#define in_interrupt() (irq_count())
-#define in_irq in_interrupt
#define hardirq_trylock() (!in_interrupt())
#define hardirq_endlock() do { } while (0)
-#define irq_enter() (preempt_count() += IRQ_OFFSET)
-#define irq_exit() (preempt_count() -= IRQ_OFFSET)
+#define irq_enter() (preempt_count() += HARDIRQ_OFFSET)
+
+#if CONFIG_PREEMPT
+# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
+#else
+# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
+#endif
+#define irq_exit() \
+do { \
+ preempt_count() -= IRQ_EXIT_OFFSET; \
+ if (!in_interrupt() && softirq_pending(smp_processor_id())) \
+ do_softirq(); \
+ preempt_enable_no_resched(); \
+} while (0)
#ifndef CONFIG_SMP
# define synchronize_irq(irq) barrier()
extern void synchronize_irq(unsigned int irq);
#endif /* CONFIG_SMP */
+extern void show_stack(unsigned long * esp);
+
#endif /* __ASM_HARDIRQ_H */
#include <asm/hardirq.h>
#define local_bh_disable() \
- do { preempt_count() += IRQ_OFFSET; barrier(); } while (0)
+ do { preempt_count() += SOFTIRQ_OFFSET; barrier(); } while (0)
#define __local_bh_enable() \
- do { barrier(); preempt_count() -= IRQ_OFFSET; } while (0)
+ do { barrier(); preempt_count() -= SOFTIRQ_OFFSET; } while (0)
#define local_bh_enable() \
do { \
- if (unlikely((preempt_count() == IRQ_OFFSET) && \
- softirq_pending(smp_processor_id()))) { \
- __local_bh_enable(); \
+ __local_bh_enable(); \
+ if (unlikely(!in_interrupt() && softirq_pending(smp_processor_id()))) \
do_softirq(); \
- preempt_check_resched(); \
- } else { \
- __local_bh_enable(); \
- preempt_check_resched(); \
- } \
+ preempt_check_resched(); \
} while (0)
-#define in_softirq() in_interrupt()
-
#endif /* __ASM_SOFTIRQ_H */
/* used in the idle loop; sti takes one instruction cycle to complete */
#define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory")
+#define irqs_disabled() \
+({ \
+ unsigned long flags; \
+ local_save_flags(flags); \
+ !(flags & (1<<9)); \
+})
+
/* For spinlocks etc */
#define local_irq_save(x) __asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory")
-/*
- * Compatibility macros - they will be removed after some time.
- */
-#if !CONFIG_SMP
-# define sti() local_irq_enable()
-# define cli() local_irq_disable()
-# define save_flags(flags) local_save_flags(flags)
-# define restore_flags(flags) local_irq_restore(flags)
-#endif
-
/*
* disable hlt during certain critical i/o operations
*/
/*
* macros/functions for gaining access to the thread information structure
+ *
+ * preempt_count needs to be 1 initially, until the scheduler is functional.
*/
#ifndef __ASSEMBLY__
#define INIT_THREAD_INFO(tsk) \
exec_domain: &default_exec_domain, \
flags: 0, \
cpu: 0, \
+ preempt_count: 1, \
addr_limit: KERNEL_DS, \
}
#define write_lock_irq(lock) do { local_irq_disable(); write_lock(lock); } while (0)
#define write_lock_bh(lock) do { local_bh_disable(); write_lock(lock); } while (0)
-#define spin_unlock_irqrestore(lock, flags) do { spin_unlock(lock); local_irq_restore(flags); } while (0)
+#define spin_unlock_irqrestore(lock, flags) do { _raw_spin_unlock(lock); local_irq_restore(flags); preempt_enable(); } while (0)
#define _raw_spin_unlock_irqrestore(lock, flags) do { _raw_spin_unlock(lock); local_irq_restore(flags); } while (0)
-#define spin_unlock_irq(lock) do { spin_unlock(lock); local_irq_enable(); } while (0)
-#define spin_unlock_bh(lock) do { spin_unlock(lock); local_bh_enable(); } while (0)
+#define spin_unlock_irq(lock) do { _raw_spin_unlock(lock); local_irq_enable(); preempt_enable(); } while (0)
+#define spin_unlock_bh(lock) do { spin_unlock(lock); local_bh_enable(); } while (0)
-#define read_unlock_irqrestore(lock, flags) do { read_unlock(lock); local_irq_restore(flags); } while (0)
-#define read_unlock_irq(lock) do { read_unlock(lock); local_irq_enable(); } while (0)
+#define read_unlock_irqrestore(lock, flags) do { _raw_read_unlock(lock); local_irq_restore(flags); preempt_enable(); } while (0)
+#define read_unlock_irq(lock) do { _raw_read_unlock(lock); local_irq_enable(); preempt_enable(); } while (0)
#define read_unlock_bh(lock) do { read_unlock(lock); local_bh_enable(); } while (0)
-#define write_unlock_irqrestore(lock, flags) do { write_unlock(lock); local_irq_restore(flags); } while (0)
-#define write_unlock_irq(lock) do { write_unlock(lock); local_irq_enable(); } while (0)
+#define write_unlock_irqrestore(lock, flags) do { _raw_write_unlock(lock); local_irq_restore(flags); preempt_enable(); } while (0)
+#define write_unlock_irq(lock) do { _raw_write_unlock(lock); local_irq_enable(); preempt_enable(); } while (0)
#define write_unlock_bh(lock) do { write_unlock(lock); local_bh_enable(); } while (0)
#define spin_trylock_bh(lock) ({ int __r; local_bh_disable();\
__r = spin_trylock(lock); \
preempt_enable(); \
} while (0)
-#define spin_unlock_no_resched(lock) \
-do { \
- _raw_spin_unlock(lock); \
- preempt_enable_no_resched(); \
-} while (0)
-
#define read_lock(lock) ({preempt_disable(); _raw_read_lock(lock);})
#define read_unlock(lock) ({_raw_read_unlock(lock); preempt_enable();})
#define write_lock(lock) ({preempt_disable(); _raw_write_lock(lock);})
#define spin_lock(lock) _raw_spin_lock(lock)
#define spin_trylock(lock) _raw_spin_trylock(lock)
#define spin_unlock(lock) _raw_spin_unlock(lock)
-#define spin_unlock_no_resched(lock) _raw_spin_unlock(lock)
#define read_lock(lock) _raw_read_lock(lock)
#define read_unlock(lock) _raw_read_unlock(lock)
* total amount of pending timeslices in the system doesnt change,
* resulting in more scheduling fairness.
*/
- local_save_flags(flags);
- local_irq_disable();
+ local_irq_save(flags);
p->time_slice = (current->time_slice + 1) >> 1;
current->time_slice >>= 1;
+ p->sleep_timestamp = jiffies;
if (!current->time_slice) {
/*
* This case is rare, it happens when the parent has only
* runqueue lock is not a problem.
*/
current->time_slice = 1;
+ preempt_disable();
scheduler_tick(0, 0);
- }
- p->sleep_timestamp = jiffies;
- local_irq_restore(flags);
+ local_irq_restore(flags);
+ preempt_enable();
+ } else
+ local_irq_restore(flags);
/*
* Ok, add it to the run-queues and make it
if (p == rq->idle) {
/* note: this timer irq context must be accounted for as well */
- if (preempt_count() >= 2*IRQ_OFFSET)
+ if (irq_count() >= 2*HARDIRQ_OFFSET)
kstat.per_cpu_system[cpu] += system;
#if CONFIG_SMP
idle_tick();
if (unlikely(ti->preempt_count))
return;
+ if (unlikely(irqs_disabled())) {
+ preempt_disable();
+ printk("bad: schedule() with irqs disabled!\n");
+ show_stack(NULL);
+ preempt_enable_no_resched();
+ }
need_resched:
ti->preempt_count = PREEMPT_ACTIVE;
wait_queue_t wait; \
init_waitqueue_entry(&wait, current);
-#define SLEEP_ON_HEAD \
+#define SLEEP_ON_HEAD \
spin_lock_irqsave(&q->lock,flags); \
__add_wait_queue(q, &wait); \
spin_unlock(&q->lock);
list_add_tail(¤t->run_list, array->queue + current->prio);
__set_bit(current->prio, array->bitmap);
}
- spin_unlock_no_resched(&rq->lock);
+ /*
+ * Since we are going to call schedule() anyway, there's
+ * no need to preempt:
+ */
+ _raw_spin_unlock(&rq->lock);
+ preempt_enable_no_resched();
schedule();
runqueue_t *idle_rq = cpu_rq(cpu), *rq = cpu_rq(task_cpu(idle));
unsigned long flags;
- local_save_flags(flags);
- local_irq_disable();
+ local_irq_save(flags);
double_rq_lock(idle_rq, rq);
idle_rq->curr = idle_rq->idle = idle;
/* Set the preempt count _outside_ the spinlocks! */
#if CONFIG_PREEMPT
idle->thread_info->preempt_count = (idle->lock_depth >= 0);
+#else
+ idle->thread_info->preempt_count = 0;
#endif
}
cc_entry(cc)[cc->avail++] =
kmem_cache_alloc_one_tail(cachep, slabp);
}
- spin_unlock(&cachep->spinlock);
+ /*
+ * CAREFUL: do not enable preemption yet, the per-CPU
+ * entries rely on us being atomic.
+ */
+ _raw_spin_unlock(&cachep->spinlock);
if (cc->avail)
return cc_entry(cc)[--cc->avail];
} else {
STATS_INC_ALLOCMISS(cachep);
objp = kmem_cache_alloc_batch(cachep,flags);
+ local_irq_restore(save_flags);
+ /* end of non-preemptible region */
+ preempt_enable();
if (!objp)
goto alloc_new_slab_nolock;
+ return objp;
}
} else {
spin_lock(&cachep->spinlock);
alloc_new_slab:
#ifdef CONFIG_SMP
spin_unlock(&cachep->spinlock);
-alloc_new_slab_nolock:
#endif
local_irq_restore(save_flags);
+#ifdef CONFIG_SMP
+alloc_new_slab_nolock:
+#endif
if (kmem_cache_grow(cachep, flags))
/* Someone may have stolen our objs. Doesn't matter, we'll
* just come back here again.
dst_release(skb->dst);
if(skb->destructor) {
- if (0 && in_irq())
+ if (in_irq())
printk(KERN_WARNING "Warning: kfree_skb on "
"hard IRQ %p\n", NET_CALLER(skb));
skb->destructor(skb);
int i;
unsigned long flags;
- /* the local_irq_disable stunt is to send the data within the command window */
+ /* the local_irq_save stunt is to send the data within the command window */
for (i = 0; i < 0xffff; i++) {
- local_save_flags(flags);
- local_irq_disable();
+ local_irq_save(flags);
if (!(inb(s->sbbase+0xc) & 0x80)) {
outb(data, s->sbbase+0xc);
local_irq_restore(flags);