#### cli()/sti() removal guide, started by Ingo Molnar <mingo@redhat.com>
-as of 2.5.28, four popular macros have been removed on SMP, and
+as of 2.5.28, five popular macros have been removed on SMP, and
are being phased out on UP:
- cli(), sti(), save_flags(flags), restore_flags(flags)
+ cli(), sti(), save_flags(flags), save_flags_cli(flags), restore_flags(flags)
until now it was possible to protect driver code against interrupt
handlers via a cli(), but from now on other, more lightweight methods
cli() on the other hand was used by many drivers, and extended
the critical section to the whole IRQ handler function - creating
serious lock contention.
-
+
to make the transition easier, we've still kept the cli(), sti(),
-save_flags() and restore_flags() macros defined on UP systems - but
-their usage will be phased out until the 2.6 kernel is released.
+save_flags(), save_flags_cli() and restore_flags() macros defined
+on UP systems - but their usage will be phased out until 2.6 is
+released.
drivers that want to disable local interrupts (interrupts on the
-current CPU), can use the following four macros:
+current CPU), can use the following five macros:
- __cli(), __sti(), __save_flags(flags), __restore_flags(flags)
+ local_irq_disable(), local_irq_enable(), local_irq_save(flags),
+ local_irq_save_off(flags), local_irq_restore(flags)
but beware, their meaning and semantics are much simpler, far from
-that of cli(), sti(), save_flags(flags) and restore_flags(flags).
+that of the old cli(), sti(), save_flags(flags) and restore_flags(flags)
+SMP meaning:
+
+ local_irq_disable() => turn local IRQs off
+
+ local_irq_enable() => turn local IRQs on
+ local_irq_save(flags) => save the current IRQ state into flags. The
+ state can be on or off. (on some
+                                 architectures there are even more bits in it.)
+
+ local_irq_save_off(flags) => save the current IRQ state into flags and
+ disable interrupts.
+
+ local_irq_restore(flags) => restore the IRQ state from flags.
+
+(local_irq_save can save both irqs on and irqs off state, and
+local_irq_restore can restore into both irqs on and irqs off state.)
another related change is that synchronize_irq() now takes a parameter:
synchronize_irq(irq). This change too has the purpose of making SMP
-synchronization more lightweight - this way you can wait for your own
-interrupt handler to finish, no need to wait for other IRQ sources.
+to make the transition easier, we've still kept the cli(), sti(),
+save_flags() and restore_flags() macros defined on UP systems - but
+their usage will be phased out until the 2.6 kernel is released.
why were these changes done? The main reason was the architectural burden
any locks or interrupts are disabled, since preemption is implicitly disabled
in those cases.
+But keep in mind that 'irqs disabled' is a fundamentally unsafe way of
+disabling preemption - any spin_unlock() decreasing the preemption count
+to 0 might trigger a reschedule. A simple printk() might trigger a reschedule.
+So use this implicit preemption-disabling property only if you know that the
+affected codepath does not do any of this. Best policy is to use this only for
+small, atomic code that you wrote and which calls no complex functions.
+
Example:
cpucache_t *cc; /* this is per-CPU */
{
unsigned long v;
+ irq_enter();
/*
* Check if this really is a spurious interrupt and ACK it
* if it is a vectored one. Just in case...
/* see sw-dev-man vol 3, chapter 7.4.13.5 */
printk(KERN_INFO "spurious APIC interrupt on CPU#%d, should never happen.\n",
smp_processor_id());
+ irq_exit();
}
/*
{
unsigned long v, v1;
+ irq_enter();
/* First tickle the hardware, only then report what went on. -- REW */
v = apic_read(APIC_ESR);
apic_write(APIC_ESR, 0);
*/
printk (KERN_ERR "APIC error on CPU%d: %02lx(%02lx)\n",
smp_processor_id(), v , v1);
+ irq_exit();
}
/*
asmlinkage void smp_thermal_interrupt(struct pt_regs regs)
{
+ irq_enter();
	vendor_thermal_interrupt(&regs);
+ irq_exit();
}
/* P4/Xeon Thermal regulation detect and init */
# userspace resumption stub bypassing syscall exit tracing
ALIGN
ret_from_intr:
- preempt_stop
ret_from_exception:
movl EFLAGS(%esp), %eax # mix EFLAGS and CS
movb CS(%esp), %al
clear_bit(cpu, &flush_cpumask);
out:
- put_cpu();
+ put_cpu_no_resched();
}
static void flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
static void cp_stop_hw (struct cp_private *cp)
{
+ struct net_device *dev = cp->dev;
+
cpw16(IntrMask, 0);
cpr16(IntrMask);
cpw8(Cmd, 0);
cpw16(CpCmd, 0);
cpr16(CpCmd);
cpw16(IntrStatus, ~(cpr16(IntrStatus)));
- synchronize_irq();
+ synchronize_irq(dev->irq);
udelay(10);
cp->rx_tail = 0;
spin_unlock_irqrestore (&tp->lock, flags);
- synchronize_irq ();
+ /* TODO: isn't this code racy? we synchronize the IRQ and then free it, */
+ /* but another IRQ could've happened in between the sync and free */
+ synchronize_irq (dev->irq);
free_irq (dev->irq, dev);
rtl8139_tx_clear (tp);
spin_unlock_irq(&nl->lock);
if (error == HS_TIMEOUT) {
DISABLE(dev->irq);
- synchronize_irq();
+ synchronize_irq(dev->irq);
}
disable_parport_interrupts (dev);
netif_stop_queue (dev);
if (c0 & 0x08) {
spin_unlock_irq(&nl->lock);
DISABLE(dev->irq);
- synchronize_irq();
+ synchronize_irq(dev->irq);
if (nl->connection == PLIP_CN_RECEIVE) {
/* Interrupted.
We don't need to enable irq,
netif_stop_queue (dev);
DISABLE(dev->irq);
- synchronize_irq();
+ synchronize_irq(dev->irq);
if (dev->irq == -1)
{
/* Update the error counts. */
__de_get_stats(de);
- synchronize_irq();
+ synchronize_irq(dev->irq);
de_clean_rings(de);
de_init_hw(de);
{ \
unsigned long flags; \
\
- save_flags(flags); \
- cli(); \
trace.buf[trace.next].name = (w); \
trace.buf[trace.next].time = jiffies; \
trace.buf[trace.next].index = (i); \
trace.buf[trace.next].addr = (long) (a); \
trace.next = (trace.next + 1) & (TRACE_BUF_LEN - 1); \
- restore_flags(flags); \
}
#else
ENTER("isp1020_load_parameters");
- save_flags(flags);
- cli();
-
hwrev = isp_inw(host, ISP_CFG0) & ISP_CFG0_HWMSK;
isp_cfg1 = ISP_CFG1_F64 | ISP_CFG1_BENAB;
if (hwrev == ISP_CFG0_1040A) {
isp1020_mbox_command(host, param);
if (param[0] != MBOX_COMMAND_COMPLETE) {
- restore_flags(flags);
printk("qlogicisp : set initiator id failure\n");
return 1;
}
isp1020_mbox_command(host, param);
if (param[0] != MBOX_COMMAND_COMPLETE) {
- restore_flags(flags);
printk("qlogicisp : set retry count failure\n");
return 1;
}
isp1020_mbox_command(host, param);
if (param[0] != MBOX_COMMAND_COMPLETE) {
- restore_flags(flags);
printk("qlogicisp : async data setup time failure\n");
return 1;
}
isp1020_mbox_command(host, param);
if (param[0] != MBOX_COMMAND_COMPLETE) {
- restore_flags(flags);
printk("qlogicisp : set active negation state failure\n");
return 1;
}
isp1020_mbox_command(host, param);
if (param[0] != MBOX_COMMAND_COMPLETE) {
- restore_flags(flags);
printk("qlogicisp : set pci control parameter failure\n");
return 1;
}
isp1020_mbox_command(host, param);
if (param[0] != MBOX_COMMAND_COMPLETE) {
- restore_flags(flags);
printk("qlogicisp : set tag age limit failure\n");
return 1;
}
isp1020_mbox_command(host, param);
if (param[0] != MBOX_COMMAND_COMPLETE) {
- restore_flags(flags);
printk("qlogicisp : set selection timeout failure\n");
return 1;
}
isp1020_mbox_command(host, param);
if (param[0] != MBOX_COMMAND_COMPLETE) {
- restore_flags(flags);
printk("qlogicisp : set target parameter failure\n");
return 1;
}
isp1020_mbox_command(host, param);
if (param[0] != MBOX_COMMAND_COMPLETE) {
- restore_flags(flags);
printk("qlogicisp : set device queue "
"parameter failure\n");
return 1;
isp1020_mbox_command(host, param);
if (param[0] != MBOX_COMMAND_COMPLETE) {
- restore_flags(flags);
printk("qlogicisp : set response queue failure\n");
return 1;
}
isp1020_mbox_command(host, param);
if (param[0] != MBOX_COMMAND_COMPLETE) {
- restore_flags(flags);
printk("qlogicisp : set request queue failure\n");
return 1;
}
- restore_flags(flags);
-
LEAVE("isp1020_load_parameters");
return 0;
#include <asm/hardirq.h>
#include <asm/softirq.h>
+/*
+ * Temporary defines for UP kernels, until all code gets fixed.
+ */
+#if !CONFIG_SMP
+# define cli() local_irq_disable()
+# define sti() local_irq_enable()
+# define save_flags(x) local_irq_save(x)
+# define restore_flags(x) local_irq_restore(x)
+# define save_and_cli(x) local_irq_save_off(x)
+#endif
/* PLEASE, avoid to allocate new softirqs, if you need not _really_ high
#endif /* !SMP */
-#define get_cpu() ({ preempt_disable(); smp_processor_id(); })
-#define put_cpu() preempt_enable()
+#define get_cpu() ({ preempt_disable(); smp_processor_id(); })
+#define put_cpu() preempt_enable()
+#define put_cpu_no_resched() preempt_enable_no_resched()
#endif /* __LINUX_SMP_H */