#include <asm/bitops.h>
#include <asm/pgtable.h>
#include <asm/io.h>
-#include <linux/io_trace.h>
#ifdef CONFIG_MTRR
# include <asm/mtrr.h>
int count = 0;
unsigned int cfg;
- IO_trace (IO_smp_wait_apic_start, 0, 0, 0, 0);
while (count < 1000)
{
cfg = slow_ICR;
if (!(cfg&(1<<12))) {
- IO_trace (IO_smp_wait_apic_end, 0, 0, 0, 0);
if (count)
atomic_add(count, (atomic_t*)&ipi_count);
return cfg;
/*
* Send the IPI. The write to APIC_ICR fires this off.
*/
-
- IO_trace (IO_smp_send_ipi, shortcut, vector, cfg, 0);
-
apic_write(APIC_ICR, cfg);
#if FORCE_APIC_SERIALIZATION
__restore_flags(flags);
/*
* Send the IPI. The write to APIC_ICR fires this off.
*/
-
- IO_trace (IO_smp_send_ipi, dest, vector, cfg, 0);
-
apic_write(APIC_ICR, cfg);
#if FORCE_APIC_SERIALIZATION
__restore_flags(flags);
__save_flags(flags);
__cli();
- IO_trace (IO_smp_message, 0, 0, 0, 0);
-
send_IPI_allbutself(INVALIDATE_TLB_VECTOR);
/*
*/
/*
 * Reschedule IPI handler.
 *
 * NOTE(review): this is a diff hunk -- the "-" prefixed IO_trace lines
 * are being removed by the patch, leaving only the local APIC
 * acknowledge.  Presumably the actual reschedule is driven by the
 * need_resched check on the interrupt-return path rather than here;
 * confirm against the entry code.
 */
asmlinkage void smp_reschedule_interrupt(void)
{
- IO_trace (IO_smp_reschedule, current->need_resched,
- current->priority, current->counter, 0);
-
/* Acknowledge the IPI at the local APIC so further IPIs can be delivered. */
ack_APIC_irq();
}
*/
/*
 * TLB-invalidate IPI handler.
 *
 * NOTE(review): diff hunk removing the IO_trace instrumentation; the
 * hunk is truncated below -- the function's closing brace is not
 * visible at this point in the fragment.
 */
asmlinkage void smp_invalidate_interrupt(void)
{
- IO_trace (IO_smp_tlbflush,
- atomic_read((atomic_t *)&smp_invalidate_needed), 0, 0, 0);
-
/*
 * Atomically test-and-clear this CPU's bit in the pending mask;
 * flush the local TLB only if the bit was actually set.
 */
if (test_and_clear_bit(smp_processor_id(), &smp_invalidate_needed))
local_flush_tlb();
* Force strict CPU ordering.
* And yes, this is required on UP too when we're talking
* to devices.
+ *
+ * For now, "wmb()" doesn't actually do anything, as all
+ * intel CPU's follow what intel calls a *Processor Order*,
+ * in which all writes are seen in the program order even
+ * outside the CPU.
+ *
+ * I expect future intel CPU's to have a weaker ordering,
+ * but I'd also expect them to finally get their act together
+ * and add some real memory barriers if so.
*/
/*
 * Full memory barrier: a locked read-modify-write of the top of the
 * stack; the "memory" clobber additionally stops the compiler from
 * reordering memory accesses across it.
 */
#define mb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
/* Read barrier: no cheaper primitive available here, so fall back to mb().  (Added by this patch.) */
+#define rmb() mb()
/*
 * Write barrier: compiler barrier only -- per the comment above, these
 * CPUs keep writes in program order, so no instruction is required.
 * (Added by this patch.)
 */
+#define wmb() __asm__ __volatile__ ("": : :"memory")
/* interrupt control.. */
/* Enable interrupts; the "memory" clobber keeps it ordered against surrounding accesses. */
#define __sti() __asm__ __volatile__ ("sti": : :"memory")