endif
# Device support
-ifdef CONFIG_ALPHA_MIATA
+ifneq ($(CONFIG_ALPHA_MIATA)$(CONFIG_ALPHA_DP264),)
O_OBJS += es1888.o
endif
ifneq ($(CONFIG_ALPHA_SX164)$(CONFIG_ALPHA_MIATA)$(CONFIG_ALPHA_DP264),)
extern void __remqu (void);
EXPORT_SYMBOL(alpha_mv);
-EXPORT_SYMBOL(local_bh_count);
-EXPORT_SYMBOL(local_irq_count);
EXPORT_SYMBOL(enable_irq);
EXPORT_SYMBOL(disable_irq);
EXPORT_SYMBOL(disable_irq_nosync);
EXPORT_SYMBOL(write_lock);
EXPORT_SYMBOL(read_lock);
#endif
+#else /* __SMP__ */
+EXPORT_SYMBOL(__local_bh_count);
+EXPORT_SYMBOL(__local_irq_count);
#endif /* __SMP__ */
/*
{
/*
* Set up the PCI->physical memory translation windows.
- * For now, windows 1,2 and 3 are disabled. In the future, we may
+ * For now, windows 2 and 3 are disabled. In the future, we may
* want to use them to do scatter/gather DMA.
*
- * Window 0 goes at 1 GB and is 1 GB large.
+ * Window 0 goes at 2 GB and is 1 GB large.
+ * Window 1 goes at 3 GB and is 1 GB large.
*/
- *(vuip)PYXIS_W0_BASE = 1U | (PYXIS_DMA_WIN_BASE_DEFAULT & 0xfff00000U);
+ *(vuip)PYXIS_W0_BASE = PYXIS_DMA_WIN_BASE_DEFAULT | 1UL;
*(vuip)PYXIS_W0_MASK = (PYXIS_DMA_WIN_SIZE_DEFAULT - 1) & 0xfff00000U;
*(vuip)PYXIS_T0_BASE = 0;
- *(vuip)PYXIS_W1_BASE = 0x0 ;
- *(vuip)PYXIS_W2_BASE = 0x0 ;
- *(vuip)PYXIS_W3_BASE = 0x0 ;
+ *(vuip)PYXIS_W1_BASE = (PYXIS_DMA_WIN_BASE_DEFAULT +
+ PYXIS_DMA_WIN_SIZE_DEFAULT) | 1U;
+ *(vuip)PYXIS_W1_MASK = (PYXIS_DMA_WIN_SIZE_DEFAULT - 1) & 0xfff00000U;
+ *(vuip)PYXIS_T1_BASE = PYXIS_DMA_WIN_SIZE_DEFAULT;
+
+ *(vuip)PYXIS_W2_BASE = 0x0;
+ *(vuip)PYXIS_W3_BASE = 0x0;
mb();
}
continue;
inb(0x022a); /* pause */
outb(0xc6, 0x022c); /* enable extended mode */
+ inb(0x022a); /* pause, also forces the write */
while (inb(0x022c) & 0x80) /* wait for bit 7 to deassert */
continue;
outb(0xb1, 0x022c); /* setup for write to Interrupt CR */
while (inb(0x022c) & 0x80) /* wait for bit 7 to deassert */
continue;
outb(0x18, 0x022c); /* set DMA channel 1 */
+ inb(0x022c); /* force the write */
}
#define vulp volatile unsigned long *
#define vuip volatile unsigned int *
-unsigned int local_irq_count[NR_CPUS];
-unsigned int local_bh_count[NR_CPUS];
-unsigned long hardirq_no[NR_CPUS];
+/* Only uniprocessor needs this IRQ/BH locking depth, on SMP it lives
+ in the per-cpu structure for cache reasons. */
+#ifndef __SMP__
+int __local_irq_count;
+int __local_bh_count;
+#endif
#if NR_IRQS > 64
# error Unable to handle more than 64 irq levels.
static void show(char * str, void *where);
+#define SYNC_OTHER_CPUS(x) udelay((x)+1);
+
static inline void
wait_on_irq(int cpu, void *where)
{
* already executing in one..
*/
if (!atomic_read(&global_irq_count)) {
- if (local_bh_count[cpu] ||
- !atomic_read(&global_bh_count))
+ if (local_bh_count(cpu)
+ || !atomic_read(&global_bh_count))
break;
}
count = MAXCOUNT;
}
__sti();
-#if 0
- SYNC_OTHER_CORES(cpu);
-#else
- udelay(cpu+1);
-#endif
+ SYNC_OTHER_CPUS(cpu);
__cli();
if (atomic_read(&global_irq_count))
continue;
- if (global_irq_lock.lock)
+ if (spin_is_locked(&global_irq_lock))
continue;
- if (!local_bh_count[cpu] &&
- atomic_read(&global_bh_count))
+ if (!local_bh_count(cpu)
+ && atomic_read(&global_bh_count))
continue;
if (spin_trylock(&global_irq_lock))
break;
void
__global_cli(void)
{
- int cpu;
+ int cpu = smp_processor_id();
void *where = __builtin_return_address(0);
/*
* Maximize ipl. If ipl was previously 0 and if this thread
* is not in an irq, then take global_irq_lock.
*/
- if ((swpipl(7) == 0) && !local_irq_count[cpu = smp_processor_id()])
+ if (swpipl(7) == 0 && !local_irq_count(cpu))
get_irqlock(cpu, where);
}
{
int cpu = smp_processor_id();
- if (!local_irq_count[cpu]) {
+ if (!local_irq_count(cpu))
release_irqlock(cpu);
- }
__sti();
}
retval = 2 + local_enabled;
/* Check for global flags if we're not in an interrupt. */
- if (!local_irq_count[cpu]) {
+ if (!local_irq_count(cpu)) {
if (local_enabled)
retval = 1;
if (global_irq_holder == cpu)
#define STUCK \
if (!--stuck) { \
printk("irq_enter stuck (irq=%d, cpu=%d, global=%d)\n", \
- irq, cpu,global_irq_holder); \
+ irq, cpu, global_irq_holder); \
stuck = INIT_STUCK; \
}
hardirq_enter(cpu, irq);
barrier();
- while (global_irq_lock.lock) {
+ while (spin_is_locked(&global_irq_lock)) {
if (cpu == global_irq_holder) {
- int globl_locked = global_irq_lock.lock;
+ int globl_locked = spin_is_locked(&global_irq_lock);
int globl_icount = atomic_read(&global_irq_count);
- int local_count = local_irq_count[cpu];
+ int local_count = local_irq_count(cpu);
/* It is very important that we load the state
variables before we do the first call to
#endif
int cpu = smp_processor_id();
- int global_count = atomic_read(&global_irq_count);
- int local_count0 = local_irq_count[0];
- int local_count1 = local_irq_count[1];
- long hardirq_no0 = hardirq_no[0];
- long hardirq_no1 = hardirq_no[1];
-
printk("\n%s, CPU %d: %p\n", str, cpu, where);
- printk("irq: %d [%d(0x%016lx) %d(0x%016lx)]\n", global_count,
- local_count0, hardirq_no0, local_count1, hardirq_no1);
+ printk("irq: %d [%d %d]\n",
+ atomic_read(&global_irq_count),
+ cpu_data[0].irq_count,
+ cpu_data[1].irq_count);
printk("bh: %d [%d %d]\n",
- atomic_read(&global_bh_count), local_bh_count[0],
- local_bh_count[1]);
+ atomic_read(&global_bh_count),
+ cpu_data[0].bh_count,
+ cpu_data[1].bh_count);
#if 0
stack = (unsigned long *) &str;
for (i = 40; i ; i--) {
count = ~0;
}
/* nothing .. wait for the other bh's to go away */
+ barrier();
} while (atomic_read(&global_bh_count) != 0);
}
void
synchronize_bh(void)
{
- if (atomic_read(&global_bh_count)) {
- int cpu = smp_processor_id();
- if (!local_irq_count[cpu] && !local_bh_count[cpu]) {
- wait_on_bh();
- }
- }
+ if (atomic_read(&global_bh_count) && !in_interrupt())
+ wait_on_bh();
}
/*
void
synchronize_irq(void)
{
+#if 0
+ /* Joe's version. */
int cpu = smp_processor_id();
int local_count;
int global_count;
mb();
do {
- local_count = local_irq_count[cpu];
+ local_count = local_irq_count(cpu);
global_count = atomic_read(&global_irq_count);
if (DEBUG_SYNCHRONIZE_IRQ && (--countdown == 0)) {
printk("%d:%d/%d\n", cpu, local_count, global_count);
break;
}
} while (global_count != local_count);
+#else
+ /* Jay's version. */
+ if (atomic_read(&global_irq_count)) {
+ cli();
+ sti();
+ }
+#endif
}
#else /* !__SMP__ */
-#define irq_enter(cpu, irq) (++local_irq_count[cpu])
-#define irq_exit(cpu, irq) (--local_irq_count[cpu])
+#define irq_enter(cpu, irq) (++local_irq_count(cpu))
+#define irq_exit(cpu, irq) (--local_irq_count(cpu))
#endif /* __SMP__ */
unsigned long a3, unsigned long a4, unsigned long a5,
struct pt_regs regs)
{
- unsigned long flags;
-
switch (type) {
case 0:
#ifdef __SMP__
- __save_and_cli(flags);
handle_ipi(®s);
- __restore_flags(flags);
return;
#else
printk("Interprocessor interrupt? You must be kidding\n");
#endif
break;
case 1:
- __save_and_cli(flags);
handle_irq(RTC_IRQ, -1, ®s);
- __restore_flags(flags);
return;
case 2:
alpha_mv.machine_check(vector, la_ptr, ®s);
return;
case 3:
- __save_and_cli(flags);
alpha_mv.device_interrupt(vector, ®s);
- __restore_flags(flags);
return;
case 4:
perf_irq(vector, ®s);
#define TIMER_IRQ RTC_IRQ /* timer is the rtc */
#endif
+extern char _stext;
+static inline void alpha_do_profile (unsigned long pc)
+{
+ if (prof_buffer && current->pid) {
+ pc -= (unsigned long) &_stext;
+ pc >>= prof_shift;
+ /*
+ * Don't ignore out-of-bounds PC values silently,
+ * put them into the last histogram slot, so if
+ * present, they will show up as a sharp peak.
+ */
+ if (pc > prof_len - 1)
+ pc = prof_len - 1;
+ atomic_inc((atomic_t *)&prof_buffer[pc]);
+ }
+}
return -EINVAL;
cpu = (struct percpu_struct*)
((char*)hwrpb + hwrpb->processor_offset);
+ w = cpu->type;
if (put_user(w, (unsigned long *)buffer))
return -EFAULT;
return 1;
}
#ifdef __SMP__
-void
+int
cpu_idle(void *unused)
{
/* An endless idle loop with no priority at all. */
p->tss.ksp = (unsigned long) childstack;
p->tss.pal_flags = 1; /* set FEN, clear everything else */
p->tss.flags = current->tss.flags;
- p->tss.mm_context = p->tss.asn = 0;
return 0;
}
/* process.c */
extern void generic_kill_arch (int mode, char *reboot_cmd);
-extern void cpu_idle(void *) __attribute__((noreturn));
+extern int cpu_idle(void *) __attribute__((noreturn));
/* ptrace.c */
extern int ptrace_set_bpt (struct task_struct *child);
int a4, int a5, struct pt_regs regs)
{
struct task_struct *child;
- unsigned long tmp;
long ret;
lock_kernel();
cpu_data[cpuid].loops_per_sec = loops_per_sec;
cpu_data[cpuid].last_asn
= (cpuid << WIDTH_HARDWARE_ASN) + ASN_FIRST_VERSION;
+ cpu_data[cpuid].irq_count = 0;
+ cpu_data[cpuid].bh_count = 0;
}
/*
{
cpu_data[cpuid].prof_counter = 1;
cpu_data[cpuid].prof_multiplier = 1;
-
-#ifdef NOT_YET_PROFILING
- load_profile_irq(mid_xlate[cpu], lvl14_resolution);
- if (cpu == smp_boot_cpuid)
- enable_pil_irq(14);
-#endif
}
/*
smp_percpu_timer_interrupt(struct pt_regs *regs)
{
int cpu = smp_processor_id();
- int user = user_mode(regs);
+ unsigned long user = user_mode(regs);
struct cpuinfo_alpha *data = &cpu_data[cpu];
-#ifdef NOT_YET_PROFILING
- clear_profile_irq(mid_xlate[cpu]);
+ /* Record kernel PC. */
if (!user)
alpha_do_profile(regs->pc);
-#endif
if (!--data->prof_counter) {
/* We need to make like a normal interrupt -- otherwise
int __init
setup_profiling_timer(unsigned int multiplier)
{
-#ifdef NOT_YET_PROFILING
- int i;
- unsigned long flags;
-
- /* Prevent level14 ticker IRQ flooding. */
- if((!multiplier) || (lvl14_resolution / multiplier) < 500)
- return -EINVAL;
-
- save_and_cli(flags);
- for (i = 0; i < NR_CPUS; i++) {
- if (cpu_present_mask & (1L << i)) {
- load_profile_irq(mid_xlate[i],
- lvl14_resolution / multiplier);
- prof_multiplier[i] = multiplier;
- }
- }
- restore_flags(flags);
-
- return 0;
-#else
return -EINVAL;
-#endif
}
\f
void
flush_tlb_mm(struct mm_struct *mm)
{
- if (mm == current->mm)
+ if (mm == current->mm) {
flush_tlb_current(mm);
- else
+ if (atomic_read(&mm->count) == 1)
+ return;
+ } else
flush_tlb_other(mm);
if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) {
struct flush_tlb_page_struct data;
struct mm_struct *mm = vma->vm_mm;
+ if (mm == current->mm) {
+ flush_tlb_current_page(mm, vma, addr);
+ if (atomic_read(&mm->count) == 1)
+ return;
+ } else
+ flush_tlb_other(mm);
+
data.vma = vma;
data.mm = mm;
data.addr = addr;
- if (mm == current->mm)
- flush_tlb_current_page(mm, vma, addr);
- else
- flush_tlb_other(mm);
-
if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) {
printk(KERN_CRIT "flush_tlb_page: timed out\n");
}
{
layout_all_busses(DEFAULT_IO_BASE, DEFAULT_MEM_BASE);
common_pci_fixup(monet_map_irq, monet_swizzle);
- /* es1888_init(); */ /* later? */
SMC669_Init(1);
+ es1888_init();
}
static void __init
smp_percpu_timer_interrupt(regs);
if (smp_processor_id() != smp_boot_cpuid)
return;
+#else
+ /* Not SMP, do kernel PC profiling here. */
+ if (!user_mode(regs))
+ alpha_do_profile(regs->pc);
#endif
write_lock(&xtime_lock);
* FPCR_INV if invalid operation occurred, etc.
*/
unsigned long
-ieee_CVTQT (int f, unsigned long a, unsigned long *b)
+ieee_CVTQT (int f, long a, unsigned long *b)
{
EXTENDED op_b;
- op_b.s = 0;
- op_b.f[0] = a;
- op_b.f[1] = 0;
- if (sign(a) < 0) {
- op_b.s = 1;
- op_b.f[0] = -a;
+ if (a != 0) {
+ op_b.s = (a < 0 ? 1 : 0);
+ op_b.f[0] = (a < 0 ? -a : a);
+ op_b.f[1] = 0;
+ op_b.e = 55;
+ normalize(&op_b);
+ return round_t_ieee(f, &op_b, b);
+ } else {
+ *b = 0;
+ return 0;
}
- op_b.e = 55;
- normalize(&op_b);
- return round_t_ieee(f, &op_b, b);
}
extern unsigned long ieee_CVTST (int rm, unsigned long a, unsigned long *b);
extern unsigned long ieee_CVTTS (int rm, unsigned long a, unsigned long *b);
extern unsigned long ieee_CVTQS (int rm, unsigned long a, unsigned long *b);
-extern unsigned long ieee_CVTQT (int rm, unsigned long a, unsigned long *b);
+extern unsigned long ieee_CVTQT (int rm, long a, unsigned long *b);
extern unsigned long ieee_CVTTQ (int rm, unsigned long a, unsigned long *b);
extern unsigned long ieee_CMPTEQ (unsigned long a, unsigned long b,
get_new_mmu_context(struct task_struct *p, struct mm_struct *mm)
{
unsigned long new = __get_new_mmu_context(p, mm);
- p->tss.mm_context = new;
+ mm->context = new;
p->tss.asn = new & HARDWARE_ASN_MASK;
}
extern unsigned long free_area_init(unsigned long, unsigned long);
-static inline struct thread_struct *
+static inline unsigned long
load_PCB(struct thread_struct * pcb)
{
register unsigned long sp __asm__("$30");
unsigned long newptbr;
struct memclust_struct * cluster;
struct memdesc_struct * memdesc;
- struct thread_struct *original_pcb_ptr;
+ unsigned long original_pcb_ptr;
/* initialize mem_map[] */
start_mem = free_area_init(start_mem, end_mem);
since KSEG values also happen to work, folks get confused.
Check this here. */
- if ((unsigned long)original_pcb_ptr < PAGE_OFFSET) {
- original_pcb_ptr = (struct thread_struct *)
- phys_to_virt((unsigned long) original_pcb_ptr);
+ if (original_pcb_ptr < PAGE_OFFSET) {
+ original_pcb_ptr = (unsigned long)
+ phys_to_virt(original_pcb_ptr);
}
- original_pcb = *original_pcb_ptr;
+ original_pcb = *(struct thread_struct *) original_pcb_ptr;
return start_mem;
}
* linux/kernel/ldt.c
*
* Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
- * Copyright (C) 1998 Ingo Molnar
+ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
*/
#include <linux/errno.h>
:"=&a" (retval), "=&S" (d0)
:"0" (__NR_clone), "i" (__NR_exit),
"r" (arg), "r" (fn),
- "b" (flags | CLONE_VM)
+ "b" (flags | CLONE_VM | CLONE_TLB)
: "memory");
return retval;
}
unsigned long new_cr3 = next->cr3;
tss->cr3 = new_cr3;
- if (new_cr3 != prev->cr3)
+ if (new_cr3 != prev->cr3)
asm volatile("movl %0,%%cr3": :"r" (new_cr3));
}
checks:
@$(MAKE) -C arch/$(ARCH)/kernel checks
-BOOT_TARGETS = netboot znetboot zImage floppy install \
- vmlinux.coff znetboot.initrd zImage.initrd vmlinux.coff.initrd
+BOOT_TARGETS = zImage znetboot.initrd zImage.initrd
ifdef CONFIG_MBX
$(BOOT_TARGETS): $(CHECKS) vmlinux
@$(MAKECOFFBOOT) $@
@$(MAKEBOOT) $@
@$(MAKECHRPBOOT) $@
+
+znetboot: $(CHECKS) vmlinux
+ifdef CONFIG_SMP
+ifdef CONFIG_PPC64
+ cp -f vmlinux /tftpboot/vmlinux.smp.64
+else
+ cp -f vmlinux /tftpboot/vmlinux.smp
+endif
+else
+ifdef CONFIG_PPC64
+ cp -f vmlinux /tftpboot/vmlinux.64
+else
+ cp -f vmlinux /tftpboot/vmlinux
+endif
+endif
+ @$(MAKECOFFBOOT) $@
+ @$(MAKEBOOT) $@
+ @$(MAKECHRPBOOT) $@
endif
pmac_config:
custom.intreq = 0x7fff;
#ifdef CONFIG_APUS
- /* Clear any inter-CPU interrupt requests. Circumvents bug in
+ /* Clear any inter-CPU interupt requests. Circumvents bug in
Blizzard IPL emulation HW (or so it appears). */
APUS_WRITE(APUS_INT_LVL, INTLVL_SETRESET | INTLVL_MASK);
#include <linux/kd.h>
#include <linux/tty.h>
#include <linux/console.h>
+#include <linux/init.h>
#include <asm/bootinfo.h>
#include <asm/setup.h>
sa = *(unsigned long *)PROG_START+PROG_START;
printf("start address = 0x%x\n\r", sa);
- (*(void (*)())sa)(a1, a2, prom, 0, 0);
+ (*(void (*)())sa)(0, 0, prom, a1, a2);
printf("returned?\n\r");
-# $Id: config.in,v 1.94 1999/06/25 11:00:07 davem Exp $
+# $Id: config.in,v 1.95 1999/07/03 08:57:06 davem Exp $
# For a description of the syntax of this configuration file,
# see the Configure script.
#
#define python_config_data(bus) ((0xfef00000+0xf8010)-(bus*0x100000))
#define PYTHON_CFA(b, d, o) (0x80 | ((b<<6) << 8) | ((d) << 16) \
| (((o) & ~3) << 24))
-unsigned int python_busnr = 1;
+unsigned int python_busnr = 0;
int python_pcibios_read_config_byte(unsigned char bus, unsigned char dev_fn,
unsigned char offset, unsigned char *val)
} else if ( !strncmp("IBM,7043-260",
get_property(find_path_device("/"), "name", NULL),12) )
{
- pci_dram_offset = 0x80000000;
+ pci_dram_offset = 0x0;
isa_mem_base = 0xc0000000;
isa_io_base = 0xf8000000;
}
void chrp_time_init(void);
void chrp_setup_pci_ptrs(void);
+extern void chrp_progress(char *, unsigned short);
+void chrp_event_scan(void);
extern int pckbd_setkeycode(unsigned int scancode, unsigned int keycode);
extern int pckbd_getkeycode(unsigned int scancode);
chrp_setup_pci_ptrs();
#ifdef CONFIG_BLK_DEV_INITRD
/* take care of initrd if we have one */
- if ( r3 )
+ if ( r6 )
{
- initrd_start = r3 + KERNELBASE;
- initrd_end = r3 + r4 + KERNELBASE;
+ initrd_start = r6 + KERNELBASE;
+ initrd_end = r6 + r7 + KERNELBASE;
}
#endif /* CONFIG_BLK_DEV_INITRD */
ppc_md.ppc_kbd_sysrq_xlate = pckbd_sysrq_xlate;
SYSRQ_KEY = 0x54;
#endif
+ if ( rtas_data )
+ ppc_md.progress = chrp_progress;
#endif
#endif
* Print the banner, then scroll down so boot progress
* can be printed. -- Cort
*/
- chrp_progress("Linux/PPC "UTS_RELEASE"\n");
+ if ( ppc_md.progress ) ppc_md.progress("Linux/PPC "UTS_RELEASE"\n", 0x0);
}
-void chrp_progress(char *s)
+void chrp_progress(char *s, unsigned short hex)
{
extern unsigned int rtas_data;
+ unsigned long width;
+ struct device_node *root;
+ char *os = s;
+ if ( (root = find_path_device("/rtas")) )
+ width = *(unsigned long *)get_property(root, "ibm,display-line-length", NULL);
+ else
+ width = 0x10;
+
if ( (_machine != _MACH_chrp) || !rtas_data )
return;
- call_rtas( "display-character", 1, 1, NULL, '\r' );
- while ( *s )
- call_rtas( "display-character", 1, 1, NULL, *s++ );
+ if ( call_rtas( "display-character", 1, 1, NULL, '\r' ) )
+ {
+ /* assume no display-character RTAS method - use hex display */
+ return;
+ }
+ while ( *os )
+ call_rtas( "display-character", 1, 1, NULL, *os++ );
+ /* scan back for the last newline or carriage return */
+ for ( os-- ; (*os != '\n') && (*os != '\r') && (os > s) ; os--, width-- )
+ /* nothing */ ;
+ /*while ( width-- )*/
+ call_rtas( "display-character", 1, 1, NULL, ' ' );
}
/*
* arch/ppc/kernel/head.S
*
- * $Id: head.S,v 1.133 1999/05/20 05:13:08 cort Exp $
+ * $Id: head.S,v 1.134 1999/06/30 05:05:52 paulus Exp $
*
* PowerPC version
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
oris r21,r11,(KERNELBASE+0x20000000)@h
mtspr DBAT2L,r18 /* N.B. 6xx (not 601) have valid */
mtspr DBAT2U,r21 /* bit in upper BAT register */
- mtspr IBAT2L,r28
+ mtspr IBAT2L,r18
mtspr IBAT2U,r21
#endif /* CONFIG_PPC64 */
#endif
lwz r6,GPR4(r1)
lwz r7,GPR5(r1)
lwz r8,GPR6(r1)
- mr r9,r2
+ lwz r9,GPR7(r1)
+ bl printk
+ lis r3,77f@ha
+ addi r3,r3,77f@l
+ lwz r4,GPR8(r1)
+ lwz r5,GPR9(r1)
+ mr r6,r2
bl printk
lwz r0,GPR0(r1)
lwz r3,GPR3(r1)
66: li r3,ENOSYS
b 52b
#ifdef SHOW_SYSCALLS
-7: .string "syscall %d(%x, %x, %x, %x), current=%p\n"
+7: .string "syscall %d(%x, %x, %x, %x, %x, "
+77: .string "%x, %x), current=%p\n"
79: .string " -> %x\n"
.align 2
#endif
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
+#include <asm/bitops.h>
#include <asm/gg2.h>
#include <asm/cache.h>
#include <asm/prom.h>
if (!OpenPIC)
panic("No OpenPIC found");
+ if ( ppc_md.progress ) ppc_md.progress("openpic enter",0x122);
+
t = openpic_read(&OpenPIC->Global.Feature_Reporting0);
switch (t & OPENPIC_FEATURE_VERSION_MASK) {
case 1:
OPENPIC_FEATURE_LAST_PROCESSOR_SHIFT) + 1;
NumSources = ((t & OPENPIC_FEATURE_LAST_SOURCE_MASK) >>
OPENPIC_FEATURE_LAST_SOURCE_SHIFT) + 1;
+
printk("OpenPIC Version %s (%d CPUs and %d IRQ sources) at %p\n", version,
NumProcessors, NumSources, OpenPIC);
timerfreq = openpic_read(&OpenPIC->Global.Timer_Frequency);
if ( main_pic )
{
+ if ( ppc_md.progress ) ppc_md.progress("openpic main",0x3ff);
+
/* Initialize timer interrupts */
for (i = 0; i < OPENPIC_NUM_TIMERS; i++) {
/* Disabled, Priority 0 */
openpic_initipi(i, 0, OPENPIC_VEC_IPI+i);
}
+ if ( ppc_md.progress ) ppc_md.progress("openpic initirq",0x3bb);
/* Initialize external interrupts */
/* SIOint (8259 cascade) is special */
openpic_initirq(0, 8, OPENPIC_VEC_SOURCE, 1, 1);
+
+ if ( ppc_md.progress ) ppc_md.progress("openpic map",0x3cc);
/* Processor 0 */
openpic_mapirq(0, 1<<0);
for (i = 1; i < NumSources; i++) {
openpic_set_priority(0, 0);
openpic_disable_8259_pass_through();
}
+ if ( ppc_md.progress ) ppc_md.progress("openpic exit",0x222);
}
#include <linux/string.h>
#include <linux/init.h>
#include <linux/config.h>
+#include <linux/pci.h>
#include <linux/openpic.h>
#include <asm/processor.h>
#include <asm/ide.h>
#include <asm/machdep.h>
#include <asm/keyboard.h>
+#include <asm/dma.h>
#include "time.h"
#include "local_irq.h"
{
Scsi_Disk *dp;
int i;
-
+#ifdef CONFIG_BLK_DEV_SD
for (dp = rscsi_disks, i = 0; i < sd_template.dev_max; ++i, ++dp)
if (dp->device != NULL && dp->device->host == host
&& dp->device->id == tgt)
return MKDEV_SD(i);
+#endif /* CONFIG_BLK_DEV_SD */
return 0;
}
#endif
void
pmac_ide_insw(ide_ioreg_t port, void *buf, int ns)
{
- _insw_ns(port+_IO_BASE, buf, ns);
+ _insw_ns((unsigned short *)(port+_IO_BASE), buf, ns);
}
void
pmac_ide_outsw(ide_ioreg_t port, void *buf, int ns)
{
- _outsw_ns(port+_IO_BASE, buf, ns);
+ _outsw_ns((unsigned short *)(port+_IO_BASE), buf, ns);
}
int
#include <asm/system.h>
#include <asm/signal.h>
+#include <asm/system.h>
#include <asm/kgdb.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/residual.h>
#include <asm/io.h>
#include <asm/pgtable.h>
+#include <linux/ide.h>
#include <asm/ide.h>
#include <asm/cache.h>
#include <asm/dma.h>
/*
- * $Id: process.c,v 1.86 1999/06/17 21:53:46 cort Exp $
+ * $Id: process.c,v 1.87 1999/07/03 08:57:07 davem Exp $
*
* linux/arch/ppc/kernel/process.c
*
/*
- * $Id: prom.c,v 1.61 1999/06/17 06:05:52 paulus Exp $
+ * $Id: prom.c,v 1.62 1999/07/02 19:59:31 cort Exp $
*
* Procedures for interfacing to the Open Firmware PROM on
* Power Macintosh computers.
return;
/* copy the holding pattern code to someplace safe (8M) */
- memcpy( (void *)(8<<20), RELOC(__secondary_hold), 0x10000 );
- for (i = 8<<20; i < ((8<<20)+0x10000); i += 32)
+ memcpy( (void *)(8<<20), RELOC(__secondary_hold), 0x100 );
+ for (i = 8<<20; i < ((8<<20)+0x100); i += 32)
{
asm volatile("dcbf 0,%0" : : "r" (i) : "memory");
asm volatile("icbi 0,%0" : : "r" (i) : "memory");
regs->msr &= ~MSR_SE;
}
+#if 0
/*
* This routine gets a long from any process space by following the page
* tables. NOTE! You should check that the long isn't on a page boundary,
put_long(tsk, vma,addr,data);
return 0;
}
+#endif
asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
{
struct task_struct *child;
int ret = -EPERM;
+ unsigned long flags;
lock_kernel();
if (request == PTRACE_TRACEME) {
if (pid == 1) /* you may not mess with init */
goto out;
ret = -ESRCH;
- if (!(child = find_task_by_pid(pid)))
+ read_lock(&tasklist_lock);
+ child = find_task_by_pid(pid);
+ read_unlock(&tasklist_lock); /* FIXME!!! */
+ if ( !child )
goto out;
ret = -EPERM;
if (request == PTRACE_ATTACH) {
if (child->flags & PF_PTRACED)
goto out;
child->flags |= PF_PTRACED;
+
+ write_lock_irqsave(&tasklist_lock, flags);
if (child->p_pptr != current) {
REMOVE_LINKS(child);
child->p_pptr = current;
SET_LINKS(child);
}
+ write_unlock_irqrestore(&tasklist_lock, flags);
+
send_sig(SIGSTOP, child, 1);
ret = 0;
goto out;
goto out;
switch (request) {
- /* If I and D space are separate, these will need to be fixed. */
+ /* when I and D space are separate, these will need to be fixed. */
case PTRACE_PEEKTEXT: /* read word at location addr. */
case PTRACE_PEEKDATA: {
unsigned long tmp;
+ int copied;
- down(&child->mm->mmap_sem);
- ret = read_long(child, addr, &tmp);
- up(&child->mm->mmap_sem);
- if (ret < 0)
+ copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
+ ret = -EIO;
+ if (copied != sizeof(tmp))
goto out;
- ret = verify_area(VERIFY_WRITE, (void *) data, sizeof(long));
- if (!ret)
- put_user(tmp, (unsigned long *) data);
+ ret = put_user(tmp,(unsigned long *) data);
goto out;
}
-
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
unsigned long tmp;
/* If I and D space are separate, this will have to be fixed. */
case PTRACE_POKETEXT: /* write the word at location addr. */
case PTRACE_POKEDATA:
- down(&child->mm->mmap_sem);
- ret = write_long(child,addr,data);
- up(&child->mm->mmap_sem);
+ ret = 0;
+ if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
+ goto out;
+ ret = -EIO;
goto out;
-
case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
ret = -EIO;
if ((addr & 3) || addr < 0 || addr >= ((PT_FPR0 + 64) << 2))
goto out;
child->flags &= ~PF_TRACESYS;
set_single_step(child);
- wake_up_process(child);
child->exit_code = data;
/* give it a chance to run. */
+ wake_up_process(child);
ret = 0;
goto out;
}
child->flags &= ~(PF_PTRACED|PF_TRACESYS);
wake_up_process(child);
child->exit_code = data;
+ write_lock_irqsave(&tasklist_lock, flags);
REMOVE_LINKS(child);
child->p_pptr = child->p_opptr;
SET_LINKS(child);
+ write_unlock_irqrestore(&tasklist_lock, flags);
/* make sure the single step bit is not set. */
clear_single_step(child);
ret = 0;
asmlinkage void syscall_trace(void)
{
- lock_kernel();
if ((current->flags & (PF_PTRACED|PF_TRACESYS))
!= (PF_PTRACED|PF_TRACESYS))
goto out;
current->exit_code = 0;
}
out:
- unlock_kernel();
}
/*
- * $Id: setup.c,v 1.136 1999/06/18 07:11:35 cort Exp $
+ * $Id: setup.c,v 1.138 1999/07/11 16:32:21 cort Exp $
* Common prep/pmac/chrp boot and setup code.
*/
#endif
#include <asm/bootx.h>
#include <asm/machdep.h>
+#include <asm/ide.h>
extern void pmac_init(unsigned long r3,
unsigned long r4,
#ifdef __SMP__
if ( first_cpu_booted ) return 0;
#endif /* __SMP__ */
+ if ( ppc_md.progress ) ppc_md.progress("id mach(): start", 0x100);
#ifndef CONFIG_MACH_SPECIFIC
/* boot loader will tell us if we're APUS */
default:
printk("Unknown machine type in identify_machine!\n");
}
-
/* Check for nobats option (used in mapin_ram). */
if (strstr(cmd_line, "nobats")) {
extern int __map_without_bats;
__map_without_bats = 1;
}
-
+
+ if ( ppc_md.progress ) ppc_md.progress("id mach(): done", 0x200);
return 0;
}
*memory_end_p = (unsigned long) end_of_DRAM;
ppc_md.setup_arch(memory_start_p, memory_end_p);
+ /* clear the progress line */
+ if ( ppc_md.progress ) ppc_md.progress(" ", 0xffff);
}
void ppc_generic_ide_fix_driveid(struct hd_driveid *id)
/*
- * $Id: smp.c,v 1.54 1999/06/24 17:13:34 cort Exp $
+ * $Id: smp.c,v 1.55 1999/07/03 08:57:09 davem Exp $
*
* Smp support for ppc.
*
/*
- * $Id: init.c,v 1.170 1999/06/29 12:33:51 davem Exp $
+ * $Id: init.c,v 1.171 1999/07/08 23:20:14 cort Exp $
*
* PowerPC version
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
#include <asm/mbx.h>
#include <asm/smp.h>
#include <asm/bootx.h>
+#include <asm/machdep.h>
/* APUS includes */
#include <asm/setup.h>
#include <asm/amigahw.h>
} else {
p = find_mem_piece(PAGE_SIZE, PAGE_SIZE);
}
- /*memset(p, 0, PAGE_SIZE);*/
__clear_user(p, PAGE_SIZE);
return p;
}
#ifdef __SMP__
if ( first_cpu_booted ) return;
#endif /* __SMP__ */
-
+ if ( ppc_md.progress ) ppc_md.progress("MMU:enter", 0x111);
#ifndef CONFIG_8xx
if (have_of)
end_of_DRAM = pmac_find_end_of_memory();
else /* prep */
end_of_DRAM = prep_find_end_of_memory();
+ if ( ppc_md.progress ) ppc_md.progress("MMU:hash init", 0x300);
hash_init();
_SDR1 = __pa(Hash) | (Hash_mask >> 10);
ioremap_base = 0xf8000000;
+ if ( ppc_md.progress ) ppc_md.progress("MMU:mapin", 0x301);
/* Map in all of RAM starting at KERNELBASE */
mapin_ram();
* the io areas. RAM was mapped by mapin_ram().
* -- Cort
*/
+ if ( ppc_md.progress ) ppc_md.progress("MMU:setbat", 0x302);
switch (_machine) {
case _MACH_prep:
setbat(0, 0x80000000, 0x80000000, 0x10000000, IO_PAGE);
ioremap(0x80000000, 0x4000);
ioremap(0x81000000, 0x4000);
#endif /* CONFIG_8xx */
+ if ( ppc_md.progress ) ppc_md.progress("MMU:exit", 0x211);
}
/*
int i;
/* max amount of RAM we allow -- Cort */
-#define RAM_LIMIT (768<<20)
+#define RAM_LIMIT (256<<20)
memory_node = find_devices("memory");
if (memory_node == NULL) {
extern unsigned int hash_page_patch_A[], hash_page_patch_B[],
hash_page_patch_C[], hash_page[];
+ if ( ppc_md.progress ) ppc_md.progress("hash:enter", 0x105);
/*
* Allow 64k of hash table for every 16MB of memory,
* up to a maximum of 2MB.
}
#endif /* NO_RELOAD_HTAB */
+ if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322);
/* Find some memory for the hash table. */
if ( Hash_size )
Hash = find_mem_piece(Hash_size, Hash_size);
#else
#define b(x) (x)
#endif
- /*memset(Hash, 0, Hash_size);*/
- __clear_user(Hash, Hash_size);
Hash_end = (PTE *) ((unsigned long)Hash + Hash_size);
-
+ __clear_user(Hash, Hash_size);
+
+ if ( ppc_md.progress ) ppc_md.progress("hash:patch", 0x345);
/*
* Patch up the instructions in head.S:hash_page
*/
flush_icache_range((unsigned long) b(hash_page),
(unsigned long) b(hash_page + 1));
}
+ if ( ppc_md.progress ) ppc_md.progress("hash:done", 0x205);
}
#endif /* ndef CONFIG_8xx */
a file structure */
lo->lo_backing_file = NULL;
} else if (S_ISREG(inode->i_mode)) {
- if (!inode->i_op->bmap) {
+ if (!inode->i_op->get_block) {
printk(KERN_ERR "loop: device has no block access/not implemented\n");
goto out_putf;
}
* This driver is for PCnet32 and PCnetPCI based ethercards
*/
-static const char *version = "pcnet32.c:v1.21 31.3.99 tsbogend@alpha.franken.de\n";
+static const char *version = "pcnet32.c:v1.23 6.7.1999 tsbogend@alpha.franken.de\n";
#include <linux/config.h>
#include <linux/module.h>
* rewritten PCI card detection
* added dwio mode to get driver working on some PPC machines
* v1.21: added mii selection and mii ioctl
+ * v1.22: changed pci scanning code to make PPC people happy
+ * fixed switching to 32bit mode in pcnet32_open() (thanks
+ * to Michael Richard <mcr@solidum.com> for noticing this one)
+ * added sub vendor/device id matching (thanks again to
+ * Michael Richard <mcr@solidum.com>)
+ * added chip id for 79c973/975 (thanks to Zach Brown <zab@zabbo.net>)
+ * v1.23 fixed small bug, when manual selecting MII speed/duplex
*/
#define PCNET32_TOTAL_SIZE 0x20
+/* some PCI ids */
+#ifndef PCI_DEVICE_ID_AMD_LANCE
+#define PCI_VENDOR_ID_AMD 0x1022
+#define PCI_DEVICE_ID_AMD_LANCE 0x2000
+#endif
+#ifndef PCI_DEVICE_ID_AMD_PCNETHOME
+#define PCI_DEVICE_ID_AMD_PCNETHOME 0x2001
+#endif
+
+
#define CRC_POLYNOMIAL_LE 0xedb88320UL /* Ethernet CRC, little endian */
/* The PCNET32 Rx and Tx ring descriptors. */
struct pcnet32_pci_id_info {
const char *name;
- u16 vendor_id, device_id, device_id_mask, flags;
+ u16 vendor_id, device_id, svid, sdid, flags;
int io_size;
int (*probe1) (struct device *, unsigned long, unsigned char, int, int);
};
static struct pcnet32_pci_id_info pcnet32_tbl[] = {
{ "AMD PCnetPCI series",
- 0x1022, 0x2000, 0xfffe, PCI_USES_IO|PCI_USES_MASTER, PCNET32_TOTAL_SIZE,
+ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE, 0, 0,
+ PCI_USES_IO|PCI_USES_MASTER, PCNET32_TOTAL_SIZE,
+ pcnet32_probe1},
+ { "AMD PCnetHome series",
+ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_PCNETHOME, 0, 0,
+ PCI_USES_IO|PCI_USES_MASTER, PCNET32_TOTAL_SIZE,
pcnet32_probe1},
{0,}
};
#if defined(CONFIG_PCI)
if (pci_present()) {
- struct pci_dev *pdev;
- unsigned char pci_bus, pci_device_fn;
- int pci_index;
+ struct pci_dev *pdev = NULL;
printk("pcnet32.c: PCI bios is present, checking for devices...\n");
- for (pci_index = 0; pci_index < 0xff; pci_index++) {
- u16 vendor, device, pci_command;
+ while ((pdev = pci_find_class (PCI_CLASS_NETWORK_ETHERNET<<8, pdev))) {
+ u16 pci_command;
int chip_idx;
+ u16 sdid,svid;
- if (pcibios_find_class (PCI_CLASS_NETWORK_ETHERNET << 8,
- pci_index, &pci_bus, &pci_device_fn) != PCIBIOS_SUCCESSFUL)
- break;
-
- pcibios_read_config_word(pci_bus, pci_device_fn, PCI_VENDOR_ID, &vendor);
- pcibios_read_config_word(pci_bus, pci_device_fn, PCI_DEVICE_ID, &device);
-
+ pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &sdid);
+ pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &svid);
for (chip_idx = 0; pcnet32_tbl[chip_idx].vendor_id; chip_idx++)
- if (vendor == pcnet32_tbl[chip_idx].vendor_id &&
- (device & pcnet32_tbl[chip_idx].device_id_mask) == pcnet32_tbl[chip_idx].device_id)
+ if ((pdev->vendor == pcnet32_tbl[chip_idx].vendor_id) &&
+ (pdev->device == pcnet32_tbl[chip_idx].device_id) &&
+ (pcnet32_tbl[chip_idx].svid == 0 ||
+ (svid == pcnet32_tbl[chip_idx].svid)) &&
+ (pcnet32_tbl[chip_idx].sdid == 0 ||
+ (sdid == pcnet32_tbl[chip_idx].sdid)))
break;
if (pcnet32_tbl[chip_idx].vendor_id == 0)
continue;
- pdev = pci_find_slot(pci_bus, pci_device_fn);
ioaddr = pdev->base_address[0] & PCI_BASE_ADDRESS_IO_MASK;
#if defined(ADDR_64BITS) && defined(__alpha__)
ioaddr |= ((long)pdev->base_address[1]) << 32;
chipname = "PCnet/FAST+ 79C972";
fdx = 1; mii = 1;
break;
+ case 0x2625:
+ chipname = "PCnet/FAST III 79C973";
+ fdx = 1; mii = 1;
+ break;
case 0x2626:
chipname = "PCnet/Home 79C978";
fdx = 1;
printk("pcnet32: pcnet32 media reset to %#x.\n", media);
a->write_bcr (ioaddr, 49, media);
break;
+ case 0x2627:
+ chipname = "PCnet/FAST III 79C975";
+ fdx = 1; mii = 1;
+ break;
default:
printk("pcnet32: PCnet version %#x, no PCnet32 chip.\n",chip_version);
return ENODEV;
lp->a.reset (ioaddr);
/* switch pcnet32 to 32bit mode */
- lp->a.write_csr (ioaddr, 20, 2);
+ lp->a.write_bcr (ioaddr, 20, 2);
if (pcnet32_debug > 1)
printk("%s: pcnet32_open() irq %d tx/rx rings %#x/%#x init %#x.\n",
val |= 0x10;
lp->a.write_csr (ioaddr, 124, val);
- if (lp->mii & (lp->options & PORT_ASEL)) {
+ if (lp->mii & !(lp->options & PORT_ASEL)) {
val = lp->a.read_bcr (ioaddr, 32) & ~0x38; /* disable Auto Negotiation, set 10Mpbs, HD */
if (lp->options & PORT_FD)
val |= 0x10;
struct device *dev = (struct device *)dev_id;
struct pcnet32_private *lp;
unsigned long ioaddr;
- u16 csr0;
+ u16 csr0,rap;
int boguscnt = max_interrupt_work;
int must_restart;
dev->interrupt = 1;
+ rap = lp->a.read_rap(ioaddr);
while ((csr0 = lp->a.read_csr (ioaddr, 0)) & 0x8600 && --boguscnt >= 0) {
/* Acknowledge all of the current interrupt sources ASAP. */
lp->a.write_csr (ioaddr, 0, csr0 & ~0x004f);
/* Clear any other interrupt, and set interrupt enable. */
lp->a.write_csr (ioaddr, 0, 0x7940);
+ lp->a.write_rap(ioaddr,rap);
if (pcnet32_debug > 4)
printk("%s: exiting interrupt, csr0=%#4.4x.\n",
free = test_and_clear_bit(PG_free_after, &page->flags);
- if (page->owner != -1)
+ if (page->owner != (void *)-1)
PAGE_BUG(page);
- page->owner = (int)current;
+ page->owner = current;
UnlockPage(page);
if (free)
if (!PageLocked(page))
BUG();
- if (page->owner != (int)current)
+ if (page->owner != current)
PAGE_BUG(page);
/*
* Allocate async buffer heads pointing to this page, just for I/O.
}
if (!page->buffers)
BUG();
- page->owner = -1;
+ page->owner = (void *)-1;
head = page->buffers;
bh = head;
} else {
if (!nr && rw == READ) {
SetPageUptodate(page);
- page->owner = (int)current;
+ page->owner = current;
UnlockPage(page);
}
if (nr && (rw == WRITE))
blocks = PAGE_SIZE >> inode->i_sb->s_blocksize_bits;
iblock = page->offset >> inode->i_sb->s_blocksize_bits;
- page->owner = -1;
+ page->owner = (void *)-1;
head = page->buffers;
bh = head;
nr = 0;
* uptodate as well.
*/
SetPageUptodate(page);
- page->owner = (int)current;
+ page->owner = current;
UnlockPage(page);
}
return 0;
goto out;
if (func == 1) {
- error = sync_old_buffers();
- goto out;
+ struct mm_struct *user_mm;
+ /*
+ * bdflush will spend all of its time in kernel-space,
+ * without touching user-space, so we can switch it into
+ * 'lazy TLB mode' to reduce the cost of context-switches
+ * to and from bdflush.
+ */
+ user_mm = current->mm;
+ mmget(user_mm);
+ current->flags |= PF_LAZY_TLB;
+
+ error = sync_old_buffers();
+
+ current->flags &= ~PF_LAZY_TLB;
+ SET_PAGE_DIR(current, user_mm->pgd);
+ mmput(current->mm);
+ current->mm = user_mm;
+
+ goto out;
}
/* Basically func 1 means read param 1, 2 means write param 1, etc */
return error;
out_putf:
+ write_unlock(&files->file_lock);
fput(file);
goto out;
}
/* Writable file? */
if (S_ISREG(inode->i_mode) && (file->f_mode & FMODE_WRITE))
- return 0;
+ goto too_bad;
}
file_list_unlock();
return 1; /* Tis' cool bro. */
#define PYXIS_MEM_R2_MASK 0x07ffffff /* SPARSE Mem region 2 mask is 27 bits */
#define PYXIS_MEM_R3_MASK 0x03ffffff /* SPARSE Mem region 3 mask is 26 bits */
-#define PYXIS_DMA_WIN_BASE_DEFAULT (1024*1024*1024)
-#define PYXIS_DMA_WIN_SIZE_DEFAULT (1024*1024*1024)
+#define PYXIS_DMA_WIN_BASE_DEFAULT (2UL*1024*1024*1024)
+#define PYXIS_DMA_WIN_SIZE_DEFAULT (1UL*1024*1024*1024)
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SRM_SETUP)
#define PYXIS_DMA_WIN_BASE alpha_mv.dma_win_base
#include <linux/tasks.h>
-extern unsigned int local_irq_count[NR_CPUS];
-extern unsigned long hardirq_no[NR_CPUS];
+#ifndef __SMP__
+extern int __local_irq_count;
+#define local_irq_count(cpu) ((void)(cpu), __local_irq_count)
+#else
+#define local_irq_count(cpu) (cpu_data[cpu].irq_count)
+#endif
/*
* Are we in an interrupt context? Either doing bottom half
#define in_interrupt() \
({ \
int __cpu = smp_processor_id(); \
- (local_irq_count[__cpu] + local_bh_count[__cpu]) != 0; \
+ (local_irq_count(__cpu) + local_bh_count(__cpu)) != 0; \
})
#ifndef __SMP__
-#define hardirq_trylock(cpu) (local_irq_count[cpu] == 0)
+#define hardirq_trylock(cpu) (local_irq_count(cpu) == 0)
#define hardirq_endlock(cpu) ((void) 0)
-#define hardirq_enter(cpu, irq) (local_irq_count[cpu]++)
-#define hardirq_exit(cpu, irq) (local_irq_count[cpu]--)
+#define hardirq_enter(cpu, irq) (local_irq_count(cpu)++)
+#define hardirq_exit(cpu, irq) (local_irq_count(cpu)--)
#define synchronize_irq() barrier()
static inline void hardirq_enter(int cpu, int irq)
{
- ++local_irq_count[cpu];
+ ++local_irq_count(cpu);
atomic_inc(&global_irq_count);
- hardirq_no[cpu] |= 1L << irq; /* debugging only */
}
static inline void hardirq_exit(int cpu, int irq)
{
- hardirq_no[cpu] &= ~(1L << irq); /* debugging only */
atomic_dec(&global_irq_count);
- --local_irq_count[cpu];
+ --local_irq_count(cpu);
}
static inline int hardirq_trylock(int cpu)
{
- return !atomic_read(&global_irq_count) && !global_irq_lock.lock;
+ return (!atomic_read(&global_irq_count)
+ && !spin_is_locked(&global_irq_lock));
}
#define hardirq_endlock(cpu) ((void)0)
#endif /* __SMP__ */
#define WIDTH_HARDWARE_ASN 8
+#ifdef __SMP__
#define WIDTH_THIS_PROCESSOR 5
+#else
+#define WIDTH_THIS_PROCESSOR 0
+#endif
#define ASN_FIRST_VERSION (1UL << (WIDTH_THIS_PROCESSOR + WIDTH_HARDWARE_ASN))
#define HARDWARE_ASN_MASK ((1UL << WIDTH_HARDWARE_ASN) - 1)
unsigned long asn = cpu_last_asn(smp_processor_id());
unsigned long next = asn + 1;
- if ((next ^ asn) & ~MAX_ASN) {
+ if ((asn & HARDWARE_ASN_MASK) >= MAX_ASN) {
tbiap();
next = (asn & ~HARDWARE_ASN_MASK) + ASN_FIRST_VERSION;
}
cpu_last_asn(smp_processor_id()) = next;
- mm->context = next; /* full version + asn */
return next;
}
/* As described, ASN's are broken. But we can optimize for
switching between threads -- if the mm is unchanged from
current we needn't flush. */
+ /* ??? May not be needed because EV4 PALcode recognizes that
+ ASN's are broken and does a tbiap itself on swpctx, under
+ the "Must set ASN or flush" rule. At least this is true
+ for a 1992 SRM, reports Joseph Martin (jmartin@hlo.dec.com).
+ I'm going to leave this here anyway, just to Be Sure. -- r~ */
+
if (current->mm != p->mm)
tbiap();
}
{
/* Check if our ASN is of an older version, or on a different CPU,
and thus invalid. */
+ /* ??? If we have two threads on different cpus, we'll continually
+ fight over the context. Find a way to record a per-mm, per-cpu
+ value for the asn. */
- long asn = cpu_last_asn(smp_processor_id());
+ unsigned long asn = cpu_last_asn(smp_processor_id());
struct mm_struct *mm = p->mm;
- long mmc = mm->context;
+ unsigned long mmc = mm->context;
- if ((p->tss.mm_context ^ asn) & ~HARDWARE_ASN_MASK) {
- if ((mmc ^ asn) & ~HARDWARE_ASN_MASK)
- mmc = __get_new_mmu_context(p, mm);
- p->tss.mm_context = mmc;
- p->tss.asn = mmc & HARDWARE_ASN_MASK;
+ if ((mmc ^ asn) & ~HARDWARE_ASN_MASK) {
+ mmc = __get_new_mmu_context(p, mm);
+ mm->context = mmc;
}
+
+ /* Always update the PCB ASN. Another thread may have allocated
+ a new mm->context (via flush_tlb_mm) without the ASN serial
+ number wrapping. We have no way to detect when this is needed. */
+ p->tss.asn = mmc & HARDWARE_ASN_MASK;
}
#ifdef CONFIG_ALPHA_GENERIC
#ifndef _ASM_ALPHA_PARAM_H
#define _ASM_ALPHA_PARAM_H
+/* ??? Gross. I don't want to parameterize this, and supposedly the
+ hardware ignores reprogramming. We also need userland buy-in to the
+ change in HZ, since this is visible in the wait4 resources etc. */
+
#ifndef HZ
-# define HZ 1024
+# ifndef CONFIG_ALPHA_RAWHIDE
+# define HZ 1024
+# else
+# define HZ 1200
+# endif
#endif
#define EXEC_PAGESIZE 8192
__EXTERN_INLINE void
ev5_flush_tlb_current(struct mm_struct *mm)
{
- mm->context = 0;
get_new_mmu_context(current, mm);
reload_context(current);
}
*/
unsigned long flags;
- /* The full version of the ASN including serial number.
-
- Two threads running on two different processors must of necessity
- have different serial numbers. Having this duplicated from
- mm->context allows them to be slightly out of sync preventing
- the asn from incrementing each and every time the two threads
- are scheduled. */
- unsigned long mm_context;
-
/* Perform syscall argument validation (get/set_fs). */
mm_segment_t fs;
0, 0, 0, \
0, 0, 0, \
0, 0, 0, \
- 0, 0, \
+ 0, \
KERNEL_DS \
}
};
#ifdef __KERNEL__
-#define user_mode(regs) ((regs)->ps & 8)
+#define user_mode(regs) (((regs)->ps & 8) != 0)
#define instruction_pointer(regs) ((regs)->pc)
extern void show_regs(struct pt_regs *);
#endif
#ifdef __SMP__
#include <linux/tasks.h>
-#include <asm/init.h>
#include <asm/pal.h>
struct cpuinfo_alpha {
unsigned long ipi_count;
unsigned long prof_multiplier;
unsigned long prof_counter;
-} __cacheline_aligned;
+ int irq_count, bh_count;
+} __attribute__((aligned(64)));
extern struct cpuinfo_alpha cpu_data[NR_CPUS];
#include <asm/atomic.h>
#include <asm/hardirq.h>
-extern unsigned int local_bh_count[NR_CPUS];
+#ifndef __SMP__
+extern int __local_bh_count;
+#define local_bh_count(cpu) ((void)(cpu), __local_bh_count)
+#else
+#define local_bh_count(cpu) (cpu_data[cpu].bh_count)
+#endif
extern inline void cpu_bh_disable(int cpu)
{
- local_bh_count[cpu]++;
+ local_bh_count(cpu)++;
mb();
}
extern inline void cpu_bh_enable(int cpu)
{
mb();
- local_bh_count[cpu]--;
+ local_bh_count(cpu)--;
}
extern inline int cpu_bh_trylock(int cpu)
{
- return local_bh_count[cpu] ? 0 : (local_bh_count[cpu] = 1);
+ return local_bh_count(cpu) ? 0 : (local_bh_count(cpu) = 1);
}
extern inline void cpu_bh_endlock(int cpu)
{
- local_bh_count[cpu] = 0;
+ local_bh_count(cpu) = 0;
}
#define local_bh_enable() cpu_bh_enable(smp_processor_id())
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
#endif
-#define spin_lock_init(lock) ((void) 0)
-#define spin_lock(lock) ((void) 0)
-#define spin_trylock(lock) (1)
-#define spin_unlock_wait(lock) ((void) 0)
-#define spin_unlock(lock) ((void) 0)
+#define spin_lock_init(lock) ((void)(lock))
+#define spin_lock(lock) ((void)(lock))
+#define spin_trylock(lock) ((void)(lock), 1)
+#define spin_unlock_wait(lock) ((void)(lock))
+#define spin_unlock(lock) ((void)(lock))
+#define spin_is_locked(lock) ((void)(lock), 0)
/*
* Read-write spinlocks, allowing multiple readers
#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
#endif
-#define read_lock(lock) ((void) 0)
-#define read_unlock(lock) ((void) 0)
-#define write_lock(lock) ((void) 0)
-#define write_unlock(lock) ((void) 0)
+#define read_lock(lock) ((void)(lock))
+#define read_unlock(lock) ((void)(lock))
+#define write_lock(lock) ((void)(lock))
+#define write_unlock(lock) ((void)(lock))
#else /* __SMP__ */
#define spin_lock_init(x) ((x)->lock = 0)
#endif
-#define spin_unlock_wait(x) \
- ({ do { barrier(); } while(((volatile spinlock_t *)x)->lock); })
+#define spin_is_locked(x) ((x)->lock != 0)
+#define spin_unlock_wait(x) ({ do { barrier(); } while ((x)->lock); })
typedef struct { unsigned long a[100]; } __dummy_lock_t;
#define __dummy_lock(lock) (*(__dummy_lock_t *)(lock))
/*
* get a new mmu context.. x86's don't know much about contexts,
- * but we have to reload the new LDT in exec().
+ * but we have to reload the new LDT in exec().
+ *
+ * We implement lazy MMU context-switching on x86 to optimize context
+ * switches done to/from kernel threads. Kernel threads 'inherit' the
+ * previous MM, so Linux doesn't have to flush the TLB. In most cases
+ * we switch back to the same process so we preserve the TLB cache.
+ * This all means that kernel threads have about as much overhead as
+ * a function call ...
*/
-#define get_mmu_context(tsk) do { } while(0)
+#define get_mmu_context(prev, next) \
+ do { if (next->flags & PF_LAZY_TLB) \
+ { mmget(prev->mm); next->mm = prev->mm; \
+ next->thread.cr3 = prev->thread.cr3; } } while(0)
+
+#define put_mmu_context(prev, next) \
+ do { if (prev->flags & PF_LAZY_TLB) \
+ { mmput(prev->mm); } } while(0)
#define init_new_context(mm) do { } while(0)
/*
do { \
unsigned long __pgdir = __pa(pgdir); \
(tsk)->thread.cr3 = __pgdir; \
+ /* do not inherit lazy-TLB after exec() */ \
+ if ((pgdir != swapper_pg_dir) && ((tsk)->flags & PF_LAZY_TLB)) \
+ (tsk)->flags &= ~PF_LAZY_TLB; \
if ((tsk) == current) \
__asm__ __volatile__("movl %0,%%cr3": :"r" (__pgdir)); \
} while (0)
unsigned long heartbeat_reset;
unsigned long heartbeat_count;
+ void (*progress)(char *, unsigned short);
+
unsigned char (*nvram_read_val)(int addr);
void (*nvram_write_val)(int addr, unsigned char val);
extern void cvt_fd(float *from, double *to, unsigned long *fpscr);
extern void cvt_df(double *from, float *to, unsigned long *fpscr);
extern int call_rtas(const char *, int, int, unsigned long *, ...);
-extern void chrp_progress(char *);
-void chrp_event_scan(void);
struct device_node;
extern void note_scsi_host(struct device_node *, void *);
wait_queue_head_t wait;
struct page **pprev_hash;
struct buffer_head * buffers;
- int owner; /* temporary debugging check */
+ void *owner; /* temporary debugging check */
} mem_map_t;
#define get_page(p) do { atomic_inc(&(p)->count); \
do { int _ret = test_and_set_bit(PG_locked, &(page)->flags); \
if (_ret) PAGE_BUG(page); \
if (page->owner) PAGE_BUG(page); \
- page->owner = (int)current; } while (0)
+ page->owner = current; } while (0)
#define TryLockPage(page) ({ int _ret = test_and_set_bit(PG_locked, &(page)->flags); \
- if (!_ret) page->owner = (int)current; _ret; })
+ if (!_ret) page->owner = current; _ret; })
#define UnlockPage(page) do { \
- if (page->owner != (int)current) { \
+ if (page->owner != current) { \
BUG(); } page->owner = 0; \
if (!test_and_clear_bit(PG_locked, &(page)->flags)) { \
PAGE_BUG(page); } wake_up(&page->wait); } while (0)
#define CLONE_PID 0x00001000 /* set if pid shared */
#define CLONE_PTRACE 0x00002000 /* set if we want to let tracing continue on the child too */
#define CLONE_VFORK 0x00004000 /* set if the parent wants the child to wake it up on mm_release */
+#define CLONE_TLB 0x00008000 /* system thread does lazy TLB flushing (kernel-internal only!) */
/*
* These are the constant used to fake the fixed-point load-average
#define PF_SIGNALED 0x00000400 /* killed by a signal */
#define PF_MEMALLOC 0x00000800 /* Allocating memory */
#define PF_VFORK 0x00001000 /* Wake up parent in mm_release */
+#define PF_LAZY_TLB 0x00002000 /* thread does lazy TLB switching */
#define PF_USEDFPU 0x00100000 /* task used FPU this quantum (SMP) */
#define PF_DTRACE 0x00200000 /* delayed trace (used on m68k, i386) */
new_flags &= ~(PF_PTRACED|PF_TRACESYS);
if (clone_flags & CLONE_VFORK)
new_flags |= PF_VFORK;
+ if ((clone_flags & CLONE_TLB) && capable(CAP_SYS_ADMIN))
+ new_flags |= PF_LAZY_TLB;
p->flags = new_flags;
}
#endif /* __SMP__ */
kstat.context_swtch++;
- get_mmu_context(next);
+ /*
+ * there are 3 processes which are affected by a context switch:
+ *
+ * prev == .... ==> (last => next)
+ *
+ * It's the 'much more previous' 'prev' that is on next's stack,
+ * but prev is set to (the just run) 'last' process by switch_to().
+ * This might sound slightly confusing but makes tons of sense.
+ */
+ get_mmu_context(prev, next);
switch_to(prev, next, prev);
+ put_mmu_context(prev, next);
__schedule_tail(prev);
same_process:
printk("%5d ", p->p_cptr->pid);
else
printk(" ");
+ if (p->flags & PF_LAZY_TLB)
+ printk(" (L-TLB) ");
+ else
+ printk(" (NOTLB) ");
if (p->p_ysptr)
printk("%7d", p->p_ysptr->pid);
else
init_bh(TIMER_BH, timer_bh);
init_bh(TQUEUE_BH, tqueue_bh);
init_bh(IMMEDIATE_BH, immediate_bh);
+
+ /*
+ * The boot idle thread does lazy MMU switching as well:
+ */
+ mmget(&init_mm);
+ current->flags |= PF_LAZY_TLB;
}
flags = page->flags & ~((1 << PG_uptodate) | (1 << PG_error));
page->flags = flags | ((1 << PG_locked) | (1 << PG_referenced));
- page->owner = (int)current; /* REMOVEME */
+ page->owner = current; /* REMOVEME */
get_page(page);
page->offset = offset;
add_page_to_inode_queue(inode, page);
if (!PageLocked(page)) {
PAGE_BUG(page);
} else {
- if (page->owner != (int)current) {
+ if (page->owner != current) {
PAGE_BUG(page);
}
}
flush_cache_range(mm, new_addr, new_addr + len);
while ((offset += PAGE_SIZE) < len)
move_one_page(mm, new_addr + offset, old_addr + offset);
- zap_page_range(mm, new_addr, new_addr + len);
+ zap_page_range(mm, new_addr, len);
flush_tlb_range(mm, new_addr, new_addr + len);
return -1;
}