D: Miscellaneous MCA-support
N: Matt Welsh
-E: mdw@sunsite.unc.edu
-D: Linux Documentation Project coordinator
-D: Author, _Running_Linux_ and I&GS guide
-D: Linuxdoc-SGML formatting system
+E: mdw@metalab.unc.edu
+W: http://www.cs.berkeley.edu/~mdw
+D: Original Linux Documentation Project coordinator
+D: Author, "Running Linux" (O'Reilly)
+D: Author, "Linux Installation and Getting Started" (LDP) and several HOWTOs
+D: Linuxdoc-SGML formatting system (now SGML-Tools)
+D: Device drivers for various high-speed network interfaces (Myrinet, ATM)
D: Keithley DAS1200 device driver
-D: Maintainer of sunsite WWW and FTP, moderator c.o.l.answers
-S: Cornell University Computer Science Department
-S: Robotics and Vision Laboratory
-S: 4130 Upson Hall
-S: Ithaca, New York 14850
+D: Original maintainer of sunsite WWW and FTP sites
+D: Original moderator of c.o.l.announce and c.o.l.answers
+S: Computer Science Division
+S: UC Berkeley
+S: Berkeley, CA 94720-1776
S: USA
N: Greg Wettstein
section (except for CONFIG_IP_ROUTE_TOS and CONFIG_IP_ROUTE_FWMARK).
At the moment, few devices support fast switching (tulip is one of
them, modified 8390 can be found at
- ftp://ftp.inr.ac.ru/ip-routing/fastroute-8390.tar.gz).
+ ftp://ftp.inr.ac.ru/ip-routing/fastroute/fastroute-8390.tar.gz).
If unsure, say N.
during periods of extremal congestion. At the moment only a couple
of device drivers support it (really only one -- tulip, modified
8390 can be found at
- ftp://ftp.inr.ac.ru/ip-routing/fastroute-8390.tar.gz). Really, this
- option is applicable to any machine attached to a fast enough
+ ftp://ftp.inr.ac.ru/ip-routing/fastroute/fastroute-8390.tar.gz).
+ Really, this option is applicable to any machine attached to a fast enough
network, and even a 10 Mb NIC is able to kill a not very slow box,
such as a 120MHz Pentium.
VERSION = 2
PATCHLEVEL = 3
-SUBLEVEL = 12
+SUBLEVEL = 13
EXTRAVERSION =
ARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ -e s/arm.*/arm/ -e s/sa110/arm/)
* HACK: the PCI-to-EISA bridge does not seem to identify
* itself as a bridge... :-(
*/
- if (dev->vendor == PCI_VENDOR_ID_INTEL &&
- dev->device == PCI_DEVICE_ID_INTEL_82375) {
+ if (dev->vendor == PCI_VENDOR_ID_INTEL
+ && dev->device == PCI_DEVICE_ID_INTEL_82375) {
dev->class = PCI_CLASS_BRIDGE_EISA;
DBG_DEVS(("layout_dev: ignoring PCEB...\n"));
return;
}
- if (dev->vendor == PCI_VENDOR_ID_INTEL &&
- dev->device == PCI_DEVICE_ID_INTEL_82378) {
+ if (dev->vendor == PCI_VENDOR_ID_INTEL
+ && dev->device == PCI_DEVICE_ID_INTEL_82378) {
dev->class = PCI_CLASS_BRIDGE_ISA;
DBG_DEVS(("layout_dev: ignoring SIO...\n"));
return;
}
- /*
- * We don't have code that will init the CYPRESS bridge correctly
- * so we do the next best thing, and depend on the previous
- * console code to do the right thing, and ignore it here... :-\
- */
- if (dev->vendor == PCI_VENDOR_ID_CONTAQ &&
- dev->device == PCI_DEVICE_ID_CONTAQ_82C693) {
+ if (dev->vendor == PCI_VENDOR_ID_CONTAQ
+ && dev->device == PCI_DEVICE_ID_CONTAQ_82C693
+ && dev->class >> 8 == PCI_CLASS_BRIDGE_ISA) {
DBG_DEVS(("layout_dev: ignoring CYPRESS bridge...\n"));
return;
}
base &= PCI_BASE_ADDRESS_IO_MASK;
mask = (~base << 1) | 0x1;
size = (mask & base) & 0xffffffff;
+
+ /* We don't want to disturb normal IDE functions, so
+ we don't touch the first two I/O ports on the
+ Cypress. */
+ if (dev->vendor == PCI_VENDOR_ID_CONTAQ
+ && dev->device == PCI_DEVICE_ID_CONTAQ_82C693
+ && dev->class >> 8 == PCI_CLASS_BRIDGE_ISA
+ && idx < 2) {
+ continue;
+ }
+
/*
* Aligning to 0x800 rather than the minimum base of
* 0x400 is an attempt to avoid having devices in
if (dev->class >> 8 == PCI_CLASS_NOT_DEFINED ||
dev->class >> 8 == PCI_CLASS_NOT_DEFINED_VGA ||
dev->class >> 8 == PCI_CLASS_STORAGE_IDE ||
- dev->class >> 16 == PCI_BASE_CLASS_DISPLAY)
- {
+ dev->class >> 16 == PCI_BASE_CLASS_DISPLAY) {
/*
* All of these (may) have I/O scattered all around
* and may not use i/o-base address registers at all.
#include <linux/config.h>
-/* Whee. TSUNAMI doesn't have an HAE. Fix things up for the GENERIC
- kernel by defining the HAE address to be that of the cache. Now
- we can read and write it as we like. ;-) */
+/* Whee. Both TSUNAMI and POLARIS don't have an HAE. Fix things up for
+ the GENERIC kernel by defining the HAE address to be that of the cache.
+ Now we can read and write it as we like. ;-) */
#define TSUNAMI_HAE_ADDRESS (&alpha_mv.hae_cache)
-
-/* Whee. POLARIS doesn't have an HAE. Fix things up for the GENERIC
- kernel by defining the HAE address to be that of the cache. Now
- we can read and write it as we like. ;-) */
#define POLARIS_HAE_ADDRESS (&alpha_mv.hae_cache)
/* Only a few systems don't define IACK_SC, handling all interrupts through
#define DO_EV4_MMU \
max_asn: EV4_MAX_ASN, \
- mv_get_mmu_context: ev4_get_mmu_context, \
+ mv_switch_mm: ev4_switch_mm, \
+ mv_activate_mm: ev4_activate_mm, \
mv_flush_tlb_current: ev4_flush_tlb_current, \
mv_flush_tlb_other: ev4_flush_tlb_other, \
mv_flush_tlb_current_page: ev4_flush_tlb_current_page
#define DO_EV5_MMU \
max_asn: EV5_MAX_ASN, \
- mv_get_mmu_context: ev5_get_mmu_context, \
+ mv_switch_mm: ev5_switch_mm, \
+ mv_activate_mm: ev5_activate_mm, \
mv_flush_tlb_current: ev5_flush_tlb_current, \
mv_flush_tlb_other: ev5_flush_tlb_other, \
mv_flush_tlb_current_page: ev5_flush_tlb_current_page
#define DO_EV6_MMU \
max_asn: EV6_MAX_ASN, \
- mv_get_mmu_context: ev5_get_mmu_context, \
+ mv_switch_mm: ev5_switch_mm, \
+ mv_activate_mm: ev5_activate_mm, \
mv_flush_tlb_current: ev5_flush_tlb_current, \
mv_flush_tlb_other: ev5_flush_tlb_other, \
mv_flush_tlb_current_page: ev5_flush_tlb_current_page
w = ieee_fpcr_to_swcr(fpcr);
if (!(fpcr & FPCR_UNDZ)) {
w &= ~IEEE_TRAP_ENABLE_UNF;
- w |= current->tss.flags & IEEE_TRAP_ENABLE_UNF;
+ w |= (current->thread.flags
+ & IEEE_TRAP_ENABLE_UNF);
}
} else {
/* Otherwise we are forced to do everything in sw. */
- w = current->tss.flags & IEEE_SW_MASK;
+ w = current->thread.flags & IEEE_SW_MASK;
}
if (put_user(w, (unsigned long *) buffer))
case GSI_UACPROC:
if (nbytes < sizeof(unsigned int))
return -EINVAL;
- w = (current->tss.flags >> UAC_SHIFT) & UAC_BITMASK;
+ w = (current->thread.flags >> UAC_SHIFT) & UAC_BITMASK;
if (put_user(w, (unsigned int *)buffer))
return -EFAULT;
return 1;
/* Update software trap enable bits. */
if (get_user(swcr, (unsigned long *)buffer))
return -EFAULT;
- current->tss.flags &= ~IEEE_SW_MASK;
- current->tss.flags |= swcr & IEEE_SW_MASK;
+ current->thread.flags &= ~IEEE_SW_MASK;
+ current->thread.flags |= swcr & IEEE_SW_MASK;
/* Update the real fpcr. Keep UNFD off if not UNDZ. */
fpcr = rdfpcr();
return -EFAULT;
switch (v) {
case SSIN_UACPROC:
- current->tss.flags &=
+ current->thread.flags &=
~(UAC_BITMASK << UAC_SHIFT);
- current->tss.flags |=
+ current->thread.flags |=
(w & UAC_BITMASK) << UAC_SHIFT;
break;
that EV6 defines UNFD valid only with UNDZ, which we don't want
for IEEE conformance -- so that disabled bit remains in software. */
- current->tss.flags &= ~IEEE_SW_MASK;
+ current->thread.flags &= ~IEEE_SW_MASK;
wrfpcr(FPCR_DYN_NORMAL | FPCR_INVD | FPCR_DZED | FPCR_OVFD | FPCR_INED);
}
#else
childstack->r26 = (unsigned long) ret_from_sys_call;
#endif
- p->tss.usp = usp;
- p->tss.ksp = (unsigned long) childstack;
- p->tss.pal_flags = 1; /* set FEN, clear everything else */
- p->tss.flags = current->tss.flags;
+ p->thread.usp = usp;
+ p->thread.ksp = (unsigned long) childstack;
+ p->thread.pal_flags = 1; /* set FEN, clear everything else */
+ p->thread.flags = current->thread.flags;
return 0;
}
extern void rtc_init_pit(void);
extern void generic_init_pit(void);
extern unsigned long est_cycle_freq;
+extern struct resource timer_resource;
/* smc37c93x.c */
extern void SMC93x_Init(void);
long *addr;
if (regno == 30) {
- addr = &task->tss.usp;
+ addr = &task->thread.usp;
} else if (regno == 31 || regno > 64) {
zero = 0;
addr = &zero;
* branch (emulation can be tricky for fp branches).
*/
displ = ((s32)(insn << 11)) >> 9;
- child->tss.bpt_addr[nsaved++] = pc + 4;
+ child->thread.bpt_addr[nsaved++] = pc + 4;
if (displ) /* guard against unoptimized code */
- child->tss.bpt_addr[nsaved++] = pc + 4 + displ;
+ child->thread.bpt_addr[nsaved++] = pc + 4 + displ;
DBG(DBG_BPT, ("execing branch\n"));
} else if (op_code == 0x1a) {
reg_b = (insn >> 16) & 0x1f;
- child->tss.bpt_addr[nsaved++] = get_reg(child, reg_b);
+ child->thread.bpt_addr[nsaved++] = get_reg(child, reg_b);
DBG(DBG_BPT, ("execing jump\n"));
} else {
- child->tss.bpt_addr[nsaved++] = pc + 4;
+ child->thread.bpt_addr[nsaved++] = pc + 4;
DBG(DBG_BPT, ("execing normal insn\n"));
}
/* install breakpoints: */
for (i = 0; i < nsaved; ++i) {
- res = read_int(child, child->tss.bpt_addr[i], &insn);
+ res = read_int(child, child->thread.bpt_addr[i], &insn);
if (res < 0)
return res;
- child->tss.bpt_insn[i] = insn;
- DBG(DBG_BPT, (" -> next_pc=%lx\n", child->tss.bpt_addr[i]));
- res = write_int(child, child->tss.bpt_addr[i], BREAKINST);
+ child->thread.bpt_insn[i] = insn;
+ DBG(DBG_BPT, (" -> next_pc=%lx\n", child->thread.bpt_addr[i]));
+ res = write_int(child, child->thread.bpt_addr[i], BREAKINST);
if (res < 0)
return res;
}
- child->tss.bpt_nsaved = nsaved;
+ child->thread.bpt_nsaved = nsaved;
return 0;
}
int
ptrace_cancel_bpt(struct task_struct * child)
{
- int i, nsaved = child->tss.bpt_nsaved;
+ int i, nsaved = child->thread.bpt_nsaved;
- child->tss.bpt_nsaved = 0;
+ child->thread.bpt_nsaved = 0;
if (nsaved > 2) {
printk("ptrace_cancel_bpt: bogus nsaved: %d!\n", nsaved);
}
for (i = 0; i < nsaved; ++i) {
- write_int(child, child->tss.bpt_addr[i],
- child->tss.bpt_insn[i]);
+ write_int(child, child->thread.bpt_addr[i],
+ child->thread.bpt_insn[i]);
}
return (nsaved != 0);
}
ret = -EIO;
if ((unsigned long) data > _NSIG)
goto out;
- child->tss.bpt_nsaved = -1; /* mark single-stepping */
+ child->thread.bpt_nsaved = -1; /* mark single-stepping */
child->flags &= ~PF_TRACESYS;
wake_up_process(child);
child->exit_code = data;
/* Initialize the CPU's HWPCB to something just good enough for
us to get started. Immediately after starting, we'll swpctx
- to the target idle task's tss. Reuse the stack in the mean
+ to the target idle task's ptb. Reuse the stack in the mean
time. Precalculate the target PCBB. */
hwpcb->ksp = (unsigned long) idle + sizeof(union task_union) - 16;
hwpcb->usp = 0;
- hwpcb->ptbr = idle->tss.ptbr;
+ hwpcb->ptbr = idle->thread.ptbr;
hwpcb->pcc = 0;
hwpcb->asn = 0;
- hwpcb->unique = virt_to_phys(&idle->tss);
- hwpcb->flags = idle->tss.pal_flags;
+ hwpcb->unique = virt_to_phys(&idle->thread);
+ hwpcb->flags = idle->thread.pal_flags;
hwpcb->res1 = hwpcb->res2 = 0;
DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n",
hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwcpb->unique));
DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
- cpuid, idle->state, idle->tss.pal_flags));
+ cpuid, idle->state, idle->thread.pal_flags));
/* Setup HWRPB fields that SRM uses to activate secondary CPU */
hwrpb->CPU_restart = __smp_callin;
HWRPB.CPU_restart says to start. But this gets all the other
task-y sort of data structures set up like we wish. */
kernel_thread((void *)__smp_callin, NULL, CLONE_PID|CLONE_VM);
- idle = task[cpunum];
- if (!idle)
- panic("No idle process for CPU %d", cpuid);
- idle->processor = cpuid;
+
+ idle = init_task.prev_task;
+ if (!idle)
+ panic("No idle process for CPU %d", cpunum);
+ del_from_runqueue(idle);
+ init_tasks[cpunum] = idle;
+ idle->processor = cpuid;
/* Schedule the first task manually. */
/* ??? Ingo, what is this? */
{
/* FIXME: What do we do with ruffian_get_bank_size above? */
+#if 1
+ pyxis_init_arch();
+#else
pyxis_enable_errors();
if (!pyxis_srm_window_setup()) {
printk("ruffian_init_arch: Skipping window register rewrites."
"\n... Trust DeskStation firmware!\n");
}
pyxis_finish_init_arch();
+#endif
}
static void
ruffian_init_pit (void)
{
/* Ruffian depends on the system timer established in MILO! */
- request_region(0x70, 0x10, "timer");
+ timer_resource.start = 0x70;
+ timer_resource.end = 0x70 + 0x10;
+ request_resource(&ioport_resource, &timer_resource);
outb(0xb6, 0x43); /* pit counter 2: speaker */
outb(0x31, 0x42);
static int set_rtc_mmss(unsigned long);
+#ifdef CONFIG_RTC
+struct resource timer_resource = { "pit", 0x40, 0x40+0x20 };
+#else
+struct resource timer_resource = { "rtc", 0, 0 };
+#endif
+
/*
* Shift amount by which scaled_ticks_per_cycle is scaled. Shifting
CMOS_WRITE(control, RTC_CONTROL);
(void) CMOS_READ(RTC_INTR_FLAGS);
- request_region(0x40, 0x20, "timer"); /* reserve pit */
+ request_resource(&ioport_resource, &timer_resource);
/* Setup interval timer. */
outb(0x34, 0x43); /* binary, mode 2, LSB/MSB, ch 0 */
}
(void) CMOS_READ(RTC_INTR_FLAGS);
- request_region(RTC_PORT(0), 0x10, "timer"); /* reserve rtc */
+ timer_resource.start = RTC_PORT(0);
+ timer_resource.end = RTC_PORT(0) + 0x10;
+ request_resource(&ioport_resource, &timer_resource);
outb(0x36, 0x43); /* pit counter 0: system timer */
outb(0x00, 0x40);
/* setup timer */
irq_handler = timer_interrupt;
- if (request_irq(TIMER_IRQ, irq_handler, 0, "timer", NULL))
+ if (request_irq(TIMER_IRQ, irq_handler, 0, timer_resource.name, NULL))
panic("Could not allocate timer IRQ!");
}
dik_show_code((unsigned int *)regs->pc);
dik_show_trace((unsigned long *)(regs+1));
- if (current->tss.flags & (1UL << 63)) {
+ if (current->thread.flags & (1UL << 63)) {
printk("die_if_kernel recursion detected.\n");
sti();
while (1);
}
- current->tss.flags |= (1UL << 63);
+ current->thread.flags |= (1UL << 63);
do_exit(SIGSEGV);
}
unsigned long a2, unsigned long a3, unsigned long a4,
unsigned long a5, struct pt_regs regs)
{
- die_if_kernel("Instruction fault", &regs, type, 0);
+ die_if_kernel((type == 1 ? "Kernel Bug" : "Instruction fault"),
+ &regs, type, 0);
+
switch (type) {
case 0: /* breakpoint */
if (ptrace_cancel_bpt(current)) {
dik_show_code((unsigned int *)pc);
dik_show_trace((unsigned long *)(&regs+1));
- if (current->tss.flags & (1UL << 63)) {
+ if (current->thread.flags & (1UL << 63)) {
printk("die_if_kernel recursion detected.\n");
sti();
while (1);
}
- current->tss.flags |= (1UL << 63);
+ current->thread.flags |= (1UL << 63);
do_exit(SIGSEGV);
}
/* Check the UAC bits to decide what the user wants us to do
with the unaliged access. */
- uac_bits = (current->tss.flags >> UAC_SHIFT) & UAC_BITMASK;
+ uac_bits = (current->thread.flags >> UAC_SHIFT) & UAC_BITMASK;
if (!(uac_bits & UAC_NOPRINT)) {
if (cnt >= 5 && jiffies - last_time > 5*HZ) {
cnt = 0;
alpha_fp_emul (unsigned long pc)
{
unsigned long op_fun, fa, fb, fc, func, mode;
- unsigned long fpcw = current->tss.flags;
+ unsigned long fpcw = current->thread.flags;
unsigned long va, vb, vc, res, fpcr;
__u32 insn;
*/
if (res) {
/* Record exceptions in software control word. */
- current->tss.flags = fpcw |= res >> 35;
+ current->thread.flags = fpcw |= res >> 35;
/* Update hardware control register */
fpcr &= (~FPCR_MASK | FPCR_DYN_MASK);
unsigned long last_asn = ASN_FIRST_VERSION;
#endif
-void
-get_new_mmu_context(struct task_struct *p, struct mm_struct *mm)
+void ev5_flush_tlb_current(struct mm_struct *mm)
{
- unsigned long new = __get_new_mmu_context(p, mm);
- mm->context = new;
- p->tss.asn = new & HARDWARE_ASN_MASK;
+ ev5_activate_mm(NULL, mm, smp_processor_id());
}
{
register unsigned long sp __asm__("$30");
pcb->ksp = sp;
- return __reload_tss(pcb);
+ return __reload_thread(pcb);
}
/*
}
/* Also set up the real kernel PCB while we're at it. */
- init_task.tss.ptbr = newptbr;
- init_task.tss.pal_flags = 1; /* set FEN, clear everything else */
- init_task.tss.flags = 0;
- original_pcb_ptr = load_PCB(&init_task.tss);
+ init_task.thread.ptbr = newptbr;
+ init_task.thread.pal_flags = 1; /* set FEN, clear everything else */
+ init_task.thread.flags = 0;
+ original_pcb_ptr = load_PCB(&init_task.thread);
tbia();
/* Save off the contents of the original PCB so that we can
static void set_time(void);
static void check_events(void);
-static void do_apm_timer(unsigned long);
static int do_open(struct inode *, struct file *);
static int do_release(struct inode *, struct file *);
static int debug = 0;
static int apm_disabled = 0;
-static DECLARE_WAIT_QUEUE_HEAD(process_list);
+static DECLARE_WAIT_QUEUE_HEAD(apm_waitqueue);
static struct apm_bios_struct * user_list = NULL;
-static struct timer_list apm_timer;
-
static char driver_version[] = "1.9"; /* no spaces */
#ifdef APM_DEBUG
break;
}
}
- wake_up_interruptible(&process_list);
+ wake_up_interruptible(&apm_waitqueue);
return 1;
}
}
}
-static void do_apm_timer(unsigned long unused)
+/*
+ * This is the APM thread main loop.
+ */
+static void apm_mainloop(void)
{
- int err;
+ DECLARE_WAITQUEUE(wait, current);
+ apm_enabled = 1;
- static int pending_count = 0;
+ add_wait_queue(&apm_waitqueue, &wait);
+ for (;;) {
+ static int pending_count = 0;
+ int err;
- if (((standbys_pending > 0) || (suspends_pending > 0))
- && (apm_bios_info.version > 0x100)
- && (pending_count-- <= 0)) {
- pending_count = 4;
+ current->state = TASK_INTERRUPTIBLE;
+ schedule_timeout(APM_CHECK_TIMEOUT);
- err = apm_set_power_state(APM_STATE_BUSY);
- if (err)
- apm_error("busy", err);
- }
+ if (((standbys_pending > 0) || (suspends_pending > 0))
+ && (apm_bios_info.version > 0x100)
+ && (pending_count-- <= 0)) {
+ pending_count = 4;
- if (!(((standbys_pending > 0) || (suspends_pending > 0))
- && (apm_bios_info.version == 0x100)))
- check_events();
+ err = apm_set_power_state(APM_STATE_BUSY);
+ if (err)
+ apm_error("busy", err);
+ }
- init_timer(&apm_timer);
- apm_timer.expires = APM_CHECK_TIMEOUT + jiffies;
- add_timer(&apm_timer);
+ if (!(((standbys_pending > 0) || (suspends_pending > 0))
+ && (apm_bios_info.version == 0x100)))
+ check_events();
+ }
}
-/* Called from sys_idle, must make sure apm_enabled. */
+/* Called from cpu_idle, must make sure apm_enabled. */
int apm_do_idle(void)
{
#ifdef CONFIG_APM_CPU_IDLE
#endif
}
-/* Called from sys_idle, must make sure apm_enabled. */
+/* Called from cpu_idle, must make sure apm_enabled. */
void apm_do_busy(void)
{
#ifdef CONFIG_APM_CPU_IDLE
if (queue_empty(as)) {
if (fp->f_flags & O_NONBLOCK)
return -EAGAIN;
- add_wait_queue(&process_list, &wait);
+ add_wait_queue(&apm_waitqueue, &wait);
repeat:
current->state = TASK_INTERRUPTIBLE;
if (queue_empty(as) && !signal_pending(current)) {
goto repeat;
}
current->state = TASK_RUNNING;
- remove_wait_queue(&process_list, &wait);
+ remove_wait_queue(&apm_waitqueue, &wait);
}
i = count;
while ((i >= sizeof(event)) && !queue_empty(as)) {
as = fp->private_data;
if (check_apm_bios_struct(as, "select"))
return 0;
- poll_wait(fp, &process_list, wait);
+ poll_wait(fp, &apm_waitqueue, wait);
if (!queue_empty(as))
return POLLIN | POLLRDNORM;
return 0;
}
}
-void __init apm_bios_init(void)
+static int apm(void *unused)
{
unsigned short bx;
unsigned short cx;
unsigned short error;
char * power_stat;
char * bat_stat;
+
+ strcpy(current->comm, "kapmd");
+ sigfillset(&current->blocked);
+
+ if (apm_bios_info.version > 0x100) {
+ /*
+ * We only support BIOSs up to version 1.2
+ */
+ if (apm_bios_info.version > 0x0102)
+ apm_bios_info.version = 0x0102;
+ if (apm_driver_version(&apm_bios_info.version) != APM_SUCCESS) {
+ /* Fall back to an APM 1.0 connection. */
+ apm_bios_info.version = 0x100;
+ }
+ }
+ if (debug) {
+ printk(KERN_INFO "apm: Connection version %d.%d\n",
+ (apm_bios_info.version >> 8) & 0xff,
+ apm_bios_info.version & 0xff );
+
+ error = apm_get_power_status(&bx, &cx, &dx);
+ if (error)
+ printk(KERN_INFO "apm: power status not available\n");
+ else {
+ switch ((bx >> 8) & 0xff) {
+ case 0: power_stat = "off line"; break;
+ case 1: power_stat = "on line"; break;
+ case 2: power_stat = "on backup power"; break;
+ default: power_stat = "unknown"; break;
+ }
+ switch (bx & 0xff) {
+ case 0: bat_stat = "high"; break;
+ case 1: bat_stat = "low"; break;
+ case 2: bat_stat = "critical"; break;
+ case 3: bat_stat = "charging"; break;
+ default: bat_stat = "unknown"; break;
+ }
+ printk(KERN_INFO
+ "apm: AC %s, battery status %s, battery life ",
+ power_stat, bat_stat);
+ if ((cx & 0xff) == 0xff)
+ printk("unknown\n");
+ else
+ printk("%d%%\n", cx & 0xff);
+ if (apm_bios_info.version > 0x100) {
+ printk(KERN_INFO
+ "apm: battery flag 0x%02x, battery life ",
+ (cx >> 8) & 0xff);
+ if (dx == 0xffff)
+ printk("unknown\n");
+ else
+ printk("%d %s\n", dx & 0x7fff,
+ (dx & 0x8000) ?
+ "minutes" : "seconds");
+ }
+ }
+ }
+
+#ifdef CONFIG_APM_DO_ENABLE
+ if (apm_bios_info.flags & APM_BIOS_DISABLED) {
+ /*
+ * This call causes my NEC UltraLite Versa 33/C to hang if it
+ * is booted with PM disabled but not in the docking station.
+ * Unfortunate ...
+ */
+ error = apm_enable_power_management();
+ if (error) {
+ apm_error("enable power management", error);
+ return -1;
+ }
+ }
+#endif
+ if (((apm_bios_info.flags & APM_BIOS_DISABLED) == 0)
+ && (apm_bios_info.version > 0x0100)) {
+ if (apm_engage_power_management(0x0001) == APM_SUCCESS)
+ apm_bios_info.flags &= ~APM_BIOS_DISENGAGED;
+ }
+
+ apm_mainloop();
+ return 0;
+}
+
+/*
+ * Just start the APM thread. We do NOT want to do APM BIOS
+ * calls from anything but the APM thread, if for no other reason
+ * than the fact that we don't trust the APM BIOS. This way,
+ * most common APM BIOS problems that lead to protection errors
+ * etc will have at least some level of being contained...
+ *
+ * In short, if something bad happens, at least we have a choice
+ * of just killing the apm thread..
+ */
+void __init apm_init(void)
+{
static struct proc_dir_entry *ent;
if (apm_bios_info.version == 0) {
return;
}
+#ifdef CONFIG_SMP
+ if (smp_num_cpus > 1) {
+ printk(KERN_NOTICE "apm: disabled - APM is not SMP safe.\n");
+ if (smp_hack)
+ smp_hack = 2;
+ return -1;
+ }
+#endif
+
/*
* Set up a segment that references the real mode segment 0x40
* that extends up to the end of page zero (that we have reserved).
(apm_bios_info.dseg_len - 1) & 0xffff);
}
#endif
-#ifdef CONFIG_SMP
- if (smp_num_cpus > 1) {
- printk(KERN_NOTICE "apm: disabled - APM is not SMP safe.\n");
- if (smp_hack)
- smp_hack = 2;
- return;
- }
-#endif
- if (apm_bios_info.version > 0x100) {
- /*
- * We only support BIOSs up to version 1.2
- */
- if (apm_bios_info.version > 0x0102)
- apm_bios_info.version = 0x0102;
- if (apm_driver_version(&apm_bios_info.version) != APM_SUCCESS) {
- /* Fall back to an APM 1.0 connection. */
- apm_bios_info.version = 0x100;
- }
- }
- if (debug) {
- printk(KERN_INFO "apm: Connection version %d.%d\n",
- (apm_bios_info.version >> 8) & 0xff,
- apm_bios_info.version & 0xff );
-
- error = apm_get_power_status(&bx, &cx, &dx);
- if (error)
- printk(KERN_INFO "apm: power status not available\n");
- else {
- switch ((bx >> 8) & 0xff) {
- case 0: power_stat = "off line"; break;
- case 1: power_stat = "on line"; break;
- case 2: power_stat = "on backup power"; break;
- default: power_stat = "unknown"; break;
- }
- switch (bx & 0xff) {
- case 0: bat_stat = "high"; break;
- case 1: bat_stat = "low"; break;
- case 2: bat_stat = "critical"; break;
- case 3: bat_stat = "charging"; break;
- default: bat_stat = "unknown"; break;
- }
- printk(KERN_INFO
- "apm: AC %s, battery status %s, battery life ",
- power_stat, bat_stat);
- if ((cx & 0xff) == 0xff)
- printk("unknown\n");
- else
- printk("%d%%\n", cx & 0xff);
- if (apm_bios_info.version > 0x100) {
- printk(KERN_INFO
- "apm: battery flag 0x%02x, battery life ",
- (cx >> 8) & 0xff);
- if (dx == 0xffff)
- printk("unknown\n");
- else
- printk("%d %s\n", dx & 0x7fff,
- (dx & 0x8000) ?
- "minutes" : "seconds");
- }
- }
- }
-
-#ifdef CONFIG_APM_DO_ENABLE
- if (apm_bios_info.flags & APM_BIOS_DISABLED) {
- /*
- * This call causes my NEC UltraLite Versa 33/C to hang if it
- * is booted with PM disabled but not in the docking station.
- * Unfortunate ...
- */
- error = apm_enable_power_management();
- if (error) {
- apm_error("enable power management", error);
- return;
- }
- }
-#endif
- if (((apm_bios_info.flags & APM_BIOS_DISABLED) == 0)
- && (apm_bios_info.version > 0x0100)) {
- if (apm_engage_power_management(0x0001) == APM_SUCCESS)
- apm_bios_info.flags &= ~APM_BIOS_DISENGAGED;
- }
-
- init_timer(&apm_timer);
- apm_timer.function = do_apm_timer;
- apm_timer.expires = APM_CHECK_TIMEOUT + jiffies;
- add_timer(&apm_timer);
ent = create_proc_entry("apm", 0, 0);
if (ent != NULL)
misc_register(&apm_device);
- apm_enabled = 1;
+ kernel_thread(apm, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGHAND | SIGCHLD);
}
.long SYMBOL_NAME(sys_uname)
.long SYMBOL_NAME(sys_iopl) /* 110 */
.long SYMBOL_NAME(sys_vhangup)
- .long SYMBOL_NAME(sys_idle)
+ .long SYMBOL_NAME(sys_ni_syscall) /* old "idle" system call */
.long SYMBOL_NAME(sys_vm86old)
.long SYMBOL_NAME(sys_wait4)
.long SYMBOL_NAME(sys_swapoff) /* 115 */
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
+/*
+ * We'll get there..
+ */
#ifdef CONFIG_APM
-extern int apm_do_idle(void);
-extern void apm_do_busy(void);
+#define powermanagement_idle() do { } while (0)
+#else
+#define powermanagement_idle() do { } while (0)
#endif
static int hlt_counter=0;
-#define HARD_IDLE_TIMEOUT (HZ / 3)
-
void disable_hlt(void)
{
hlt_counter++;
hlt_counter--;
}
-#ifndef __SMP__
-
-static void hard_idle(void)
-{
- while (!current->need_resched) {
- if (boot_cpu_data.hlt_works_ok && !hlt_counter) {
-#ifdef CONFIG_APM
- /* If the APM BIOS is not enabled, or there
- is an error calling the idle routine, we
- should hlt if possible. We need to check
- need_resched again because an interrupt
- may have occurred in apm_do_idle(). */
- start_bh_atomic();
- if (!apm_do_idle() && !current->need_resched)
- __asm__("hlt");
- end_bh_atomic();
-#else
- __asm__("hlt");
-#endif
- }
- if (current->need_resched)
- break;
- schedule();
- }
-#ifdef CONFIG_APM
- apm_do_busy();
-#endif
-}
+/*
+ * If no process has been interested in this
+ * CPU for some time, we want to wake up the
+ * power management thread - we probably want
+ * to conserve power.
+ */
+#define HARD_IDLE_TIMEOUT (HZ/3)
/*
- * The idle loop on a uniprocessor i386..
- */
-static int cpu_idle(void *unused)
+ * The idle thread. There's no useful work to be
+ * done, so just try to conserve power and have a
+ * low exit latency (ie sit in a loop waiting for
+ * somebody to say that they'd like to reschedule)
+ */
+int cpu_idle(void *unused)
{
- int work = 1;
- unsigned long start_idle = 0;
+ unsigned int start_idle;
/* endless idle loop with no priority at all */
+ init_idle();
current->priority = 0;
current->counter = -100;
- init_idle();
- for (;;) {
- if (work)
- start_idle = jiffies;
+ start_idle = jiffies;
+ while (1) {
+ if (!current->need_resched) {
+ if (jiffies - start_idle < HARD_IDLE_TIMEOUT) {
+ if (!current_cpu_data.hlt_works_ok)
+ continue;
+ if (hlt_counter)
+ continue;
+ asm volatile("sti ; hlt" : : : "memory");
+ continue;
+ }
- if (jiffies - start_idle > HARD_IDLE_TIMEOUT)
- hard_idle();
- else {
- if (boot_cpu_data.hlt_works_ok && !hlt_counter && !current->need_resched)
- __asm__("hlt");
+ /*
+ * Ok, do some power management - we've been idle for too long
+ */
+ powermanagement_idle();
}
- work = current->need_resched;
schedule();
check_pgt_cache();
+ start_idle = jiffies;
}
}
-#else
-
-/*
- * This is being executed in task 0 'user space'.
- */
-
-int cpu_idle(void *unused)
-{
- /* endless idle loop with no priority at all */
- current->priority = 0;
- current->counter = -100;
- init_idle();
-
- while(1) {
- if (current_cpu_data.hlt_works_ok && !hlt_counter &&
- !current->need_resched)
- __asm__("hlt");
- /*
- * although we are an idle CPU, we do not want to
- * get into the scheduler unnecessarily.
- */
- if (current->need_resched) {
- schedule();
- check_pgt_cache();
- }
- }
-}
-
-#endif
-
-asmlinkage int sys_idle(void)
-{
- if (current->pid != 0)
- return -EPERM;
- cpu_idle(NULL);
- return 0;
-}
-
/*
* This routine reboots the machine by asking the keyboard
* controller to pulse the reset-line low. We try that for a while,
#include <linux/delay.h>
#include <linux/config.h>
#include <linux/init.h>
-#ifdef CONFIG_APM
#include <linux/apm_bios.h>
-#endif
#ifdef CONFIG_BLK_DEV_RAM
#include <linux/blk.h>
#endif
*/
struct drive_info_struct { char dummy[32]; } drive_info;
struct screen_info screen_info;
-#ifdef CONFIG_APM
struct apm_bios_info apm_bios_info;
-#endif
struct sys_desc_table_struct {
unsigned short length;
unsigned char table[0];
ROOT_DEV = to_kdev_t(ORIG_ROOT_DEV);
drive_info = DRIVE_INFO;
screen_info = SCREEN_INFO;
-#ifdef CONFIG_APM
apm_bios_info = APM_BIOS_INFO;
-#endif
if( SYS_DESC_TABLE.length != 0 ) {
MCA_bus = SYS_DESC_TABLE.table[3] &0x2;
machine_id = SYS_DESC_TABLE.table[0];
* once we got the process:
*/
idle = init_task.prev_task;
-
- init_tasks[cpucount] = idle;
if (!idle)
panic("No idle process for CPU %d", i);
del_from_runqueue(idle);
unhash_process(idle);
+ init_tasks[cpucount] = idle;
/* start_eip had better be page-aligned! */
start_eip = setup_trampoline();
{
unsigned long vm_mask = 1 << current->processor;
struct mm_struct *mm = current->mm;
+ unsigned long cpu_mask = mm->cpu_vm_mask & ~vm_mask;
- if (mm->cpu_vm_mask != vm_mask) {
- flush_tlb_others(mm->cpu_vm_mask & ~vm_mask);
- mm->cpu_vm_mask = vm_mask;
- }
+ mm->cpu_vm_mask = vm_mask;
+ flush_tlb_others(cpu_mask);
local_flush_tlb();
}
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>
-#ifdef CONFIG_APM
-#include <linux/apm_bios.h>
-#endif
#include <linux/tty.h>
#include <linux/selection.h>
#ifdef CONFIG_DTLK
dtlk_init();
#endif
-#ifdef CONFIG_APM
- apm_bios_init();
-#endif
#ifdef CONFIG_H8
h8_init();
#endif
#ifdef CONFIG_PROC_FS
static char *floppy_types[] = {
- "none", "5.25'' 360k", "5.25'' 1.2M", "3.5'' 720k", "3.5'' 1.44M"
+ "none", "5.25'' 360k", "5.25'' 1.2M", "3.5'' 720k", "3.5'' 1.44M", "3.5'' 2.88M"
};
static char *gfx_types[] = {
PRINT_PROC( "HD 0 type : " );
type = nvram[4] >> 4;
if (type)
- PRINT_PROC( " %02x\n", type == 0x0f ? nvram[11] : type );
+ PRINT_PROC( "%02x\n", type == 0x0f ? nvram[11] : type );
else
PRINT_PROC( "none\n" );
PRINT_PROC( "HD 1 type : " );
type = nvram[4] & 0x0f;
if (type)
- PRINT_PROC( " %02x\n", type == 0x0f ? nvram[12] : type );
+ PRINT_PROC( "%02x\n", type == 0x0f ? nvram[12] : type );
else
PRINT_PROC( "none\n" );
* CLAIM (register device first time) parport_claim_or_block
* RELEASE parport_release
* SETMODE set the IEEE 1284 protocol to use for read/write
+ * SETPHASE set the IEEE 1284 phase of a particular mode. Not to be
+ * confused with ioctl(fd, SETPHASER, &stun). ;-)
* DATADIR data_forward / data_reverse
* WDATA write_data
* RDATA read_data
struct pardevice * pdev;
wait_queue_head_t irq_wait;
atomic_t irqc;
- int mode;
unsigned int flags;
int irqresponse;
unsigned char irqctl;
return -ESPIPE;
}
-/* This looks a bit like parport_read. The difference is that we don't
- * determine the mode to use from the port data, but rather from the
- * mode the driver told us to use. */
-static ssize_t do_read (struct pp_struct *pp, void *buf, size_t len)
-{
- size_t (*fn) (struct parport *, void *, size_t, int);
- struct parport *port = pp->pdev->port;
- int addr = pp->mode & IEEE1284_ADDR;
- int mode = pp->mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR);
-
- switch (mode) {
- case IEEE1284_MODE_COMPAT:
- /* This is a write-only mode. */
- return -EIO;
-
- case IEEE1284_MODE_NIBBLE:
- fn = port->ops->nibble_read_data;
- break;
-
- case IEEE1284_MODE_BYTE:
- fn = port->ops->byte_read_data;
- break;
-
- case IEEE1284_MODE_EPP:
- if (addr)
- fn = port->ops->epp_read_addr;
- else
- fn = port->ops->epp_read_data;
- break;
-
- case IEEE1284_MODE_ECP:
- case IEEE1284_MODE_ECPRLE:
- fn = port->ops->ecp_read_data;
- break;
-
- case IEEE1284_MODE_ECPSWE:
- fn = parport_ieee1284_ecp_read_data;
- break;
-
- default:
- printk (KERN_DEBUG "%s: unknown mode 0x%02x\n",
- pp->pdev->name, pp->mode);
- return -EINVAL;
- }
-
- return (*fn) (port, buf, len, 0);
-}
-
-/* This looks a bit like parport_write. The difference is that we don't
- * determine the mode to use from the port data, but rather from the
- * mode the driver told us to use. */
-static ssize_t do_write (struct pp_struct *pp, const void *buf, size_t len)
-{
- size_t (*fn) (struct parport *, const void *, size_t, int);
- struct parport *port = pp->pdev->port;
- int addr = pp->mode & IEEE1284_ADDR;
- int mode = pp->mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR);
-
- switch (mode) {
- case IEEE1284_MODE_NIBBLE:
- case IEEE1284_MODE_BYTE:
- /* Read-only modes. */
- return -EIO;
-
- case IEEE1284_MODE_COMPAT:
- fn = port->ops->compat_write_data;
- break;
-
- case IEEE1284_MODE_EPP:
- if (addr)
- fn = port->ops->epp_write_addr;
- else
- fn = port->ops->epp_write_data;
- break;
-
- case IEEE1284_MODE_ECP:
- case IEEE1284_MODE_ECPRLE:
- if (addr)
- fn = port->ops->ecp_write_addr;
- else
- fn = port->ops->ecp_write_data;
- break;
-
- case IEEE1284_MODE_ECPSWE:
- if (addr)
- fn = parport_ieee1284_ecp_write_addr;
- else
- fn = parport_ieee1284_ecp_write_data;
- break;
-
- default:
- printk (KERN_DEBUG "%s: unknown mode 0x%02x\n",
- pp->pdev->name, pp->mode);
- return -EINVAL;
- }
-
- return (*fn) (port, buf, len, 0);
-}
-
static ssize_t pp_read (struct file * file, char * buf, size_t count,
loff_t * ppos)
{
while (bytes_read < count) {
ssize_t need = min(count - bytes_read, PP_BUFFER_SIZE);
- got = do_read (pp, kbuffer, need);
+ got = parport_read (pp->pdev->port, kbuffer, need);
if (got <= 0) {
if (!bytes_read)
break;
}
- wrote = do_write (pp, kbuffer, n);
+ wrote = parport_write (pp->pdev->port, kbuffer, n);
if (wrote < 0) {
if (!bytes_written)
return 0;
}
+static enum ieee1284_phase init_phase (int mode)
+{
+ switch (mode & ~(IEEE1284_DEVICEID
+ | IEEE1284_ADDR)) {
+ case IEEE1284_MODE_NIBBLE:
+ case IEEE1284_MODE_BYTE:
+ return IEEE1284_PH_REV_IDLE;
+ }
+ return IEEE1284_PH_FWD_IDLE;
+}
+
static int pp_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg)
{
/* First handle the cases that don't take arguments. */
if (cmd == PPCLAIM) {
struct ieee1284_info *info;
- int first_claim = 0;
if (pp->flags & PP_CLAIMED) {
printk (KERN_DEBUG CHRDEV
int err = register_device (minor, pp);
if (err)
return err;
-
- first_claim = 1;
}
parport_claim_or_block (pp->pdev);
info = &pp->pdev->port->ieee1284;
pp->saved_state.mode = info->mode;
pp->saved_state.phase = info->phase;
- if (pp->mode != info->mode) {
- int phase = IEEE1284_PH_FWD_IDLE;
-
- if (first_claim) {
- info->mode = pp->mode;
- switch (pp->mode & ~(IEEE1284_DEVICEID
- | IEEE1284_ADDR)) {
- case IEEE1284_MODE_NIBBLE:
- case IEEE1284_MODE_BYTE:
- phase = IEEE1284_PH_REV_IDLE;
- }
- info->phase = phase;
- } else {
- /* Just restore the state. */
- info->mode = pp->state.mode;
- info->phase = pp->state.phase;
- }
- }
+ info->mode = pp->state.mode;
+ info->phase = pp->state.phase;
return 0;
}
if (copy_from_user (&mode, (int *) arg, sizeof (mode)))
return -EFAULT;
/* FIXME: validate mode */
- pp->mode = mode;
+ pp->state.mode = mode;
+ pp->state.phase = init_phase (mode);
+
+ if (pp->flags & PP_CLAIMED) {
+ pp->pdev->port->ieee1284.mode = mode;
+ pp->pdev->port->ieee1284.phase = pp->state.phase;
+ }
+
+ return 0;
+ }
+
+ if (cmd == PPSETPHASE) {
+ int phase;
+ if (copy_from_user (&phase, (int *) arg, sizeof (phase)))
+ return -EFAULT;
+ /* FIXME: validate phase */
+ pp->state.phase = phase;
+
+ if (pp->flags & PP_CLAIMED)
+ pp->pdev->port->ieee1284.phase = phase;
+
return 0;
}
if (!pp)
return -ENOMEM;
- pp->mode = IEEE1284_MODE_COMPAT;
+ pp->state.mode = IEEE1284_MODE_COMPAT;
+ pp->state.phase = init_phase (pp->state.mode);
pp->flags = 0;
atomic_set (&pp->irqc, 0);
init_waitqueue_head (&pp->irq_wait);
/* Clear (and return) interrupt count. */
#define PPCLRIRQ _IOR(PP_IOCTL, 0x93, int)
+
+/* Set the IEEE 1284 phase that we're in (e.g. IEEE1284_PH_FWD_IDLE) */
+#define PPSETPHASE _IOW(PP_IOCTL, 0x94, int)
/* Event 38: Set nAutoFd low */
parport_frob_control (port,
PARPORT_CONTROL_AUTOFD,
- PARPORT_CONTROL_AUTOFD);
+ 0);
parport_data_reverse (port);
udelay (5);
/* Event 39: Set nInit low to initiate bus reversal */
parport_frob_control (port,
PARPORT_CONTROL_INIT,
- PARPORT_CONTROL_INIT);
+ 0);
/* Event 40: PError goes low */
retval = parport_wait_peripheral (port,
}
out:
+ port->ieee1284.phase = IEEE1284_PH_REV_IDLE;
return count;
#endif /* IEEE1284 support */
}
/*
* Put the ECP detected port in PS2 mode.
*/
- outb (0x24, ECONTROL (p));
+ outb (0x34, ECONTROL (p));
parport_pc_write_data(p, 0);
parport_pc_data_forward (p);
#include "usb.h"
#include "cpia.h"
+#define CPIA_DEBUG /* Gobs of debugging info */
+
#define MAX_FRAME_SIZE (384 * 288 * 3)
/*******************************/
return dev->bus->op->control_msg(dev, usb_sndctrlpipe(dev,0), &dr, NULL, 0);
}
+/* How much data is left in the scratch buf? */
#define scratch_left(x) (cpia->scratchlen - (int)((char *)x - (char *)cpia->scratch))
static void cpia_parse_data(struct usb_cpia *cpia)
{
unsigned char *data = cpia->scratch;
+ unsigned long l;
int done;
done = 0;
break;
}
+ /* 0x1968 is magic */
printk("header: %X\n", (*data << 8) + *(data + 1));
if ((*data == 0x19) && (*(data + 1) == 0x68)) {
cpia->state = STATE_HEADER;
break;
}
+ /* Woops, lost the header, find the end of the frame */
if (scratch_left(data) < 4) {
done = 1;
break;
}
data++;
}
-printk("scan: scanned %d bytes\n", data-begin);
+#ifdef CPIA_DEBUG
+ printk("scan: scanned %d bytes\n", data-begin);
+#endif
break;
}
case STATE_HEADER:
break;
}
-printk("header: framerate %d\n", data[41]);
+#ifdef CPIA_DEBUG
+ printk("header: framerate %d\n", data[41]);
+#endif
data += 64;
found = 1;
break;
} else if ((*data == 0xFF) &&
- (scratch_left(data) >= 3) &&
- (*(data + 1) == 0xFF) &&
- (*(data + 2) == 0xFF) &&
- (*(data + 3) == 0xFF)) {
- data+=4;
+ (scratch_left(data) >= 3) &&
+ (*(data + 1) == 0xFF) &&
+ (*(data + 2) == 0xFF) &&
+ (*(data + 3) == 0xFF)) {
+ data += 4;
cpia->curline = 144;
found = 1;
break;
data++;
}
-#if 0
-printk("line %d: scanned %d bytes\n", cpia->curline, data-begin);
-#endif
-if (data-begin == 355 && cpia->frame[cpia->curframe].width != 64) {
- int i;
- char *f = cpia->frame[cpia->curframe].data, *b = begin;
-#if 0
-printk("copying\n");
-#endif
+ if (data-begin == 355 && cpia->frame[cpia->curframe].width != 64) {
+ int i;
+ char *f = cpia->frame[cpia->curframe].data, *b = begin;
+
+ b += 2;
+ f += (cpia->frame[cpia->curframe].width * 3) * cpia->curline;
+
+ for (i = 0; i < 176; i++)
+ f[(i * 3) + 0] =
+ f[(i * 3) + 1] =
+ f[(i * 3) + 2] =
+ b[(i * 2)];
+ }
- b+=2;
- f+=(cpia->frame[cpia->curframe].width*3)*cpia->curline;
- for (i = 0; i < 176; i++)
- f[(i * 3) + 0] =
- f[(i * 3) + 1] =
- f[(i * 3) + 2] =
- b[(i * 2)];
-}
if (found) {
cpia->curline++;
if (cpia->curline >= 144) {
}
}
- {
- int l;
-
+ /* Grab the remaining */
l = scratch_left(data);
memmove(cpia->scratch, data, l);
+
cpia->scratchlen = l;
- }
}
static int cpia_isoc_irq(int status, void *__buffer, int len, void *dev_id)
if (cpia->frame[0].state == FRAME_READY) {
cpia->curframe = 0;
cpia->frame[0].state = FRAME_GRABBING;
-printk("capturing to frame 0\n");
+#ifdef CPIA_DEBUG
+ printk("capturing to frame 0\n");
+#endif
} else if (cpia->frame[1].state == FRAME_READY) {
cpia->curframe = 1;
cpia->frame[1].state = FRAME_GRABBING;
-printk("capturing to frame 1\n");
+#ifdef CPIA_DEBUG
+ printk("capturing to frame 1\n");
+#endif
+#ifdef CPIA_DEBUG
} else
-printk("no frame available\n");
+ printk("no frame available\n");
+#else
+ }
+#endif
}
sbuf = &cpia->sbuf[cpia->receivesbuf];
/* Do something to it now */
sbuf->len = usb_compress_isochronous(dev, sbuf->isodesc);
+#ifdef CPIA_DEBUG
if (sbuf->len)
- printk("%d bytes received\n", sbuf->len);
+ printk("%d bytes received\n", sbuf->len);
+#endif
if (sbuf->len && cpia->curframe >= 0) {
if (sbuf->len > (SCRATCH_BUF_SIZE - cpia->scratchlen)) {
cpia->receivesbuf = 0;
-#if 0
- cpia->parsesbuf = 0;
- cpia->parsepos = 0;
-#endif
cpia->scratchlen = 0;
cpia->curline = 0;
cpia->state = STATE_SCANNING;
cpia->sbuf[1].isodesc = usb_allocate_isochronous(dev, usb_rcvisocpipe(dev,1), cpia->sbuf[1].data, STREAM_BUF_SIZE, 960, cpia_isoc_irq, cpia);
cpia->sbuf[2].isodesc = usb_allocate_isochronous(dev, usb_rcvisocpipe(dev,1), cpia->sbuf[2].data, STREAM_BUF_SIZE, 960, cpia_isoc_irq, cpia);
+#ifdef CPIA_DEBUG
printk("isodesc[0] @ %p\n", cpia->sbuf[0].isodesc);
printk("isodesc[1] @ %p\n", cpia->sbuf[1].isodesc);
printk("isodesc[2] @ %p\n", cpia->sbuf[2].isodesc);
+#endif
/* Schedule the queues */
usb_schedule_isochronous(dev, cpia->sbuf[0].isodesc, NULL);
usb_schedule_isochronous(dev, cpia->sbuf[1].isodesc, cpia->sbuf[0].isodesc);
usb_schedule_isochronous(dev, cpia->sbuf[2].isodesc, cpia->sbuf[1].isodesc);
+#ifdef CPIA_DEBUG
+ printk("done scheduling\n");
+#endif
if (usb_set_interface(cpia->dev, 1, 3)) {
printk("cpia_set_interface error\n");
return -EINVAL;
usb_cpia_startstreamcap(cpia->dev);
cpia->streaming = 1;
+#ifdef CPIA_DEBUG
+ printk("now streaming\n");
+#endif
return 0;
}
{
struct usb_cpia *cpia = (struct usb_cpia *)dev;
-printk("cpia_open\n");
+#ifdef CPIA_DEBUG
+ printk("cpia_open\n");
+#endif
cpia->fbuf = rvmalloc(2 * MAX_FRAME_SIZE);
if (!cpia->fbuf)
cpia->frame[0].data = cpia->fbuf;
cpia->frame[1].data = cpia->fbuf + MAX_FRAME_SIZE;
+#ifdef CPIA_DEBUG
printk("frame [0] @ %p\n", cpia->frame[0].data);
printk("frame [1] @ %p\n", cpia->frame[1].data);
+#endif
cpia->sbuf[0].data = kmalloc(STREAM_BUF_SIZE, GFP_KERNEL);
if (!cpia->sbuf[0].data)
if (!cpia->sbuf[2].data)
goto open_err_on2;
+#ifdef CPIA_DEBUG
printk("sbuf[0] @ %p\n", cpia->sbuf[0].data);
printk("sbuf[1] @ %p\n", cpia->sbuf[1].data);
printk("sbuf[2] @ %p\n", cpia->sbuf[2].data);
+#endif
cpia->curframe = -1;
cpia->receivesbuf = 0;
{
struct usb_cpia *cpia = (struct usb_cpia *)dev;
-printk("cpia_close\n");
+#ifdef CPIA_DEBUG
+ printk("cpia_close\n");
+#endif
cpia_stop_isoc(cpia);
return -EINVAL;
}
-#if 0
- if (usb_set_interface(dev, 1, 3)) {
- printk("cpia_set_interface error\n");
- return -EINVAL;
- }
-
- if (usb_cpia_grab_frame(dev, 0)) {
- printk("cpia_grab_frame error\n");
- return -EINVAL;
- }
-
- if (usb_cpia_upload_frame(dev, 0)) {
- printk("cpia_upload_frame error\n");
- return -EINVAL;
- }
-
- buf = cpia->ibuf;
- uhci_receive_isochronous(dev, usb_rcvisocpipe(dev,1), buf, 176*144*4);
-
- {
- printk("header magic: %X\n", (buf[0] << 8) + buf[1]);
-
- while ((buf[0] != 0x19) || (buf[1] != 0x68)) {
- int i;
-
- printk("resync'ing\n");
- for (i=0;i<(176*144*4)-4;i++, buf++)
- if (
- (buf[0] == 0xFF) &&
- (buf[1] == 0xFF) &&
- (buf[2] == 0xFF) &&
- (buf[3] == 0xFF)) {
- buf+=4;
- i+=4;
- break;
- }
-
- memmove(cpia->ibuf, buf, (176*144*4) - i);
- uhci_receive_isochronous(dev, usb_rcvisocpipe(dev,1), cpia->ibuf + (176*144*4) - i, i);
- buf = cpia->ibuf;
-
-#if 0
- printk("header magic: %X\n", (buf[0] << 8) + buf[1]);
-#endif
- }
-
- printk("size: %d, sample: %d, order: %d\n", buf[16], buf[17], buf[18]);
- printk("comp: %d, decimation: %d\n", buf[28], buf[29]);
- printk("roi: top left: %d, %d bottom right: %d, %d\n",
- buf[26] * 4, buf[24] * 8,
- buf[27] * 4, buf[25] * 8);
-
- printk("vm->frame: %d\n", vm->frame);
-
- {
- int i, i1;
- char *b = buf + 64, *fbuf = &cpia->fbuffer[MAX_FRAME_SIZE * (vm->frame & 1)];
- for (i=0;i<144;i++) {
-#if 0
- printk("line len: %d\n", (b[1] << 8) + b[0]);
-#endif
- b += 2;
- for (i1=0;i1<176;i1++) {
- fbuf[(i * vm->width * 3) + (i1 * 3)] =
- fbuf[(i * vm->width * 3) + (i1 * 3) + 1] =
- fbuf[(i * vm->width * 3) + (i1 * 3) + 2] =
- b[i1 * 2];
-#if 0
- *((short *)&fbuf[(i * vm->width * 2) + (i1 * 2)]) =
- ((b[i1 * 2] >> 3) << 11) + ((b[i1 * 2] >> 2) << 6) + (b[i1 * 2] >> 3);
-#endif
- }
- b += (176 * 2) + 1;
- }
- }
-
- }
-
- if (usb_set_interface(dev, 1, 0)) {
- printk("cpia_set_interface error\n");
- return -EINVAL;
- }
-#endif
-
static int cpia_ioctl(struct video_device *dev, unsigned int cmd, void *arg)
{
struct usb_cpia *cpia = (struct usb_cpia *)dev;
b.type = VID_TYPE_CAPTURE /* | VID_TYPE_SUBCAPTURE */;
b.channels = 1;
b.audios = 0;
- b.maxwidth = 176 /* 352 */;
- b.maxheight = 144 /* 240 */;
- b.minwidth = 176 /* (Something small?) */;
- b.minheight = 144 /* " " */;
+ b.maxwidth = 176 /* 352 */;
+ b.maxheight = 144 /* 240 */;
+ b.minwidth = 176 /* (Something small?) */;
+ b.minheight = 144 /* " " */;
if (copy_to_user(arg, &b, sizeof(b)))
return -EFAULT;
if (copy_from_user(&p, arg, sizeof(p)))
return -EFAULT;
-printk("Attempting to set palette %d, depth %d\n", p.palette, p.depth);
-
-#if 0
- if (p.palette != VIDEO_PALETTE_YUYV)
- return -EINVAL;
- if (p.depth != 16)
- return -EINVAL;
+#ifdef CPIA_DEBUG
+ printk("Attempting to set palette %d, depth %d\n",
+ p.palette, p.depth);
#endif
return 0;
{
struct video_window vw;
-printk("VIDIOCSWIN\n");
+#ifdef CPIA_DEBUG
+ printk("VIDIOCSWIN\n");
+#endif
+
if (copy_from_user(&vw, arg, sizeof(vw)))
return -EFAULT;
if (vw.flags)
{
struct video_window vw;
-printk("VIDIOCGWIN\n");
+#ifdef CPIA_DEBUG
+ printk("VIDIOCGWIN\n");
+#endif
+
vw.x = 0;
vw.y = 0;
vw.width = 176;
if (copy_from_user((void *)&vm, (void *)arg, sizeof(vm)))
return -EFAULT;
-printk("MCAPTURE\n");
-printk("frame: %d, size: %dx%d, format: %d\n", vm.frame, vm.width, vm.height, vm.format);
+#ifdef CPIA_DEBUG
+ printk("MCAPTURE\n");
+ printk("frame: %d, size: %dx%d, format: %d\n",
+ vm.frame, vm.width, vm.height, vm.format);
+#endif
if (vm.format != VIDEO_PALETTE_RGB24)
return -EINVAL;
if (copy_from_user((void *)&frame, arg, sizeof(int)))
return -EFAULT;
+#ifdef CPIA_DEBUG
printk("syncing to frame %d\n", frame);
+#endif
switch (cpia->frame[frame].state) {
case FRAME_UNUSED:
return -EINVAL;
cpia->frame[frame].state = FRAME_UNUSED;
break;
}
+#ifdef CPIA_DEBUG
printk("synced to frame %d\n", frame);
+#endif
return 0;
}
case VIDIOCCAPTURE:
struct usb_cpia *cpia = (struct usb_cpia *)dev;
int len;
+#ifdef CPIA_DEBUG
printk("cpia_read: %ld bytes\n", count);
+#endif
#if 0
len = cpia_capture(cpia, buf, count);
unsigned long start = (unsigned long)adr;
unsigned long page, pos;
+#ifdef CPIA_DEBUG
printk("mmap: %ld (%lX) bytes\n", size, size);
+#endif
if (size > (((2 * MAX_FRAME_SIZE) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1)))
return -EINVAL;
-#if 0
- if (!cpia->fbuffer) {
- if ((cpia->fbuffer = rvmalloc(2 * MAX_FRAME_SIZE)) == NULL)
- return -EINVAL;
- }
-#endif
-
pos = (unsigned long)cpia->fbuf;
while (size > 0)
{
printk("cpia_set_compression error\n");
return;
}
-
-#if 0
- if (usb_cpia_grab_frame(dev, 0)) {
- printk("cpia_grab_frame error\n");
- return;
- }
-
- if (usb_cpia_upload_frame(dev, 1)) {
- printk("cpia_upload_frame error\n");
- return;
- }
-
- buf = (void *)__get_free_page(GFP_KERNEL);
-
- {
- int i;
- for (i=0;i<448;i++)
- buf[i]=0;
- }
- uhci_receive_isochronous(dev, usb_rcvisocpipe(dev,1), buf, 448);
-
- {
- int i;
- for (i=0;i<448;i++) {
- printk("%02X ", buf[i]);
- if ((i % 16) == 15)
- printk("\n");
- }
- printk("\n");
- }
-
- free_page((unsigned long)buf);
-#endif
}
static int cpia_probe(struct usb_device *dev)
if (dev->descriptor.bNumConfigurations != 1)
return -1;
-#if 0
- /* We don't handle multi-interface hubs */
- if (dev->config[0].bNumInterfaces != 1)
- return -1;
-#endif
-
interface = &dev->config[0].altsetting[0].interface[0];
/* Is it a CPiA? */
-/*
-Apr 24 17:49:04 bjorn kernel: Vendor: 0545
-Apr 24 17:49:04 bjorn kernel: Product: 8080
-*/
-/*
- if (dev->descriptor.idVendor != 0x0545)
- return -1;
- if (dev->descriptor.idProduct != 0x8080)
- return -1;
- if (interface->bInterfaceClass != 0xFF)
- return -1;
- if (interface->bInterfaceSubClass != 0xFF)
- return -1;
-*/
if (dev->descriptor.idVendor != 0x0553)
return -1;
if (dev->descriptor.idProduct != 0x0002)
if (interface->bInterfaceSubClass != 0x00)
return -1;
-#if 0
- /* Multiple endpoints? What kind of mutant ninja-hub is this? */
- if (interface->bNumEndpoints != 1)
- return -1;
-
- endpoint = &interface->endpoint[0];
-
- /* Output endpoint? Curiousier and curiousier.. */
- if (!(endpoint->bEndpointAddress & 0x80))
- return -1;
-
- /* If it's not an interrupt endpoint, we'd better punt! */
- if ((endpoint->bmAttributes & 3) != 3)
- return -1;
-#endif
-
/* We found a CPiA */
printk("USB CPiA camera found\n");
usb_cpia_configure(cpia);
-#if 0
- usb_request_irq(dev, usb_rcvctrlpipe(dev, endpoint->bEndpointAddress), pport_irq, endpoint->bInterval, pport);
-#endif
-
return 0;
}
{
return usb_cpia_init();
}
+
void cleanup_module(void)
{
}
return USB_ST_INTERNALERROR;
/* Remove it from the internal irq_list */
+ uhci_remove_irq_list(td);
+#if 0
spin_lock_irqsave(&irqlist_lock, flags);
list_del(&td->irq_list);
spin_unlock_irqrestore(&irqlist_lock, flags);
+#endif
/* Remove the interrupt TD and QH */
uhci_remove_td(td);
if ((cdata != data) && (n))
memmove(data, cdata, n);
-#if 0
-if (n && n != 960)
- printk("underrun: %d %d\n", i, n);
-#endif
-if ((isodesc->td[i].status >> 16) & 0xFF)
- printk("error: %d %X\n", i, (isodesc->td[i].status >> 16));
+ /* Debugging */
+ if ((isodesc->td[i].status >> 16) & 0xFF)
+ printk("error: %d %X\n", i,
+ (isodesc->td[i].status >> 16));
data += n;
totlen += n;
/* Insert TD into list */
if (!pisodesc) {
+ /* It's not guaranteed to be 1-1024 */
frame = inw(uhci->io_addr + USBFRNUM) % 1024;
/* HACK: Start 2 frames from now */
frame = (frame + 2) % 1024;
} else
frame = (pisodesc->endframe + 1) % 1024;
-#if 0
-printk("scheduling first at frame %d\n", frame);
-#endif
-
for (i = 0; i < isodesc->num; i++) {
/* Active */
isodesc->td[i].status |= (1 << 23);
uhci->fl->frame[(frame + i) % 1024] = virt_to_bus(&isodesc->td[i]);
}
-#if 0
-printk("last at frame %d\n", (frame + i - 1) % 1024);
-#endif
-
- /* Interrupt */
+ /* IOC on the last TD */
isodesc->td[i - 1].status |= (1 << 24);
isodesc->frame = frame;
isodesc->endframe = (frame + isodesc->num - 1) % 1024;
-#if 0
- return uhci_td_result(dev, td[num - 1]);
-#endif
return 0;
}
memset(isodesc, 0, sizeof(*isodesc));
/* Carefully work around the non contiguous pages */
- isodesc->num = (len / PAGE_SIZE) * (PAGE_SIZE / maxsze);
+ isodesc->num = len / maxsze;
isodesc->td = kmalloc(sizeof(struct uhci_td) * isodesc->num, GFP_KERNEL);
isodesc->frame = isodesc->endframe = -1;
isodesc->data = data;
i++;
data += maxsze;
-
- if (((int)data % PAGE_SIZE) + maxsze >= PAGE_SIZE)
- data = (char *)(((int)data + maxsze) & ~(PAGE_SIZE - 1));
-
len -= maxsze;
} while (i < isodesc->num);
+#if 0
/* IOC on the last TD */
td->status |= (1 << 24);
+#endif
+
uhci_add_irq_list(dev->uhci, td, completed, dev_id);
return isodesc;
td->first = first;
td->link = 1; /* Terminate */
-
/* Start it up.. */
ret = uhci_run_control(dev, first, td);
DECLARE_WAITQUEUE(wait, current);
struct uhci_qh *bulk_qh = uhci_qh_allocate(dev);
struct uhci_td *curtd;
- struct uhci_device *root_hub=usb_to_uhci(dev->uhci->bus->root_hub);
+ struct uhci_device *root_hub = usb_to_uhci(dev->uhci->bus->root_hub);
current->state = TASK_UNINTERRUPTIBLE;
add_wait_queue(&bulk_wakeup, &wait);
struct uhci_device *dev;
int i;
+ /* Allocate the USB device */
usb_dev = kmalloc(sizeof(*usb_dev), GFP_KERNEL);
if (!usb_dev)
return NULL;
memset(usb_dev, 0, sizeof(*usb_dev));
+ /* Allocate the UHCI device private data */
dev = kmalloc(sizeof(*dev), GFP_KERNEL);
if (!dev) {
- usb_destroy_configuration(usb_dev);
kfree(usb_dev);
return NULL;
}
next = tmp->next;
if (!((status = td->status) & (1 << 23)) || /* No longer active? */
+ (td->qh &&
((td->qh->element & ~15) &&
!((status = uhci_link_to_td(td->qh->element)->status) & (1 <<23)) &&
- (status & 0x760000) /* is in error state (Stall, db, babble, timeout, bitstuff) */)) {
+ (status & 0x760000) /* is in error state (Stall, db, babble, timeout, bitstuff) */))) {
/* remove from IRQ list */
__list_del(tmp->prev, next);
INIT_LIST_HEAD(tmp);
parsed += *ptr;
le16_to_cpus(&config->wTotalLength);
- if (config->MaxPower == 200) {
- printk("bNumInterfaces kludge\n");
- config->bNumInterfaces += 3;
- }
-
if (config->bNumInterfaces > USB_MAXINTERFACES)
{
printk(KERN_WARNING "usb: too many interfaces.\n");
config->altsetting = (struct usb_alternate_setting *)
kmalloc(USB_MAXALTSETTING * sizeof(struct usb_alternate_setting), GFP_KERNEL);
if (config->altsetting == NULL) {
- printk(KERN_WARNING "usb: out of memory.\n");
- return -1;
+ printk(KERN_WARNING "usb: out of memory.\n");
+ return -1;
}
config->act_altsetting = 0;
config->num_altsetting = 1;
#ifdef CONFIG_MAC
/*
- * On the Macintoy, there may or may not be a working VBL int. We need to prob
+ * On the Macintoy, there may or may not be a working VBL int. We need to probe
*/
static int vbl_detected = 0;
p->dispsw->clear_margins(conp, p, 0);
if (logo_shown == -2) {
logo_shown = fg_console;
+ fbcon_clear(conp, 0, 0, LOGO_H, p->var.xres-LOGO_W);
fbcon_show_logo(); /* This is protected above by initmem_freed */
update_region(fg_console,
conp->vc_origin + conp->vc_size_row * conp->vc_top,
current->mm = mm;
current->active_mm = mm;
- switch_mm(active_mm, mm, smp_processor_id());
+ activate_mm(active_mm, mm);
mm_release();
if (old_mm) {
if (active_mm != old_mm) BUG();
unsigned long schedule_frame;
unsigned long pc;
- pc = thread_saved_pc(&p->tss);
+ pc = thread_saved_pc(&p->thread);
if (pc >= first_sched && pc < last_sched) {
- schedule_frame = ((unsigned long *)p->tss.ksp)[6];
+ schedule_frame = ((unsigned long *)p->thread.ksp)[6];
return ((unsigned long *)schedule_frame)[12];
}
return pc;
+ (long)&((struct pt_regs *)0)->reg)
# define KSTK_EIP(tsk) \
(*(unsigned long *)(PT_REG(pc) + PAGE_SIZE + (unsigned long)(tsk)))
-# define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->tss.usp)
+# define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp)
#elif defined(__arm__)
# define KSTK_EIP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)))[1022])
# define KSTK_ESP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)))[1020])
buf = page_address(page);
/* hack? */
- page->owner = (int)current;
+ page->owner = current;
offset = page->offset;
if (offset < inode->i_size) {
# include <asm/core_tsunami.h>
#elif defined(CONFIG_ALPHA_JENSEN)
# include <asm/jensen.h>
-#elif defined(CONFIG_ALPHA_RX164)
+#elif defined(CONFIG_ALPHA_POLARIS)
# include <asm/core_polaris.h>
#else
#error "What system is this?"
int (*hose_write_config_dword)(u8, u8, u8, u32 value,
struct linux_hose_info *);
- void (*mv_get_mmu_context)(struct task_struct *);
+ void (*mv_switch_mm)(struct mm_struct *, struct mm_struct *,
+ struct task_struct *, long);
+ void (*mv_activate_mm)(struct mm_struct *, struct mm_struct *, long);
+
void (*mv_flush_tlb_current)(struct mm_struct *);
void (*mv_flush_tlb_other)(struct mm_struct *);
void (*mv_flush_tlb_current_page)(struct mm_struct * mm,
#include <asm/system.h>
#include <asm/machvec.h>
+
+/*
+ * Force a context reload. This is needed when we change the page
+ * table pointer or when we update the ASN of the current process.
+ */
+
+/* Don't get into trouble with dueling __EXTERN_INLINEs. */
+#ifndef __EXTERN_INLINE
+#include <asm/io.h>
+#endif
+
+extern inline unsigned long
+__reload_thread(struct thread_struct *pcb)
+{
+ register unsigned long a0 __asm__("$16");
+ register unsigned long v0 __asm__("$0");
+
+ a0 = virt_to_phys(pcb);
+ __asm__ __volatile__(
+ "call_pal %2 #__reload_thread"
+ : "=r"(v0), "=r"(a0)
+ : "i"(PAL_swpctx), "r"(a0)
+ : "$1", "$16", "$22", "$23", "$24", "$25");
+
+ return v0;
+}
+
+
/*
* The maximum ASN's the processor supports. On the EV4 this is 63
* but the PAL-code doesn't actually use this information. On the
#define __MMU_EXTERN_INLINE
#endif
-extern void get_new_mmu_context(struct task_struct *p, struct mm_struct *mm);
+extern void get_new_mm_context(struct task_struct *p, struct mm_struct *mm);
static inline unsigned long
-__get_new_mmu_context(struct task_struct *p, struct mm_struct *mm)
+__get_new_mm_context(struct mm_struct *mm, long cpu)
{
- unsigned long asn = cpu_last_asn(smp_processor_id());
+ unsigned long asn = cpu_last_asn(cpu);
unsigned long next = asn + 1;
if ((asn & HARDWARE_ASN_MASK) >= MAX_ASN) {
tbiap();
next = (asn & ~HARDWARE_ASN_MASK) + ASN_FIRST_VERSION;
}
- cpu_last_asn(smp_processor_id()) = next;
+ cpu_last_asn(cpu) = next;
return next;
}
__EXTERN_INLINE void
-ev4_get_mmu_context(struct task_struct *p)
+ev4_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
+ struct task_struct *next, long cpu)
{
/* As described, ASN's are broken. But we can optimize for
switching between threads -- if the mm is unchanged from
for a 1992 SRM, reports Joseph Martin (jmartin@hlo.dec.com).
I'm going to leave this here anyway, just to Be Sure. -- r~ */
- if (current->mm != p->mm)
+ if (prev_mm != next_mm)
tbiap();
}
__EXTERN_INLINE void
-ev5_get_mmu_context(struct task_struct *p)
+ev4_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm, long cpu)
+{
+ /* This is only called after changing mm on current. */
+ tbiap();
+
+ current->thread.ptbr
+ = ((unsigned long) next_mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
+}
+
+__EXTERN_INLINE void
+ev5_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
+ struct task_struct *next, long cpu)
{
/* Check if our ASN is of an older version, or on a different CPU,
and thus invalid. */
fight over the context. Find a way to record a per-mm, per-cpu
value for the asn. */
- unsigned long asn = cpu_last_asn(smp_processor_id());
- struct mm_struct *mm = p->mm;
- unsigned long mmc = mm->context;
+ unsigned long asn = cpu_last_asn(cpu);
+ unsigned long mmc = next_mm->context;
if ((mmc ^ asn) & ~HARDWARE_ASN_MASK) {
- mmc = __get_new_mmu_context(p, mm);
- mm->context = mmc;
+ mmc = __get_new_mm_context(next_mm, cpu);
+ next_mm->context = mmc;
}
/* Always update the PCB ASN. Another thread may have allocated
a new mm->context (via flush_tlb_mm) without the ASN serial
number wrapping. We have no way to detect when this is needed. */
- p->tss.asn = mmc & HARDWARE_ASN_MASK;
+ next->thread.asn = mmc & HARDWARE_ASN_MASK;
+}
+
+__EXTERN_INLINE void
+ev5_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm, long cpu)
+{
+ unsigned long mmc = __get_new_mm_context(next_mm, cpu);
+ next_mm->context = mmc;
+ current->thread.asn = mmc & HARDWARE_ASN_MASK;
+ current->thread.ptbr
+ = ((unsigned long) next_mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
+
+ __reload_thread(¤t->thread);
}
+
#ifdef CONFIG_ALPHA_GENERIC
-# define get_mmu_context (alpha_mv.mv_get_mmu_context)
+# define switch_mm alpha_mv.mv_switch_mm
+# define activate_mm(x,y) alpha_mv.mv_activate_mm((x),(y),smp_processor_id())
#else
# ifdef CONFIG_ALPHA_EV4
-# define get_mmu_context ev4_get_mmu_context
+# define switch_mm ev4_switch_mm
+# define activate_mm(x,y) ev4_activate_mm((x),(y),smp_processor_id())
# else
-# define get_mmu_context ev5_get_mmu_context
+# define switch_mm ev5_switch_mm
+# define activate_mm(x,y) ev5_activate_mm((x),(y),smp_processor_id())
# endif
#endif
extern inline void
-init_new_context(struct mm_struct *mm)
+init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
mm->context = 0;
+ tsk->thread.ptbr = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
}
extern inline void
#undef __MMU_EXTERN_INLINE
#endif
-/*
- * Force a context reload. This is needed when we change the page
- * table pointer or when we update the ASN of the current process.
- */
-
-/* Don't get into trouble with dueling __EXTERN_INLINEs. */
-#ifndef __EXTERN_INLINE
-#include <asm/io.h>
-#endif
-
-extern inline unsigned long
-__reload_tss(struct thread_struct *tss)
-{
- register unsigned long a0 __asm__("$16");
- register unsigned long v0 __asm__("$0");
-
- a0 = virt_to_phys(tss);
- __asm__ __volatile__(
- "call_pal %2 #__reload_tss"
- : "=r"(v0), "=r"(a0)
- : "i"(PAL_swpctx), "r"(a0)
- : "$1", "$16", "$22", "$23", "$24", "$25");
-
- return v0;
-}
-
-extern inline void
-reload_context(struct task_struct *task)
-{
- __reload_tss(&task->tss);
-}
-
-/*
- * After setting current->mm to a new value, activate the context for the
- * new mm so we see the new mappings.
- */
-
-extern inline void
-activate_context(struct task_struct *task)
-{
- get_new_mmu_context(task, task->mm);
- reload_context(task);
-}
-
#endif /* __ALPHA_MMU_CONTEXT_H */
#endif /* STRICT_MM_TYPECHECKS */
-#define BUG() \
-do { \
- printk("Kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
- __asm__ __volatile__("call_pal 129 # bugchk"); \
-} while (1)
-
-#define PAGE_BUG(page) BUG()
+#define BUG() __asm__ __volatile__("call_pal 129 # bugchk")
+#define PAGE_BUG(page) BUG()
#endif /* !ASSEMBLY */
{
}
-__EXTERN_INLINE void
-ev5_flush_tlb_current(struct mm_struct *mm)
-{
- get_new_mmu_context(current, mm);
- reload_context(current);
-}
+extern void ev5_flush_tlb_current(struct mm_struct *mm);
__EXTERN_INLINE void
ev5_flush_tlb_other(struct mm_struct *mm)
extern inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= __DIRTY_BITS; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= __ACCESS_BITS; return pte; }
-/*
- * To set the page-dir. Note the self-mapping in the last entry
- *
- * Also note that if we update the current process ptbr, we need to
- * update the PAL-cached ptbr value as well.. There doesn't seem to
- * be any "wrptbr" PAL-insn, but we can do a dummy swpctx to ourself
- * instead.
- */
-extern inline void SET_PAGE_DIR(struct task_struct * tsk, pgd_t * pgdir)
-{
- pgd_val(pgdir[PTRS_PER_PGD]) = pte_val(mk_pte((unsigned long) pgdir, PAGE_KERNEL));
- tsk->tss.ptbr = ((unsigned long) pgdir - PAGE_OFFSET) >> PAGE_SHIFT;
- if (tsk == current)
- reload_context(tsk);
-}
-
#define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))
/* to find an entry in a kernel page-table-directory */
memset (ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
memcpy (ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+
+ pgd_val(ret[PTRS_PER_PGD])
+ = pte_val(mk_pte((unsigned long)ret, PAGE_KERNEL));
}
return ret;
}
#define INIT_MMAP { &init_mm, PAGE_OFFSET, PAGE_OFFSET+0x10000000, \
NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }
-#define INIT_TSS { \
+#define INIT_THREAD { \
0, 0, 0, \
0, 0, 0, \
0, 0, 0, \
KERNEL_DS \
}
+#define THREAD_SIZE (2*PAGE_SIZE)
+
#include <asm/ptrace.h>
/*
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);
-#define copy_segments(nr, tsk, mm) do { } while (0)
+#define copy_segments(tsk, mm) do { } while (0)
#define release_segments(mm) do { } while (0)
#define forget_segments() do { } while (0)
{LONG_MAX, LONG_MAX}, /* RLIMIT_CPU */ \
{LONG_MAX, LONG_MAX}, /* RLIMIT_FSIZE */ \
{LONG_MAX, LONG_MAX}, /* RLIMIT_DATA */ \
- {_STK_LIM, _STK_LIM}, /* RLIMIT_STACK */ \
+ {_STK_LIM, LONG_MAX}, /* RLIMIT_STACK */ \
{ 0, LONG_MAX}, /* RLIMIT_CORE */ \
{LONG_MAX, LONG_MAX}, /* RLIMIT_RSS */ \
{INR_OPEN, INR_OPEN}, /* RLIMIT_NOFILE */ \
{LONG_MAX, LONG_MAX}, /* RLIMIT_AS */ \
- {MAX_TASKS_PER_USER, MAX_TASKS_PER_USER}, /* RLIMIT_NPROC */ \
+ {LONG_MAX, LONG_MAX}, /* RLIMIT_NPROC */ \
{LONG_MAX, LONG_MAX}, /* RLIMIT_MEMLOCK */ \
}
do { \
unsigned long pcbb; \
current = (next); \
- pcbb = virt_to_phys(¤t->tss); \
+ pcbb = virt_to_phys(¤t->thread); \
(last) = alpha_switch_to(pcbb, (prev)); \
} while (0)
#define VERIFY_READ 0
#define VERIFY_WRITE 1
-#define get_fs() (current->tss.fs)
+#define get_fs() (current->thread.fs)
#define get_ds() (KERNEL_DS)
-#define set_fs(x) (current->tss.fs = (x))
+#define set_fs(x) (current->thread.fs = (x))
#define segment_eq(a,b) ((a).seg == (b).seg)
#define destroy_context(mm) do { } while(0)
#define init_new_context(tsk,mm) do { } while (0)
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, unsigned cpu)
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu)
{
if (prev != next) {
set_bit(cpu, &next->cpu_vm_mask);
}
+#define activate_mm(prev, next) \
+ switch_mm((prev),(next),NULL,smp_processor_id())
+
#endif
*/
extern struct apm_bios_info apm_bios_info;
-extern void apm_bios_init(void);
+extern void apm_init(void);
extern void apm_setup(char *, int *);
extern int apm_register_callback(int (*callback)(apm_event_t));
0-0xFFFFFFFF for kernel-thread
*/
struct exec_domain *exec_domain;
- long need_resched;
+ volatile long need_resched;
/* various fields */
long counter;
extern void setup_arch(char **, unsigned long *, unsigned long *);
+extern int cpu_idle(void * unused);
#ifndef __SMP__
-/*
- * Uniprocessor idle thread
- */
-
-int cpu_idle(void *unused)
-{
- for(;;)
- idle();
-}
-
#define smp_init() do { } while (0)
#else
-/*
- * Multiprocessor idle thread is in arch/...
- */
-
-extern int cpu_idle(void * unused);
-
/* Called by boot processor to activate the rest. */
static void __init smp_init(void)
{
#ifdef CONFIG_MAC
nubus_init();
#endif
+#ifdef CONFIG_APM
+ apm_init();
+#endif
/* Networking initialization needs a process context */
sock_init();
current->mm = mm;
if (mm != active_mm) {
current->active_mm = mm;
- switch_mm(active_mm, mm, current->processor);
+ activate_mm(active_mm, mm);
}
mmdrop(active_mm);
}
unsigned long page;
long retval;
- if ((unsigned long)user_name >= TASK_SIZE
- && !segment_eq(get_fs (), KERNEL_DS))
- return -EFAULT;
-
page = __get_free_page(GFP_KERNEL);
if (!page)
return -ENOMEM;
atomic_inc(&oldmm->mm_count);
} else {
if (next->active_mm != mm) BUG();
- switch_mm(oldmm, mm, this_cpu);
+ switch_mm(oldmm, mm, next, this_cpu);
}
if (!prev->mm) {
*/
entry = get_swap_page();
if (!entry)
- goto out_failed; /* No swap space left */
+ goto out_failed_unlock; /* No swap space left */
vma->vm_mm->rss--;
tsk->nswap++;