VERSION = 2
PATCHLEVEL = 3
-SUBLEVEL = 46
+SUBLEVEL = 47
EXTRAVERSION =
ARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ -e s/arm.*/arm/ -e s/sa110/arm/)
DRIVERS := $(DRIVERS) drivers/fc4/fc4.a
endif
-ifdef CONFIG_PPC
+ifdef CONFIG_MAC
DRIVERS := $(DRIVERS) drivers/macintosh/macintosh.a
endif
O_TARGET := kernel.o
O_OBJS := entry.o traps.o process.o osf_sys.o irq.o signal.o setup.o \
- ptrace.o time.o semaphore.o
+ ptrace.o time.o semaphore.o i8259.o rtc_irq.o
OX_OBJS := alpha_ksyms.o
EXPORT_SYMBOL(__memsetw);
EXPORT_SYMBOL(__constant_c_memset);
+EXPORT_SYMBOL(pci_alloc_consistent);
+EXPORT_SYMBOL(pci_free_consistent);
+EXPORT_SYMBOL(pci_map_single);
+EXPORT_SYMBOL(pci_unmap_single);
+EXPORT_SYMBOL(pci_map_sg);
+EXPORT_SYMBOL(pci_unmap_sg);
+
EXPORT_SYMBOL(dump_thread);
EXPORT_SYMBOL(dump_fpu);
EXPORT_SYMBOL(hwrpb);
--- /dev/null
+/* started hacking from linux-2.3.30pre6/arch/i386/kernel/i8259.c */
+
+#include <linux/init.h>
+#include <linux/cache.h>
+#include <linux/sched.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+
+#include <asm/io.h>
+#include <asm/delay.h>
+
+/*
+ * This is the 'legacy' 8259A Programmable Interrupt Controller,
+ * present in the majority of PC/AT boxes.
+ */
+
+static void enable_8259A_irq(unsigned int irq);
+static void disable_8259A_irq(unsigned int irq);
+
+/* shutdown is same as "disable" */
+#define end_8259A_irq enable_8259A_irq
+#define shutdown_8259A_irq disable_8259A_irq
+
+static void mask_and_ack_8259A(unsigned int);
+
+/* "startup" on an 8259A line is simply "enable" (unmask); the PIC has no
+ separate start state and nothing can already be pending. */
+static unsigned int startup_8259A_irq(unsigned int irq)
+{
+ enable_8259A_irq(irq);
+ return 0; /* never anything pending */
+}
+
+static struct hw_interrupt_type i8259A_irq_type = {
+ "XT-PIC",
+ startup_8259A_irq,
+ shutdown_8259A_irq,
+ enable_8259A_irq,
+ disable_8259A_irq,
+ mask_and_ack_8259A,
+ end_8259A_irq
+};
+
+/*
+ * 8259A PIC functions to handle ISA devices:
+ */
+
+/*
+ * This contains the irq mask for both 8259A irq controllers,
+ */
+static unsigned int cached_irq_mask = 0xffff;
+
+#define __byte(x,y) (((unsigned char *)&(y))[x])
+#define cached_21 (__byte(0,cached_irq_mask))
+#define cached_A1 (__byte(1,cached_irq_mask))
+
+/*
+ * These have to be protected by the irq controller spinlock
+ * before being called.
+ */
+/*
+ * Mask `irq' in hardware: set its bit in cached_irq_mask, then write the
+ * relevant byte out -- slave PIC (port 0xA1) for irqs 8-15, master PIC
+ * (port 0x21) for irqs 0-7.
+ */
+static void disable_8259A_irq(unsigned int irq)
+{
+ unsigned int mask = 1 << irq;
+ cached_irq_mask |= mask;
+ if (irq & 8)
+ outb(cached_A1,0xA1);
+ else
+ outb(cached_21,0x21);
+}
+
+/*
+ * Unmask `irq': clear its bit in cached_irq_mask and flush the affected
+ * mask byte to the slave (0xA1) or master (0x21) PIC.
+ */
+static void enable_8259A_irq(unsigned int irq)
+{
+ unsigned int mask = ~(1 << irq);
+ cached_irq_mask &= mask;
+ if (irq & 8)
+ outb(cached_A1,0xA1);
+ else
+ outb(cached_21,0x21);
+}
+
+/*
+ * Mask `irq', then acknowledge it with an OCW2 "rotate on specific EOI"
+ * command (0xE0 | level), demoting the serviced level to lowest priority.
+ * A slave irq is EOI'd on the slave (0xA0) first, then the cascade
+ * level (2) is EOI'd on the master (0x20).
+ */
+static void mask_and_ack_8259A(unsigned int irq)
+{
+ disable_8259A_irq(irq);
+
+ /* Ack the interrupt making it the lowest priority */
+ /* First the slave .. */
+ if (irq > 7) {
+ outb(0xE0 | (irq - 8), 0xa0);
+ irq = 2;
+ }
+ /* .. then the master */
+ outb(0xE0 | irq, 0x20);
+}
+
+/* Quiet both PICs: mask every line; individual irqs are unmasked on
+ demand via enable_8259A_irq(). */
+static void init_8259A(void)
+{
+ outb(0xff, 0x21); /* mask all of 8259A-1 */
+ outb(0xff, 0xA1); /* mask all of 8259A-2 */
+}
+
+/*
+ * IRQ2 is cascade interrupt to second interrupt controller
+ */
+static struct irqaction irq2 = { no_action, 0, 0, "cascade", NULL, NULL};
+
+/*
+ * Attach the 8259A controller type to ISA irqs 0-15 -- except RTC_IRQ,
+ * which gets its own controller (see init_RTC_irq) -- then mask both
+ * PICs and claim irq 2 for the master->slave cascade.
+ */
+void __init
+init_ISA_irqs (void)
+{
+ int i;
+
+ for (i = 0; i < NR_IRQS; i++) {
+ if (i == RTC_IRQ)
+ continue;
+ if (i >= 16)
+ break;
+ irq_desc[i].status = IRQ_DISABLED;
+ /*
+ * 16 old-style INTA-cycle interrupts:
+ */
+ irq_desc[i].handler = &i8259A_irq_type;
+ }
+
+ init_8259A();
+ setup_irq(2, &irq2);
+}
#ifndef __SMP__
int __local_irq_count;
int __local_bh_count;
+unsigned long __irq_attempt[NR_IRQS];
#endif
#if NR_IRQS > 128
#define IS_RESERVED_IRQ(irq) ((irq)==2)
-/*
- * Shadow-copy of masked interrupts.
- */
-
-unsigned long _alpha_irq_masks[2] = { ~0UL, ~0UL };
-
/*
* The ack_irq routine used by 80% of the systems.
*/
return;
}
}
- handle_irq(j, j, regs);
+ handle_irq(j, regs);
#else
unsigned long pic;
void
srm_device_interrupt(unsigned long vector, struct pt_regs * regs)
{
- int irq, ack;
+ int irq;
- ack = irq = (vector - 0x800) >> 4;
- handle_irq(irq, ack, regs);
+ irq = (vector - 0x800) >> 4;
+ handle_irq(irq, regs);
}
/*
- * Initial irq handlers.
+ * Special irq handlers.
*/
-static struct irqaction timer_irq = { NULL, 0, 0, NULL, NULL, NULL};
-spinlock_t irq_controller_lock = SPIN_LOCK_UNLOCKED;
-irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = { [0 ... NR_IRQS-1] = {0,} };
+void no_action(int cpl, void *dev_id, struct pt_regs *regs) { }
+/*
+ * Initial irq handlers.
+ */
-static inline void
-mask_irq(unsigned long irq)
+static void enable_none(unsigned int irq) { }
+static unsigned int startup_none(unsigned int irq) { return 0; }
+static void disable_none(unsigned int irq) { }
+static void ack_none(unsigned int irq)
{
- set_bit(irq, _alpha_irq_masks);
- alpha_mv.update_irq_hw(irq, alpha_irq_mask, 0);
+ printk("unexpected IRQ trap at vector %02x\n", irq);
}
-static inline void
-unmask_irq(unsigned long irq)
+/* startup is the same as "enable", shutdown is same as "disable" */
+#define shutdown_none disable_none
+#define end_none enable_none
+
+/* Placeholder controller for vectors with no real hardware behind them;
+ every hook is a no-op except ack_none, which logs the unexpected trap. */
+struct hw_interrupt_type no_irq_type = {
+ "none",
+ startup_none,
+ shutdown_none,
+ enable_none,
+ disable_none,
+ ack_none,
+ end_none
+};
+
+spinlock_t irq_controller_lock = SPIN_LOCK_UNLOCKED;
+irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned =
+ { [0 ... NR_IRQS-1] = { 0, &no_irq_type, }};
+
+/*
+ * Run every handler chained on `irq' (walking action->next for shared
+ * irqs), bracketed by irq_enter/irq_exit with per-cpu kstat accounting.
+ * Handlers lacking SA_INTERRUPT run with interrupts enabled (__sti);
+ * SA_INTERRUPT ("fast") handlers run with them disabled.  Returns the
+ * OR of all handler flags with bit 0 forced on.
+ */
+int handle_IRQ_event(unsigned int irq, struct pt_regs * regs, struct irqaction * action)
{
- clear_bit(irq, _alpha_irq_masks);
- alpha_mv.update_irq_hw(irq, alpha_irq_mask, 1);
+ int status;
+ int cpu = smp_processor_id();
+
+ kstat.irqs[cpu][irq]++;
+ irq_enter(cpu, irq);
+
+ status = 1; /* Force the "do bottom halves" bit */
+
+ do {
+ if (!(action->flags & SA_INTERRUPT))
+ __sti();
+ else
+ __cli();
+
+ status |= action->flags;
+ action->handler(irq, action->dev_id, regs);
+ action = action->next;
+ } while (action);
+ if (status & SA_SAMPLE_RANDOM)
+ add_interrupt_randomness(irq);
+ __cli();
+
+ irq_exit(cpu, irq);
+
+ return status;
}
+/*
+ * Generic enable/disable code: this just calls
+ * down into the PIC-specific version for the actual
+ * hardware disable after having gotten the irq
+ * controller lock.
+ */
+/*
+ * Disable `irq' without waiting for running handlers to finish: bump the
+ * nesting depth and, on the 0->1 transition only, mark IRQ_DISABLED and
+ * call the controller's disable hook.  Balanced by enable_irq().
+ */
void
-disable_irq_nosync(unsigned int irq_nr)
+disable_irq_nosync(unsigned int irq)
{
unsigned long flags;
- save_and_cli(flags);
- mask_irq(irq_nr);
- restore_flags(flags);
+ spin_lock_irqsave(&irq_controller_lock, flags);
+ if (!irq_desc[irq].depth++) {
+ irq_desc[irq].status |= IRQ_DISABLED;
+ irq_desc[irq].handler->disable(irq);
+ }
+ spin_unlock_irqrestore(&irq_controller_lock, flags);
}
+/*
+ * Synchronous version of the above, making sure the IRQ is
+ * no longer running on any other IRQ..
+ */
+/*
+ * Disable `irq' and busy-wait until no CPU is still inside one of its
+ * handlers (IRQ_INPROGRESS clear).  The wait is skipped when we are
+ * ourselves in interrupt context -- NOTE(review): presumably to avoid
+ * spinning on a handler that cannot complete beneath us; confirm.
+ */
void
-disable_irq(unsigned int irq_nr)
+disable_irq(unsigned int irq)
{
- /* This works non-SMP, and SMP until we write code to distribute
- interrupts to more that cpu 0. */
- disable_irq_nosync(irq_nr);
+ disable_irq_nosync(irq);
+
+ if (!local_irq_count(smp_processor_id())) {
+ do {
+ barrier();
+ } while (irq_desc[irq].status & IRQ_INPROGRESS);
+ }
}
+/*
+ * Undo one level of disable_irq().  On the 1->0 depth transition the irq
+ * is re-enabled at the controller; an interrupt that arrived while
+ * disabled (PENDING set, REPLAY not yet set) is marked REPLAY and
+ * resent (hw_resend_irq is a noop here).  Case 1 deliberately falls
+ * through to the depth decrement; depth 0 means an unbalanced enable.
+ */
void
-enable_irq(unsigned int irq_nr)
+enable_irq(unsigned int irq)
{
unsigned long flags;
- save_and_cli(flags);
- unmask_irq(irq_nr);
- restore_flags(flags);
+ spin_lock_irqsave(&irq_controller_lock, flags);
+ switch (irq_desc[irq].depth) {
+ case 1: {
+ unsigned int status = irq_desc[irq].status & ~IRQ_DISABLED;
+ irq_desc[irq].status = status;
+ if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
+ irq_desc[irq].status = status | IRQ_REPLAY;
+ hw_resend_irq(irq_desc[irq].handler,irq); /* noop */
+ }
+ irq_desc[irq].handler->enable(irq);
+ /* fall-through */
+ }
+ default:
+ irq_desc[irq].depth--;
+ break;
+ case 0:
+ printk("enable_irq() unbalanced from %p\n",
+ __builtin_return_address(0));
+ }
+ spin_unlock_irqrestore(&irq_controller_lock, flags);
}
+/*
+ * Register `new' on `irq': append to the action list when both the
+ * existing and new handler agree to share (SA_SHIRQ), else -EBUSY.
+ * A first (non-shared) handler also resets depth and starts the line at
+ * the controller.  rand_initialize_irq() may sleep, so it runs before
+ * the spinlock is taken.
+ */
int
-check_irq(unsigned int irq)
+setup_irq(unsigned int irq, struct irqaction * new)
{
- return irq_desc[irq].action ? -EBUSY : 0;
+ int shared = 0;
+ struct irqaction *old, **p;
+ unsigned long flags;
+
+ /*
+ * Some drivers like serial.c use request_irq() heavily,
+ * so we have to be careful not to interfere with a
+ * running system.
+ */
+ if (new->flags & SA_SAMPLE_RANDOM) {
+ /*
+ * This function might sleep, so we want to call it first,
+ * outside of the atomic block.
+ * Yes, this might clear the entropy pool if the wrong
+ * driver is attempted to be loaded, without actually
+ * installing a new handler -- but is this really a problem?
+ * Only the sysadmin is able to do this.
+ */
+ rand_initialize_irq(irq);
+ }
+
+ /*
+ * The following block of code has to be executed atomically
+ */
+ spin_lock_irqsave(&irq_controller_lock,flags);
+ p = &irq_desc[irq].action;
+ if ((old = *p) != NULL) {
+ /* Can't share interrupts unless both agree to */
+ if (!(old->flags & new->flags & SA_SHIRQ)) {
+ spin_unlock_irqrestore(&irq_controller_lock,flags);
+ return -EBUSY;
+ }
+
+ /* add new interrupt at end of irq queue */
+ do {
+ p = &old->next;
+ old = *p;
+ } while (old);
+ shared = 1;
+ }
+
+ *p = new;
+
+ if (!shared) {
+ irq_desc[irq].depth = 0;
+ irq_desc[irq].status &= ~IRQ_DISABLED;
+ irq_desc[irq].handler->startup(irq);
+ }
+ spin_unlock_irqrestore(&irq_controller_lock,flags);
+ return 0;
}
+/*
+ * Public entry point for drivers: validate irq/handler, allocate and
+ * fill an irqaction, then hand it to setup_irq(); the action is freed
+ * again if setup_irq() refuses (e.g. sharing mismatch).  SA_SHIRQ
+ * callers should supply a non-NULL dev_id so free_irq() can later find
+ * the right entry.
+ */
int
request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
unsigned long irqflags, const char * devname, void *dev_id)
{
- int shared = 0;
- struct irqaction * action, **p;
- unsigned long flags;
+ int retval;
+ struct irqaction * action;
if (irq >= ACTUAL_NR_IRQS)
return -EINVAL;
if (!handler)
return -EINVAL;
- p = &irq_desc[irq].action;
- action = *p;
- if (action) {
- /* Can't share interrupts unless both agree to */
- if (!(action->flags & irqflags & SA_SHIRQ))
- return -EBUSY;
-
- /* Can't share interrupts unless both are same type */
- if ((action->flags ^ irqflags) & SA_INTERRUPT)
- return -EBUSY;
-
- /* Add new interrupt at end of irq queue */
- do {
- p = &action->next;
- action = *p;
- } while (action);
- shared = 1;
+#if 1
+ /*
+ * Sanity-check: shared interrupts should REALLY pass in
+ * a real dev-ID, otherwise we'll have trouble later trying
+ * to figure out which interrupt is which (messes up the
+ * interrupt freeing logic etc).
+ */
+ if (irqflags & SA_SHIRQ) {
+ if (!dev_id)
+ printk("Bad boy: %s (at %p) called us without a dev_id!\n",
+ devname, __builtin_return_address(0));
}
+#endif
- action = &timer_irq;
- if (irq != TIMER_IRQ) {
- action = (struct irqaction *)
+ action = (struct irqaction *)
kmalloc(sizeof(struct irqaction), GFP_KERNEL);
- }
if (!action)
return -ENOMEM;
- if (irqflags & SA_SAMPLE_RANDOM)
- rand_initialize_irq(irq);
-
action->handler = handler;
action->flags = irqflags;
action->mask = 0;
action->next = NULL;
action->dev_id = dev_id;
- save_and_cli(flags);
- *p = action;
-
- if (!shared)
- unmask_irq(irq);
-
- restore_flags(flags);
- return 0;
+ retval = setup_irq(irq, action);
+ if (retval)
+ kfree(action);
+ return retval;
}
-
+
+/*
+ * Remove the handler matching `dev_id' from `irq'.  If it was the last
+ * one, mark the line IRQ_DISABLED and shut it down at the controller;
+ * then spin until no other CPU is still executing the handler
+ * (IRQ_INPROGRESS) before kfree'ing the action.  A missing match falls
+ * through to the "free free IRQ" warning.
+ */
void
free_irq(unsigned int irq, void *dev_id)
{
- struct irqaction * action, **p;
+ struct irqaction **p;
unsigned long flags;
if (irq >= ACTUAL_NR_IRQS) {
printk("Trying to free reserved IRQ %d\n", irq);
return;
}
+ spin_lock_irqsave(&irq_controller_lock,flags);
+ p = &irq_desc[irq].action;
+ for (;;) {
+ struct irqaction * action = *p;
+ if (action) {
+ struct irqaction **pp = p;
+ p = &action->next;
+ if (action->dev_id != dev_id)
+ continue;
- /* Found it - now free it */
- save_and_cli(flags);
- *p = action->next;
- if (!irq_desc[irq].action)
- mask_irq(irq);
- restore_flags(flags);
- kfree(action);
+ /* Found it - now remove it from the list of entries */
+ *pp = action->next;
+ if (!irq_desc[irq].action) {
+ irq_desc[irq].status |= IRQ_DISABLED;
+ irq_desc[irq].handler->shutdown(irq);
+ }
+ spin_unlock_irqrestore(&irq_controller_lock,flags);
+
+ /* Wait to make sure it's not being used on another CPU */
+ while (irq_desc[irq].status & IRQ_INPROGRESS)
+ barrier();
+ kfree(action);
+ return;
+ }
+ printk("Trying to free free IRQ%d\n",irq);
+ spin_unlock_irqrestore(&irq_controller_lock,flags);
return;
}
- printk("Trying to free free IRQ%d\n",irq);
}
int get_irq_list(char *buf)
{
- int i;
+ int i, j;
struct irqaction * action;
char *p = buf;
p += sprintf(p, " ");
for (i = 0; i < smp_num_cpus; i++)
p += sprintf(p, "CPU%d ", i);
+ for (i = 0; i < smp_num_cpus; i++)
+ p += sprintf(p, "TRY%d ", i);
*p++ = '\n';
#endif
#ifndef __SMP__
p += sprintf(p, "%10u ", kstat_irqs(i));
#else
- {
- int j;
- for (j = 0; j < smp_num_cpus; j++)
- p += sprintf(p, "%10u ",
- kstat.irqs[cpu_logical_map(j)][i]);
- }
+ for (j = 0; j < smp_num_cpus; j++)
+ p += sprintf(p, "%10u ",
+ kstat.irqs[cpu_logical_map(j)][i]);
+ for (j = 0; j < smp_num_cpus; j++)
+ p += sprintf(p, "%10lu ",
+ irq_attempt(cpu_logical_map(j), i));
#endif
+ p += sprintf(p, " %14s", irq_desc[i].handler->typename);
p += sprintf(p, " %c%s",
(action->flags & SA_INTERRUPT)?'+':' ',
action->name);
}
*p++ = '\n';
}
+#if CONFIG_SMP
+ p += sprintf(p, "LOC: ");
+ for (j = 0; j < smp_num_cpus; j++)
+ p += sprintf(p, "%10lu ",
+ cpu_data[cpu_logical_map(j)].smp_local_irq_count);
+ p += sprintf(p, "\n");
+#endif
return p - buf;
}
}
#endif /* __SMP__ */
-static void
-unexpected_irq(int irq, struct pt_regs * regs)
-{
-#if 0
-#if 1
- printk("device_interrupt: unexpected interrupt %d\n", irq);
-#else
- struct irqaction *action;
- int i;
-
- printk("IO device interrupt, irq = %d\n", irq);
- printk("PC = %016lx PS=%04lx\n", regs->pc, regs->ps);
- printk("Expecting: ");
- for (i = 0; i < ACTUAL_NR_IRQS; i++)
- if ((action = irq_desc[i].action))
- while (action->handler) {
- printk("[%s:%d] ", action->name, i);
- action = action->next;
- }
- printk("\n");
-#endif
-#endif
-
-#if defined(CONFIG_ALPHA_JENSEN)
- /* ??? Is all this just debugging, or are the inb's and outb's
- necessary to make things work? */
- printk("64=%02x, 60=%02x, 3fa=%02x 2fa=%02x\n",
- inb(0x64), inb(0x60), inb(0x3fa), inb(0x2fa));
- outb(0x0c, 0x3fc);
- outb(0x0c, 0x2fc);
- outb(0,0x61);
- outb(0,0x461);
-#endif
-}
-
+/*
+ * do_IRQ handles all normal device IRQ's (the special
+ * SMP cross-CPU interrupts have their own specific
+ * handlers).
+ */
+/*
+ * Per-irq dispatch: ack at the controller, claim the irq under the
+ * controller lock via the PENDING/INPROGRESS protocol (so only one CPU
+ * runs a given irq's handlers at a time), run the handler chain, and
+ * replay any edge-triggered instance that arrived meanwhile.
+ */
void
-handle_irq(int irq, int ack, struct pt_regs * regs)
-{
- struct irqaction * action;
+handle_irq(int irq, struct pt_regs * regs)
+{
+ /*
+ * We ack quickly, we don't want the irq controller
+ * thinking we're snobs just because some other CPU has
+ * disabled global interrupts (we have already done the
+ * INT_ACK cycles, it's too late to try to pretend to the
+ * controller that we aren't taking the interrupt).
+ *
+ * 0 return value means that this irq is already being
+ * handled by some other CPU. (or is disabled)
+ */
int cpu = smp_processor_id();
+ irq_desc_t *desc;
+ struct irqaction * action;
+ unsigned int status;
+ /* NOTE(review): this bounds check uses `>', so irq == ACTUAL_NR_IRQS
+ slips through to index irq_desc[]; `>=' looks intended. */
if ((unsigned) irq > ACTUAL_NR_IRQS) {
printk("device_interrupt: illegal interrupt %d\n", irq);
return;
}
-#if 0
- /* A useful bit of code to find out if an interrupt is going wild. */
- {
- static unsigned int last_msg, last_cc;
- static int last_irq, count;
- unsigned int cc;
-
- __asm __volatile("rpcc %0" : "=r"(cc));
- ++count;
- if (cc - last_msg > 150000000 || irq != last_irq) {
- printk("handle_irq: irq %d count %d cc %u @ %p\n",
- irq, count, cc-last_cc, regs->pc);
- count = 0;
- last_msg = cc;
- last_irq = irq;
- }
- last_cc = cc;
+ irq_attempt(cpu, irq)++;
+ desc = irq_desc + irq;
+ spin_lock_irq(&irq_controller_lock); /* mask also the RTC */
+ desc->handler->ack(irq);
+ /*
+ REPLAY is when Linux resends an IRQ that was dropped earlier
+ WAITING is used by probe to mark irqs that are being tested
+ */
+ status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
+ status |= IRQ_PENDING; /* we _want_ to handle it */
+
+ /*
+ * If the IRQ is disabled for whatever reason, we cannot
+ * use the action we have.
+ */
+ action = NULL;
+ if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
+ action = desc->action;
+ status &= ~IRQ_PENDING; /* we commit to handling */
+ status |= IRQ_INPROGRESS; /* we are handling it */
}
-#endif
+ desc->status = status;
+ spin_unlock(&irq_controller_lock);
- irq_enter(cpu, irq);
- kstat.irqs[cpu][irq] += 1;
- action = irq_desc[irq].action;
+ /*
+ * If there is no IRQ handler or it was disabled, exit early.
+ Since we set PENDING, if another processor is handling
+ a different instance of this same irq, the other processor
+ will take care of it.
+ */
+ if (!action)
+ return;
/*
- * For normal interrupts, we mask it out, and then ACK it.
- * This way another (more timing-critical) interrupt can
- * come through while we're doing this one.
- *
- * Note! An irq without a handler gets masked and acked, but
- * never unmasked. The autoirq stuff depends on this (it looks
- * at the masks before and after doing the probing).
+ * Edge triggered interrupts need to remember
+ * pending events.
+ * This applies to any hw interrupts that allow a second
+ * instance of the same irq to arrive while we are in do_IRQ
+ * or in the handler. But the code here only handles the _second_
+ * instance of the irq, not the third or fourth. So it is mostly
+ * useful for irq hardware that does not mask cleanly in an
+ * SMP environment.
*/
- if (ack >= 0) {
- mask_irq(ack);
- alpha_mv.ack_irq(ack);
- }
- if (action) {
- if (action->flags & SA_SAMPLE_RANDOM)
- add_interrupt_randomness(irq);
- do {
- action->handler(irq, action->dev_id, regs);
- action = action->next;
- } while (action);
- if (ack >= 0)
- unmask_irq(ack);
- } else {
- unexpected_irq(irq, regs);
+ for (;;) {
+ handle_IRQ_event(irq, regs, action);
+ spin_lock(&irq_controller_lock);
+
+ if (!(desc->status & IRQ_PENDING)
+ || (desc->status & IRQ_LEVEL))
+ break;
+ desc->status &= ~IRQ_PENDING;
+ spin_unlock(&irq_controller_lock);
}
- irq_exit(cpu, irq);
+ desc->status &= ~IRQ_INPROGRESS;
+ if (!(desc->status & IRQ_DISABLED))
+ desc->handler->end(irq);
+ spin_unlock(&irq_controller_lock);
}
-
/*
- * Start listening for interrupts..
+ * IRQ autodetection code..
+ *
+ * This depends on the fact that any interrupt that
+ * comes in on to an unassigned handler will get stuck
+ * with "IRQ_WAITING" cleared and the interrupt
+ * disabled.
*/
-
+/*
+ * Begin IRQ autodetection: start every unassigned irq and let any
+ * longstanding interrupt flush itself (~20ms), re-arm unassigned irqs
+ * with IRQ_AUTODETECT|IRQ_WAITING, then shut down any that fire during
+ * a ~100ms window (those are spurious).  Returns the cookie 0x12345678
+ * that probe_irq_off() expects back.
+ */
unsigned long
probe_irq_on(void)
{
- struct irqaction * action;
- unsigned long irqs = 0;
- unsigned long delay;
unsigned int i;
+ unsigned long delay;
- /* Handle only the first 64 IRQs here. This is enough for
- [E]ISA, which is the only thing that needs probing anyway. */
- for (i = (ACTUAL_NR_IRQS - 1) & 63; i > 0; i--) {
- if (!(PROBE_MASK & (1UL << i))) {
- continue;
- }
- action = irq_desc[i].action;
- if (!action) {
- enable_irq(i);
- irqs |= (1UL << i);
+ /* Something may have generated an irq long ago and we want to
+ flush such a longstanding irq before considering it as spurious. */
+ spin_lock_irq(&irq_controller_lock);
+ for (i = NR_IRQS-1; i > 0; i--)
+ if (!irq_desc[i].action)
+ irq_desc[i].handler->startup(i);
+ spin_unlock_irq(&irq_controller_lock);
+
+ /* Wait for longstanding interrupts to trigger. */
+ for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
+ /* about 20ms delay */ synchronize_irq();
+
+ /* Start up the unassigned irqs again: a longstanding irq in the
+ previous stage may have masked itself, so the first startup
+ is not enough. */
+ spin_lock_irq(&irq_controller_lock);
+ for (i = NR_IRQS-1; i > 0; i--) {
+ if (!irq_desc[i].action) {
+ irq_desc[i].status |= IRQ_AUTODETECT | IRQ_WAITING;
+ if(irq_desc[i].handler->startup(i))
+ irq_desc[i].status |= IRQ_PENDING;
}
}
+ spin_unlock_irq(&irq_controller_lock);
+
+ /*
+ * Wait for spurious interrupts to trigger
+ */
+ for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
+ /* about 100ms delay */ synchronize_irq();
/*
- * Wait about 100ms for spurious interrupts to mask themselves
- * out again...
+ * Now filter out any obviously spurious interrupts
*/
- for (delay = jiffies + HZ/10; time_before(jiffies, delay); )
- barrier();
+ spin_lock_irq(&irq_controller_lock);
+ for (i=0; i<NR_IRQS; i++) {
+ unsigned int status = irq_desc[i].status;
- /* Now filter out any obviously spurious interrupts. */
- return irqs & ~alpha_irq_mask;
+ if (!(status & IRQ_AUTODETECT))
+ continue;
+
+ /* It triggered already - consider it spurious. */
+ if (!(status & IRQ_WAITING)) {
+ irq_desc[i].status = status & ~IRQ_AUTODETECT;
+ irq_desc[i].handler->shutdown(i);
+ }
+ }
+ spin_unlock_irq(&irq_controller_lock);
+
+ return 0x12345678;
}
/*
*/
+/*
+ * Finish autodetection: return the one irq whose IRQ_WAITING was
+ * consumed (it fired), the negated first such irq if several fired, or
+ * 0 if none.  Clears IRQ_AUTODETECT and shuts every probed line down.
+ */
int
-probe_irq_off(unsigned long irqs)
+probe_irq_off(unsigned long unused)
{
- int i;
-
- /* Handle only the first 64 IRQs here. This is enough for
- [E]ISA, which is the only thing that needs probing anyway. */
- irqs &= alpha_irq_mask;
- if (!irqs)
- return 0;
- i = ffz(~irqs);
- if (irqs != (1UL << i))
- i = -i;
- return i;
+ int i, irq_found, nr_irqs;
+
+ /* NOTE(review): (&unused)[-1] reads past the parameter's storage --
+ undefined behaviour; a caller-identification debug aid only. */
+ if (unused != 0x12345678)
+ printk("Bad IRQ probe from %lx\n", (&unused)[-1]);
+
+ nr_irqs = 0;
+ irq_found = 0;
+ spin_lock_irq(&irq_controller_lock);
+ for (i=0; i<NR_IRQS; i++) {
+ unsigned int status = irq_desc[i].status;
+
+ if (!(status & IRQ_AUTODETECT))
+ continue;
+
+ if (!(status & IRQ_WAITING)) {
+ if (!nr_irqs)
+ irq_found = i;
+ nr_irqs++;
+ }
+ irq_desc[i].status = status & ~IRQ_AUTODETECT;
+ irq_desc[i].handler->shutdown(i);
+ }
+ spin_unlock_irq(&irq_controller_lock);
+
+ if (nr_irqs > 1)
+ irq_found = -irq_found;
+ return irq_found;
}
#endif
break;
case 1:
- handle_irq(RTC_IRQ, -1, ®s);
+#ifdef __SMP__
+ cpu_data[smp_processor_id()].smp_local_irq_count++;
+ smp_percpu_timer_interrupt(®s);
+ if (smp_processor_id() == smp_boot_cpuid)
+#endif
+ handle_irq(RTC_IRQ, ®s);
return;
case 2:
alpha_mv.machine_check(vector, la_ptr, ®s);
ranges->mem_end -= bus->resource[1]->start;
}
-int __init
+int
pcibios_enable_device(struct pci_dev *dev)
{
- /* Not needed, since we enable all devices at startup. */
+ /* Nothing to do, since we enable all devices at startup. */
return 0;
}
/* Map a single buffer of the indicate size for PCI DMA in streaming
mode. The 32-bit PCI bus mastering address to use is returned.
Once the device is given the dma address, the device owns this memory
- until either pci_unmap_single or pci_sync_single is performed. */
+ until either pci_unmap_single or pci_dma_sync_single is performed. */
dma_addr_t
pci_map_single(struct pci_dev *pdev, void *cpu_addr, long size)
out++;
}
+ /* Mark the end of the list for pci_unmap_sg. */
+ if (out < end)
+ out->dma_length = 0;
+
if (out - start == 0)
printk(KERN_INFO "pci_map_sg failed: no entries?\n");
DBGA("pci_map_sg: %ld entries\n", out - start);
if (!arena || arena->dma_base + arena->size > max_dma)
arena = hose->sg_isa;
- DBGA("pci_unmap_sg: %d entries\n", nents);
-
fstart = -1;
fend = 0;
for (end = sg + nents; sg < end; ++sg) {
addr = sg->dma_address;
size = sg->dma_length;
+ if (!size)
+ break;
+
if (addr >= __direct_map_base
&& addr < __direct_map_base + __direct_map_size) {
/* Nothing to do. */
long npages, ofs;
dma_addr_t tend;
+ DBGA(" (%ld) sg [%lx,%lx]\n",
+ sg - end + nents, addr, size);
+
npages = calc_npages((addr & ~PAGE_MASK) + size);
ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
iommu_arena_free(arena, ofs, npages);
fstart = addr;
if (fend < tend)
fend = tend;
-
- DBGA(" (%ld) sg [%lx,%lx]\n",
- sg - end + nents, addr, size);
}
}
if (fend)
alpha_mv.mv_pci_tbi(hose, fstart, fend);
+
+ DBGA("pci_unmap_sg: %d entries\n", nents - (end - sg));
}
#include <linux/reboot.h>
#include <linux/console.h>
+#if 0
#ifdef CONFIG_RTC
#include <linux/mc146818rtc.h>
#endif
+#endif
#include <asm/reg.h>
#include <asm/uaccess.h>
#endif
}
+#if 0
#ifdef CONFIG_RTC
/* Reset rtc to defaults. */
rtc_kill_pit();
+#endif
#endif
if (alpha_mv.kill_arch)
--- /dev/null
+/* RTC irq callbacks, 1999 Andrea Arcangeli <andrea@suse.de> */
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/irq.h>
+
+static void enable_rtc(unsigned int irq) { }
+static unsigned int startup_rtc(unsigned int irq) { return 0; }
+#define shutdown_rtc enable_rtc
+#define end_rtc enable_rtc
+#define ack_rtc enable_rtc
+#define disable_rtc enable_rtc
+
+/*
+ * Install a dedicated no-op controller on RTC_IRQ: every hook is empty,
+ * so generic irq code never masks/acks the RTC line in hardware.
+ * (init_ISA_irqs deliberately skips RTC_IRQ for the same reason.)
+ */
+void __init
+init_RTC_irq(void)
+{
+ static struct hw_interrupt_type rtc_irq_type = { "RTC",
+ startup_rtc,
+ shutdown_rtc,
+ enable_rtc,
+ disable_rtc,
+ ack_rtc,
+ end_rtc };
+ irq_desc[RTC_IRQ].status = IRQ_DISABLED;
+ irq_desc[RTC_IRQ].handler = &rtc_irq_type;
+}
#include <linux/ioport.h>
#include <linux/bootmem.h>
+#if 0
#ifdef CONFIG_RTC
#include <linux/timex.h>
#endif
+#endif
#ifdef CONFIG_BLK_DEV_INITRD
#include <linux/blk.h>
#endif
/* Reserve standard resources. */
reserve_std_resources();
+#if 0
/* Initialize the timers. */
/* ??? There is some circumstantial evidence that this needs
to be done now rather than later in time_init, which would
rtc_init_pit();
#else
alpha_mv.init_pit();
+#endif
#endif
/*
static unsigned long smp_secondary_alive;
unsigned long cpu_present_mask; /* Which cpus ids came online. */
+static unsigned long __cpu_present_mask __initdata = 0; /* cpu reported in the hwrpb */
static int max_cpus = -1; /* Command-line limitation. */
int smp_boot_cpuid; /* Which processor we booted from. */
if ((cpu->flags & 0x1cc) == 0x1cc) {
smp_num_probed++;
/* Assume here that "whami" == index */
- cpu_present_mask |= (1L << i);
+ __cpu_present_mask |= (1L << i);
cpu->pal_revision = boot_cpu_palrev;
}
}
} else {
smp_num_probed = 1;
- cpu_present_mask = (1L << smp_boot_cpuid);
+ __cpu_present_mask = (1L << smp_boot_cpuid);
}
+ cpu_present_mask = 1L << smp_boot_cpuid;
printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_mask = %lx\n",
- smp_num_probed, cpu_present_mask);
+ smp_num_probed, __cpu_present_mask);
}
/*
if (i == smp_boot_cpuid)
continue;
- if (((cpu_present_mask >> i) & 1) == 0)
+ if (((__cpu_present_mask >> i) & 1) == 0)
continue;
if (smp_boot_one_cpu(i, cpu_count))
continue;
+ cpu_present_mask |= 1L << i;
cpu_count++;
}
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/init.h>
+#include <linux/irq.h>
#include <asm/ptrace.h>
#include <asm/system.h>
* HACK ALERT! only the boot cpu is used for interrupts.
*/
+static void enable_tsunami_irq(unsigned int irq);
+static void disable_tsunami_irq(unsigned int irq);
+static void enable_clipper_irq(unsigned int irq);
+static void disable_clipper_irq(unsigned int irq);
+
+#define end_tsunami_irq enable_tsunami_irq
+#define shutdown_tsunami_irq disable_tsunami_irq
+#define mask_and_ack_tsunami_irq disable_tsunami_irq
+
+#define end_clipper_irq enable_clipper_irq
+#define shutdown_clipper_irq disable_clipper_irq
+#define mask_and_ack_clipper_irq disable_clipper_irq
+
+
+static unsigned int
+startup_tsunami_irq(unsigned int irq)
+{
+ enable_tsunami_irq(irq);
+ return 0; /* never anything pending */
+}
+
+static unsigned int
+startup_clipper_irq(unsigned int irq)
+{
+ enable_clipper_irq(irq);
+ return 0; /* never anything pending */
+}
+
+static struct hw_interrupt_type tsunami_irq_type = {
+ "TSUNAMI",
+ startup_tsunami_irq,
+ shutdown_tsunami_irq,
+ enable_tsunami_irq,
+ disable_tsunami_irq,
+ mask_and_ack_tsunami_irq,
+ end_tsunami_irq
+};
+
+static struct hw_interrupt_type clipper_irq_type = {
+ "CLIPPER",
+ startup_clipper_irq,
+ shutdown_clipper_irq,
+ enable_clipper_irq,
+ disable_clipper_irq,
+ mask_and_ack_clipper_irq,
+ end_clipper_irq
+};
+
+static unsigned long cached_irq_mask = ~0UL;
+
+/*
+ * Write `value' to cpu N's device-interrupt-mask (DIMn) CSR on the
+ * Tsunami cchip; the mb() plus read-back forces the write to reach the
+ * chip before we continue.
+ * NOTE(review): `dim##cpu##.csr' pastes an identifier with `.', which
+ * modern preprocessors reject -- `dim##cpu.csr' suffices; confirm
+ * against the target compiler.
+ */
+#define TSUNAMI_SET_IRQ_MASK(cpu, value) \
+do { \
+ volatile unsigned long *csr; \
+ \
+ csr = &TSUNAMI_cchip->dim##cpu##.csr; \
+ *csr = (value); \
+ mb(); \
+ *csr; \
+} while(0)
+
+/* Flush `value' to the boot CPU's DIM register only (interrupts are
+ routed to the boot CPU -- see the HACK ALERT above). */
+static inline void
+do_flush_irq_mask(unsigned long value)
+{
+ switch (TSUNAMI_bootcpu)
+ {
+ case 0:
+ TSUNAMI_SET_IRQ_MASK(0, value);
+ break;
+ case 1:
+ TSUNAMI_SET_IRQ_MASK(1, value);
+ break;
+ case 2:
+ TSUNAMI_SET_IRQ_MASK(2, value);
+ break;
+ case 3:
+ TSUNAMI_SET_IRQ_MASK(3, value);
+ break;
+ }
+}
+
+#ifdef CONFIG_SMP
+/* Flush `value' to the DIM registers of every present CPU except the
+ boot CPU, which do_flush_irq_mask() handles separately. */
+static inline void
+do_flush_smp_irq_mask(unsigned long value)
+{
+ extern unsigned long cpu_present_mask;
+ unsigned long other_cpus = cpu_present_mask & ~(1L << TSUNAMI_bootcpu);
+
+ if (other_cpus & 1)
+ TSUNAMI_SET_IRQ_MASK(0, value);
+ if (other_cpus & 2)
+ TSUNAMI_SET_IRQ_MASK(1, value);
+ if (other_cpus & 4)
+ TSUNAMI_SET_IRQ_MASK(2, value);
+ if (other_cpus & 8)
+ TSUNAMI_SET_IRQ_MASK(3, value);
+}
+#endif
+
static void
-dp264_update_irq_hw(unsigned long irq, unsigned long mask, int unmask_p)
+dp264_flush_irq_mask(unsigned long mask)
{
- volatile unsigned long *csr;
+ unsigned long value;
- if (TSUNAMI_bootcpu < 2) {
- if (!TSUNAMI_bootcpu)
- csr = &TSUNAMI_cchip->dim0.csr;
- else
- csr = &TSUNAMI_cchip->dim1.csr;
- } else {
- if (TSUNAMI_bootcpu == 2)
- csr = &TSUNAMI_cchip->dim2.csr;
- else
- csr = &TSUNAMI_cchip->dim3.csr;
- }
+#ifdef CONFIG_SMP
+ value = ~mask;
+ do_flush_smp_irq_mask(value);
+#endif
- *csr = ~mask;
- mb();
- *csr;
+ value = ~mask | (1UL << 55) | 0xffff; /* isa irqs always enabled */
+ do_flush_irq_mask(value);
+}
- if (irq < 16) {
- if (irq >= 8)
- outb(mask >> 8, 0xA1); /* ISA PIC2 */
- else
- outb(mask, 0x21); /* ISA PIC1 */
- }
+static void
+enable_tsunami_irq(unsigned int irq)
+{
+ cached_irq_mask &= ~(1UL << irq);
+ dp264_flush_irq_mask(cached_irq_mask);
}
static void
-clipper_update_irq_hw(unsigned long irq, unsigned long mask, int unmask_p)
+disable_tsunami_irq(unsigned int irq)
{
- if (irq >= 16) {
- volatile unsigned long *csr;
-
- if (TSUNAMI_bootcpu < 2)
- if (!TSUNAMI_bootcpu)
- csr = &TSUNAMI_cchip->dim0.csr;
- else
- csr = &TSUNAMI_cchip->dim1.csr;
- else
- if (TSUNAMI_bootcpu == 2)
- csr = &TSUNAMI_cchip->dim2.csr;
- else
- csr = &TSUNAMI_cchip->dim3.csr;
-
- *csr = (~mask >> 16) | (1UL << 55); /* master ISA enable */
- mb();
- *csr;
- }
- else if (irq >= 8)
- outb(mask >> 8, 0xA1); /* ISA PIC2 */
- else
- outb(mask, 0x21); /* ISA PIC1 */
+ cached_irq_mask |= 1UL << irq;
+ dp264_flush_irq_mask(cached_irq_mask);
+}
+
+static void
+clipper_flush_irq_mask(unsigned long mask)
+{
+ unsigned long value;
+
+#ifdef CONFIG_SMP
+ value = ~mask >> 16;
+ do_flush_smp_irq_mask(value);
+#endif
+
+ value = (~mask >> 16) | (1UL << 55); /* master ISA enable */
+ do_flush_irq_mask(value);
+}
+
+static void
+enable_clipper_irq(unsigned int irq)
+{
+ cached_irq_mask &= ~(1UL << irq);
+ clipper_flush_irq_mask(cached_irq_mask);
+}
+
+static void
+disable_clipper_irq(unsigned int irq)
+{
+ cached_irq_mask |= 1UL << irq;
+ clipper_flush_irq_mask(cached_irq_mask);
}
static void
static void
dp264_srm_device_interrupt(unsigned long vector, struct pt_regs * regs)
{
- int irq, ack;
+ int irq;
- ack = irq = (vector - 0x800) >> 4;
+ irq = (vector - 0x800) >> 4;
/*
* The SRM console reports PCI interrupts with a vector calculated by:
* so we don't count them.
*/
if (irq >= 32)
- ack = irq = irq - 16;
+ irq -= 16;
- handle_irq(irq, ack, regs);
+ handle_irq(irq, regs);
}
static void
clipper_srm_device_interrupt(unsigned long vector, struct pt_regs * regs)
{
- int irq, ack;
+ int irq;
- ack = irq = (vector - 0x800) >> 4;
+ irq = (vector - 0x800) >> 4;
/*
* The SRM console reports PCI interrupts with a vector calculated by:
*
* Eg IRQ 24 is DRIR bit 8, etc, etc
*/
- handle_irq(irq, ack, regs);
+ handle_irq(irq, regs);
+}
+
+static void __init
+init_TSUNAMI_irqs(struct hw_interrupt_type * ops)
+{
+ int i;
+
+ for (i = 0; i < NR_IRQS; i++) {
+ if (i == RTC_IRQ)
+ continue;
+ if (i < 16)
+ continue;
+ irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
+ irq_desc[i].handler = ops;
+ }
}
static void __init
if (alpha_using_srm)
alpha_mv.device_interrupt = dp264_srm_device_interrupt;
- dp264_update_irq_hw(16, alpha_irq_mask, 0);
+ init_ISA_irqs();
+ init_RTC_irq();
+ init_TSUNAMI_irqs(&tsunami_irq_type);
- enable_irq(55); /* Enable ISA interrupt controller. */
- enable_irq(2);
+ dp264_flush_irq_mask(~0UL);
}
static void __init
if (alpha_using_srm)
alpha_mv.device_interrupt = clipper_srm_device_interrupt;
- clipper_update_irq_hw(16, alpha_irq_mask, 0);
+ init_ISA_irqs();
+ init_RTC_irq();
+ init_TSUNAMI_irqs(&clipper_irq_type);
- enable_irq(55); /* Enable ISA interrupt controller. */
- enable_irq(2);
+ clipper_flush_irq_mask(~0UL);
}
min_mem_address: DEFAULT_MEM_BASE,
nr_irqs: 64,
- irq_probe_mask: TSUNAMI_PROBE_MASK,
- update_irq_hw: dp264_update_irq_hw,
- ack_irq: common_ack_irq,
device_interrupt: dp264_device_interrupt,
init_arch: tsunami_init_arch,
min_mem_address: DEFAULT_MEM_BASE,
nr_irqs: 64,
- irq_probe_mask: TSUNAMI_PROBE_MASK,
- update_irq_hw: dp264_update_irq_hw,
- ack_irq: common_ack_irq,
device_interrupt: dp264_device_interrupt,
init_arch: tsunami_init_arch,
min_mem_address: DEFAULT_MEM_BASE,
nr_irqs: 64,
- irq_probe_mask: TSUNAMI_PROBE_MASK,
- update_irq_hw: dp264_update_irq_hw,
- ack_irq: common_ack_irq,
device_interrupt: dp264_device_interrupt,
init_arch: tsunami_init_arch,
min_mem_address: DEFAULT_MEM_BASE,
nr_irqs: 64,
- irq_probe_mask: TSUNAMI_PROBE_MASK,
- update_irq_hw: clipper_update_irq_hw,
- ack_irq: common_ack_irq,
device_interrupt: dp264_device_interrupt,
init_arch: tsunami_init_arch,
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/core_pyxis.h>
#include "proto.h"
-#include <asm/hw_irq.h>
#include "pci_impl.h"
#include "machvec_impl.h"
+/* Note invert on MASK bits. */
+static unsigned long cached_irq_mask;
+
+static inline void
+sx164_change_irq_mask(unsigned long mask)
+{
+ *(vulp)PYXIS_INT_MASK = mask;
+ mb();
+ *(vulp)PYXIS_INT_MASK;
+}
+
+static inline void
+sx164_enable_irq(unsigned int irq)
+{
+ sx164_change_irq_mask(cached_irq_mask |= 1UL << (irq - 16));
+}
+
static void
-sx164_update_irq_hw(unsigned long irq, unsigned long mask, int unmask_p)
+sx164_disable_irq(unsigned int irq)
{
- if (irq >= 16) {
- /* Make CERTAIN none of the bogus ints get enabled */
- *(vulp)PYXIS_INT_MASK =
- ~((long)mask >> 16) & ~0x000000000000003bUL;
- mb();
- /* ... and read it back to make sure it got written. */
- *(vulp)PYXIS_INT_MASK;
- }
- else if (irq >= 8)
- outb(mask >> 8, 0xA1); /* ISA PIC2 */
- else
- outb(mask, 0x21); /* ISA PIC1 */
+ sx164_change_irq_mask(cached_irq_mask &= ~(1UL << (irq - 16)));
+}
+
+static unsigned int
+sx164_startup_irq(unsigned int irq)
+{
+ sx164_enable_irq(irq);
+ return 0;
+}
+
+static inline void
+sx164_srm_enable_irq(unsigned int irq)
+{
+ cserve_ena(irq - 16);
}
static void
-sx164_srm_update_irq_hw(unsigned long irq, unsigned long mask, int unmask_p)
+sx164_srm_disable_irq(unsigned int irq)
{
- if (irq >= 16) {
- if (unmask_p)
- cserve_ena(irq - 16);
- else
- cserve_dis(irq - 16);
- }
- else if (irq >= 8)
- outb(mask >> 8, 0xA1); /* ISA PIC2 */
- else
- outb(mask, 0x21); /* ISA PIC1 */
+ cserve_dis(irq - 16);
}
+static unsigned int
+sx164_srm_startup_irq(unsigned int irq)
+{
+ sx164_srm_enable_irq(irq);
+ return 0;
+}
+
+static struct hw_interrupt_type sx164_irq_type = {
+ typename: "SX164",
+ startup: sx164_startup_irq,
+ shutdown: sx164_disable_irq,
+ enable: sx164_enable_irq,
+ disable: sx164_disable_irq,
+ ack: sx164_disable_irq,
+ end: sx164_enable_irq,
+};
+
+static struct hw_interrupt_type sx164_srm_irq_type = {
+ typename: "SX164-SRM",
+ startup: sx164_srm_startup_irq,
+ shutdown: sx164_srm_disable_irq,
+ enable: sx164_srm_enable_irq,
+ disable: sx164_srm_disable_irq,
+ ack: sx164_srm_disable_irq,
+ end: sx164_srm_enable_irq,
+};
+
static void
sx164_device_interrupt(unsigned long vector, struct pt_regs *regs)
{
- unsigned long pld, tmp;
+ unsigned long pld;
unsigned int i;
/* Read the interrupt summary register of PYXIS */
continue;
} else {
/* if not timer int */
- handle_irq(16 + i, 16 + i, regs);
+ handle_irq(16 + i, regs);
}
- *(vulp)PYXIS_INT_REQ = 1UL << i; mb();
- tmp = *(vulp)PYXIS_INT_REQ;
+
+ *(vulp)PYXIS_INT_REQ = 1UL << i;
+ mb();
+ *(vulp)PYXIS_INT_REQ;
}
}
static void
sx164_init_irq(void)
{
+ struct hw_interrupt_type *ops;
+ long i;
+
outb(0, DMA1_RESET_REG);
outb(0, DMA2_RESET_REG);
outb(DMA_MODE_CASCADE, DMA2_MODE_REG);
outb(0, DMA2_MASK_REG);
+ init_ISA_irqs();
+ init_RTC_irq();
+
if (alpha_using_srm) {
- alpha_mv.update_irq_hw = sx164_srm_update_irq_hw;
alpha_mv.device_interrupt = srm_device_interrupt;
+ ops = &sx164_srm_irq_type;
}
else {
- /* Note invert on MASK bits. */
- *(vulp)PYXIS_INT_MASK = ~((long)alpha_irq_mask >> 16);
- mb();
- *(vulp)PYXIS_INT_MASK;
+ sx164_change_irq_mask(0);
+ ops = &sx164_irq_type;
+ }
+
+ for (i = 16; i < 40; ++i) {
+ /* Make CERTAIN none of the bogus ints get enabled. */
+ if ((0x3b0000 >> i) & 1)
+ continue;
+ irq_desc[i].status = IRQ_DISABLED;
+ irq_desc[i].handler = ops;
}
- enable_irq(16 + 6); /* enable timer */
- enable_irq(16 + 7); /* enable ISA PIC cascade */
- enable_irq(2); /* enable cascade */
+ ops->startup(16 + 6); /* enable timer */
+ ops->startup(16 + 7); /* enable ISA PIC cascade */
}
/*
min_mem_address: DEFAULT_MEM_BASE,
nr_irqs: 40,
- irq_probe_mask: _PROBE_MASK(40),
- update_irq_hw: sx164_update_irq_hw,
- ack_irq: common_ack_irq,
device_interrupt: sx164_device_interrupt,
init_arch: pyxis_init_arch,
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
#include <asm/uaccess.h>
#include <asm/io.h>
__u32 now;
long nticks;
-#ifdef __SMP__
- /* When SMP, do this for *all* CPUs, but only do the rest for
- the boot CPU. */
- smp_percpu_timer_interrupt(regs);
- if (smp_processor_id() != smp_boot_cpuid)
- return;
-#else
+#ifndef __SMP__
/* Not SMP, do kernel PC profiling here. */
if (!user_mode(regs))
alpha_do_profile(regs->pc);
)*60 + sec; /* finally seconds */
}
+#if 0
/*
* Initialize Programmable Interval Timers with standard values. Some
* drivers depend on them being initialized (e.g., joystick driver).
sti();
}
#endif
+#endif
void
common_init_pit (void)
void
time_init(void)
{
- void (*irq_handler)(int, void *, struct pt_regs *);
unsigned int year, mon, day, hour, min, sec, cc1, cc2;
unsigned long cycle_freq, one_percent;
long diff;
+ static struct irqaction timer_irqaction = { timer_interrupt,
+ SA_INTERRUPT, 0, "timer",
+ NULL, NULL};
+
+ /* Startup the timer source. */
+ alpha_mv.init_pit();
/*
* The Linux interpretation of the CMOS clock register contents:
state.partial_tick = 0L;
/* setup timer */
- irq_handler = timer_interrupt;
- if (request_irq(TIMER_IRQ, irq_handler, 0, "timer", NULL))
- panic("Could not allocate timer IRQ!");
+ setup_irq(TIMER_IRQ, &timer_irqaction);
}
/*
ldq $0,0($16) # e0 : load src & dst addr words
zapnot $20,15,$20 # .. e1 : zero extend incoming csum
- extqh $18,7,$4 # e0 : byte swap len & proto while we wait
+ extqh $18,1,$4 # e0 : byte swap len & proto while we wait
ldq $1,8($16) # .. e1 :
extbl $18,1,$5 # e0 :
# CONFIG_NTFS_RW is not set
CONFIG_HPFS_FS=m
CONFIG_PROC_FS=y
+# CONFIG_DEVFS_FS is not set
CONFIG_DEVPTS_FS=y
# CONFIG_QNX4FS_FS is not set
# CONFIG_QNX4FS_RW is not set
CONFIG_SYSV_FS=m
# CONFIG_SYSV_FS_WRITE is not set
# CONFIG_UDF_FS is not set
-# CONFIG_UDF_RW is not set
CONFIG_UFS_FS=m
# CONFIG_UFS_FS_WRITE is not set
-/* $Id: ioport.c,v 1.32 2000/02/12 03:04:48 zaitcev Exp $
+/* $Id: ioport.c,v 1.33 2000/02/16 07:31:29 davem Exp $
* ioport.c: Simple io mapping allocator.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
-/* $Id: sys_sparc.c,v 1.60 2000/02/08 20:24:18 davem Exp $
+/* $Id: sys_sparc.c,v 1.61 2000/02/16 07:31:29 davem Exp $
* linux/arch/sparc/kernel/sys_sparc.c
*
* This file contains various random system calls that
-/* $Id: sys_sunos.c,v 1.112 2000/01/29 07:40:11 davem Exp $
+/* $Id: sys_sunos.c,v 1.113 2000/02/16 07:31:29 davem Exp $
* sys_sunos.c: SunOS specific syscall compatibility support.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
-/* $Id: systbls.S,v 1.93 2000/01/29 16:41:18 jj Exp $
+/* $Id: systbls.S,v 1.94 2000/02/16 07:31:30 davem Exp $
* systbls.S: System call entry point tables for OS compatibility.
* The native Linux system call table lives here also.
*
-/* $Id: fault.c,v 1.113 2000/01/21 11:38:47 jj Exp $
+/* $Id: fault.c,v 1.114 2000/02/14 04:52:36 jj Exp $
* fault.c: Page fault handlers for the Sparc.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
{
extern void sun4c_update_mmu_cache(struct vm_area_struct *,
unsigned long,pte_t);
- extern pgd_t *sun4c_pgd_offset(struct mm_struct *,unsigned long);
extern pte_t *sun4c_pte_offset(pmd_t *,unsigned long);
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
}
}
- pgdp = sun4c_pgd_offset(mm, address);
+ pgdp = pgd_offset(mm, address);
ptep = sun4c_pte_offset((pmd_t *) pgdp, address);
if (pgd_val(*pgdp)) {
-/* $Id: nosun4c.c,v 1.2 1999/08/31 06:54:36 davem Exp $
+/* $Id: nosun4c.c,v 1.3 2000/02/14 04:52:36 jj Exp $
* nosun4c.c: This file is a bunch of dummies for SMP compiles,
* so that it does not need sun4c and avoid ifdefs.
*
{
}
-pgd_t *sun4c_pgd_offset(struct mm_struct * mm, unsigned long address)
-{
- return NULL;
-}
-
pte_t *sun4c_pte_offset(pmd_t * dir, unsigned long address)
{
return NULL;
-/* $Id: srmmu.c,v 1.207 2000/02/14 02:51:53 davem Exp $
+/* $Id: srmmu.c,v 1.208 2000/02/14 04:52:33 jj Exp $
* srmmu.c: SRMMU specific routines for memory management.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
}
/* to find an entry in a top-level page table... */
-static inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address)
+extern inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address)
{
return mm->pgd + (address >> SRMMU_PGDIR_SHIFT);
}
BTFIXUPSET_CALL(pgd_set, srmmu_pgd_set, BTFIXUPCALL_NORM);
BTFIXUPSET_INT(pte_modify_mask, SRMMU_CHG_MASK);
- BTFIXUPSET_CALL(pgd_offset, srmmu_pgd_offset, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pmd_offset, srmmu_pmd_offset, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pte_offset, srmmu_pte_offset, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pte_free_kernel, srmmu_pte_free, BTFIXUPCALL_NORM);
-/* $Id: sun4c.c,v 1.189 2000/02/14 02:51:55 davem Exp $
+/* $Id: sun4c.c,v 1.190 2000/02/14 04:52:34 jj Exp $
* sun4c.c: Doing in software what should be done in hardware.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
}
/* to find an entry in a page-table-directory */
-pgd_t *sun4c_pgd_offset(struct mm_struct * mm, unsigned long address)
+extern inline pgd_t *sun4c_pgd_offset(struct mm_struct * mm, unsigned long address)
{
return mm->pgd + (address >> SUN4C_PGDIR_SHIFT);
}
ret = (unsigned long *)__get_free_page(GFP_KERNEL);
memset (ret, 0, (KERNBASE / SUN4C_PGDIR_SIZE) * sizeof(pgd_t));
- init = pgd_offset(&init_mm, 0);
+ init = sun4c_pgd_offset(&init_mm, 0);
memcpy (((pgd_t *)ret) + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
}
BTFIXUPSET_CALL(mk_pte_io, sun4c_mk_pte_io, BTFIXUPCALL_NORM);
BTFIXUPSET_INT(pte_modify_mask, _SUN4C_PAGE_CHG_MASK);
- BTFIXUPSET_CALL(pgd_offset, sun4c_pgd_offset, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pmd_offset, sun4c_pmd_offset, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pte_offset, sun4c_pte_offset, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pte_free_kernel, sun4c_pte_free_kernel, BTFIXUPCALL_NORM);
-/* $Id: ioctl32.c,v 1.79 2000/02/08 20:24:25 davem Exp $
+/* $Id: ioctl32.c,v 1.80 2000/02/17 06:45:09 jj Exp $
* ioctl32.c: Conversion between 32bit and 64bit native ioctls.
*
- * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 1997-2000 Jakub Jelinek (jakub@redhat.com)
* Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
*
* These routines maintain argument size conversion between 32bit and 64bit
#include <linux/netdevice.h>
#include <linux/raw.h>
#include <linux/smb_fs.h>
+#include <linux/blkpg.h>
#include <scsi/scsi.h>
/* Ugly hack. */
__kernel_caddr_t32 ifcbuf;
};
-static int dev_ifname32(unsigned int fd, unsigned long arg)
+static int dev_ifname32(unsigned int fd, unsigned int cmd, unsigned long arg)
{
struct net_device *dev;
struct ifreq32 ifr32;
return (err ? -EFAULT : 0);
}
-static inline int dev_ifconf(unsigned int fd, unsigned long arg)
+static inline int dev_ifconf(unsigned int fd, unsigned int cmd, unsigned long arg)
{
struct ifconf32 ifc32;
struct ifconf ifc;
u32 start;
};
-static inline int hdio_getgeo(unsigned int fd, unsigned long arg)
+static inline int hdio_getgeo(unsigned int fd, unsigned int cmd, unsigned long arg)
{
mm_segment_t old_fs = get_fs();
struct hd_geometry geo;
unsigned int badness;
};
-#define FDSETPRM32 _IOW(2, 0x42, struct floppy_struct32)
-#define FDDEFPRM32 _IOW(2, 0x43, struct floppy_struct32)
+#define FDSETPRM32 _IOW(2, 0x42, struct floppy_struct32)
+#define FDDEFPRM32 _IOW(2, 0x43, struct floppy_struct32)
#define FDGETPRM32 _IOR(2, 0x04, struct floppy_struct32)
#define FDSETDRVPRM32 _IOW(2, 0x90, struct floppy_drive_params32)
#define FDGETDRVPRM32 _IOR(2, 0x11, struct floppy_drive_params32)
u32 chardata; /* font data in expanded form */
};
-static int do_fontx_ioctl(struct file *file, int cmd, struct consolefontdesc32 *user_cfd)
+static int do_fontx_ioctl(unsigned int fd, int cmd, struct consolefontdesc32 *user_cfd, struct file *file)
{
struct consolefontdesc cfdarg;
struct console_font_op op;
perm = vt_check(file);
if (perm < 0) return perm;
- if (copy_from_user(&cfdarg, user_cfd, sizeof(struct consolefontdesc32)))
+ if (copy_from_user(&cfdarg, user_cfd, sizeof(struct consolefontdesc32)))
return -EFAULT;
cfdarg.chardata = (unsigned char *)A(((struct consolefontdesc32 *)&cfdarg)->chardata);
u32 data; /* font data with height fixed to 32 */
};
-static int do_kdfontop_ioctl(struct file *file, struct console_font_op32 *fontop)
+static int do_kdfontop_ioctl(unsigned int fd, unsigned int cmd, struct console_font_op32 *fontop, struct file *file)
{
struct console_font_op op;
int perm = vt_check(file), i;
u32 entries;
};
-static int do_unimap_ioctl(struct file *file, int cmd, struct unimapdesc32 *user_ud)
+static int do_unimap_ioctl(unsigned int fd, unsigned int cmd, struct unimapdesc32 *user_ud, struct file *file)
{
struct unimapdesc32 tmp;
int perm = vt_check(file);
return -EINVAL;
}
-asmlinkage int sys32_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
+static int ret_einval(unsigned int fd, unsigned int cmd, unsigned long arg)
{
- struct file * filp;
- int error = -EBADF;
+ return -EINVAL;
+}
- lock_kernel();
- filp = fget(fd);
- if(!filp)
- goto out2;
+static int broken_blkgetsize(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ /* The mkswap binary hard codes it to Intel value :-((( */
+ return w_long(fd, BLKGETSIZE, arg);
+}
- if (!filp->f_op || !filp->f_op->ioctl) {
- error = sys_ioctl (fd, cmd, arg);
- goto out;
- }
- switch (cmd) {
- case SIOCGIFNAME:
- error = dev_ifname32(fd, arg);
- goto out;
+struct blkpg_ioctl_arg32 {
+ int op;
+ int flags;
+ int datalen;
+ u32 data;
+};
+
+static int blkpg_ioctl_trans(unsigned int fd, unsigned int cmd, struct blkpg_ioctl_arg32 *arg)
+{
+ struct blkpg_ioctl_arg a;
+ struct blkpg_partition p;
+ int err;
+ mm_segment_t old_fs = get_fs();
+
+ err = get_user(a.op, &arg->op);
+ err |= __get_user(a.flags, &arg->flags);
+ err |= __get_user(a.datalen, &arg->datalen);
+ err |= __get_user((long)a.data, &arg->data);
+ if (err) return err;
+ switch (a.op) {
+ case BLKPG_ADD_PARTITION:
+ case BLKPG_DEL_PARTITION:
+ if (a.datalen < sizeof(struct blkpg_partition))
+ return -EINVAL;
+ if (copy_from_user(&p, a.data, sizeof(struct blkpg_partition)))
+ return -EFAULT;
+ a.data = &p;
+ set_fs (KERNEL_DS);
+ err = sys_ioctl(fd, cmd, (unsigned long)&a);
+ set_fs (old_fs);
+ default:
+ return -EINVAL;
+ }
+ return err;
+}
- case SIOCGIFCONF:
- error = dev_ifconf(fd, arg);
- goto out;
-
- case SIOCGIFFLAGS:
- case SIOCSIFFLAGS:
- case SIOCGIFMETRIC:
- case SIOCSIFMETRIC:
- case SIOCGIFMTU:
- case SIOCSIFMTU:
- case SIOCGIFMEM:
- case SIOCSIFMEM:
- case SIOCGIFHWADDR:
- case SIOCSIFHWADDR:
- case SIOCADDMULTI:
- case SIOCDELMULTI:
- case SIOCGIFINDEX:
- case SIOCGIFMAP:
- case SIOCSIFMAP:
- case SIOCGIFADDR:
- case SIOCSIFADDR:
- case SIOCGIFBRDADDR:
- case SIOCSIFBRDADDR:
- case SIOCGIFDSTADDR:
- case SIOCSIFDSTADDR:
- case SIOCGIFNETMASK:
- case SIOCSIFNETMASK:
- case SIOCSIFPFLAGS:
- case SIOCGIFPFLAGS:
- case SIOCGPPPSTATS:
- case SIOCGPPPCSTATS:
- case SIOCGPPPVER:
- case SIOCGIFTXQLEN:
- case SIOCSIFTXQLEN:
- case SIOCETHTOOL:
- error = dev_ifsioc(fd, cmd, arg);
- goto out;
-
- case SIOCADDRT:
- case SIOCDELRT:
- error = routing_ioctl(fd, cmd, arg);
- goto out;
+static int ioc_settimeout(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ return rw_long(fd, AUTOFS_IOC_SETTIMEOUT, arg);
+}
- case SIOCRTMSG: /* Note SIOCRTMSG is no longer, so this is safe and
- * the user would have seen just an -EINVAL anyways.
- */
- error = -EINVAL;
- goto out;
+struct ioctl_trans {
+ unsigned int cmd;
+ unsigned int handler;
+ unsigned int next;
+};
- case SIOCGSTAMP:
- /* Sorry, timeval in the kernel is different now. */
- error = do_siocgstamp(fd, cmd, arg);
- goto out;
+#define COMPATIBLE_IOCTL(cmd) asm volatile(".word %0, sys_ioctl, 0" : : "i" (cmd));
+#define HANDLE_IOCTL(cmd,handler) asm volatile(".word %0, %1, 0" : : "i" (cmd), "i" (handler));
+#define IOCTL_TABLE_START void ioctl32_foo(void) { asm volatile(".data\nioctl_translations:");
+#define IOCTL_TABLE_END asm volatile("\nioctl_translations_end:\n\t.previous"); }
- case HDIO_GETGEO:
- error = hdio_getgeo(fd, arg);
- goto out;
-
- case BLKRAGET:
- case BLKGETSIZE:
- case 0x1260:
- /* The mkswap binary hard codes it to Intel value :-((( */
- if(cmd == 0x1260)
- cmd = BLKGETSIZE;
- error = w_long(fd, cmd, arg);
- goto out;
-
- case FBIOPUTCMAP32:
- case FBIOGETCMAP32:
- error = fbiogetputcmap(fd, cmd, arg);
- goto out;
-
- case FBIOSCURSOR32:
- error = fbiogscursor(fd, cmd, arg);
- goto out;
+IOCTL_TABLE_START
+/* List here explicitly which ioctl's are known to have
+ * compatible types passed or none at all...
+ */
+/* Big T */
+COMPATIBLE_IOCTL(TCGETA)
+COMPATIBLE_IOCTL(TCSETA)
+COMPATIBLE_IOCTL(TCSETAW)
+COMPATIBLE_IOCTL(TCSETAF)
+COMPATIBLE_IOCTL(TCSBRK)
+COMPATIBLE_IOCTL(TCXONC)
+COMPATIBLE_IOCTL(TCFLSH)
+COMPATIBLE_IOCTL(TCGETS)
+COMPATIBLE_IOCTL(TCSETS)
+COMPATIBLE_IOCTL(TCSETSW)
+COMPATIBLE_IOCTL(TCSETSF)
+COMPATIBLE_IOCTL(TIOCLINUX)
+/* Little t */
+COMPATIBLE_IOCTL(TIOCGETD)
+COMPATIBLE_IOCTL(TIOCSETD)
+COMPATIBLE_IOCTL(TIOCEXCL)
+COMPATIBLE_IOCTL(TIOCNXCL)
+COMPATIBLE_IOCTL(TIOCCONS)
+COMPATIBLE_IOCTL(TIOCGSOFTCAR)
+COMPATIBLE_IOCTL(TIOCSSOFTCAR)
+COMPATIBLE_IOCTL(TIOCSWINSZ)
+COMPATIBLE_IOCTL(TIOCGWINSZ)
+COMPATIBLE_IOCTL(TIOCMGET)
+COMPATIBLE_IOCTL(TIOCMBIC)
+COMPATIBLE_IOCTL(TIOCMBIS)
+COMPATIBLE_IOCTL(TIOCMSET)
+COMPATIBLE_IOCTL(TIOCPKT)
+COMPATIBLE_IOCTL(TIOCNOTTY)
+COMPATIBLE_IOCTL(TIOCSTI)
+COMPATIBLE_IOCTL(TIOCOUTQ)
+COMPATIBLE_IOCTL(TIOCSPGRP)
+COMPATIBLE_IOCTL(TIOCGPGRP)
+COMPATIBLE_IOCTL(TIOCSCTTY)
+COMPATIBLE_IOCTL(TIOCGPTN)
+COMPATIBLE_IOCTL(TIOCSPTLCK)
+COMPATIBLE_IOCTL(TIOCGSERIAL)
+COMPATIBLE_IOCTL(TIOCSSERIAL)
+COMPATIBLE_IOCTL(TIOCSERGETLSR)
+/* Big F */
+COMPATIBLE_IOCTL(FBIOGTYPE)
+COMPATIBLE_IOCTL(FBIOSATTR)
+COMPATIBLE_IOCTL(FBIOGATTR)
+COMPATIBLE_IOCTL(FBIOSVIDEO)
+COMPATIBLE_IOCTL(FBIOGVIDEO)
+COMPATIBLE_IOCTL(FBIOGCURSOR32) /* This is not implemented yet. Later it should be converted... */
+COMPATIBLE_IOCTL(FBIOSCURPOS)
+COMPATIBLE_IOCTL(FBIOGCURPOS)
+COMPATIBLE_IOCTL(FBIOGCURMAX)
+COMPATIBLE_IOCTL(FBIOGET_VSCREENINFO)
+COMPATIBLE_IOCTL(FBIOPUT_VSCREENINFO)
+COMPATIBLE_IOCTL(FBIOPAN_DISPLAY)
+COMPATIBLE_IOCTL(FBIOGET_FCURSORINFO)
+COMPATIBLE_IOCTL(FBIOGET_VCURSORINFO)
+COMPATIBLE_IOCTL(FBIOPUT_VCURSORINFO)
+COMPATIBLE_IOCTL(FBIOGET_CURSORSTATE)
+COMPATIBLE_IOCTL(FBIOPUT_CURSORSTATE)
+COMPATIBLE_IOCTL(FBIOGET_CON2FBMAP)
+COMPATIBLE_IOCTL(FBIOPUT_CON2FBMAP)
+/* Little f */
+COMPATIBLE_IOCTL(FIOCLEX)
+COMPATIBLE_IOCTL(FIONCLEX)
+COMPATIBLE_IOCTL(FIOASYNC)
+COMPATIBLE_IOCTL(FIONBIO)
+COMPATIBLE_IOCTL(FIONREAD) /* This is also TIOCINQ */
+/* 0x00 */
+COMPATIBLE_IOCTL(FIBMAP)
+COMPATIBLE_IOCTL(FIGETBSZ)
+/* 0x03 -- HD/IDE ioctl's used by hdparm and friends.
+ * Some need translations, these do not.
+ */
+COMPATIBLE_IOCTL(HDIO_GET_IDENTITY)
+COMPATIBLE_IOCTL(HDIO_SET_DMA)
+COMPATIBLE_IOCTL(HDIO_SET_KEEPSETTINGS)
+COMPATIBLE_IOCTL(HDIO_SET_UNMASKINTR)
+COMPATIBLE_IOCTL(HDIO_SET_NOWERR)
+COMPATIBLE_IOCTL(HDIO_SET_32BIT)
+COMPATIBLE_IOCTL(HDIO_SET_MULTCOUNT)
+COMPATIBLE_IOCTL(HDIO_DRIVE_CMD)
+COMPATIBLE_IOCTL(HDIO_SET_PIO_MODE)
+COMPATIBLE_IOCTL(HDIO_SCAN_HWIF)
+COMPATIBLE_IOCTL(HDIO_SET_NICE)
+/* 0x02 -- Floppy ioctls */
+COMPATIBLE_IOCTL(FDMSGON)
+COMPATIBLE_IOCTL(FDMSGOFF)
+COMPATIBLE_IOCTL(FDSETEMSGTRESH)
+COMPATIBLE_IOCTL(FDFLUSH)
+COMPATIBLE_IOCTL(FDWERRORCLR)
+COMPATIBLE_IOCTL(FDSETMAXERRS)
+COMPATIBLE_IOCTL(FDGETMAXERRS)
+COMPATIBLE_IOCTL(FDGETDRVTYP)
+COMPATIBLE_IOCTL(FDEJECT)
+COMPATIBLE_IOCTL(FDCLRPRM)
+COMPATIBLE_IOCTL(FDFMTBEG)
+COMPATIBLE_IOCTL(FDFMTEND)
+COMPATIBLE_IOCTL(FDRESET)
+COMPATIBLE_IOCTL(FDTWADDLE)
+COMPATIBLE_IOCTL(FDFMTTRK)
+COMPATIBLE_IOCTL(FDRAWCMD)
+/* 0x12 */
+COMPATIBLE_IOCTL(BLKROSET)
+COMPATIBLE_IOCTL(BLKROGET)
+COMPATIBLE_IOCTL(BLKRRPART)
+COMPATIBLE_IOCTL(BLKFLSBUF)
+COMPATIBLE_IOCTL(BLKRASET)
+COMPATIBLE_IOCTL(BLKFRASET)
+COMPATIBLE_IOCTL(BLKSECTSET)
+COMPATIBLE_IOCTL(BLKSSZGET)
- case FBIOGET_FSCREENINFO:
- case FBIOGETCMAP:
- case FBIOPUTCMAP:
- error = fb_ioctl_trans(fd, cmd, arg);
- goto out;
- case HDIO_GET_KEEPSETTINGS:
- case HDIO_GET_UNMASKINTR:
- case HDIO_GET_DMA:
- case HDIO_GET_32BIT:
- case HDIO_GET_MULTCOUNT:
- case HDIO_GET_NOWERR:
- case HDIO_GET_NICE:
- error = hdio_ioctl_trans(fd, cmd, arg);
- goto out;
+#if 0 /* New RAID code is being merged, fix up to handle
+ * new RAID ioctls when fully merged in 2.3.x -DaveM
+ */
+/* 0x09 */
+COMPATIBLE_IOCTL(REGISTER_DEV)
+COMPATIBLE_IOCTL(REGISTER_DEV_NEW)
+COMPATIBLE_IOCTL(START_MD)
+COMPATIBLE_IOCTL(STOP_MD)
+#endif
+
+/* Big K */
+COMPATIBLE_IOCTL(PIO_FONT)
+COMPATIBLE_IOCTL(GIO_FONT)
+COMPATIBLE_IOCTL(KDSIGACCEPT)
+COMPATIBLE_IOCTL(KDGETKEYCODE)
+COMPATIBLE_IOCTL(KDSETKEYCODE)
+COMPATIBLE_IOCTL(KIOCSOUND)
+COMPATIBLE_IOCTL(KDMKTONE)
+COMPATIBLE_IOCTL(KDGKBTYPE)
+COMPATIBLE_IOCTL(KDSETMODE)
+COMPATIBLE_IOCTL(KDGETMODE)
+COMPATIBLE_IOCTL(KDSKBMODE)
+COMPATIBLE_IOCTL(KDGKBMODE)
+COMPATIBLE_IOCTL(KDSKBMETA)
+COMPATIBLE_IOCTL(KDGKBMETA)
+COMPATIBLE_IOCTL(KDGKBENT)
+COMPATIBLE_IOCTL(KDSKBENT)
+COMPATIBLE_IOCTL(KDGKBSENT)
+COMPATIBLE_IOCTL(KDSKBSENT)
+COMPATIBLE_IOCTL(KDGKBDIACR)
+COMPATIBLE_IOCTL(KDSKBDIACR)
+COMPATIBLE_IOCTL(KDGKBLED)
+COMPATIBLE_IOCTL(KDSKBLED)
+COMPATIBLE_IOCTL(KDGETLED)
+COMPATIBLE_IOCTL(KDSETLED)
+COMPATIBLE_IOCTL(GIO_SCRNMAP)
+COMPATIBLE_IOCTL(PIO_SCRNMAP)
+COMPATIBLE_IOCTL(GIO_UNISCRNMAP)
+COMPATIBLE_IOCTL(PIO_UNISCRNMAP)
+COMPATIBLE_IOCTL(PIO_FONTRESET)
+COMPATIBLE_IOCTL(PIO_UNIMAPCLR)
+/* Little k */
+COMPATIBLE_IOCTL(KIOCTYPE)
+COMPATIBLE_IOCTL(KIOCLAYOUT)
+COMPATIBLE_IOCTL(KIOCGTRANS)
+COMPATIBLE_IOCTL(KIOCTRANS)
+COMPATIBLE_IOCTL(KIOCCMD)
+COMPATIBLE_IOCTL(KIOCSDIRECT)
+COMPATIBLE_IOCTL(KIOCSLED)
+COMPATIBLE_IOCTL(KIOCGLED)
+COMPATIBLE_IOCTL(KIOCSRATE)
+COMPATIBLE_IOCTL(KIOCGRATE)
+/* Big S */
+COMPATIBLE_IOCTL(SCSI_IOCTL_GET_IDLUN)
+COMPATIBLE_IOCTL(SCSI_IOCTL_DOORLOCK)
+COMPATIBLE_IOCTL(SCSI_IOCTL_DOORUNLOCK)
+COMPATIBLE_IOCTL(SCSI_IOCTL_TEST_UNIT_READY)
+COMPATIBLE_IOCTL(SCSI_IOCTL_TAGGED_ENABLE)
+COMPATIBLE_IOCTL(SCSI_IOCTL_TAGGED_DISABLE)
+COMPATIBLE_IOCTL(SCSI_IOCTL_GET_BUS_NUMBER)
+COMPATIBLE_IOCTL(SCSI_IOCTL_SEND_COMMAND)
+/* Big V */
+COMPATIBLE_IOCTL(VT_SETMODE)
+COMPATIBLE_IOCTL(VT_GETMODE)
+COMPATIBLE_IOCTL(VT_GETSTATE)
+COMPATIBLE_IOCTL(VT_OPENQRY)
+COMPATIBLE_IOCTL(VT_ACTIVATE)
+COMPATIBLE_IOCTL(VT_WAITACTIVE)
+COMPATIBLE_IOCTL(VT_RELDISP)
+COMPATIBLE_IOCTL(VT_DISALLOCATE)
+COMPATIBLE_IOCTL(VT_RESIZE)
+COMPATIBLE_IOCTL(VT_RESIZEX)
+COMPATIBLE_IOCTL(VT_LOCKSWITCH)
+COMPATIBLE_IOCTL(VT_UNLOCKSWITCH)
+/* Little v */
+COMPATIBLE_IOCTL(VUIDSFORMAT)
+COMPATIBLE_IOCTL(VUIDGFORMAT)
+/* Little v, the video4linux ioctls */
+COMPATIBLE_IOCTL(VIDIOCGCAP)
+COMPATIBLE_IOCTL(VIDIOCGCHAN)
+COMPATIBLE_IOCTL(VIDIOCSCHAN)
+COMPATIBLE_IOCTL(VIDIOCGPICT)
+COMPATIBLE_IOCTL(VIDIOCSPICT)
+COMPATIBLE_IOCTL(VIDIOCCAPTURE)
+COMPATIBLE_IOCTL(VIDIOCKEY)
+COMPATIBLE_IOCTL(VIDIOCGAUDIO)
+COMPATIBLE_IOCTL(VIDIOCSAUDIO)
+COMPATIBLE_IOCTL(VIDIOCSYNC)
+COMPATIBLE_IOCTL(VIDIOCMCAPTURE)
+COMPATIBLE_IOCTL(VIDIOCGMBUF)
+COMPATIBLE_IOCTL(VIDIOCGUNIT)
+COMPATIBLE_IOCTL(VIDIOCGCAPTURE)
+COMPATIBLE_IOCTL(VIDIOCSCAPTURE)
+/* BTTV specific... */
+COMPATIBLE_IOCTL(_IOW('v', BASE_VIDIOCPRIVATE+0, char [256]))
+COMPATIBLE_IOCTL(_IOR('v', BASE_VIDIOCPRIVATE+1, char [256]))
+COMPATIBLE_IOCTL(_IOR('v' , BASE_VIDIOCPRIVATE+2, unsigned int))
+COMPATIBLE_IOCTL(_IOW('v' , BASE_VIDIOCPRIVATE+3, char [16])) /* struct bttv_pll_info */
+COMPATIBLE_IOCTL(_IOR('v' , BASE_VIDIOCPRIVATE+4, int))
+COMPATIBLE_IOCTL(_IOR('v' , BASE_VIDIOCPRIVATE+5, int))
+COMPATIBLE_IOCTL(_IOR('v' , BASE_VIDIOCPRIVATE+6, int))
+COMPATIBLE_IOCTL(_IOR('v' , BASE_VIDIOCPRIVATE+7, int))
+/* Little p (/dev/rtc, /dev/envctrl, etc.) */
+COMPATIBLE_IOCTL(RTCGET)
+COMPATIBLE_IOCTL(RTCSET)
+COMPATIBLE_IOCTL(I2CIOCSADR)
+COMPATIBLE_IOCTL(I2CIOCGADR)
+/* Little m */
+COMPATIBLE_IOCTL(MTIOCTOP)
+/* OPENPROMIO, SunOS/Solaris only, the NetBSD ones have
+ * embedded pointers in the arg which we'd need to clean up...
+ */
+COMPATIBLE_IOCTL(OPROMGETOPT)
+COMPATIBLE_IOCTL(OPROMSETOPT)
+COMPATIBLE_IOCTL(OPROMNXTOPT)
+COMPATIBLE_IOCTL(OPROMSETOPT2)
+COMPATIBLE_IOCTL(OPROMNEXT)
+COMPATIBLE_IOCTL(OPROMCHILD)
+COMPATIBLE_IOCTL(OPROMGETPROP)
+COMPATIBLE_IOCTL(OPROMNXTPROP)
+COMPATIBLE_IOCTL(OPROMU2P)
+COMPATIBLE_IOCTL(OPROMGETCONS)
+COMPATIBLE_IOCTL(OPROMGETFBNAME)
+COMPATIBLE_IOCTL(OPROMGETBOOTARGS)
+COMPATIBLE_IOCTL(OPROMSETCUR)
+COMPATIBLE_IOCTL(OPROMPCI2NODE)
+COMPATIBLE_IOCTL(OPROMPATH2NODE)
+/* Socket level stuff */
+COMPATIBLE_IOCTL(FIOSETOWN)
+COMPATIBLE_IOCTL(SIOCSPGRP)
+COMPATIBLE_IOCTL(FIOGETOWN)
+COMPATIBLE_IOCTL(SIOCGPGRP)
+COMPATIBLE_IOCTL(SIOCATMARK)
+COMPATIBLE_IOCTL(SIOCSIFLINK)
+COMPATIBLE_IOCTL(SIOCSIFENCAP)
+COMPATIBLE_IOCTL(SIOCGIFENCAP)
+COMPATIBLE_IOCTL(SIOCSIFBR)
+COMPATIBLE_IOCTL(SIOCGIFBR)
+COMPATIBLE_IOCTL(SIOCSARP)
+COMPATIBLE_IOCTL(SIOCGARP)
+COMPATIBLE_IOCTL(SIOCDARP)
+#if 0 /* XXX No longer exist in new routing code. XXX */
+COMPATIBLE_IOCTL(OLD_SIOCSARP)
+COMPATIBLE_IOCTL(OLD_SIOCGARP)
+COMPATIBLE_IOCTL(OLD_SIOCDARP)
+#endif
+COMPATIBLE_IOCTL(SIOCSRARP)
+COMPATIBLE_IOCTL(SIOCGRARP)
+COMPATIBLE_IOCTL(SIOCDRARP)
+COMPATIBLE_IOCTL(SIOCADDDLCI)
+COMPATIBLE_IOCTL(SIOCDELDLCI)
+/* SG stuff */
+COMPATIBLE_IOCTL(SG_SET_TIMEOUT)
+COMPATIBLE_IOCTL(SG_GET_TIMEOUT)
+COMPATIBLE_IOCTL(SG_EMULATED_HOST)
+COMPATIBLE_IOCTL(SG_SET_TRANSFORM)
+COMPATIBLE_IOCTL(SG_GET_TRANSFORM)
+/* PPP stuff */
+COMPATIBLE_IOCTL(PPPIOCGFLAGS)
+COMPATIBLE_IOCTL(PPPIOCSFLAGS)
+COMPATIBLE_IOCTL(PPPIOCGASYNCMAP)
+COMPATIBLE_IOCTL(PPPIOCSASYNCMAP)
+COMPATIBLE_IOCTL(PPPIOCGUNIT)
+COMPATIBLE_IOCTL(PPPIOCGRASYNCMAP)
+COMPATIBLE_IOCTL(PPPIOCSRASYNCMAP)
+COMPATIBLE_IOCTL(PPPIOCGMRU)
+COMPATIBLE_IOCTL(PPPIOCSMRU)
+COMPATIBLE_IOCTL(PPPIOCSMAXCID)
+COMPATIBLE_IOCTL(PPPIOCGXASYNCMAP)
+COMPATIBLE_IOCTL(PPPIOCSXASYNCMAP)
+COMPATIBLE_IOCTL(PPPIOCXFERUNIT)
+COMPATIBLE_IOCTL(PPPIOCGNPMODE)
+COMPATIBLE_IOCTL(PPPIOCSNPMODE)
+COMPATIBLE_IOCTL(PPPIOCGDEBUG)
+COMPATIBLE_IOCTL(PPPIOCSDEBUG)
+COMPATIBLE_IOCTL(PPPIOCNEWUNIT)
+COMPATIBLE_IOCTL(PPPIOCATTACH)
+COMPATIBLE_IOCTL(PPPIOCDETACH)
+/* CDROM stuff */
+COMPATIBLE_IOCTL(CDROMPAUSE)
+COMPATIBLE_IOCTL(CDROMRESUME)
+COMPATIBLE_IOCTL(CDROMPLAYMSF)
+COMPATIBLE_IOCTL(CDROMPLAYTRKIND)
+COMPATIBLE_IOCTL(CDROMREADTOCHDR)
+COMPATIBLE_IOCTL(CDROMREADTOCENTRY)
+COMPATIBLE_IOCTL(CDROMSTOP)
+COMPATIBLE_IOCTL(CDROMSTART)
+COMPATIBLE_IOCTL(CDROMEJECT)
+COMPATIBLE_IOCTL(CDROMVOLCTRL)
+COMPATIBLE_IOCTL(CDROMSUBCHNL)
+COMPATIBLE_IOCTL(CDROMEJECT_SW)
+COMPATIBLE_IOCTL(CDROMMULTISESSION)
+COMPATIBLE_IOCTL(CDROM_GET_MCN)
+COMPATIBLE_IOCTL(CDROMRESET)
+COMPATIBLE_IOCTL(CDROMVOLREAD)
+COMPATIBLE_IOCTL(CDROMSEEK)
+COMPATIBLE_IOCTL(CDROMPLAYBLK)
+COMPATIBLE_IOCTL(CDROMCLOSETRAY)
+COMPATIBLE_IOCTL(CDROM_SET_OPTIONS)
+COMPATIBLE_IOCTL(CDROM_CLEAR_OPTIONS)
+COMPATIBLE_IOCTL(CDROM_SELECT_SPEED)
+COMPATIBLE_IOCTL(CDROM_SELECT_DISC)
+COMPATIBLE_IOCTL(CDROM_MEDIA_CHANGED)
+COMPATIBLE_IOCTL(CDROM_DRIVE_STATUS)
+COMPATIBLE_IOCTL(CDROM_DISC_STATUS)
+COMPATIBLE_IOCTL(CDROM_CHANGER_NSLOTS)
+COMPATIBLE_IOCTL(CDROM_LOCKDOOR)
+COMPATIBLE_IOCTL(CDROM_DEBUG)
+COMPATIBLE_IOCTL(CDROM_GET_CAPABILITY)
+/* Big L */
+COMPATIBLE_IOCTL(LOOP_SET_FD)
+COMPATIBLE_IOCTL(LOOP_CLR_FD)
+/* Big A */
+COMPATIBLE_IOCTL(AUDIO_GETINFO)
+COMPATIBLE_IOCTL(AUDIO_SETINFO)
+COMPATIBLE_IOCTL(AUDIO_DRAIN)
+COMPATIBLE_IOCTL(AUDIO_GETDEV)
+COMPATIBLE_IOCTL(AUDIO_GETDEV_SUNOS)
+COMPATIBLE_IOCTL(AUDIO_FLUSH)
+/* Big Q for sound/OSS */
+COMPATIBLE_IOCTL(SNDCTL_SEQ_RESET)
+COMPATIBLE_IOCTL(SNDCTL_SEQ_SYNC)
+COMPATIBLE_IOCTL(SNDCTL_SYNTH_INFO)
+COMPATIBLE_IOCTL(SNDCTL_SEQ_CTRLRATE)
+COMPATIBLE_IOCTL(SNDCTL_SEQ_GETOUTCOUNT)
+COMPATIBLE_IOCTL(SNDCTL_SEQ_GETINCOUNT)
+COMPATIBLE_IOCTL(SNDCTL_SEQ_PERCMODE)
+COMPATIBLE_IOCTL(SNDCTL_FM_LOAD_INSTR)
+COMPATIBLE_IOCTL(SNDCTL_SEQ_TESTMIDI)
+COMPATIBLE_IOCTL(SNDCTL_SEQ_RESETSAMPLES)
+COMPATIBLE_IOCTL(SNDCTL_SEQ_NRSYNTHS)
+COMPATIBLE_IOCTL(SNDCTL_SEQ_NRMIDIS)
+COMPATIBLE_IOCTL(SNDCTL_MIDI_INFO)
+COMPATIBLE_IOCTL(SNDCTL_SEQ_THRESHOLD)
+COMPATIBLE_IOCTL(SNDCTL_SYNTH_MEMAVL)
+COMPATIBLE_IOCTL(SNDCTL_FM_4OP_ENABLE)
+COMPATIBLE_IOCTL(SNDCTL_SEQ_PANIC)
+COMPATIBLE_IOCTL(SNDCTL_SEQ_OUTOFBAND)
+COMPATIBLE_IOCTL(SNDCTL_SEQ_GETTIME)
+COMPATIBLE_IOCTL(SNDCTL_SYNTH_ID)
+COMPATIBLE_IOCTL(SNDCTL_SYNTH_CONTROL)
+COMPATIBLE_IOCTL(SNDCTL_SYNTH_REMOVESAMPLE)
+/* Big T for sound/OSS */
+COMPATIBLE_IOCTL(SNDCTL_TMR_TIMEBASE)
+COMPATIBLE_IOCTL(SNDCTL_TMR_START)
+COMPATIBLE_IOCTL(SNDCTL_TMR_STOP)
+COMPATIBLE_IOCTL(SNDCTL_TMR_CONTINUE)
+COMPATIBLE_IOCTL(SNDCTL_TMR_TEMPO)
+COMPATIBLE_IOCTL(SNDCTL_TMR_SOURCE)
+COMPATIBLE_IOCTL(SNDCTL_TMR_METRONOME)
+COMPATIBLE_IOCTL(SNDCTL_TMR_SELECT)
+/* Little m for sound/OSS */
+COMPATIBLE_IOCTL(SNDCTL_MIDI_PRETIME)
+COMPATIBLE_IOCTL(SNDCTL_MIDI_MPUMODE)
+COMPATIBLE_IOCTL(SNDCTL_MIDI_MPUCMD)
+/* Big P for sound/OSS */
+COMPATIBLE_IOCTL(SNDCTL_DSP_RESET)
+COMPATIBLE_IOCTL(SNDCTL_DSP_SYNC)
+COMPATIBLE_IOCTL(SNDCTL_DSP_SPEED)
+COMPATIBLE_IOCTL(SNDCTL_DSP_STEREO)
+COMPATIBLE_IOCTL(SNDCTL_DSP_GETBLKSIZE)
+COMPATIBLE_IOCTL(SNDCTL_DSP_CHANNELS)
+COMPATIBLE_IOCTL(SOUND_PCM_WRITE_FILTER)
+COMPATIBLE_IOCTL(SNDCTL_DSP_POST)
+COMPATIBLE_IOCTL(SNDCTL_DSP_SUBDIVIDE)
+COMPATIBLE_IOCTL(SNDCTL_DSP_SETFRAGMENT)
+COMPATIBLE_IOCTL(SNDCTL_DSP_GETFMTS)
+COMPATIBLE_IOCTL(SNDCTL_DSP_SETFMT)
+COMPATIBLE_IOCTL(SNDCTL_DSP_GETOSPACE)
+COMPATIBLE_IOCTL(SNDCTL_DSP_GETISPACE)
+COMPATIBLE_IOCTL(SNDCTL_DSP_NONBLOCK)
+COMPATIBLE_IOCTL(SNDCTL_DSP_GETCAPS)
+COMPATIBLE_IOCTL(SNDCTL_DSP_GETTRIGGER)
+COMPATIBLE_IOCTL(SNDCTL_DSP_SETTRIGGER)
+COMPATIBLE_IOCTL(SNDCTL_DSP_GETIPTR)
+COMPATIBLE_IOCTL(SNDCTL_DSP_GETOPTR)
+/* SNDCTL_DSP_MAPINBUF, XXX needs translation */
+/* SNDCTL_DSP_MAPOUTBUF, XXX needs translation */
+COMPATIBLE_IOCTL(SNDCTL_DSP_SETSYNCRO)
+COMPATIBLE_IOCTL(SNDCTL_DSP_SETDUPLEX)
+COMPATIBLE_IOCTL(SNDCTL_DSP_GETODELAY)
+COMPATIBLE_IOCTL(SNDCTL_DSP_PROFILE)
+COMPATIBLE_IOCTL(SOUND_PCM_READ_RATE)
+COMPATIBLE_IOCTL(SOUND_PCM_READ_CHANNELS)
+COMPATIBLE_IOCTL(SOUND_PCM_READ_BITS)
+COMPATIBLE_IOCTL(SOUND_PCM_READ_FILTER)
+/* Big C for sound/OSS */
+COMPATIBLE_IOCTL(SNDCTL_COPR_RESET)
+COMPATIBLE_IOCTL(SNDCTL_COPR_LOAD)
+COMPATIBLE_IOCTL(SNDCTL_COPR_RDATA)
+COMPATIBLE_IOCTL(SNDCTL_COPR_RCODE)
+COMPATIBLE_IOCTL(SNDCTL_COPR_WDATA)
+COMPATIBLE_IOCTL(SNDCTL_COPR_WCODE)
+COMPATIBLE_IOCTL(SNDCTL_COPR_RUN)
+COMPATIBLE_IOCTL(SNDCTL_COPR_HALT)
+COMPATIBLE_IOCTL(SNDCTL_COPR_SENDMSG)
+COMPATIBLE_IOCTL(SNDCTL_COPR_RCVMSG)
+/* Big M for sound/OSS */
+COMPATIBLE_IOCTL(SOUND_MIXER_READ_VOLUME)
+COMPATIBLE_IOCTL(SOUND_MIXER_READ_BASS)
+COMPATIBLE_IOCTL(SOUND_MIXER_READ_TREBLE)
+COMPATIBLE_IOCTL(SOUND_MIXER_READ_SYNTH)
+COMPATIBLE_IOCTL(SOUND_MIXER_READ_PCM)
+COMPATIBLE_IOCTL(SOUND_MIXER_READ_SPEAKER)
+COMPATIBLE_IOCTL(SOUND_MIXER_READ_LINE)
+COMPATIBLE_IOCTL(SOUND_MIXER_READ_MIC)
+COMPATIBLE_IOCTL(SOUND_MIXER_READ_CD)
+COMPATIBLE_IOCTL(SOUND_MIXER_READ_IMIX)
+COMPATIBLE_IOCTL(SOUND_MIXER_READ_ALTPCM)
+COMPATIBLE_IOCTL(SOUND_MIXER_READ_RECLEV)
+COMPATIBLE_IOCTL(SOUND_MIXER_READ_IGAIN)
+COMPATIBLE_IOCTL(SOUND_MIXER_READ_OGAIN)
+COMPATIBLE_IOCTL(SOUND_MIXER_READ_LINE1)
+COMPATIBLE_IOCTL(SOUND_MIXER_READ_LINE2)
+COMPATIBLE_IOCTL(SOUND_MIXER_READ_LINE3)
+COMPATIBLE_IOCTL(SOUND_MIXER_READ_MUTE)
+/* SOUND_MIXER_READ_ENHANCE, same value as READ_MUTE */
+/* SOUND_MIXER_READ_LOUD, same value as READ_MUTE */
+COMPATIBLE_IOCTL(SOUND_MIXER_READ_RECSRC)
+COMPATIBLE_IOCTL(SOUND_MIXER_READ_DEVMASK)
+COMPATIBLE_IOCTL(SOUND_MIXER_READ_RECMASK)
+COMPATIBLE_IOCTL(SOUND_MIXER_READ_STEREODEVS)
+COMPATIBLE_IOCTL(SOUND_MIXER_READ_CAPS)
+COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_VOLUME)
+COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_BASS)
+COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_TREBLE)
+COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_SYNTH)
+COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_PCM)
+COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_SPEAKER)
+COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_LINE)
+COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_MIC)
+COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_CD)
+COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_IMIX)
+COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_ALTPCM)
+COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_RECLEV)
+COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_IGAIN)
+COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_OGAIN)
+COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_LINE1)
+COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_LINE2)
+COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_LINE3)
+COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_MUTE)
+/* SOUND_MIXER_WRITE_ENHANCE, same value as WRITE_MUTE */
+/* SOUND_MIXER_WRITE_LOUD, same value as WRITE_MUTE */
+COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_RECSRC)
+COMPATIBLE_IOCTL(SOUND_MIXER_INFO)
+COMPATIBLE_IOCTL(SOUND_OLD_MIXER_INFO)
+COMPATIBLE_IOCTL(SOUND_MIXER_ACCESS)
+COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE1)
+COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE2)
+COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE3)
+COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE4)
+COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE5)
+COMPATIBLE_IOCTL(SOUND_MIXER_GETLEVELS)
+COMPATIBLE_IOCTL(SOUND_MIXER_SETLEVELS)
+COMPATIBLE_IOCTL(OSS_GETVERSION)
+/* AUTOFS */
+COMPATIBLE_IOCTL(AUTOFS_IOC_READY)
+COMPATIBLE_IOCTL(AUTOFS_IOC_FAIL)
+COMPATIBLE_IOCTL(AUTOFS_IOC_CATATONIC)
+COMPATIBLE_IOCTL(AUTOFS_IOC_PROTOVER)
+COMPATIBLE_IOCTL(AUTOFS_IOC_EXPIRE)
+/* Raw devices */
+COMPATIBLE_IOCTL(RAW_SETBIND)
+COMPATIBLE_IOCTL(RAW_GETBIND)
+/* SMB ioctls which do not need any translations */
+COMPATIBLE_IOCTL(SMB_IOC_NEWCONN)
+/* Little a */
+COMPATIBLE_IOCTL(ATMSIGD_CTRL)
+COMPATIBLE_IOCTL(ATMARPD_CTRL)
+COMPATIBLE_IOCTL(ATMLEC_CTRL)
+COMPATIBLE_IOCTL(ATMLEC_MCAST)
+COMPATIBLE_IOCTL(ATMLEC_DATA)
+COMPATIBLE_IOCTL(ATM_SETSC)
+COMPATIBLE_IOCTL(SIOCSIFATMTCP)
+COMPATIBLE_IOCTL(SIOCMKCLIP)
+COMPATIBLE_IOCTL(ATMARP_MKIP)
+COMPATIBLE_IOCTL(ATMARP_SETENTRY)
+COMPATIBLE_IOCTL(ATMARP_ENCAP)
+COMPATIBLE_IOCTL(ATMTCP_CREATE)
+COMPATIBLE_IOCTL(ATMTCP_REMOVE)
+COMPATIBLE_IOCTL(ATMMPC_CTRL)
+COMPATIBLE_IOCTL(ATMMPC_DATA)
+/* And these ioctls need translation */
+HANDLE_IOCTL(SIOCGIFNAME, dev_ifname32)
+HANDLE_IOCTL(SIOCGIFCONF, dev_ifconf)
+HANDLE_IOCTL(SIOCGIFFLAGS, dev_ifsioc)
+HANDLE_IOCTL(SIOCSIFFLAGS, dev_ifsioc)
+HANDLE_IOCTL(SIOCGIFMETRIC, dev_ifsioc)
+HANDLE_IOCTL(SIOCSIFMETRIC, dev_ifsioc)
+HANDLE_IOCTL(SIOCGIFMTU, dev_ifsioc)
+HANDLE_IOCTL(SIOCSIFMTU, dev_ifsioc)
+HANDLE_IOCTL(SIOCGIFMEM, dev_ifsioc)
+HANDLE_IOCTL(SIOCSIFMEM, dev_ifsioc)
+HANDLE_IOCTL(SIOCGIFHWADDR, dev_ifsioc)
+HANDLE_IOCTL(SIOCSIFHWADDR, dev_ifsioc)
+HANDLE_IOCTL(SIOCADDMULTI, dev_ifsioc)
+HANDLE_IOCTL(SIOCDELMULTI, dev_ifsioc)
+HANDLE_IOCTL(SIOCGIFINDEX, dev_ifsioc)
+HANDLE_IOCTL(SIOCGIFMAP, dev_ifsioc)
+HANDLE_IOCTL(SIOCSIFMAP, dev_ifsioc)
+HANDLE_IOCTL(SIOCGIFADDR, dev_ifsioc)
+HANDLE_IOCTL(SIOCSIFADDR, dev_ifsioc)
+HANDLE_IOCTL(SIOCGIFBRDADDR, dev_ifsioc)
+HANDLE_IOCTL(SIOCSIFBRDADDR, dev_ifsioc)
+HANDLE_IOCTL(SIOCGIFDSTADDR, dev_ifsioc)
+HANDLE_IOCTL(SIOCSIFDSTADDR, dev_ifsioc)
+HANDLE_IOCTL(SIOCGIFNETMASK, dev_ifsioc)
+HANDLE_IOCTL(SIOCSIFNETMASK, dev_ifsioc)
+HANDLE_IOCTL(SIOCSIFPFLAGS, dev_ifsioc)
+HANDLE_IOCTL(SIOCGIFPFLAGS, dev_ifsioc)
+HANDLE_IOCTL(SIOCGPPPSTATS, dev_ifsioc)
+HANDLE_IOCTL(SIOCGPPPCSTATS, dev_ifsioc)
+HANDLE_IOCTL(SIOCGPPPVER, dev_ifsioc)
+HANDLE_IOCTL(SIOCGIFTXQLEN, dev_ifsioc)
+HANDLE_IOCTL(SIOCSIFTXQLEN, dev_ifsioc)
+HANDLE_IOCTL(SIOCETHTOOL, dev_ifsioc)
+HANDLE_IOCTL(SIOCADDRT, routing_ioctl)
+HANDLE_IOCTL(SIOCDELRT, routing_ioctl)
+/* Note SIOCRTMSG is no longer, so this is safe and * the user would have seen just an -EINVAL anyways. */
+HANDLE_IOCTL(SIOCRTMSG, ret_einval)
+HANDLE_IOCTL(SIOCGSTAMP, do_siocgstamp)
+HANDLE_IOCTL(HDIO_GETGEO, hdio_getgeo)
+HANDLE_IOCTL(BLKRAGET, w_long)
+HANDLE_IOCTL(BLKGETSIZE, w_long)
+HANDLE_IOCTL(0x1260, broken_blkgetsize)
+HANDLE_IOCTL(BLKFRAGET, w_long)
+HANDLE_IOCTL(BLKSECTGET, w_long)
+HANDLE_IOCTL(BLKPG, blkpg_ioctl_trans)
+HANDLE_IOCTL(FBIOPUTCMAP32, fbiogetputcmap)
+HANDLE_IOCTL(FBIOGETCMAP32, fbiogetputcmap)
+HANDLE_IOCTL(FBIOSCURSOR32, fbiogscursor)
+HANDLE_IOCTL(FBIOGET_FSCREENINFO, fb_ioctl_trans)
+HANDLE_IOCTL(FBIOGETCMAP, fb_ioctl_trans)
+HANDLE_IOCTL(FBIOPUTCMAP, fb_ioctl_trans)
+HANDLE_IOCTL(HDIO_GET_KEEPSETTINGS, hdio_ioctl_trans)
+HANDLE_IOCTL(HDIO_GET_UNMASKINTR, hdio_ioctl_trans)
+HANDLE_IOCTL(HDIO_GET_DMA, hdio_ioctl_trans)
+HANDLE_IOCTL(HDIO_GET_32BIT, hdio_ioctl_trans)
+HANDLE_IOCTL(HDIO_GET_MULTCOUNT, hdio_ioctl_trans)
+HANDLE_IOCTL(HDIO_GET_NOWERR, hdio_ioctl_trans)
+HANDLE_IOCTL(HDIO_GET_NICE, hdio_ioctl_trans)
+HANDLE_IOCTL(FDSETPRM32, fd_ioctl_trans)
+HANDLE_IOCTL(FDDEFPRM32, fd_ioctl_trans)
+HANDLE_IOCTL(FDGETPRM32, fd_ioctl_trans)
+HANDLE_IOCTL(FDSETDRVPRM32, fd_ioctl_trans)
+HANDLE_IOCTL(FDGETDRVPRM32, fd_ioctl_trans)
+HANDLE_IOCTL(FDGETDRVSTAT32, fd_ioctl_trans)
+HANDLE_IOCTL(FDPOLLDRVSTAT32, fd_ioctl_trans)
+HANDLE_IOCTL(FDGETFDCSTAT32, fd_ioctl_trans)
+HANDLE_IOCTL(FDWERRORGET32, fd_ioctl_trans)
+HANDLE_IOCTL(PPPIOCGIDLE32, ppp_ioctl_trans)
+HANDLE_IOCTL(PPPIOCSCOMPRESS32, ppp_ioctl_trans)
+HANDLE_IOCTL(MTIOCGET32, mt_ioctl_trans)
+HANDLE_IOCTL(MTIOCPOS32, mt_ioctl_trans)
+HANDLE_IOCTL(MTIOCGETCONFIG32, mt_ioctl_trans)
+HANDLE_IOCTL(MTIOCSETCONFIG32, mt_ioctl_trans)
+HANDLE_IOCTL(CDROMREADMODE2, cdrom_ioctl_trans)
+HANDLE_IOCTL(CDROMREADMODE1, cdrom_ioctl_trans)
+HANDLE_IOCTL(CDROMREADRAW, cdrom_ioctl_trans)
+HANDLE_IOCTL(CDROMREADCOOKED, cdrom_ioctl_trans)
+HANDLE_IOCTL(CDROMREADAUDIO, cdrom_ioctl_trans)
+HANDLE_IOCTL(CDROMREADALL, cdrom_ioctl_trans)
+HANDLE_IOCTL(CDROM_SEND_PACKET, cdrom_ioctl_trans)
+HANDLE_IOCTL(LOOP_SET_STATUS, loop_status)
+HANDLE_IOCTL(LOOP_GET_STATUS, loop_status)
+#define AUTOFS_IOC_SETTIMEOUT32 _IOWR(0x93,0x64,unsigned int)
+HANDLE_IOCTL(AUTOFS_IOC_SETTIMEOUT32, ioc_settimeout)
+HANDLE_IOCTL(PIO_FONTX, do_fontx_ioctl)
+HANDLE_IOCTL(GIO_FONTX, do_fontx_ioctl)
+HANDLE_IOCTL(PIO_UNIMAP, do_unimap_ioctl)
+HANDLE_IOCTL(GIO_UNIMAP, do_unimap_ioctl)
+HANDLE_IOCTL(KDFONTOP, do_kdfontop_ioctl)
+HANDLE_IOCTL(EXT2_IOC32_GETFLAGS, do_ext2_ioctl)
+HANDLE_IOCTL(EXT2_IOC32_SETFLAGS, do_ext2_ioctl)
+HANDLE_IOCTL(EXT2_IOC32_GETVERSION, do_ext2_ioctl)
+HANDLE_IOCTL(EXT2_IOC32_SETVERSION, do_ext2_ioctl)
+HANDLE_IOCTL(VIDIOCGTUNER32, do_video_ioctl)
+HANDLE_IOCTL(VIDIOCSTUNER32, do_video_ioctl)
+HANDLE_IOCTL(VIDIOCGWIN32, do_video_ioctl)
+HANDLE_IOCTL(VIDIOCSWIN32, do_video_ioctl)
+HANDLE_IOCTL(VIDIOCGFBUF32, do_video_ioctl)
+HANDLE_IOCTL(VIDIOCSFBUF32, do_video_ioctl)
+HANDLE_IOCTL(VIDIOCGFREQ32, do_video_ioctl)
+HANDLE_IOCTL(VIDIOCSFREQ32, do_video_ioctl)
+/* One SMB ioctl needs translations. */
+#define SMB_IOC_GETMOUNTUID_32 _IOR('u', 1, __kernel_uid_t32)
+HANDLE_IOCTL(SMB_IOC_GETMOUNTUID_32, do_smb_getmountuid)
+HANDLE_IOCTL(ATM_GETLINKRATE32, do_atm_ioctl)
+HANDLE_IOCTL(ATM_GETNAMES32, do_atm_ioctl)
+HANDLE_IOCTL(ATM_GETTYPE32, do_atm_ioctl)
+HANDLE_IOCTL(ATM_GETESI32, do_atm_ioctl)
+HANDLE_IOCTL(ATM_GETADDR32, do_atm_ioctl)
+HANDLE_IOCTL(ATM_RSTADDR32, do_atm_ioctl)
+HANDLE_IOCTL(ATM_ADDADDR32, do_atm_ioctl)
+HANDLE_IOCTL(ATM_DELADDR32, do_atm_ioctl)
+HANDLE_IOCTL(ATM_GETCIRANGE32, do_atm_ioctl)
+HANDLE_IOCTL(ATM_SETCIRANGE32, do_atm_ioctl)
+HANDLE_IOCTL(ATM_SETESI32, do_atm_ioctl)
+HANDLE_IOCTL(ATM_SETESIF32, do_atm_ioctl)
+HANDLE_IOCTL(ATM_GETSTAT32, do_atm_ioctl)
+HANDLE_IOCTL(ATM_GETSTATZ32, do_atm_ioctl)
+HANDLE_IOCTL(SUNI_GETLOOP, do_atm_ioctl)
+HANDLE_IOCTL(SUNI_SETLOOP, do_atm_ioctl)
+HANDLE_IOCTL(SONET_GETSTAT, do_atm_ioctl)
+HANDLE_IOCTL(SONET_GETSTATZ, do_atm_ioctl)
+HANDLE_IOCTL(SONET_GETDIAG, do_atm_ioctl)
+HANDLE_IOCTL(SONET_SETDIAG, do_atm_ioctl)
+HANDLE_IOCTL(SONET_CLRDIAG, do_atm_ioctl)
+HANDLE_IOCTL(SONET_SETFRAMING, do_atm_ioctl)
+HANDLE_IOCTL(SONET_GETFRAMING, do_atm_ioctl)
+HANDLE_IOCTL(SONET_GETFRSENSE, do_atm_ioctl)
+IOCTL_TABLE_END
+
+unsigned int ioctl32_hash_table[1024];
+
+/* Hash a 32-bit ioctl command number into the 1024-entry
+ * ioctl32_hash_table.  Folds command bits down into the low 10 bits;
+ * collisions are resolved by chaining through ioctl_trans->next
+ * (see ioctl32_insert_translation below).
+ */
+extern inline unsigned long ioctl32_hash(unsigned long cmd)
+{
+	return ((cmd >> 6) ^ (cmd >> 4) ^ cmd) & 0x3ff;
+}
- case FDSETPRM32:
- case FDDEFPRM32:
- case FDGETPRM32:
- case FDSETDRVPRM32:
- case FDGETDRVPRM32:
- case FDGETDRVSTAT32:
- case FDPOLLDRVSTAT32:
- case FDGETFDCSTAT32:
- case FDWERRORGET32:
- error = fd_ioctl_trans(fd, cmd, arg);
- goto out;
+/* Append one translation entry to its hash chain.  The table slots
+ * hold kernel pointers truncated to u32 (cast via long); on a
+ * collision the new entry is linked at the tail of the chain so
+ * earlier registrations keep lookup priority.
+ * NOTE(review): callers appear to rely on the kernel lock for
+ * serialization — no locking is done here; confirm at call sites.
+ */
+static void ioctl32_insert_translation(struct ioctl_trans *trans)
+{
+	unsigned long hash;
+	struct ioctl_trans *t;
+
+	hash = ioctl32_hash (trans->cmd);
+	if (!ioctl32_hash_table[hash])
+		ioctl32_hash_table[hash] = (u32)(long)trans;
+	else {
+		t = (struct ioctl_trans *)(long)ioctl32_hash_table[hash];
+		while (t->next)
+			t = (struct ioctl_trans *)(long)t->next;
+		trans->next = 0;
+		t->next = (u32)(long)trans;
+	}
+}
- case PPPIOCGIDLE32:
- case PPPIOCSCOMPRESS32:
- error = ppp_ioctl_trans(fd, cmd, arg);
- goto out;
+/* Boot-time initialization: walk the linker-assembled array of
+ * ioctl_trans entries (ioctl_translations[] .. ioctl_translations_end[],
+ * built from the IOCTL table macros above) and hash each one into
+ * ioctl32_hash_table.  Registered via __initcall below.
+ */
+static int __init init_sys32_ioctl(void)
+{
+	int i;
+	extern struct ioctl_trans ioctl_translations[], ioctl_translations_end[];
-		case MTIOCGET32:
-		case MTIOCPOS32:
-		case MTIOCGETCONFIG32:
-		case MTIOCSETCONFIG32:
-			error = mt_ioctl_trans(fd, cmd, arg);
-			goto out;
+	for (i = 0; &ioctl_translations[i] < &ioctl_translations_end[0]; i++)
+		ioctl32_insert_translation(&ioctl_translations[i]);
+	return 0;
+}
- case CDROMREADMODE2:
- case CDROMREADMODE1:
- case CDROMREADRAW:
- case CDROMREADCOOKED:
- case CDROMREADAUDIO:
- case CDROMREADALL:
- case CDROM_SEND_PACKET:
- error = cdrom_ioctl_trans(fd, cmd, arg);
- goto out;
-
- case LOOP_SET_STATUS:
- case LOOP_GET_STATUS:
- error = loop_status(fd, cmd, arg);
- goto out;
+__initcall(init_sys32_ioctl);
-#define AUTOFS_IOC_SETTIMEOUT32 _IOWR(0x93,0x64,unsigned int)
- case AUTOFS_IOC_SETTIMEOUT32:
- error = rw_long(fd, AUTOFS_IOC_SETTIMEOUT, arg);
- goto out;
-
- case PIO_FONTX:
- case GIO_FONTX:
- error = do_fontx_ioctl(filp, cmd, (struct consolefontdesc32 *)arg);
- goto out;
-
- case PIO_UNIMAP:
- case GIO_UNIMAP:
- error = do_unimap_ioctl(filp, cmd, (struct unimapdesc32 *)arg);
- goto out;
+static struct ioctl_trans *additional_ioctls;
- case KDFONTOP:
- error = do_kdfontop_ioctl(filp, (struct console_font_op32 *)arg);
- goto out;
-
- case EXT2_IOC32_GETFLAGS:
- case EXT2_IOC32_SETFLAGS:
- case EXT2_IOC32_GETVERSION:
- case EXT2_IOC32_SETVERSION:
- error = do_ext2_ioctl(fd, cmd, arg);
- goto out;
-
- case VIDIOCGTUNER32:
- case VIDIOCSTUNER32:
- case VIDIOCGWIN32:
- case VIDIOCSWIN32:
- case VIDIOCGFBUF32:
- case VIDIOCSFBUF32:
- case VIDIOCGFREQ32:
- case VIDIOCSFREQ32:
- error = do_video_ioctl(fd, cmd, arg);
- goto out;
+/* Always call these with kernel lock held! */
- /* One SMB ioctl needs translations. */
- case _IOR('u', 1, __kernel_uid_t32): /* SMB_IOC_GETMOUNTUID */
- error = do_smb_getmountuid(fd, cmd, arg);
- goto out;
+/* Register a runtime (module-supplied) ioctl32 translation handler.
+ * Entries live in a single lazily-allocated page of ioctl_trans slots;
+ * a slot with cmd == 0 is free.  A NULL handler means "pass through
+ * to sys_ioctl unchanged".  Returns 0 on success, -ENOMEM when the
+ * page cannot be allocated or all slots are in use.
+ * Must be called with the kernel lock held (see comment above).
+ */
+int register_ioctl32_conversion(unsigned int cmd, int (*handler)(unsigned int, unsigned int, unsigned long, struct file *))
+{
+	int i;
+	if (!additional_ioctls) {
+		/* One page of slots is the fixed capacity for dynamic entries. */
+		additional_ioctls = module_map(PAGE_SIZE);
+		if (!additional_ioctls) return -ENOMEM;
+	}
+	for (i = 0; i < PAGE_SIZE/sizeof(struct ioctl_trans); i++)
+		if (!additional_ioctls[i].cmd)
+			break;
+	if (i == PAGE_SIZE/sizeof(struct ioctl_trans))
+		return -ENOMEM;
+	additional_ioctls[i].cmd = cmd;
+	if (!handler)
+		additional_ioctls[i].handler = (u32)(long)sys_ioctl;
+	else
+		additional_ioctls[i].handler = (u32)(long)handler;
+	ioctl32_insert_translation(&additional_ioctls[i]);
+	return 0;
+}
- case ATM_GETLINKRATE32:
- case ATM_GETNAMES32:
- case ATM_GETTYPE32:
- case ATM_GETESI32:
- case ATM_GETADDR32:
- case ATM_RSTADDR32:
- case ATM_ADDADDR32:
- case ATM_DELADDR32:
- case ATM_GETCIRANGE32:
- case ATM_SETCIRANGE32:
- case ATM_SETESI32:
- case ATM_SETESIF32:
- case ATM_GETSTAT32:
- case ATM_GETSTATZ32:
- case SUNI_GETLOOP:
- case SUNI_SETLOOP:
- case SONET_GETSTAT:
- case SONET_GETSTATZ:
- case SONET_GETDIAG:
- case SONET_SETDIAG:
- case SONET_CLRDIAG:
- case SONET_SETFRAMING:
- case SONET_GETFRAMING:
- case SONET_GETFRSENSE:
- error = do_atm_ioctl(fd, cmd, arg);
- goto out;
-
- /* List here exlicitly which ioctl's are known to have
- * compatable types passed or none at all...
- */
+/* Remove a dynamically registered ioctl32 translation.  Only entries
+ * inside the additional_ioctls page may be removed (the range check
+ * guards the static, linker-built table entries).  The slot is freed
+ * by zeroing its cmd and unlinking it from the hash chain.
+ * Returns 0 on success, -EINVAL if cmd was not dynamically registered.
+ * Must be called with the kernel lock held (see comment above).
+ */
+int unregister_ioctl32_conversion(unsigned int cmd)
+{
+	unsigned long hash = ioctl32_hash(cmd);
+	struct ioctl_trans *t, *t1;
+
+	t = (struct ioctl_trans *)(long)ioctl32_hash_table[hash];
+	if (!t) return -EINVAL;
+	if (t->cmd == cmd && t >= additional_ioctls &&
+	    (unsigned long)t < ((unsigned long)additional_ioctls) + PAGE_SIZE) {
+		/* Head of chain: point the table slot at the successor. */
+		ioctl32_hash_table[hash] = t->next;
+		t->cmd = 0;
+		return 0;
+	} else while (t->next) {
+		t1 = (struct ioctl_trans *)(long)t->next;
+		if (t1->cmd == cmd && t1 >= additional_ioctls &&
+		    (unsigned long)t1 < ((unsigned long)additional_ioctls) + PAGE_SIZE) {
+			t1->cmd = 0;
+			t->next = t1->next;
+			return 0;
+		}
+		t = t1;
+	}
+	return -EINVAL;
+}
- /* Big T */
- case TCGETA:
- case TCSETA:
- case TCSETAW:
- case TCSETAF:
- case TCSBRK:
- case TCXONC:
- case TCFLSH:
- case TCGETS:
- case TCSETS:
- case TCSETSW:
- case TCSETSF:
- case TIOCLINUX:
-
- /* Little t */
- case TIOCGETD:
- case TIOCSETD:
- case TIOCEXCL:
- case TIOCNXCL:
- case TIOCCONS:
- case TIOCGSOFTCAR:
- case TIOCSSOFTCAR:
- case TIOCSWINSZ:
- case TIOCGWINSZ:
- case TIOCMGET:
- case TIOCMBIC:
- case TIOCMBIS:
- case TIOCMSET:
- case TIOCPKT:
- case TIOCNOTTY:
- case TIOCSTI:
- case TIOCOUTQ:
- case TIOCSPGRP:
- case TIOCGPGRP:
- case TIOCSCTTY:
- case TIOCGPTN:
- case TIOCSPTLCK:
- case TIOCGSERIAL:
- case TIOCSSERIAL:
- case TIOCSERGETLSR:
-
- /* Big F */
- case FBIOGTYPE:
- case FBIOSATTR:
- case FBIOGATTR:
- case FBIOSVIDEO:
- case FBIOGVIDEO:
- case FBIOGCURSOR32: /* This is not implemented yet. Later it should be converted... */
- case FBIOSCURPOS:
- case FBIOGCURPOS:
- case FBIOGCURMAX:
-
- case FBIOGET_VSCREENINFO:
- case FBIOPUT_VSCREENINFO:
- case FBIOPAN_DISPLAY:
- case FBIOGET_FCURSORINFO:
- case FBIOGET_VCURSORINFO:
- case FBIOPUT_VCURSORINFO:
- case FBIOGET_CURSORSTATE:
- case FBIOPUT_CURSORSTATE:
- case FBIOGET_CON2FBMAP:
- case FBIOPUT_CON2FBMAP:
-
- /* Little f */
- case FIOCLEX:
- case FIONCLEX:
- case FIOASYNC:
- case FIONBIO:
- case FIONREAD: /* This is also TIOCINQ */
-
- /* 0x00 */
- case FIBMAP:
- case FIGETBSZ:
-
- /* 0x03 -- HD/IDE ioctl's used by hdparm and friends.
- * Some need translations, these do not.
- */
- case HDIO_GET_IDENTITY:
- case HDIO_SET_DMA:
- case HDIO_SET_KEEPSETTINGS:
- case HDIO_SET_UNMASKINTR:
- case HDIO_SET_NOWERR:
- case HDIO_SET_32BIT:
- case HDIO_SET_MULTCOUNT:
- case HDIO_DRIVE_CMD:
- case HDIO_SET_PIO_MODE:
- case HDIO_SCAN_HWIF:
- case HDIO_SET_NICE:
- case BLKROSET:
- case BLKROGET:
-
- /* 0x02 -- Floppy ioctls */
- case FDMSGON:
- case FDMSGOFF:
- case FDSETEMSGTRESH:
- case FDFLUSH:
- case FDWERRORCLR:
- case FDSETMAXERRS:
- case FDGETMAXERRS:
- case FDGETDRVTYP:
- case FDEJECT:
- case FDCLRPRM:
- case FDFMTBEG:
- case FDFMTEND:
- case FDRESET:
- case FDTWADDLE:
- case FDFMTTRK:
- case FDRAWCMD:
-
- /* 0x12 */
- case BLKRRPART:
- case BLKFLSBUF:
- case BLKRASET:
+asmlinkage int sys32_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ struct file * filp;
+ int error = -EBADF;
+ int (*handler)(unsigned int, unsigned int, unsigned long, struct file * filp);
+ struct ioctl_trans *t;
-#if 0 /* New RAID code is being merged, fix up to handle
- * new RAID ioctls when fully merged in 2.3.x -DaveM
- */
- /* 0x09 */
- case REGISTER_DEV:
- case REGISTER_DEV_NEW:
- case START_MD:
- case STOP_MD:
-#endif
-
- /* Big K */
- case PIO_FONT:
- case GIO_FONT:
- case KDSIGACCEPT:
- case KDGETKEYCODE:
- case KDSETKEYCODE:
- case KIOCSOUND:
- case KDMKTONE:
- case KDGKBTYPE:
- case KDSETMODE:
- case KDGETMODE:
- case KDSKBMODE:
- case KDGKBMODE:
- case KDSKBMETA:
- case KDGKBMETA:
- case KDGKBENT:
- case KDSKBENT:
- case KDGKBSENT:
- case KDSKBSENT:
- case KDGKBDIACR:
- case KDSKBDIACR:
- case KDGKBLED:
- case KDSKBLED:
- case KDGETLED:
- case KDSETLED:
- case GIO_SCRNMAP:
- case PIO_SCRNMAP:
- case GIO_UNISCRNMAP:
- case PIO_UNISCRNMAP:
- case PIO_FONTRESET:
- case PIO_UNIMAPCLR:
-
- /* Little k */
- case KIOCTYPE:
- case KIOCLAYOUT:
- case KIOCGTRANS:
- case KIOCTRANS:
- case KIOCCMD:
- case KIOCSDIRECT:
- case KIOCSLED:
- case KIOCGLED:
- case KIOCSRATE:
- case KIOCGRATE:
-
- /* Big S */
- case SCSI_IOCTL_GET_IDLUN:
- case SCSI_IOCTL_DOORLOCK:
- case SCSI_IOCTL_DOORUNLOCK:
- case SCSI_IOCTL_TEST_UNIT_READY:
- case SCSI_IOCTL_TAGGED_ENABLE:
- case SCSI_IOCTL_TAGGED_DISABLE:
- case SCSI_IOCTL_GET_BUS_NUMBER:
- case SCSI_IOCTL_SEND_COMMAND:
-
- /* Big V */
- case VT_SETMODE:
- case VT_GETMODE:
- case VT_GETSTATE:
- case VT_OPENQRY:
- case VT_ACTIVATE:
- case VT_WAITACTIVE:
- case VT_RELDISP:
- case VT_DISALLOCATE:
- case VT_RESIZE:
- case VT_RESIZEX:
- case VT_LOCKSWITCH:
- case VT_UNLOCKSWITCH:
-
- /* Little v */
- case VUIDSFORMAT:
- case VUIDGFORMAT:
-
- /* Little v, the video4linux ioctls */
- case VIDIOCGCAP:
- case VIDIOCGCHAN:
- case VIDIOCSCHAN:
- case VIDIOCGPICT:
- case VIDIOCSPICT:
- case VIDIOCCAPTURE:
- case VIDIOCKEY:
- case VIDIOCGAUDIO:
- case VIDIOCSAUDIO:
- case VIDIOCSYNC:
- case VIDIOCMCAPTURE:
- case VIDIOCGMBUF:
- case VIDIOCGUNIT:
- case VIDIOCGCAPTURE:
- case VIDIOCSCAPTURE:
-
- /* BTTV specific... */
- case _IOW('v', BASE_VIDIOCPRIVATE+0, char [256]):
- case _IOR('v', BASE_VIDIOCPRIVATE+1, char [256]):
- case _IOR('v' , BASE_VIDIOCPRIVATE+2, unsigned int):
- case _IOW('v' , BASE_VIDIOCPRIVATE+3, char [16]): /* struct bttv_pll_info */
- case _IOR('v' , BASE_VIDIOCPRIVATE+4, int):
- case _IOR('v' , BASE_VIDIOCPRIVATE+5, int):
- case _IOR('v' , BASE_VIDIOCPRIVATE+6, int):
- case _IOR('v' , BASE_VIDIOCPRIVATE+7, int):
-
- /* Little p (/dev/rtc, /dev/envctrl, etc.) */
- case RTCGET:
- case RTCSET:
- case I2CIOCSADR:
- case I2CIOCGADR:
-
- /* Little m */
- case MTIOCTOP:
-
- /* OPENPROMIO, SunOS/Solaris only, the NetBSD one's have
- * embedded pointers in the arg which we'd need to clean up...
- */
- case OPROMGETOPT:
- case OPROMSETOPT:
- case OPROMNXTOPT:
- case OPROMSETOPT2:
- case OPROMNEXT:
- case OPROMCHILD:
- case OPROMGETPROP:
- case OPROMNXTPROP:
- case OPROMU2P:
- case OPROMGETCONS:
- case OPROMGETFBNAME:
- case OPROMGETBOOTARGS:
- case OPROMSETCUR:
- case OPROMPCI2NODE:
- case OPROMPATH2NODE:
-
- /* Socket level stuff */
- case FIOSETOWN:
- case SIOCSPGRP:
- case FIOGETOWN:
- case SIOCGPGRP:
- case SIOCATMARK:
- case SIOCSIFLINK:
- case SIOCSIFENCAP:
- case SIOCGIFENCAP:
- case SIOCSIFBR:
- case SIOCGIFBR:
- case SIOCSARP:
- case SIOCGARP:
- case SIOCDARP:
-#if 0 /* XXX No longer exist in new routing code. XXX */
- case OLD_SIOCSARP:
- case OLD_SIOCGARP:
- case OLD_SIOCDARP:
-#endif
- case SIOCSRARP:
- case SIOCGRARP:
- case SIOCDRARP:
- case SIOCADDDLCI:
- case SIOCDELDLCI:
-
- /* SG stuff */
- case SG_SET_TIMEOUT:
- case SG_GET_TIMEOUT:
- case SG_EMULATED_HOST:
- case SG_SET_TRANSFORM:
- case SG_GET_TRANSFORM:
-
- /* PPP stuff */
- case PPPIOCGFLAGS:
- case PPPIOCSFLAGS:
- case PPPIOCGASYNCMAP:
- case PPPIOCSASYNCMAP:
- case PPPIOCGUNIT:
- case PPPIOCGRASYNCMAP:
- case PPPIOCSRASYNCMAP:
- case PPPIOCGMRU:
- case PPPIOCSMRU:
- case PPPIOCSMAXCID:
- case PPPIOCGXASYNCMAP:
- case PPPIOCSXASYNCMAP:
- case PPPIOCXFERUNIT:
- case PPPIOCGNPMODE:
- case PPPIOCSNPMODE:
- case PPPIOCGDEBUG:
- case PPPIOCSDEBUG:
- case PPPIOCNEWUNIT:
- case PPPIOCATTACH:
- case PPPIOCDETACH:
-
- /* CDROM stuff */
- case CDROMPAUSE:
- case CDROMRESUME:
- case CDROMPLAYMSF:
- case CDROMPLAYTRKIND:
- case CDROMREADTOCHDR:
- case CDROMREADTOCENTRY:
- case CDROMSTOP:
- case CDROMSTART:
- case CDROMEJECT:
- case CDROMVOLCTRL:
- case CDROMSUBCHNL:
- case CDROMEJECT_SW:
- case CDROMMULTISESSION:
- case CDROM_GET_MCN:
- case CDROMRESET:
- case CDROMVOLREAD:
- case CDROMSEEK:
- case CDROMPLAYBLK:
- case CDROMCLOSETRAY:
- case CDROM_SET_OPTIONS:
- case CDROM_CLEAR_OPTIONS:
- case CDROM_SELECT_SPEED:
- case CDROM_SELECT_DISC:
- case CDROM_MEDIA_CHANGED:
- case CDROM_DRIVE_STATUS:
- case CDROM_DISC_STATUS:
- case CDROM_CHANGER_NSLOTS:
- case CDROM_LOCKDOOR:
- case CDROM_DEBUG:
- case CDROM_GET_CAPABILITY:
-
- /* Big L */
- case LOOP_SET_FD:
- case LOOP_CLR_FD:
-
- /* Big A */
- case AUDIO_GETINFO:
- case AUDIO_SETINFO:
- case AUDIO_DRAIN:
- case AUDIO_GETDEV:
- case AUDIO_GETDEV_SUNOS:
- case AUDIO_FLUSH:
-
- /* Big Q for sound/OSS */
- case SNDCTL_SEQ_RESET:
- case SNDCTL_SEQ_SYNC:
- case SNDCTL_SYNTH_INFO:
- case SNDCTL_SEQ_CTRLRATE:
- case SNDCTL_SEQ_GETOUTCOUNT:
- case SNDCTL_SEQ_GETINCOUNT:
- case SNDCTL_SEQ_PERCMODE:
- case SNDCTL_FM_LOAD_INSTR:
- case SNDCTL_SEQ_TESTMIDI:
- case SNDCTL_SEQ_RESETSAMPLES:
- case SNDCTL_SEQ_NRSYNTHS:
- case SNDCTL_SEQ_NRMIDIS:
- case SNDCTL_MIDI_INFO:
- case SNDCTL_SEQ_THRESHOLD:
- case SNDCTL_SYNTH_MEMAVL:
- case SNDCTL_FM_4OP_ENABLE:
- case SNDCTL_SEQ_PANIC:
- case SNDCTL_SEQ_OUTOFBAND:
- case SNDCTL_SEQ_GETTIME:
- case SNDCTL_SYNTH_ID:
- case SNDCTL_SYNTH_CONTROL:
- case SNDCTL_SYNTH_REMOVESAMPLE:
-
- /* Big T for sound/OSS */
- case SNDCTL_TMR_TIMEBASE:
- case SNDCTL_TMR_START:
- case SNDCTL_TMR_STOP:
- case SNDCTL_TMR_CONTINUE:
- case SNDCTL_TMR_TEMPO:
- case SNDCTL_TMR_SOURCE:
- case SNDCTL_TMR_METRONOME:
- case SNDCTL_TMR_SELECT:
-
- /* Little m for sound/OSS */
- case SNDCTL_MIDI_PRETIME:
- case SNDCTL_MIDI_MPUMODE:
- case SNDCTL_MIDI_MPUCMD:
-
- /* Big P for sound/OSS */
- case SNDCTL_DSP_RESET:
- case SNDCTL_DSP_SYNC:
- case SNDCTL_DSP_SPEED:
- case SNDCTL_DSP_STEREO:
- case SNDCTL_DSP_GETBLKSIZE:
- case SNDCTL_DSP_CHANNELS:
- case SOUND_PCM_WRITE_FILTER:
- case SNDCTL_DSP_POST:
- case SNDCTL_DSP_SUBDIVIDE:
- case SNDCTL_DSP_SETFRAGMENT:
- case SNDCTL_DSP_GETFMTS:
- case SNDCTL_DSP_SETFMT:
- case SNDCTL_DSP_GETOSPACE:
- case SNDCTL_DSP_GETISPACE:
- case SNDCTL_DSP_NONBLOCK:
- case SNDCTL_DSP_GETCAPS:
- case SNDCTL_DSP_GETTRIGGER:
- case SNDCTL_DSP_SETTRIGGER:
- case SNDCTL_DSP_GETIPTR:
- case SNDCTL_DSP_GETOPTR:
- /* case SNDCTL_DSP_MAPINBUF: XXX needs translation */
- /* case SNDCTL_DSP_MAPOUTBUF: XXX needs translation */
- case SNDCTL_DSP_SETSYNCRO:
- case SNDCTL_DSP_SETDUPLEX:
- case SNDCTL_DSP_GETODELAY:
- case SNDCTL_DSP_PROFILE:
-
- case SOUND_PCM_READ_RATE:
- case SOUND_PCM_READ_CHANNELS:
- case SOUND_PCM_READ_BITS:
- case SOUND_PCM_READ_FILTER:
-
- /* Big C for sound/OSS */
- case SNDCTL_COPR_RESET:
- case SNDCTL_COPR_LOAD:
- case SNDCTL_COPR_RDATA:
- case SNDCTL_COPR_RCODE:
- case SNDCTL_COPR_WDATA:
- case SNDCTL_COPR_WCODE:
- case SNDCTL_COPR_RUN:
- case SNDCTL_COPR_HALT:
- case SNDCTL_COPR_SENDMSG:
- case SNDCTL_COPR_RCVMSG:
-
- /* Big M for sound/OSS */
- case SOUND_MIXER_READ_VOLUME:
- case SOUND_MIXER_READ_BASS:
- case SOUND_MIXER_READ_TREBLE:
- case SOUND_MIXER_READ_SYNTH:
- case SOUND_MIXER_READ_PCM:
- case SOUND_MIXER_READ_SPEAKER:
- case SOUND_MIXER_READ_LINE:
- case SOUND_MIXER_READ_MIC:
- case SOUND_MIXER_READ_CD:
- case SOUND_MIXER_READ_IMIX:
- case SOUND_MIXER_READ_ALTPCM:
- case SOUND_MIXER_READ_RECLEV:
- case SOUND_MIXER_READ_IGAIN:
- case SOUND_MIXER_READ_OGAIN:
- case SOUND_MIXER_READ_LINE1:
- case SOUND_MIXER_READ_LINE2:
- case SOUND_MIXER_READ_LINE3:
- case SOUND_MIXER_READ_MUTE:
- /* case SOUND_MIXER_READ_ENHANCE: same value as READ_MUTE */
- /* case SOUND_MIXER_READ_LOUD: same value as READ_MUTE */
- case SOUND_MIXER_READ_RECSRC:
- case SOUND_MIXER_READ_DEVMASK:
- case SOUND_MIXER_READ_RECMASK:
- case SOUND_MIXER_READ_STEREODEVS:
- case SOUND_MIXER_READ_CAPS:
-
- case SOUND_MIXER_WRITE_VOLUME:
- case SOUND_MIXER_WRITE_BASS:
- case SOUND_MIXER_WRITE_TREBLE:
- case SOUND_MIXER_WRITE_SYNTH:
- case SOUND_MIXER_WRITE_PCM:
- case SOUND_MIXER_WRITE_SPEAKER:
- case SOUND_MIXER_WRITE_LINE:
- case SOUND_MIXER_WRITE_MIC:
- case SOUND_MIXER_WRITE_CD:
- case SOUND_MIXER_WRITE_IMIX:
- case SOUND_MIXER_WRITE_ALTPCM:
- case SOUND_MIXER_WRITE_RECLEV:
- case SOUND_MIXER_WRITE_IGAIN:
- case SOUND_MIXER_WRITE_OGAIN:
- case SOUND_MIXER_WRITE_LINE1:
- case SOUND_MIXER_WRITE_LINE2:
- case SOUND_MIXER_WRITE_LINE3:
- case SOUND_MIXER_WRITE_MUTE:
- /* case SOUND_MIXER_WRITE_ENHANCE: same value as WRITE_MUTE */
- /* case SOUND_MIXER_WRITE_LOUD: same value as WRITE_MUTE */
- case SOUND_MIXER_WRITE_RECSRC:
-
- case SOUND_MIXER_INFO:
- case SOUND_OLD_MIXER_INFO:
- case SOUND_MIXER_ACCESS:
- case SOUND_MIXER_PRIVATE1:
- case SOUND_MIXER_PRIVATE2:
- case SOUND_MIXER_PRIVATE3:
- case SOUND_MIXER_PRIVATE4:
- case SOUND_MIXER_PRIVATE5:
- case SOUND_MIXER_GETLEVELS:
- case SOUND_MIXER_SETLEVELS:
-
- case OSS_GETVERSION:
-
- /* AUTOFS */
- case AUTOFS_IOC_READY:
- case AUTOFS_IOC_FAIL:
- case AUTOFS_IOC_CATATONIC:
- case AUTOFS_IOC_PROTOVER:
- case AUTOFS_IOC_EXPIRE:
-
- /* Raw devices */
- case RAW_SETBIND:
- case RAW_GETBIND:
-
- /* SMB ioctls which do not need any translations */
- case SMB_IOC_NEWCONN:
-
- /* Little a */
- case ATMSIGD_CTRL:
- case ATMARPD_CTRL:
- case ATMLEC_CTRL:
- case ATMLEC_MCAST:
- case ATMLEC_DATA:
- case ATM_SETSC:
- case SIOCSIFATMTCP:
- case SIOCMKCLIP:
- case ATMARP_MKIP:
- case ATMARP_SETENTRY:
- case ATMARP_ENCAP:
- case ATMTCP_CREATE:
- case ATMTCP_REMOVE:
- case ATMMPC_CTRL:
- case ATMMPC_DATA:
-
+ lock_kernel();
+ filp = fget(fd);
+ if(!filp)
+ goto out2;
+
+ if (!filp->f_op || !filp->f_op->ioctl) {
error = sys_ioctl (fd, cmd, arg);
goto out;
+ }
- default:
- do {
- static int count = 0;
- if (++count <= 20)
- printk("sys32_ioctl: Unknown cmd fd(%d) "
- "cmd(%08x) arg(%08x)\n",
- (int)fd, (unsigned int)cmd, (unsigned int)arg);
- } while(0);
+ t = (struct ioctl_trans *)(long)ioctl32_hash_table [ioctl32_hash (cmd)];
+
+ while (t && t->cmd != cmd)
+ t = (struct ioctl_trans *)(long)t->next;
+ if (t) {
+ handler = (void *)(long)t->handler;
+ error = handler(fd, cmd, arg, filp);
+ } else {
+ static int count = 0;
+ if (++count <= 20)
+ printk("sys32_ioctl: Unknown cmd fd(%d) "
+ "cmd(%08x) arg(%08x)\n",
+ (int)fd, (unsigned int)cmd, (unsigned int)arg);
error = -EINVAL;
- break;
}
out:
fput(filp);
-/* $Id: pci_iommu.c,v 1.8 2000/01/28 13:41:59 jj Exp $
+/* $Id: pci_iommu.c,v 1.9 2000/02/16 07:31:34 davem Exp $
* pci_iommu.c: UltraSparc PCI controller IOM/STC support.
*
* Copyright (C) 1999 David S. Miller (davem@redhat.com)
-/* $Id: pci_psycho.c,v 1.11 2000/02/08 05:11:32 jj Exp $
+/* $Id: pci_psycho.c,v 1.12 2000/02/17 08:58:18 davem Exp $
* pci_psycho.c: PSYCHO/U2P specific PCI controller support.
*
* Copyright (C) 1997, 1998, 1999 David S. Miller (davem@caipfs.rutgers.edu)
int is_pbm_a)
{
unsigned long base = p->controller_regs;
-
- /* Currently we don't even use it. */
- pbm->stc.strbuf_enabled = 0;
+ u64 control;
if (is_pbm_a) {
pbm->stc.strbuf_control = base + PSYCHO_STRBUF_CONTROL_A;
pbm->stc.strbuf_flushflag_pa = (unsigned long)
__pa(pbm->stc.strbuf_flushflag);
-#if 0
- /* And when we do enable it, these are the sorts of things
- * we'll do.
+ /* Enable the streaming buffer. We have to be careful
+ * just in case OBP left it with LRU locking enabled.
+ *
+ * It is possible to control if PBM will be rerun on
+ * line misses. Currently I just retain whatever setting
+ * OBP left us with. All checks so far show it having
+ * a value of zero.
*/
+#undef PSYCHO_STRBUF_RERUN_ENABLE
+#undef PSYCHO_STRBUF_RERUN_DISABLE
control = psycho_read(pbm->stc.strbuf_control);
- control |= PSYCHO_SBUFCTRL_SB_EN;
- psycho_write(pbm->stc.strbuf_control, control);
+ control |= PSYCHO_STRBUF_CTRL_ENAB;
+ control &= ~(PSYCHO_STRBUF_CTRL_LENAB | PSYCHO_STRBUF_CTRL_LPTR);
+#ifdef PSYCHO_STRBUF_RERUN_ENABLE
+ control &= ~(PSYCHO_STRBUF_CTRL_RRDIS);
+#else
+#ifdef PSYCHO_STRBUF_RERUN_DISABLE
+ control |= PSYCHO_STRBUF_CTRL_RRDIS;
#endif
+#endif
+ psycho_write(pbm->stc.strbuf_control, control);
+
+ pbm->stc.strbuf_enabled = 1;
}
#define PSYCHO_IOSPACE_A 0x002000000UL
-/* $Id: pci_sabre.c,v 1.12 2000/02/08 05:11:33 jj Exp $
+/* $Id: pci_sabre.c,v 1.13 2000/02/16 07:31:34 davem Exp $
* pci_sabre.c: Sabre specific PCI controller support.
*
* Copyright (C) 1997, 1998, 1999 David S. Miller (davem@caipfs.rutgers.edu)
-/* $Id: sbus.c,v 1.7 2000/01/28 13:41:58 jj Exp $
+/* $Id: sbus.c,v 1.8 2000/02/16 07:31:34 davem Exp $
* sbus.c: UltraSparc SBUS controller support.
*
* Copyright (C) 1999 David S. Miller (davem@redhat.com)
-/* $Id: sys_sparc.c,v 1.35 2000/01/29 07:40:12 davem Exp $
+/* $Id: sys_sparc.c,v 1.36 2000/02/16 07:31:35 davem Exp $
* linux/arch/sparc64/kernel/sys_sparc.c
*
* This file contains various random system calls that
-/* $Id: sys_sparc32.c,v 1.131 2000/01/21 11:38:54 jj Exp $
+/* $Id: sys_sparc32.c,v 1.132 2000/02/16 07:31:35 davem Exp $
* sys_sparc32.c: Conversion between 32bit and 64bit native syscalls.
*
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
-/* $Id: sys_sunos32.c,v 1.38 2000/01/29 07:40:13 davem Exp $
+/* $Id: sys_sunos32.c,v 1.39 2000/02/16 07:31:37 davem Exp $
* sys_sunos32.c: SunOS binary compatability layer on sparc64.
*
* Copyright (C) 1995, 1996, 1997 David S. Miller (davem@caip.rutgers.edu)
-/* $Id: systbls.S,v 1.67 2000/01/29 16:41:21 jj Exp $
+/* $Id: systbls.S,v 1.68 2000/02/16 07:31:38 davem Exp $
* systbls.S: System call entry point tables for OS compatibility.
* The native Linux system call table lives here also.
*
-/* $Id: misc.c,v 1.21 2000/01/29 07:40:15 davem Exp $
+/* $Id: misc.c,v 1.22 2000/02/16 07:31:41 davem Exp $
* misc.c: Miscelaneous syscall emulation for Solaris
*
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
-/* $Id: socksys.c,v 1.11 2000/02/09 22:32:17 davem Exp $
+/* $Id: socksys.c,v 1.12 2000/02/17 05:50:11 davem Exp $
* socksys.c: /dev/inet/ stuff for Solaris emulation.
*
* Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
MOD_SUB_DIRS += video
endif
-ifdef CONFIG_PPC
+ifdef CONFIG_MAC
SUB_DIRS += macintosh
MOD_SUB_DIRS += macintosh
endif
(long *) Argument);
case BLKRAGET:
/* Get Read-Ahead. */
- if ((int *) Argument == NULL) return -EINVAL;
- return put_user(read_ahead[MAJOR(Inode->i_rdev)], (int *) Argument);
+ if ((long *) Argument == NULL) return -EINVAL;
+ return put_user(read_ahead[MAJOR(Inode->i_rdev)], (long *) Argument);
case BLKRASET:
/* Set Read-Ahead. */
if (!capable(CAP_SYS_ADMIN)) return -EACCES;
INIT_LIST_HEAD(&q->queue_head);
q->elevator = ELEVATOR_DEFAULTS;
q->request_fn = rfn;
- q->back_merges_fn = ll_back_merge_fn;
+ q->back_merge_fn = ll_back_merge_fn;
q->front_merge_fn = ll_front_merge_fn;
q->merge_requests_fn = ll_merge_requests_fn;
q->make_request_fn = NULL;
#define elevator_merge_after(q, req, lat) __elevator_merge((q), (req), (lat), 1)
static inline void __elevator_merge(request_queue_t * q, struct request * req, int latency, int after)
{
-#ifdef DEBUG_ELEVATOR
int sequence = elevator_sequence(&q->elevator, latency);
if (after)
sequence -= req->nr_segments;
if (elevator_sequence_before(sequence, req->elevator_sequence)) {
- static int warned = 0;
- if (!warned) {
+ if (!after)
printk(KERN_WARNING __FUNCTION__
": req latency %d req latency %d\n",
req->elevator_sequence - q->elevator.sequence,
sequence - q->elevator.sequence);
- warned = 1;
- }
req->elevator_sequence = sequence;
}
-#endif
}
static inline void elevator_queue(request_queue_t * q,
int rw_ahead, max_req, max_sectors;
unsigned long flags;
int orig_latency, latency, __latency, starving, __starving, empty;
- struct list_head * entry, * __entry;
+ struct list_head * entry, * __entry = NULL;
count = bh->b_size >> 9;
sector = bh->b_rsector;
void nbd_clear_que(struct nbd_device *lo)
{
struct request *req;
- unsigned long flags;
while (!list_empty(&lo->queue_head)) {
req = blkdev_entry_prev_request(&lo->queue_head);
return 0;
#ifdef PARANOIA
case NBD_PRINT_DEBUG:
- printk(KERN_INFO "NBD device %d: queue_head = %p. Global: in %d, out %d\n",
- dev, lo->queue_head, requests_in, requests_out);
+ printk(KERN_INFO "NBD device %d: next = %p, prev = %p. Global: in %d, out %d\n",
+ dev, lo->queue_head.next, lo->queue_head.prev, requests_in, requests_out);
return 0;
#endif
case BLKGETSIZE:
TRACE_CATCH(devfs_register_chrdev(QIC117_TAPE_MAJOR, "zft", &zft_cdev),);
for (i = 0; i < 4; i++) {
- char devname[8];
+ char devname[9];
sprintf (devname, "qft%i", i);
devfs_register (NULL, devname, 0, DEVFS_FL_NONE,
void cleanup_module(void)
{
int i;
- char devname[8];
+ char devname[9];
TRACE_FUN(ft_t_flow);
ssize_t retv = 0;
ssize_t written;
size_t copy_size = count;
- long old_to;
#ifdef LP_STATS
if (jiffies-lp_table[minor].lastcall > LP_TIME(minor))
/* Go to compatibility mode. */
parport_negotiate (port, IEEE1284_MODE_COMPAT);
- old_to = parport_set_timeout (lp_table[minor].dev,
- lp_table[minor].timeout);
+ parport_set_timeout (lp_table[minor].dev,
+ lp_table[minor].timeout);
do {
/* Write the data. */
}
} while (count > 0);
- /* Not really necessary, but polite. */
- parport_set_timeout (lp_table[minor].dev, old_to);
-
parport_release (lp_table[minor].dev);
up (&lp_table[minor].port_mutex);
struct pardevice *dev = lp_table[CONSOLE_LP].dev;
struct parport *port = dev->port;
ssize_t written;
- signed long old_to;
if (parport_claim (dev))
/* Nothing we can do. */
return;
- old_to = parport_set_timeout (dev, 0);
+ parport_set_timeout (dev, 0);
/* Go to compatibility mode. */
parport_negotiate (port, IEEE1284_MODE_COMPAT);
}
} while (count > 0 && (CONSOLE_LP_STRICT || written > 0));
- parport_set_timeout (dev, old_to);
parport_release (dev);
}
if (!lp_count) {
printk (KERN_INFO "lp: driver loaded but no devices found\n");
-#ifndef CONFIG_PARPORT_12843
+#ifndef CONFIG_PARPORT_1284
if (parport_nr[0] == LP_PARPORT_AUTO)
- printk (KERN_INFO "lp: (is IEEE 1284.3 support enabled?)\n");
+ printk (KERN_INFO "lp: (is IEEE 1284 support enabled?)\n");
#endif
}
static int rtc_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg);
+#ifndef __alpha__
static unsigned int rtc_poll(struct file *file, poll_table *wait);
+#endif
static void get_rtc_time (struct rtc_time *rtc_tm);
static void get_rtc_alm_time (struct rtc_time *alm_tm);
+#ifndef __alpha__
static void rtc_dropped_irq(unsigned long data);
static void set_rtc_irq_bit(unsigned char bit);
static void mask_rtc_irq_bit(unsigned char bit);
+#endif
static inline unsigned char rtc_is_updating(void);
static const unsigned char days_in_mo[] =
{0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
+#ifndef __alpha__
/*
* A very tiny interrupt handler. It runs with SA_INTERRUPT set,
* so that there is no possibility of conflicting with the
if (atomic_read(&rtc_status) & RTC_TIMER_ON)
mod_timer(&rtc_irq_timer, jiffies + HZ/rtc_freq + 2*HZ/100);
}
+#endif
/*
* Now all the various file operations that we export.
static ssize_t rtc_read(struct file *file, char *buf,
size_t count, loff_t *ppos)
{
+#ifdef __alpha__
+ return -EIO;
+#else
DECLARE_WAITQUEUE(wait, current);
unsigned long data;
ssize_t retval;
remove_wait_queue(&rtc_wait, &wait);
return retval;
+#endif
}
static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
struct rtc_time wtime;
switch (cmd) {
+#ifndef __alpha__
case RTC_AIE_OFF: /* Mask alarm int. enab. bit */
{
mask_rtc_irq_bit(RTC_AIE);
set_rtc_irq_bit(RTC_UIE);
return 0;
}
+#endif
case RTC_ALM_READ: /* Read the present alarm time */
{
/*
spin_unlock_irqrestore(&rtc_lock, flags);
return 0;
}
+#ifndef __alpha__
case RTC_IRQP_READ: /* Read the periodic IRQ rate. */
{
return put_user(rtc_freq, (unsigned long *)arg);
spin_unlock_irqrestore(&rtc_lock, flags);
return 0;
}
-#ifdef __alpha__
+#else
case RTC_EPOCH_READ: /* Read the epoch. */
{
return put_user (epoch, (unsigned long *)arg);
static int rtc_release(struct inode *inode, struct file *file)
{
+ unsigned long flags;
+#ifndef __alpha__
/*
* Turn off all interrupts once the device is no longer
* in use, and clear the data.
*/
unsigned char tmp;
- unsigned long flags;
spin_lock_irqsave(&rtc_lock, flags);
tmp = CMOS_READ(RTC_CONTROL);
rtc_fasync (-1, file, 0);
}
+#endif
MOD_DEC_USE_COUNT;
spin_lock_irqsave (&rtc_lock, flags);
return 0;
}
+#ifndef __alpha__
static unsigned int rtc_poll(struct file *file, poll_table *wait)
{
unsigned long l, flags;
return POLLIN | POLLRDNORM;
return 0;
}
+#endif
/*
* The various file operations we support.
static struct file_operations rtc_fops = {
llseek: rtc_llseek,
read: rtc_read,
+#ifndef __alpha__
poll: rtc_poll,
+#endif
ioctl: rtc_ioctl,
open: rtc_open,
release: rtc_release,
return -EIO;
}
+#ifndef __alpha__
if(request_irq(RTC_IRQ, rtc_interrupt, SA_INTERRUPT, "rtc", NULL))
{
/* Yeah right, seeing as irq 8 doesn't even hit the bus. */
printk(KERN_ERR "rtc: IRQ %d is not free.\n", RTC_IRQ);
return -EIO;
}
+#endif
request_region(RTC_PORT(0), RTC_IO_EXTENT, "rtc");
#endif /* __sparc__ vs. others */
if (guess)
printk("rtc: %s epoch (%lu) detected\n", guess, epoch);
#endif
+#ifndef __alpha__
init_timer(&rtc_irq_timer);
rtc_irq_timer.function = rtc_dropped_irq;
spin_lock_irqsave(&rtc_lock, flags);
/* Initialize periodic freq. to CMOS reset default, which is 1024Hz */
CMOS_WRITE(((CMOS_READ(RTC_FREQ_SELECT) & 0xF0) | 0x06), RTC_FREQ_SELECT);
spin_unlock_irqrestore(&rtc_lock, flags);
+#endif
rtc_freq = 1024;
printk(KERN_INFO "Real Time Clock Driver v" RTC_VERSION "\n");
module_exit(rtc_exit);
EXPORT_NO_SYMBOLS;
+#ifndef __alpha__
/*
* At IRQ rates >= 4096Hz, an interrupt may get lost altogether.
* (usually during an IDE disk interrupt, with IRQ unmasking off)
rtc_irq_data |= (CMOS_READ(RTC_INTR_FLAGS) & 0xF0); /* restart */
spin_unlock_irqrestore(&rtc_lock, flags);
}
+#endif
/*
* Info exported via "/proc/driver/rtc".
}
}
+#ifndef __alpha__
/*
* Used to disable/enable interrupts for any one of UIE, AIE, PIE.
* Rumour has it that if you frob the interrupt enable/disable
rtc_irq_data = 0;
spin_unlock_irqrestore(&rtc_lock, flags);
}
+#endif
* Instead I chose to add isdn_net_started() which gives the state of the
* master in case of slaves.
* I'm still not sure if this is how it's supposed to be done this way
- * because it uses test_bit(LINK_STATE_START, &dev->state) which might be
+ * because it uses netif_running(dev) which might be
* considered private to the network layer. However, it works for now.
* Alternative: set a flag in _open() and clear it in _close()
*
dev = lp->master;
else
dev = &n->dev;
- return test_bit(LINK_STATE_START, &dev->state);
+ return netif_running(dev);
}
/*
static inline void prime_rx(struct net_device *dev)
{
elp_device *adapter = dev->priv;
- while (adapter->rx_active < ELP_RX_PCBS && test_bit(LINK_STATE_START, &dev->state)) {
+ while (adapter->rx_active < ELP_RX_PCBS &&
+ netif_running(dev)) {
if (!start_receive(dev, &adapter->itx_pcb))
break;
}
case 0xff:
case CMD_RECEIVE_PACKET_COMPLETE:
/* if the device isn't open, don't pass packets up the stack */
- if (test_bit(LINK_STATE_START, &dev->state) == 0)
+ if (!netif_running(dev))
break;
len = adapter->irx_pcb.data.rcv_resp.pkt_len;
dlen = adapter->irx_pcb.data.rcv_resp.buf_len;
case CMD_TRANSMIT_PACKET_COMPLETE:
if (elp_debug >= 3)
printk("%s: interrupt - packet sent\n", dev->name);
- if (test_bit(LINK_STATE_START, &dev->state) == 0)
+ if (!netif_running(dev))
break;
switch (adapter->irx_pcb.data.xmit_resp.c_stat) {
case 0xffff:
/* If the device is closed, just return the latest stats we have,
- we cannot ask from the adapter without interrupts */
- if (!test_bit(LINK_STATE_START, &dev->state))
+ we cannot ask from the adapter without interrupts */
+ if (!netif_running(dev))
return &adapter->stats;
/* send a get statistics command to the board */
/* Acknowledge the interrupt sources. */
ack_cmd = status & 0xf000;
- if ((status & 0x0700) != 0x0200 &&
- (test_bit(LINK_STATE_START, &dev->state))) {
+ if ((status & 0x0700) != 0x0200 && netif_running(dev)) {
if (net_debug)
printk("%s: Command unit stopped, status %04x, restarting.\n",
dev->name, status);
ack_cmd |= CUC_RESUME;
}
- if ((status & 0x0070) != 0x0040 &&
- (test_bit(LINK_STATE_START, &dev->state)))
- {
+ if ((status & 0x0070) != 0x0040 && netif_running(dev)) {
static void init_rx_bufs(struct net_device *);
/* The Rx unit is not ready, it must be hung. Restart the receiver by
initializing the rx buffers, and issuing an Rx start command. */
other interrupt problems. */
if (donedidthis++ > 100) {
printk(KERN_ERR "%s: Bogus interrupt, bailing. Status %4.4x, start=%d.\n",
- dev->name, status,
- test_bit(LINK_STATE_START, &dev->state));
+ dev->name, status, netif_running(dev));
free_irq(dev->irq, dev);
}
}
(struct corkscrew_private *) dev->priv;
unsigned long flags;
- if (test_bit(LINK_STATE_START, &dev->state)) {
+ if (netif_running(dev)) {
save_flags(flags);
cli();
update_stats(dev->base_addr, dev);
if (dev == NULL) {
printk(KERN_ERR "elmc-interrupt: irq %d for unknown device.\n", (int) -(((struct pt_regs *) reg_ptr)->orig_eax + 2));
return;
- } else if (!test_bit(LINK_STATE_START, &dev->state)) {
+ } else if (!netif_running(dev)) {
/* The 3c523 has this habit of generating interrupts during the
reset. I'm not sure if the ni52 has this same problem, but it's
really annoying if we haven't finished initializing it. I was
#ifndef NO_NOPCOMMANDS
if (stat & STAT_CNA) {
/* CU went 'not ready' */
- if (test_bit(LINK_STATE_START, &dev->state)) {
+ if (netif_running(dev)) {
printk(KERN_WARNING "%s: oops! CU has left active state. stat: %04x/%04x.\n", dev->name, (int) stat, (int) p->scb->status);
}
}
struct vortex_private *vp = (struct vortex_private *)dev->priv;
unsigned long flags;
- if (test_bit(LINK_STATE_START, &dev->state)) {
+ if (netif_running(dev)) {
save_flags(flags);
cli();
update_stats(dev->base_addr, dev);
assert (tp != NULL);
- if (test_bit(LINK_STATE_START, &dev->state)) {
+ if (netif_running(dev)) {
tp->stats.rx_missed_errors += RTL_R32 (RxMissed);
RTL_W32 (RxMissed, 0);
}
}
if ((lp->cmd_head != (struct i596_cmd *) I596_NULL) &&
- (test_bit(LINK_STATE_START, &dev->state)))
+ netif_running(dev))
ack_cmd |= CUC_START;
lp->scb.cmd = WSWAPcmd(lp->cmd_head);
}
printk("%s: i596 interrupt received a frame.\n", dev->name);
/* Only RX_START if stopped - RGH 07-07-96 */
if (status & 0x1000) {
- if (test_bit(LINK_STATE_START, &dev->state))
+ if (netif_running(dev))
ack_cmd |= RX_START;
if (i596_debug > 1)
printk("%s: i596 interrupt receive unit inactive %x.\n", dev->name, status & 0x00f0);
NS8390_init(dev, 1);
/* Set the flag before we drop the lock, That way the IRQ arrives
after its set and we get no silly warnings */
- clear_bit(LINK_STATE_RXSEM, &dev->state);
netif_start_queue(dev);
spin_unlock_irqrestore(&ei_local->page_lock, flags);
ei_local->irqlock = 0;
* board has died and kick it.
*/
- if (test_bit(LINK_STATE_XOFF, &dev->state))
- { /* Do timeouts, just like the 8003 driver. */
+ if (netif_queue_stopped(dev)) {
+ /* Do timeouts, just like the 8003 driver. */
int txsr;
int isr;
int tickssofar = jiffies - dev->trans_start;
ei_local->stat.tx_errors++;
isr = inb(e8390_base+EN0_ISR);
- if (!test_bit(LINK_STATE_START, &dev->state))
- {
+ if (!netif_running(dev)) {
spin_unlock_irqrestore(&ei_local->page_lock, flags);
printk(KERN_WARNING "%s: xmit on stopped card\n", dev->name);
return 1;
return;
}
- set_bit(LINK_STATE_RXSEM, &dev->state);
-
/* Change to page 0 and read the intr status reg. */
outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
if (ei_debug > 3)
while ((interrupts = inb_p(e8390_base + EN0_ISR)) != 0
&& ++nr_serviced < MAX_SERVICE)
{
- if (!test_bit(LINK_STATE_START, &dev->state))
- {
+ if (!netif_running(dev)) {
printk(KERN_WARNING "%s: interrupt from stopped card\n", dev->name);
interrupts = 0;
break;
outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. */
}
}
- clear_bit(LINK_STATE_RXSEM, &dev->state);
spin_unlock(&ei_local->page_lock);
return;
}
unsigned long flags;
/* If the card is stopped, just return the present stats. */
- if (!test_bit(LINK_STATE_START, &dev->state))
+ if (!netif_running(dev))
return &ei_local->stat;
spin_lock_irqsave(&ei_local->page_lock,flags);
* Ultra32 EISA) appears to have this bug fixed.
*/
- if (test_bit(LINK_STATE_START, &dev->state))
+ if (netif_running(dev))
outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD);
for(i = 0; i < 8; i++)
ll->rdp = LE_C0_STRT;
}
- if (test_bit(LINK_STATE_XOFF, &dev->state) &&
- TX_BUFFS_AVAIL > 0)
+ if (netif_queue_stopped(dev) && TX_BUFFS_AVAIL > 0)
netif_wake_queue(dev);
ll->rap = LE_CSR0;
volatile struct lance_init_block *ib = lp->init_block;
volatile struct lance_regs *ll = lp->ll;
- if (!test_bit(LINK_STATE_START, &dev->state))
+ if (!netif_running(dev))
return;
if (lp->tx_old != lp->tx_new) {
* with 'testing the tx_ret_csm and setting tx_full'
* David S. Miller <davem@redhat.com>: conversion to new PCI dma mapping
* infrastructure and Sparc support
+ * Pierrick Pinasseau (CERN): For lending me an Ultra 5 to test the
+ * driver under Linux/Sparc64
*/
#include <linux/config.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>
+
#ifdef CONFIG_ACENIC_OMIT_TIGON_I
#define ACE_IS_TIGON_I(ap) 0
#else
#else
#define NET_BH 0
#define ace_mark_net_bh(foo) {do{} while(0);}
-#define ace_if_busy(dev) test_bit(LINK_STATE_XOFF, &dev->state)
-#define ace_if_running(dev) test_bit(LINK_STATE_START, &dev->state)
+#define ace_if_busy(dev) netif_queue_stopped(dev)
+#define ace_if_running(dev) netif_running(dev)
#define ace_if_down(dev) {do{} while(0);}
#endif
#define ACE_JUMBO_BUFSIZE (ACE_JUMBO_MTU + ETH_HLEN + 2+4+16)
#define DEF_TX_RATIO 24
-#define DEF_TX_COAL 1000
+/*
+ * There seems to be a magic difference in the effect between 995 and 996
+ * but little difference between 900 and 995 ... no idea why.
+ */
+#define DEF_TX_COAL 996
#define DEF_TX_MAX_DESC 40
#define DEF_RX_COAL 1000
-#define DEF_RX_MAX_DESC 20
+#define DEF_RX_MAX_DESC 25
#define TX_COAL_INTS_ONLY 0 /* seems not worth it */
#define DEF_TRACE 0
#define DEF_STAT 2 * TICKS_PER_SEC
static int dis_pci_mem_inval[8] = {1, 1, 1, 1, 1, 1, 1, 1};
static const char __initdata *version =
- "acenic.c: v0.39 02/11/2000 Jes Sorensen, linux-acenic@SunSITE.auc.dk\n"
+ "acenic.c: v0.41 02/16/2000 Jes Sorensen, linux-acenic@SunSITE.auc.dk\n"
" http://home.cern.ch/~jes/gige/acenic.html\n";
static struct net_device *root_dev = NULL;
ap->pdev = pdev;
dev->irq = pdev->irq;
-
dev->open = &ace_open;
dev->hard_start_xmit = &ace_start_xmit;
dev->stop = &ace_close;
break;
}
ap->name [sizeof (ap->name) - 1] = '\0';
- printk("Gigabit Ethernet at 0x%08lx, irq %i\n",
- dev->base_addr, dev->irq);
+ printk("Gigabit Ethernet at 0x%08lx, ", dev->base_addr);
+#ifdef __sparc__
+ printk("irq %s\n", __irq_itoa(dev->irq));
+#else
+ printk("irq %i\n", dev->irq);
+#endif
#ifdef CONFIG_ACENIC_OMIT_TIGON_I
if ((readl(&ap->regs->HostCtrl) >> 28) == 4) {
/*
* This clears any pending interrupts
*/
- writel(0, &regs->Mb0Lo);
+ writel(1, &regs->Mb0Lo);
/*
* Make sure no other CPUs are processing interrupts
kfree(ap->skb);
if (root_dev->irq)
free_irq(root_dev->irq, root_dev);
- iounmap(regs);
unregister_netdev(root_dev);
+ iounmap(regs);
kfree(root_dev);
root_dev = next;
}
-#if (LINUX_VERSION_CODE < 0x02032b)
-int init_module(void)
-{
- return ace_module_init();
-}
-
-
-void cleanup_module(void)
-{
- ace_module_cleanup();
-}
-#endif
-#endif
-
int __init ace_module_init(void)
{
int cards;
}
-#if (LINUX_VERSION_CODE >= 0x02032b)
+#if (LINUX_VERSION_CODE < 0x02032a)
+int init_module(void)
+{
+ return ace_module_init();
+}
+
+
+void cleanup_module(void)
+{
+ ace_module_cleanup();
+}
+#endif
+#endif
+
+
+#if (LINUX_VERSION_CODE >= 0x02032a)
module_init(ace_module_init);
module_exit(ace_module_cleanup);
#endif
pci_free_consistent(ap->pdev, size,
ap->rx_std_ring,
ap->rx_ring_base_dma);
+ ap->rx_std_ring = NULL;
ap->rx_jumbo_ring = NULL;
ap->rx_mini_ring = NULL;
ap->rx_return_ring = NULL;
{
struct ace_private *ap = dev->priv;
int size;
-
+
size = (sizeof(struct rx_desc) *
(RX_STD_RING_ENTRIES +
RX_JUMBO_RING_ENTRIES +
* address the `Firmware not running' problem subsequent
* to any crashes involving the NIC
*/
- writel(HW_RESET, &regs->HostCtrl);
+ writel(HW_RESET | (HW_RESET << 24), &regs->HostCtrl);
wmb();
/*
* This will most likely need BYTE_SWAP once we switch
* to using __raw_writel()
*/
- writel(((WORD_SWAP | CLR_INT) |
+ writel((WORD_SWAP | CLR_INT |
((WORD_SWAP | CLR_INT) << 24)),
&regs->HostCtrl);
#else
writel(readl(&regs->CpuBCtrl) | CPU_HALT, &regs->CpuBCtrl);
/*
* The SRAM bank size does _not_ indicate the amount
- * of memory on the card, it controls the bank size!
+ * of memory on the card, it controls the _bank_ size!
* Ie. a 1MB AceNIC will have two banks of 512KB.
*/
writel(SRAM_BANK_512K, &regs->LocalCtrl);
* value a second time works as well. This is what caused the
* `Firmware not running' problem on the Tigon II.
*/
-#ifdef __LITTLE_ENDIAN
- writel(ACE_BYTE_SWAP_DATA | ACE_WARN | ACE_FATAL |
- ACE_WORD_SWAP | ACE_NO_JUMBO_FRAG, &regs->ModeStat);
+#ifdef __BIG_ENDIAN
+ writel(ACE_BYTE_SWAP_DMA | ACE_WARN | ACE_FATAL | ACE_BYTE_SWAP_BD |
+ ACE_WORD_SWAP_BD | ACE_NO_JUMBO_FRAG, &regs->ModeStat);
#else
- writel(ACE_BYTE_SWAP_DATA | ACE_WARN | ACE_FATAL | ACE_BYTE_SWAP |
- ACE_WORD_SWAP | ACE_NO_JUMBO_FRAG, &regs->ModeStat);
+ writel(ACE_BYTE_SWAP_DMA | ACE_WARN | ACE_FATAL |
+ ACE_WORD_SWAP_BD | ACE_NO_JUMBO_FRAG, &regs->ModeStat);
#endif
+ mb();
mac1 = 0;
for(i = 0; i < 4; i++) {
*/
myjif = jiffies + 3 * HZ;
while (time_before(jiffies, myjif) && !ap->fw_running);
+
if (!ap->fw_running) {
printk(KERN_ERR "%s: Firmware NOT running!\n", dev->name);
+
ace_dump_trace(ap);
writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
}
return 0;
init_error:
+ iounmap(ap->regs);
+ unregister_netdev(dev);
if (ap->skb) {
kfree(ap->skb);
ap->skb = NULL;
printk(KERN_INFO "%s: Firmware up and running\n",
dev->name);
ap->fw_running = 1;
+ wmb();
break;
case E_STATS_UPDATED:
break;
skb = rip->skb;
rip->skb = NULL;
pci_unmap_single(ap->pdev, rip->mapping, mapsize);
- rxdesc->size = 0;
skb_put(skb, retdesc->size);
#if 0
/* unncessary */
* Mode status
*/
-#define ACE_BYTE_SWAP_DATA 0x10
+#define ACE_BYTE_SWAP_BD 0x02
+#define ACE_WORD_SWAP_BD 0x04 /* not actually used */
#define ACE_WARN 0x08
-#define ACE_WORD_SWAP 0x04
-#define ACE_BYTE_SWAP 0x02
+#define ACE_BYTE_SWAP_DMA 0x10
#define ACE_NO_JUMBO_FRAG 0x200
#define ACE_FATAL 0x40000000
#define EVT_RING_SIZE (EVT_RING_ENTRIES * sizeof(struct event))
struct event {
-#ifdef __LITTLE_ENDIAN
+#ifdef __LITTLE_ENDIAN_BITFIELD
u32 idx:12;
u32 code:12;
u32 evt:8;
#define CMD_RING_ENTRIES 64
struct cmd {
-#ifdef __LITTLE_ENDIAN
+#ifdef __LITTLE_ENDIAN_BITFIELD
u32 idx:12;
u32 code:12;
u32 evt:8;
{
struct ace_skb *skb;
struct ace_regs *regs; /* register base */
- int version, fw_running, fw_up, link;
+ volatile int fw_running;
+ int version, fw_up, link;
int promisc, mcast_all;
/*
* The send ring is located in the shared memory window
static inline void set_aceaddr(aceaddr *aa, dma_addr_t addr)
{
+ unsigned long baddr = (unsigned long) addr;
#if (BITS_PER_LONG == 64)
- aa->addrlo = addr & 0xffffffff;
- aa->addrhi = addr >> 32;
+ aa->addrlo = baddr & 0xffffffff;
+ aa->addrhi = baddr >> 32;
#else
- /* Don't bother setting zero every time */
- aa->addrlo = addr;
+ /* Don't bother setting zero every time */
+ aa->addrlo = baddr;
#endif
mb();
}
+#if 0
static inline void *get_aceaddr(aceaddr *aa)
-{
- unsigned long addr;
- mb();
-#if (BITS_PER_LONG == 64)
- addr = (u64)aa->addrhi << 32 | aa->addrlo;
-#else
- addr = aa->addrlo;
-#endif
- return bus_to_virt(addr);
-}
-
-
-static inline void *get_aceaddr_bus(aceaddr *aa)
{
unsigned long addr;
mb();
#endif
return (void *)addr;
}
+#endif
static inline void ace_set_txprd(struct ace_regs *regs,
* RESET flag was enabled - if device is not running, we must clear it right
* away (but nothing else).
*/
- if (!test_bit(LINK_STATE_START, &dev->state)) {
+ if (!netif_running(dev)) {
if (ASTATUS() & RESETflag)
ACOMMAND(CFLAGScmd | RESETclear);
AINTMASK(0);
static int set_mac_address(struct net_device *dev, void *addr)
{
int i;
- if (test_bit(LINK_STATE_START, &dev->state))
+ if (netif_running(dev))
return -EBUSY;
printk("%s: Setting MAC address to ", dev->name);
for (i = 0; i < 6; i++)
return -1;
/* Transmit descriptor ring full or stale skb */
- if (test_bit(LINK_STATE_XOFF, &dev->state) ||
- (u_long) lp->tx_skb[lp->tx_new] > 1) {
+ if (netif_queue_stopped(dev) || (u_long) lp->tx_skb[lp->tx_new] > 1) {
if (lp->interrupt) {
de4x5_putb_cache(dev, skb); /* Requeue the buffer */
} else {
de4x5_put_cache(dev, skb);
}
if (de4x5_debug & DEBUG_TX) {
- printk("%s: transmit busy, lost media or stale skb found:\n STS:%08x\n tbusy:%d\n IMR:%08x\n OMR:%08x\n Stale skb: %s\n",dev->name, inl(DE4X5_STS), test_bit(LINK_STATE_XOFF, &dev->state), inl(DE4X5_IMR), inl(DE4X5_OMR), ((u_long) lp->tx_skb[lp->tx_new] > 1) ? "YES" : "NO");
+ printk("%s: transmit busy, lost media or stale skb found:\n STS:%08x\n tbusy:%d\n IMR:%08x\n OMR:%08x\n Stale skb: %s\n",dev->name, inl(DE4X5_STS), netif_queue_stopped(dev), inl(DE4X5_IMR), inl(DE4X5_OMR), ((u_long) lp->tx_skb[lp->tx_new] > 1) ? "YES" : "NO");
}
} else if (skb->len > 0) {
/* If we already have stuff queued locally, use that first */
skb = de4x5_get_cache(dev);
}
- while (skb && !test_bit(LINK_STATE_XOFF, &dev->state) && (u_long) lp->tx_skb[lp->tx_new] <= 1) {
+ while (skb && !netif_queue_stopped(dev) &&
+ (u_long) lp->tx_skb[lp->tx_new] <= 1) {
spin_lock_irqsave(&lp->lock, flags);
netif_stop_queue(dev);
load_packet(dev, skb->data, TD_IC | TD_LS | TD_FS | skb->len, skb);
/* Load the TX ring with any locally stored packets */
if (!test_and_set_bit(0, (void *)&lp->cache.lock)) {
- while (lp->cache.skb && !test_bit(LINK_STATE_XOFF, &dev->state) && lp->tx_enable) {
+ while (lp->cache.skb && !netif_queue_stopped(dev) && lp->tx_enable) {
de4x5_queue_pkt(de4x5_get_cache(dev), dev);
}
lp->cache.lock = 0;
}
/* Any resources available? */
- if (TX_BUFFS_AVAIL && test_bit(LINK_STATE_XOFF, &dev->state)) {
+ if (TX_BUFFS_AVAIL && netif_queue_stopped(dev)) {
if (lp->interrupt)
netif_wake_queue(dev);
else
case DE4X5_SET_HWADDR: /* Set the hardware address */
if (!capable(CAP_NET_ADMIN)) return -EPERM;
if (copy_from_user(tmp.addr, ioc->data, ETH_ALEN)) return -EFAULT;
+ if (netif_queue_stopped(dev))
+ return -EBUSY;
+ netif_stop_queue(dev);
for (i=0; i<ETH_ALEN; i++) {
dev->dev_addr[i] = tmp.addr[i];
}
build_setup_frame(dev, PHYS_ADDR_ONLY);
/* Set up the descriptor and give ownership to the card */
- while (test_and_set_bit(LINK_STATE_XOFF, &dev->state) != 0)
- barrier();
load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
SETUP_FRAME_LEN, (struct sk_buff *)1);
lp->tx_new = (++lp->tx_new) % lp->txRingSize;
outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */
- netif_start_queue(dev); /* Unlock the TX ring */
+ netif_wake_queue(dev); /* Unlock the TX ring */
break;
case DE4X5_SET_PROM: /* Set Promiscuous Mode */
}
tmp.addr[j++] = lp->txRingSize;
- tmp.addr[j++] = test_bit(LINK_STATE_XOFF, &dev->state);
+ tmp.addr[j++] = netif_queue_stopped(dev);
ioc->len = j;
if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
de600_put_command(0);
select_prn();
- if (test_bit(LINK_STATE_START, &dev->state)) { /* perhaps not needed? */
+ if (netif_running(dev)) { /* perhaps not needed? */
free_irq(DE600_IRQ, dev);
MOD_DEC_USE_COUNT;
}
if (csr0 & TINT) /* Tx interrupt (packet sent) */
depca_tx (dev);
- if ((TX_BUFFS_AVAIL >= 0) && (test_bit (LINK_STATE_XOFF, &dev->flags))) { /* any resources available? */
+ /* Any resources available? */
+ if ((TX_BUFFS_AVAIL >= 0) && netif_queue_stopped(dev)) {
netif_wake_queue (dev);
/* Unmask the DEPCA board interrupts and turn off the LED */
lp->tx_last = last;
lp->tx_end = end;
- if (test_bit(LINK_STATE_XOFF, &dev->flags))
+ if (netif_queue_stopped(dev))
netif_wake_queue(dev);
/* Enable RX and TX interrupts */
/* Fire up the hardware. */
speedo_resume(dev);
- clear_bit(LINK_STATE_RXSEM, &dev->state);
netif_start_queue(dev);
/* Setup the chip and configure the multicast list. */
sp->stats.rx_fifo_errors += le32_to_cpu(sp->lstats->rx_overrun_errs);
sp->stats.rx_length_errors += le32_to_cpu(sp->lstats->rx_runt_errs);
sp->lstats->done_marker = 0x0000;
- if (test_bit(LINK_STATE_START, &dev->state)) {
+ if (netif_running(dev)) {
wait_for_cmd_done(ioaddr + SCBCmd);
outw(CUDumpStats, ioaddr + SCBCmd);
}
else
{
unsigned short txstatus = eexp_hw_lasttxstat(dev);
- if (test_bit(LINK_STATE_XOFF, &dev->state) && !txstatus)
+ if (netif_queue_stopped(dev) && !txstatus)
{
printk(KERN_WARNING "%s: CU wedged, status %04x %04x, resetting...\n",
dev->name,status,txstatus);
unsigned short tx_block = lp->tx_reap;
unsigned short status;
- if (!test_bit(LINK_STATE_XOFF, &dev->state) && lp->tx_head==lp->tx_reap)
+ if (!netif_queue_stopped(dev) && lp->tx_head==lp->tx_reap)
return 0x0000;
do
#endif
if (ep->tx_full &&
- test_bit(LINK_STATE_XOFF, &dev->flags) &&
+ netif_queue_stopped(dev) &&
dirty_tx > ep->cur_tx - TX_RING_SIZE + 2) {
/* The ring is no longer full, clear tbusy. */
ep->tx_full = 0;
struct epic_private *ep = (struct epic_private *)dev->priv;
long ioaddr = dev->base_addr;
- if (test_bit(LINK_STATE_START, &dev->state)) {
+ if (netif_running(dev)) {
/* Update the error counts. */
ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
data[0] = ((struct epic_private *)dev->priv)->phys[0] & 0x1f;
/* Fall Through */
case SIOCDEVPRIVATE+1: /* Read the specified MII register. */
- if (! test_bit(LINK_STATE_START, &dev->state)) {
+ if (! netif_running(dev)) {
outl(0x0200, ioaddr + GENCTL);
outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
}
data[3] = mdio_read(ioaddr, data[0] & 0x1f, data[1] & 0x1f);
- if (! test_bit(LINK_STATE_START, &dev->state)) {
+ if (! netif_running(dev)) {
#ifdef notdef
outl(0x0008, ioaddr + GENCTL);
outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
case SIOCDEVPRIVATE+2: /* Write the specified MII register */
if (!suser())
return -EPERM;
- if (! test_bit(LINK_STATE_START, &dev->state)) {
+ if (! netif_running(dev)) {
outl(0x0200, ioaddr + GENCTL);
outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
}
mdio_write(ioaddr, data[0] & 0x1f, data[1] & 0x1f, data[2]);
- if (! test_bit(LINK_STATE_START, &dev->state)) {
+ if (! netif_running(dev)) {
#ifdef notdef
outl(0x0008, ioaddr + GENCTL);
outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
struct sixpack *sp = (struct sixpack *) tty->disc_data;
/* First make sure we're connected. */
- if (!sp || sp->magic != SIXPACK_MAGIC || !test_bit(LINK_STATE_START, &sp->dev->state)) {
+ if (!sp || sp->magic != SIXPACK_MAGIC ||
+ !netif_running(sp->dev)) {
return;
}
struct sixpack *sp = (struct sixpack *) tty->disc_data;
- if (!sp || sp->magic != SIXPACK_MAGIC || !test_bit(LINK_STATE_START, &sp->dev->state) || !count)
+ if (!sp || sp->magic != SIXPACK_MAGIC ||
+ !netif_running(sp->dev) || !count)
return;
save_flags(flags);
* VSV = if dev->start==0, then device
* unregistered while close proc.
*/
- if (test_bit(LINK_STATE_START, &sixpack_ctrls[i]->dev.state))
+ if (netif_running(sixpack_ctrls[i]->dev))
unregister_netdev(&(sixpack_ctrls[i]->dev));
kfree(sixpack_ctrls[i]);
break;
case HDLCDRVCTL_SETMODEMPAR:
- if ((!suser()) || test_bit(LINK_STATE_START, &dev->state))
+ if ((!suser()) || netif_running(dev))
return -EACCES;
dev->base_addr = hi.data.mp.iobase;
dev->irq = /*hi.data.mp.irq*/0;
break;
case HDLCDRVCTL_SETMODE:
- if (!suser() || test_bit(LINK_STATE_START, &dev->state))
+ if (!suser() || netif_running(dev))
return -EACCES;
hi.data.modename[sizeof(hi.data.modename)-1] = '\0';
return baycom_setmode(bc, hi.data.modename);
return 0;
case HDLCDRVCTL_SETMODE:
- if (test_bit(LINK_STATE_START, &dev->state) || !suser())
+ if (netif_running(dev) || !suser())
return -EACCES;
hi->data.modename[sizeof(hi->data.modename)-1] = '\0';
return baycom_setmode(bc, hi->data.modename);
return 0;
case HDLCDRVCTL_SETMODE:
- if (test_bit(LINK_STATE_START, &dev->state) || !suser())
+ if (netif_running(dev) || !suser())
return -EACCES;
hi->data.modename[sizeof(hi->data.modename)-1] = '\0';
return baycom_setmode(bc, hi->data.modename);
return 0;
case HDLCDRVCTL_SETMODE:
- if (test_bit(LINK_STATE_START, &dev->state) || !suser())
+ if (netif_running(dev) || !suser())
return -EACCES;
hi->data.modename[sizeof(hi->data.modename)-1] = '\0';
return baycom_setmode(bc, hi->data.modename);
dev = bpq_get_ax25_dev(dev);
- if (dev == NULL || !test_bit(LINK_STATE_START, &dev->state)) {
+ if (dev == NULL || !netif_running(dev)) {
kfree_skb(skb);
return 0;
}
* Just to be *really* sure not to send anything if the interface
* is down, the ethernet device may have gone.
*/
- if (!test_bit(LINK_STATE_START, &dev->state))
- {
+ if (!netif_running(dev)) {
bpq_check_devices(dev);
kfree_skb(skb);
return -ENODEV;
case SIOCSSCCPARAM:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- if (test_bit(LINK_STATE_START, &dev->state))
+ if (netif_running(dev))
return -EAGAIN;
if(copy_from_user(&priv->param, ifr->ifr_data, sizeof(struct scc_param)))
return -EFAULT;
break;
case HDLCDRVCTL_SETMODEMPAR:
- if ((!suser()) || test_bit(LINK_STATE_START, &dev->state))
+ if ((!suser()) || netif_running(dev))
return -EACCES;
dev->base_addr = bi.data.mp.iobase;
dev->irq = bi.data.mp.irq;
return 0;
case HDLCDRVCTL_SETMODE:
- if (test_bit(LINK_STATE_START, &dev->state) || !suser())
+ if (netif_running(dev) || !suser())
return -EACCES;
hi->data.modename[sizeof(hi->data.modename)-1] = '\0';
return sethw(dev, sm, hi->data.modename);
for (i = 0; i < NR_PORTS; i++) {
struct net_device *dev = &yam_ports[i].dev;
- if (test_bit(LINK_STATE_START, &dev->state))
+ if (netif_running(dev))
yam_arbitrate(dev);
}
yam_timer.expires = jiffies + HZ / 100;
yp = &yam_ports[i];
dev = &yp->dev;
- if (!test_bit(LINK_STATE_START, &dev->state))
+ if (!netif_running(dev))
continue;
while ((iir = IIR_MASK & inb(IIR(dev->base_addr))) != IIR_NOPEND) {
if (yam_ports[i].iobase == 0 || yam_ports[i].irq == 0)
continue;
len += sprintf(buffer + len, "Device %s\n", yam_ports[i].name);
- len += sprintf(buffer + len, " Up %d\n", test_bit(LINK_STATE_START, &yam_ports[i].dev.state));
+ len += sprintf(buffer + len, " Up %d\n", netif_running(&yam_ports[i].dev));
len += sprintf(buffer + len, " Speed %u\n", yam_ports[i].bitrate);
len += sprintf(buffer + len, " IoBase 0x%x\n", yam_ports[i].iobase);
len += sprintf(buffer + len, " BaudRate %u\n", yam_ports[i].baudrate);
return -EINVAL; /* unused */
case SIOCYAMSMCS:
- if (test_bit(LINK_STATE_START, &dev->state))
+ if (netif_running(dev))
return -EINVAL; /* Cannot change this parameter when up */
ym = kmalloc(sizeof(struct yamdrv_ioctl_mcs), GFP_ATOMIC);
ym->bitrate = 9600;
if (copy_from_user(&yi, ifr->ifr_data, sizeof(struct yamdrv_ioctl_cfg)))
return -EFAULT;
- if ((yi.cfg.mask & YAM_IOBASE) && test_bit(LINK_STATE_START, &dev->state))
+ if ((yi.cfg.mask & YAM_IOBASE) && netif_running(dev))
return -EINVAL; /* Cannot change this parameter when up */
- if ((yi.cfg.mask & YAM_IRQ) && test_bit(LINK_STATE_START, &dev->state))
+ if ((yi.cfg.mask & YAM_IRQ) && netif_running(dev))
return -EINVAL; /* Cannot change this parameter when up */
- if ((yi.cfg.mask & YAM_BITRATE) && test_bit(LINK_STATE_START, &dev->state))
+ if ((yi.cfg.mask & YAM_BITRATE) && netif_running(dev))
return -EINVAL; /* Cannot change this parameter when up */
- if ((yi.cfg.mask & YAM_BAUDRATE) && test_bit(LINK_STATE_START, &dev->state))
+ if ((yi.cfg.mask & YAM_BAUDRATE) && netif_running(dev))
return -EINVAL; /* Cannot change this parameter when up */
if (yi.cfg.mask & YAM_IOBASE) {
struct net_device *dev = &yam_ports[i].dev;
if (!dev->priv)
continue;
- if (test_bit(LINK_STATE_START, &dev->state))
+ if (netif_running(dev))
yam_close(dev);
unregister_netdev(dev);
}
#endif
if (lp->tx_full &&
- (test_bit(LINK_STATE_XOFF, &dev->flags)) &&
+ (netif_queue_stopped(dev)) &&
dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) {
/* The ring is no longer full, clear tbusy. */
lp->tx_full = 0;
static int eth_mac_addr(struct net_device *dev, void *p)
{
struct sockaddr *addr=p;
- if(test_bit(LINK_STATE_START, &dev->state))
+ if (netif_running(dev))
return -EBUSY;
memcpy(dev->dev_addr, addr->sa_data,dev->addr_len);
return 0;
static int hippi_mac_addr(struct net_device *dev, void *p)
{
struct sockaddr *addr = p;
- if(test_bit(LINK_STATE_START, &dev->state))
+ if (netif_running(dev))
return -EBUSY;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
return 0;
#ifndef NO_NOPCOMMANDS
if(stat & STAT_CNA) /* CU went 'not ready' */
{
- if(test_bit(LINK_STATE_START, &dev->state))
+ if(netif_running(dev))
printk("%s: oops! CU has left active state. stat: %04x/%02x.\n",dev->name,(int) stat,(int) p->scb->cus);
}
#endif
case CS_EVENT_CARD_REMOVAL:
link->state &= ~DEV_PRESENT;
if (link->state & DEV_CONFIG) {
- netif_stop_queue (dev);
- clear_bit(LINK_STATE_START, &dev->state);
+ netif_device_detach(dev);
link->release.expires = jiffies + HZ/20;
add_timer(&link->release);
}
/* Fall through... */
case CS_EVENT_RESET_PHYSICAL:
if (link->state & DEV_CONFIG) {
- if (link->open) {
- netif_stop_queue (dev);
- clear_bit(LINK_STATE_START, &dev->state);
- }
+ if (link->open)
+ netif_device_detach(dev);
+
CardServices(ReleaseConfiguration, link->handle);
}
break;
CardServices(RequestConfiguration, link->handle, &link->conf);
if (link->open) {
tc574_reset(dev);
- set_bit(LINK_STATE_START, &dev->state);
- netif_start_queue (dev);
+ netif_device_attach(dev);
}
}
break;
ioaddr_t ioaddr, status;
int work_budget = max_interrupt_work;
- if (!test_bit(LINK_STATE_START, &dev->state))
+ if (!netif_device_present(dev))
return;
spin_lock (&lp->lock);
while ((status = inw(ioaddr + EL3_STATUS)) &
(IntLatch | RxComplete | RxEarly | StatsFull)) {
- if (!test_bit(LINK_STATE_START, &dev->state) ||
- ((status & 0xe000) != 0x2000)) {
+ if (!netif_device_present(dev) || ((status & 0xe000) != 0x2000)) {
DEBUG(1, "%s: Interrupt from dead card\n", dev->name);
break;
}
u_long flags;
u_short /* cable, */ media, partner;
- if (!test_bit(LINK_STATE_START, &dev->state))
+ if (!netif_device_present(dev))
goto reschedule;
/* Check for pending interrupt with expired latency timer: with
{
struct el3_private *lp = (struct el3_private *)dev->priv;
- if (test_bit(LINK_STATE_START, &dev->state))
+ if (netif_device_present(dev))
update_stats(dev);
return &lp->stats;
}
long ioaddr = dev->base_addr;
int i;
- if (test_bit(LINK_STATE_START, &dev->state))
+ if (netif_device_present(dev))
vortex_down(dev);
if (vortex_debug > 1) {
struct vortex_private *vp = (struct vortex_private *)dev->priv;
unsigned long flags;
- if (test_bit(LINK_STATE_START, &dev->state)) {
+ if (netif_device_present(dev)) {
spin_lock_irqsave (&vp->lock, flags);
update_stats(dev->base_addr, dev);
spin_unlock_irqrestore (&vp->lock, flags);
case CS_EVENT_CARD_REMOVAL:
link->state &= ~DEV_PRESENT;
if (link->state & DEV_CONFIG) {
- netif_stop_queue (dev);
- clear_bit(LINK_STATE_START, &dev->state);
+ netif_device_detach(dev);
link->release.expires = jiffies + HZ/20;
add_timer(&link->release);
}
/* Fall through... */
case CS_EVENT_RESET_PHYSICAL:
if (link->state & DEV_CONFIG) {
- if (link->open) {
- netif_stop_queue (dev);
- clear_bit(LINK_STATE_START, &dev->state);
- }
+ if (link->open)
+ netif_device_detach(dev);
+
CardServices(ReleaseConfiguration, link->handle);
}
break;
CardServices(RequestConfiguration, link->handle, &link->conf);
if (link->open) {
tc589_reset(dev);
- set_bit(LINK_STATE_START, &dev->state);
- netif_start_queue (dev);
+ netif_device_attach(dev);
}
}
break;
ioaddr_t ioaddr, status;
int i = 0;
- if (!test_bit(LINK_STATE_START, &dev->state))
+ if (!netif_device_present(dev))
return;
ioaddr = dev->base_addr;
while ((status = inw(ioaddr + EL3_STATUS)) &
(IntLatch | RxComplete | StatsFull)) {
- if (!test_bit(LINK_STATE_START, &dev->state) ||
+ if (!netif_device_present(dev) ||
((status & 0xe000) != 0x2000)) {
DEBUG(1, "%s: interrupt from dead card\n", dev->name);
break;
u_short media, errs;
u_long flags;
- if (!test_bit(LINK_STATE_START, &dev->state))
+ if (!netif_device_present(dev))
goto reschedule;
EL3WINDOW(1);
case CS_EVENT_CARD_REMOVAL:
link->state &= ~DEV_PRESENT;
if (link->state & DEV_CONFIG) {
- netif_stop_queue (dev);
+ netif_device_detach(dev);
link->release.expires = RUN_AT( HZ/20 );
add_timer(&link->release);
}
/* Fall through... */
case CS_EVENT_RESET_PHYSICAL:
if (link->state & DEV_CONFIG) {
- if (link->open) {
- netif_stop_queue (dev);
- }
+ if (link->open)
+ netif_device_detach(dev);
+
CardServices(ReleaseConfiguration, link->handle);
}
break;
CardServices(RequestConfiguration, link->handle, &link->conf);
if (link->open) {
// awc_reset(dev);
- netif_start_queue (dev);
+ netif_device_attach(dev);
}
}
break;
// awc_detach(dev_list);
}
-
\ No newline at end of file
+
case CS_EVENT_CARD_REMOVAL:
link->state &= ~DEV_PRESENT;
if (link->state & DEV_CONFIG) {
- netif_stop_queue (dev);
- clear_bit(LINK_STATE_START, &dev->state);
+ netif_device_detach(dev);
link->release.expires = jiffies + HZ/20;
add_timer(&link->release);
}
/* Fall through... */
case CS_EVENT_RESET_PHYSICAL:
if (link->state & DEV_CONFIG) {
- if (link->open) {
- netif_stop_queue (dev);
- clear_bit(LINK_STATE_START, &dev->state);
- }
+ if (link->open)
+ netif_device_detach(dev);
+
CardServices(ReleaseConfiguration, link->handle);
}
break;
CardServices(RequestConfiguration, link->handle, &link->conf);
if (link->open) {
fjn_reset(dev);
- set_bit(LINK_STATE_START, &dev->state);
- netif_start_queue (dev);
+ netif_device_attach(dev);
}
}
break;
case CS_EVENT_CARD_REMOVAL:
link->state &= ~DEV_PRESENT;
if (link->state & DEV_CONFIG) {
- netif_stop_queue (dev);
- clear_bit(LINK_STATE_START, &dev->state);
+ netif_device_detach(dev);
link->release.expires = jiffies + 5;
add_timer(&link->release);
}
/* Fall through... */
case CS_EVENT_RESET_PHYSICAL:
if (link->state & DEV_CONFIG) {
- if (link->open) {
- netif_stop_queue (dev);
- clear_bit(LINK_STATE_START, &dev->state);
- }
+ if (link->open)
+ netif_device_detach(dev);
+
CardServices(ReleaseConfiguration, link->handle);
}
break;
CardServices(RequestConfiguration, link->handle, &link->conf);
if (link->open) {
netwave_reset(dev);
- set_bit(LINK_STATE_START, &dev->state);
- netif_start_queue (dev);
+ netif_device_attach(dev);
}
}
break;
dev_link_t *link = &priv->link;
int i;
- if ((dev == NULL) || !test_bit(LINK_STATE_START, &dev->state))
+ if ((dev == NULL) || !netif_device_present(dev))
return;
spin_lock (&priv->lock);
case CS_EVENT_CARD_REMOVAL:
link->state &= ~DEV_PRESENT;
if (link->state & DEV_CONFIG) {
- netif_stop_queue (dev);
+ netif_device_detach(dev);
link->release.expires = jiffies + HZ/20;
add_timer(&link->release);
}
/* Fall through... */
case CS_EVENT_RESET_PHYSICAL:
if (link->state & DEV_CONFIG) {
- if (link->open) {
- netif_stop_queue (dev);
- }
+ if (link->open)
+ netif_device_detach(dev);
+
CardServices(ReleaseConfiguration, link->handle);
}
break;
CardServices(RequestConfiguration, link->handle, &link->conf);
if (link->open) {
nmclan_reset(dev);
- netif_start_queue (dev);
+ netif_device_attach(dev);
}
}
break;
return;
}
- if (!test_bit(LINK_STATE_START, &dev->state)) {
+ if (!netif_device_present(dev)) {
DEBUG(2, "%s: interrupt from dead card\n", dev->name);
goto exception;
}
case CS_EVENT_CARD_REMOVAL:
link->state &= ~DEV_PRESENT;
if (link->state & DEV_CONFIG) {
- netif_stop_queue(&info->dev);
- clear_bit(LINK_STATE_START, &info->dev.state);
+ netif_device_detach(&info->dev);
link->release.expires = jiffies + HZ/20;
link->state |= DEV_RELEASE_PENDING;
add_timer(&link->release);
/* Fall through... */
case CS_EVENT_RESET_PHYSICAL:
if (link->state & DEV_CONFIG) {
- if (link->open) {
- netif_stop_queue(&info->dev);
- clear_bit(LINK_STATE_START, &info->dev.state);
- }
+ if (link->open)
+ netif_device_detach(&info->dev);
+
CardServices(ReleaseConfiguration, link->handle);
}
break;
if (link->open) {
pcnet_reset_8390(&info->dev);
NS8390_init(&info->dev, 1);
- netif_start_queue(&info->dev);
- set_bit(LINK_STATE_START, &info->dev.state);
+ netif_device_attach(&info->dev);
}
}
break;
struct net_device *dev = &info->dev;
ioaddr_t nic_base = dev->base_addr;
- if (!test_bit(LINK_STATE_START, &dev->state))
+ if (!netif_device_present(dev))
goto reschedule;
/* Check for pending interrupt with expired latency timer: with
switch (event) {
case CS_EVENT_CARD_REMOVAL:
link->state &= ~DEV_PRESENT;
- netif_stop_queue(dev);
- clear_bit(LINK_STATE_START, &dev->state);
+ netif_device_detach(dev);
if (link->state & DEV_CONFIG) {
link->release.expires = jiffies + HZ/20;
add_timer(&link->release);
/* Fall through... */
case CS_EVENT_RESET_PHYSICAL:
if (link->state & DEV_CONFIG) {
- if (link->open) {
- netif_stop_queue(dev);
- clear_bit(LINK_STATE_START, &dev->state);
- }
+ if (link->open)
+ netif_device_detach(dev);
+
pcmcia_release_configuration(link->handle);
}
break;
pcmcia_request_configuration(link->handle, &link->conf);
if (link->open) {
ray_reset(dev);
- netif_start_queue(dev);
- set_bit(LINK_STATE_START, &dev->state);
+ netif_device_attach(dev);
}
}
break;
case CS_EVENT_CARD_REMOVAL:
link->state &= ~DEV_PRESENT;
if (link->state & DEV_CONFIG) {
- netif_stop_queue (dev);
- clear_bit(LINK_STATE_START, &dev->state);
+ netif_device_detach(dev);
link->release.expires = jiffies + HZ/20;
link->state |= DEV_RELEASE_PENDING;
add_timer(&link->release);
/* Fall through... */
case CS_EVENT_RESET_PHYSICAL:
if (link->state & DEV_CONFIG) {
- if (link->open) {
- netif_stop_queue (dev);
- clear_bit(LINK_STATE_START, &dev->state);
- }
+ if (link->open)
+ netif_device_detach(dev);
+
CardServices(ReleaseConfiguration, link->handle);
}
break;
}
if (link->open) {
smc_reset(dev);
- set_bit(LINK_STATE_START, &dev->state);
- netif_start_queue (dev);
+ netif_device_attach(dev);
}
}
break;
u_short saved_bank, saved_pointer, mask, status;
char bogus_cnt = INTR_WORK; /* Work we are willing to do. */
- if ((smc == NULL) || !test_bit(LINK_STATE_START, &dev->state))
+ if ((smc == NULL) || !netif_device_present(dev))
return;
ioaddr = dev->base_addr;
ioaddr_t ioaddr = dev->base_addr;
u_short i, media, saved_bank;
- if (!test_bit(LINK_STATE_START, &dev->state))
+ if (!netif_device_present(dev))
goto reschedule;
saved_bank = inw(ioaddr + BANK_SELECT);
printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
dev->name, inl(ioaddr + CSR5));
- if (test_bit(LINK_STATE_START, &dev->state))
+ if (netif_device_present(dev))
tulip_down(dev);
free_irq(dev->irq, dev);
struct tulip_private *tp = (struct tulip_private *)dev->priv;
long ioaddr = dev->base_addr;
- if (test_bit(LINK_STATE_START, &dev->state))
+ if (netif_device_present(dev))
tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
return &tp->stats;
lp->reconfig_82593 = FALSE;
wv_82593_config (dev);
- netif_start_queue (dev);
+ netif_wake_queue (dev);
}
}
MOD_DEC_USE_COUNT;
/* If the card is still present */
- if (test_bit(LINK_STATE_START, &dev->state))
+ if (netif_device_present(dev))
{
netif_stop_queue (dev);
if(link->state & DEV_CONFIG)
{
/* Accept no more transmissions */
- netif_stop_queue (dev);
- clear_bit(LINK_STATE_START, &dev->state);
+ netif_device_detach(dev);
/* Release the card */
wv_pcmcia_release((u_long) link);
if(link->state & DEV_CONFIG)
{
if(link->open)
- {
- netif_stop_queue (dev);
- clear_bit(LINK_STATE_START, &dev->state);
- }
+ netif_device_detach(dev);
+
CardServices(ReleaseConfiguration, link->handle);
}
break;
if(link->open) /* If RESET -> True, If RESUME -> False ??? */
{
wv_hw_reset(dev);
- set_bit(LINK_STATE_START, &dev->state);
- netif_start_queue (dev);
+ netif_device_attach(dev);
}
}
break;
case CS_EVENT_CARD_REMOVAL:
link->state &= ~DEV_PRESENT;
if (link->state & DEV_CONFIG) {
- netif_stop_queue(dev);
- clear_bit(LINK_STATE_START, &dev->state);
+ netif_device_detach(dev);
link->release.expires = jiffies + HZ / 20;
add_timer(&link->release);
}
case CS_EVENT_RESET_PHYSICAL:
if (link->state & DEV_CONFIG) {
if (link->open) {
- netif_stop_queue(dev);
- clear_bit(LINK_STATE_START, &dev->state);
+ netif_device_detach(dev);
lp->suspended=1;
do_powerdown(dev);
}
if (link->open) {
do_reset(dev,1);
lp->suspended=0;
- set_bit(LINK_STATE_START, &dev->state);
- netif_start_queue(dev);
+ netif_device_attach(dev);
}
}
break;
*/
spin_lock (&lp->lock);
- if (!test_bit(LINK_STATE_START, &dev->state))
+ if (!netif_device_present(dev))
return;
ioaddr = dev->base_addr;
}
#endif
if (lp->tx_full &&
- test_bit(LINK_STATE_XOFF, &dev->flags) &&
- dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) {
+ netif_queue_stopped(dev) &&
+ dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) {
/* The ring is no longer full, clear tbusy. */
lp->tx_full = 0;
netif_wake_queue (dev);
struct net_local *nl = (struct net_local *)dev->priv;
struct plip_local *snd = &nl->snd_data;
- if (test_bit(LINK_STATE_XOFF, &dev->flags))
+ if (netif_queue_stopped(dev))
return 1;
/* We may need to grab the bus */
rtl8129_interrupt(dev->irq, dev, 0);
}
}
- if (test_bit(LINK_STATE_XOFF, &dev->state) &&
+ if (netif_queue_stopped(dev) &&
(jiffies - dev->trans_start) >= 2*TX_TIMEOUT)
rtl8129_tx_timeout(dev);
struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
long ioaddr = dev->base_addr;
- if (test_bit(LINK_STATE_START, &dev->state)) {
+ if (netif_running(dev)) {
tp->stats.rx_missed_errors += inl(ioaddr + RxMissed);
outl(0, ioaddr + RxMissed);
}
sis_priv->tx_ring[entry].cmdsts = 0;
}
- if (sis_priv->tx_full && test_bit(LINK_STATE_XOFF, &net_dev->flags) &&
+ if (sis_priv->tx_full && netif_queue_stopped(net_dev) &&
sis_priv->cur_tx - sis_priv->dirty_tx < NUM_TX_DESC - 4) {
/* The ring is no longer full, clear tx_full and schedule more transmission
by netif_wake_queue(net_dev) */
// #define RLMT_MODE {"CheckLink", }
-#define DEV_KFREE_SKB(skb) dev_kfree_skb(skb);
+#define DEV_KFREE_SKB(skb) dev_kfree_skb(skb)
+#define DEV_KFREE_SKB_IRQ(skb) dev_kfree_skb_irq(skb)
/* function prototypes ******************************************************/
static void FreeResources(struct net_device *dev);
pci_set_master(pdev);
+#ifdef __sparc__
+ /* Set the proper cache line size value, plus enable
+ * write-invalidate and fast back-to-back on Sparc.
+ */
+ {
+ SK_U16 pci_command;
+
+ SkPciWriteCfgByte(pAC, PCI_CACHE_LINE_SIZE, 0x10);
+
+ SkPciReadCfgWord(pAC, PCI_COMMAND, &pci_command);
+ pci_command |= (PCI_COMMAND_INVALIDATE | PCI_COMMAND_FAST_BACK);
+ SkPciWriteCfgWord(pAC, PCI_COMMAND, pci_command);
+ }
+#endif
+
base_address = pdev->resource[0].start;
#ifdef SK_BIG_ENDIAN
Rc = XmitFrame(pAC, &pAC->TxPort[pAC->ActivePort][TX_PRIO_LOW], skb);
- if (Rc == 0) {
- /* transmitter out of resources */
+ /* Transmitter out of resources? */
+ if (Rc <= 0)
netif_stop_queue(dev);
- /* give buffer ownership back to the queueing layer */
+ /* If not taken, give buffer ownership back to the
+ * queueing layer.
+ */
+ if (Rc < 0)
return (1);
- }
+
dev->trans_start = jiffies;
return (0);
} /* SkGeXmit */
* > 0 - on succes: the number of bytes in the message
* = 0 - on resource shortage: this frame sent or dropped, now
* the ring is full ( -> set tbusy)
- * < 0 - on failure: other problems (not used)
+ * < 0 - on failure: other problems ( -> return failure to upper layers)
*/
static int XmitFrame(
SK_AC *pAC, /* pointer to adapter context */
SK_DBGCAT_DRV_TX_PROGRESS,
("XmitFrame failed\n"));
/* this message can not be sent now */
- return (0);
+ return (-1);
}
}
/* advance head counter behind descriptor needed for this frame */
pci_unmap_single(&pAC->PciDev, PhysAddr,
pTxd->pMBuf->len);
- DEV_KFREE_SKB(pTxd->pMBuf); /* free message */
+ /* free message */
+ if (in_irq())
+ DEV_KFREE_SKB_IRQ(pTxd->pMBuf);
+ else
+ DEV_KFREE_SKB(pTxd->pMBuf); /* free message */
pTxPort->TxdRingFree++;
pTxd->TBControl &= ~TX_CTRL_SOFTWARE;
pTxd = pTxd->pNextTxd; /* point behind fragment with EOF */
/* hardware checksum */
Type = ntohs(*((short*)&pMsg->data[12]));
if (Type == 0x800) {
- Csum1= pRxd->TcpSums & 0xffff;
- Csum2=(pRxd->TcpSums >> 16) & 0xffff;
+ Csum1=le16_to_cpu(pRxd->TcpSums & 0xffff);
+ Csum2=le16_to_cpu((pRxd->TcpSums >> 16) & 0xffff);
if ((Csum1 & 0xfffe) && (Csum2 & 0xfffe)) {
Result = SkCsGetReceiveInfo(pAC,
&pMsg->data[14],
SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
SK_DBGCAT_DRV_RX_PROGRESS,
("D"));
- DEV_KFREE_SKB(pMsg);
+ DEV_KFREE_SKB_IRQ(pMsg);
}
} /* if not for rlmt */
else {
pAC->dev->last_rx = jiffies;
}
else {
- DEV_KFREE_SKB(pMsg);
+ DEV_KFREE_SKB_IRQ(pMsg);
}
} /* if packet for rlmt */
("skge: Error in received frame, dropped!\n"
"Control: %x\nRxStat: %x\n",
Control, FrameStat));
- DEV_KFREE_SKB(pMsg);
+ DEV_KFREE_SKB_IRQ(pMsg);
}
} /* while */
FillRxRing(pAC, pRxPort);
pci_unmap_single(&pAC->PciDev,
PhysAddr,
pAC->RxBufSize - 2);
- DEV_KFREE_SKB(pRxd->pMBuf);
+ DEV_KFREE_SKB_IRQ(pRxd->pMBuf);
pRxd->pMBuf = NULL;
pRxPort->RxdRingFree++;
pRxPort->pRxdRingHead = pRxd->pNextRxd;
SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY,
("SkGeSetMacAddr starts now...\n"));
- if(test_bit(LINK_STATE_START, &dev->state)) {
+ if(netif_running(dev)) {
return -EBUSY;
}
memcpy(dev->dev_addr, addr->sa_data,dev->addr_len);
pFreeMbuf = pMbuf;
do {
pNextMbuf = pFreeMbuf->pNext;
- DEV_KFREE_SKB(pFreeMbuf->pOs);
+ if (in_irq())
+ DEV_KFREE_SKB_IRQ(pFreeMbuf->pOs);
+ else
+ DEV_KFREE_SKB(pFreeMbuf->pOs);
pFreeMbuf = pNextMbuf;
} while ( pFreeMbuf != NULL );
} /* SkDrvFreeRlmtMbuf */
pRlmtMbuf = (SK_MBUF*) Param.pParaPtr;
pMsg = (struct sk_buff*) pRlmtMbuf->pOs;
skb_put(pMsg, pRlmtMbuf->Length);
- XmitFrame(pAC, &pAC->TxPort[pRlmtMbuf->PortIdx][TX_PRIO_LOW],
- pMsg);
+ if (XmitFrame(pAC, &pAC->TxPort[pRlmtMbuf->PortIdx][TX_PRIO_LOW],
+ pMsg) <= 0)
+ DEV_KFREE_SKB(pMsg);
break;
default:
break;
spin_lock_irq(&np->lock);
add_to_tx_ring(np, skb, length);
- dev->trans_start = jiffied;
+ dev->trans_start = jiffies;
/* If we just used up the very last entry in the
* TX ring on this device, tell the queueing
* condition, and space has now been made available,
* wake up the queue.
*/
- if (test_bit(LINK_STATE_XOFF, &dev->state) &&
- ! tx_full(dev))
+ if (netif_queue_stopped(dev) && ! tx_full(dev))
netif_wake_queue(dev);
spin_unlock(&np->lock);
struct slip *sl = (struct slip *) tty->disc_data;
/* First make sure we're connected. */
- if (!sl || sl->magic != SLIP_MAGIC || !test_bit(LINK_STATE_START, &sl->dev->state)) {
+ if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev)) {
return;
}
if (sl->xleft <= 0) {
spin_lock(&sl->lock);
- if (test_bit(LINK_STATE_XOFF, &dev->state)) {
+ if (netif_queue_stopped(dev)) {
struct slip *sl = (struct slip*)(dev->priv);
- if (!test_bit(LINK_STATE_START, &dev->state))
+ if (!netif_running(dev))
goto out;
/* May be we must check transmitter timeout here ?
struct slip *sl = (struct slip*)(dev->priv);
spin_lock(&sl->lock);
- if (!test_bit(LINK_STATE_START, &dev->state)) {
+ if (!netif_running(dev)) {
spin_unlock(&sl->lock);
printk("%s: xmit call when iface is down\n", dev->name);
dev_kfree_skb(skb);
struct slip *sl = (struct slip *) tty->disc_data;
if (!sl || sl->magic != SLIP_MAGIC ||
- !test_bit(LINK_STATE_START, &sl->dev->state))
+ !netif_running(sl->dev))
return;
/* Read the characters out of the buffer */
unsigned char s = END;
#endif
/* put END into tty queue. Is it right ??? */
- if (!test_bit(LINK_STATE_XOFF, &sl->dev->state))
+ if (!netif_queue_stopped(sl->dev))
{
/* if device busy no outfill */
sl->tty->driver.write(sl->tty, 0, &s, 1);
/* First make sure we're connected. */
if (!strip_info || strip_info->magic != STRIP_MAGIC ||
- !test_bit(LINK_STATE_START, &strip_info->dev.state))
+ !netif_running(&strip_info->dev))
return;
if (strip_info->tx_left > 0)
{
struct strip *strip_info = (struct strip *)(dev->priv);
- if (!test_bit(LINK_STATE_START, &dev->state))
+ if (!netif_running(dev))
{
printk(KERN_ERR "%s: xmit call when iface is down\n", dev->name);
return(1);
const unsigned char *end = cp + count;
if (!strip_info || strip_info->magic != STRIP_MAGIC
- || !test_bit(LINK_STATE_START, &strip_info->dev.state))
+ || !netif_running(&strip_info->dev))
return;
/* Argh! mtu change time! - costs us the packet part received at the change */
-/* $Id: sunbmac.c,v 1.15 2000/02/10 21:14:22 davem Exp $
+/* $Id: sunbmac.c,v 1.16 2000/02/16 10:36:18 davem Exp $
* sunbmac.c: Driver for Sparc BigMAC 100baseT ethernet adapters.
*
* Copyright (C) 1997, 1998, 1999 David S. Miller (davem@redhat.com)
DTX((" DONE, tx_old=%d\n", elem));
bp->tx_old = elem;
- if (test_bit(LINK_STATE_XOFF, &dev->state) &&
+ if (netif_queue_stopped(dev) &&
TX_BUFFS_AVAIL(bp) > 0)
netif_wake_queue(bp->dev);
-/* $Id: sunhme.c,v 1.87 2000/02/10 21:14:22 davem Exp $
+/* $Id: sunhme.c,v 1.90 2000/02/16 10:36:16 davem Exp $
* sunhme.c: Sparc HME/BigMac 10/100baseT half/full duplex auto switching,
* auto carrier detecting ethernet driver. Also known as the
* "Happy Meal Ethernet" found on SunSwift SBUS cards.
#define DEFAULT_IPG2 4 /* For all modes */
#define DEFAULT_JAMSIZE 4 /* Toe jam */
+/* NOTE: In the descriptor writes one _must_ write the address
+ * member _first_. The card must not be allowed to see
+ * the updated descriptor flags until the address is
+ * correct. I've added a write memory barrier between
+ * the two stores so that I can sleep well at night... -DaveM
+ */
+
#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
static void sbus_hme_write32(unsigned long reg, u32 val)
{
static void sbus_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr)
{
rxd->rx_addr = addr;
+ wmb();
rxd->rx_flags = flags;
}
static void sbus_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr)
{
- txd->tx_flags = flags;
txd->tx_addr = addr;
+ wmb();
+ txd->tx_flags = flags;
}
static u32 sbus_hme_read_desc32(u32 *p)
static void pci_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr)
{
rxd->rx_addr = cpu_to_le32(addr);
+ wmb();
rxd->rx_flags = cpu_to_le32(flags);
}
static void pci_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr)
{
- txd->tx_flags = cpu_to_le32(flags);
txd->tx_addr = cpu_to_le32(addr);
+ wmb();
+ txd->tx_flags = cpu_to_le32(flags);
}
static u32 pci_hme_read_desc32(u32 *p)
{
- return cpu_to_le32(*p);
+ return cpu_to_le32p(p);
}
#define hme_write32(__hp, __reg, __val) \
sbus_readl(__reg)
#define hme_write_rxd(__hp, __rxd, __flags, __addr) \
do { (__rxd)->rx_addr = (__addr); \
+ wmb(); \
(__rxd)->rx_flags = (__flags); \
} while(0)
#define hme_write_txd(__hp, __txd, __flags, __addr) \
do { (__txd)->tx_addr = (__addr); \
+ wmb(); \
(__txd)->tx_flags = (__flags); \
} while(0)
#define hme_read_desc32(__hp, __p) (*(__p))
readl(__reg)
#define hme_write_rxd(__hp, __rxd, __flags, __addr) \
do { (__rxd)->rx_addr = cpu_to_le32(__addr); \
+ wmb(); \
(__rxd)->rx_flags = cpu_to_le32(__flags); \
} while(0)
#define hme_write_txd(__hp, __txd, __flags, __addr) \
do { (__txd)->tx_addr = cpu_to_le32(__addr); \
+ wmb(); \
(__txd)->tx_flags = cpu_to_le32(__flags); \
} while(0)
-#define hme_read_desc32(__hp, __p) cpu_to_le32(*(__p))
+#define hme_read_desc32(__hp, __p) cpu_to_le32p(__p)
#define hme_dma_map(__hp, __ptr, __size) \
pci_map_single((__hp)->happy_dev, (__ptr), (__size))
#define hme_dma_unmap(__hp, __addr, __size) \
hp->tx_old = elem;
TXD((">"));
- if (test_bit(LINK_STATE_XOFF, &dev->state) &&
+ if (netif_queue_stopped(dev) &&
TX_BUFFS_AVAIL(hp) > 0)
netif_wake_queue(dev);
if (TX_BUFFS_AVAIL(hp) <= 0)
netif_stop_queue(dev);
- spin_unlock_irq(&hp->happy_lock);
-
/* Get it going. */
hme_write32(hp, hp->etxregs + ETX_PENDING, ETX_TP_DMAWAKEUP);
+
+ spin_unlock_irq(&hp->happy_lock);
+
dev->trans_start = jiffies;
tx_add_log(hp, TXLOG_ACTION_TXMIT, 0);
-/* $Id: sunlance.c,v 1.97 2000/02/14 09:02:32 davem Exp $
+/* $Id: sunlance.c,v 1.99 2000/02/16 10:36:14 davem Exp $
* lance.c: Linux/Sparc/Lance driver
*
* Written 1995, 1996 by Miguel de Icaza
j = TX_NEXT(j);
}
lp->tx_old = j;
-
- if (test_bit(LINK_STATE_XOFF, &dev->state) &&
+out:
+ if (netif_queue_stopped(dev) &&
TX_BUFFS_AVAIL > 0)
netif_wake_queue(dev);
-out:
+
spin_unlock(&lp->lock);
}
}
lp->tx_old = j;
- if (test_bit(LINK_STATE_XOFF, &dev->state) &&
+ if (netif_queue_stopped(dev) &&
TX_BUFFS_AVAIL > 0)
netif_wake_queue(dev);
out:
volatile struct lance_init_block *ib = lp->init_block;
u16 mode;
- if (!test_bit(LINK_STATE_START, &dev->state))
+ if (!netif_running(dev))
return;
if (lp->tx_old != lp->tx_new) {
-/* $Id: sunqe.c,v 1.44 2000/02/10 21:14:25 davem Exp $
+/* $Id: sunqe.c,v 1.45 2000/02/16 10:36:20 davem Exp $
* sunqe.c: Sparc QuadEthernet 10baseT SBUS card driver.
* Once again I am out to prove that every ethernet
* controller out there can be most efficiently programmed
}
if (qe_status & CREG_STAT_RXIRQ)
qe_rx(qep);
- if (test_bit(LINK_STATE_XOFF, &qep->dev->state) &&
+ if (netif_queue_stopped(qep->dev) &&
(qe_status & CREG_STAT_TXIRQ)) {
spin_lock(&qep->lock);
qe_tx_reclaim(qep);
DPRINTK("New ring status: %02X\n", ring_status);
if (ring_status & LOG_OVERFLOW) {
- if (test_bit(LINK_STATE_XOFF, &dev->state))
+ if (netif_queue_stopped(dev))
ti->readlog_pending = 1;
else
ibmtr_readlog(dev);
struct sockaddr *saddr = addr ;
struct olympic_private *olympic_priv = (struct olympic_private *)dev->priv ;
- if (test_bit(LINK_STATE_START, &dev->state)) {
+ if (netif_running(dev)) {
printk(KERN_WARNING "%s: Cannot set mac/laa address while card is open\n", dev->name) ;
return -EIO ;
}
struct tulip_private *tp = (struct tulip_private *)dev->priv;
long ioaddr = dev->base_addr;
- if (test_bit(LINK_STATE_START, &dev->state))
+ if (netif_running(dev))
tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
return &tp->stats;
np->tx_skbuff[entry] = 0;
}
if (np->tx_full &&
- test_bit(LINK_STATE_XOFF, &dev->flags) &&
+ netif_queue_stopped(dev) &&
np->cur_tx - np->dirty_tx < TX_RING_SIZE - 4) {
/* The ring is no longer full, clear tbusy. */
np->tx_full = 0;
x25_channel_t *chan = dev->priv;
cycx_t *card = chan->card;
- if (test_bit(LINK_STATE_START, &dev->state))
+ if (netif_running(dev))
return -EBUSY; /* only one open is allowed */
netif_start_queue(dev);
x25_channel_t *chan = dev->priv;
printk(KERN_INFO "%-5.5s %-15.15s %d ETH_P_%s\n",
- chan->name, chan->addr, test_bit(LINK_STATE_XOFF, &dev->state),
+ chan->name, chan->addr, netif_queue_stopped(dev),
chan->protocol == ETH_P_IP ? "IP" : "X25");
dev = chan->slave;
}
if (!*(short *)(dev->dev_addr))
return(-EINVAL);
- if (!test_bit(LINK_STATE_START, &dlp->slave->state))
+ if (!netif_running(dlp->slave))
return(-ENOTCONN);
flp = dlp->slave->priv;
if (!master)
return(-ENODEV);
- if (test_bit(LINK_STATE_START, &master->state))
+ if (netif_running(master))
return(-EBUSY);
dlp = master->priv;
dev = lapbeth_get_x25_dev(dev);
- if (dev == NULL || test_bit(LINK_STATE_START, &dev->state) == 0) {
+ if (dev == NULL || !netif_running(dev)) {
kfree_skb(skb);
return 0;
}
* Just to be *really* sure not to send anything if the interface
* is down, the ethernet device may have gone.
*/
- if (!test_bit(LINK_STATE_START, &dev->state)) {
+ if (!netif_running(dev)) {
lapbeth_check_devices(dev);
kfree_skb(skb);
return -ENODEV;
}
sti();
outb(csr0 | RC_CHK, dev->base_addr + CSR0);
- if(test_bit(LINK_STATE_START, &dev->state))
+ if(netif_running(dev))
{
struct timer_list* watchdog = &lp->watchdog;
init_timer(watchdog);
/* struct net_local *lp = (struct net_local *)dev->priv; */
struct sockaddr *saddr = addr;
- if(test_bit(LINK_STATE_START, &dev->state))
+ if(netif_running(dev))
{
/* Only possible while card isn't started */
return -EBUSY;
flp->dlci[i] = abs(flp->dlci[i]);
- if (test_bit(LINK_STATE_START, &slave->state) && (flp->config.station == FRAD_STATION_NODE))
+ if (netif_running(slave) && (flp->config.station == FRAD_STATION_NODE))
sdla_cmd(slave, SDLA_ACTIVATE_DLCI, 0, 0, &flp->dlci[i], sizeof(short), NULL, NULL);
return(0);
flp->dlci[i] = -abs(flp->dlci[i]);
- if (test_bit(LINK_STATE_START, &slave->state) && (flp->config.station == FRAD_STATION_NODE))
+ if (netif_running(slave) && (flp->config.station == FRAD_STATION_NODE))
sdla_cmd(slave, SDLA_DEACTIVATE_DLCI, 0, 0, &flp->dlci[i], sizeof(short), NULL, NULL);
return(0);
flp->dlci[i] = -*(short *)(master->dev_addr);
master->mtu = slave->mtu;
- if (test_bit(LINK_STATE_START, &slave->state)) {
+ if (netif_running(slave)) {
if (flp->config.station == FRAD_STATION_CPE)
sdla_reconfig(slave);
else
MOD_DEC_USE_COUNT;
- if (test_bit(LINK_STATE_START, &slave->state)) {
+ if (netif_running(slave)) {
if (flp->config.station == FRAD_STATION_CPE)
sdla_reconfig(slave);
else
ret = SDLA_RET_OK;
len = sizeof(struct dlci_conf);
- if (test_bit(LINK_STATE_START, &slave->state)) {
+ if (netif_running(slave)) {
if (get)
ret = sdla_cmd(slave, SDLA_READ_DLCI_CONFIGURATION, abs(flp->dlci[i]), 0,
NULL, 0, &dlp->config, &len);
if (!get)
{
- if (test_bit(LINK_STATE_START, &dev->state))
+ if (netif_running(dev))
return(-EBUSY);
if(copy_from_user(&data.config, conf, sizeof(struct frad_conf)))
else
{
/* no sense reading if the CPU isn't started */
- if (test_bit(LINK_STATE_START, &dev->state))
+ if (netif_running(dev))
{
size = sizeof(data);
if (sdla_cmd(dev, SDLA_READ_DLCI_CONFIGURATION, 0, 0, NULL, 0, &data, &size) != SDLA_RET_OK)
flp = dev->priv;
- if (test_bit(LINK_STATE_START, &dev->state))
+ if (netif_running(dev))
return(-EBUSY);
/* for now, you can't change the MTU! */
struct x25_asy *sl = (struct x25_asy *) tty->disc_data;
/* First make sure we're connected. */
- if (!sl || sl->magic != X25_ASY_MAGIC || !test_bit(LINK_STATE_START, &sl->dev->state))
+ if (!sl || sl->magic != X25_ASY_MAGIC || !netif_running(sl->dev))
return;
if (sl->xleft <= 0)
struct x25_asy *sl = (struct x25_asy*)(dev->priv);
int err;
- if (!test_bit(LINK_STATE_START, &sl->dev->state))
+ if (!netif_running(sl->dev))
{
printk("%s: xmit call when iface is down\n", dev->name);
return 1;
static void x25_asy_data_transmit(void *token, struct sk_buff *skb)
{
struct x25_asy *sl=token;
- if(test_bit(LINK_STATE_XOFF, &sl->dev->state))
+ if (netif_queue_stopped(sl->dev))
{
printk(KERN_ERR "x25_asy: tbusy drop\n");
kfree_skb(skb);
{
struct x25_asy *sl = (struct x25_asy *) tty->disc_data;
- if (!sl || sl->magic != X25_ASY_MAGIC || !test_bit(LINK_STATE_START, &sl->dev->state))
+ if (!sl || sl->magic != X25_ASY_MAGIC || !netif_running(sl->dev))
return;
/*
* VSV = if dev->start==0, then device
* unregistered while close proc.
*/
- if (test_bit(LINK_STATE_START, &x25_asy_ctrls[i]->dev.state))
+ if (netif_running(&(x25_asy_ctrls[i]->dev)))
unregister_netdev(&(x25_asy_ctrls[i]->dev));
kfree(x25_asy_ctrls[i]);
net_local *lp = (net_local *) dev->priv;
/* Check if we can do it now ! */
- if (!test_bit(LINK_STATE_START, &dev->state) &&
- test_bit(LINK_STATE_XOFF, &dev->state)) {
+ if (!netif_running(dev) && netif_queue_stopped(dev)) {
lp->reconfig_82586 = 1;
#ifdef DEBUG_CONFIG_INFO
printk(KERN_DEBUG
/* Check the state of the command unit. */
if (((status & SCB_ST_CNA) == SCB_ST_CNA) ||
- (((status & SCB_ST_CUS) != SCB_ST_CUS_ACTV) && test_bit(LINK_STATE_START, &dev->state))) {
+ (((status & SCB_ST_CUS) != SCB_ST_CUS_ACTV) && netif_running(dev))) {
#ifdef DEBUG_INTERRUPT_ERROR
printk(KERN_INFO
"%s: wavelan_interrupt(): CU inactive -- restarting\n",
/* Check the state of the command unit. */
if (((status & SCB_ST_RNR) == SCB_ST_RNR) ||
- (((status & SCB_ST_RUS) != SCB_ST_RUS_RDY) && test_bit(LINK_STATE_START, &dev->state))) {
+ (((status & SCB_ST_RUS) != SCB_ST_RUS_RDY) && netif_running(dev))) {
#ifdef DEBUG_INTERRUPT_ERROR
printk(KERN_INFO
"%s: wavelan_interrupt(): RU not ready -- restarting\n",
/* Code that should never be run! Perhaps remove after testing.. */
{
static int stopit = 10;
- if ((!(test_bit(LINK_STATE_START, &dev->state))) && --stopit < 0) {
+ if ((!(netif_running(dev))) && --stopit < 0) {
printk(KERN_ERR "%s: Emergency stop, looping startup interrupt.\n",
dev->name);
free_irq(irq, dev);
-/* $Id: audio.c,v 1.48 2000/02/09 22:33:19 davem Exp $
+/* $Id: audio.c,v 1.49 2000/02/17 05:52:41 davem Exp $
* drivers/sbus/audio/audio.c
*
* Copyright 1996 Thomas K. Dyas (tdyas@noc.rutgers.edu)
}
dma->regs = sbus_ioremap(&dma->sdev->resource[0], 0,
- PAGE_SIZE, "dma");
+ dma->sdev->resource[0].end - dma->sdev->resource[0].start + 1,
+ "dma");
dma->node = dma->sdev->prom_node;
struct aic7xxx_scb *scbp;
unsigned char queue_depth;
- if (scb->sg_count == 1)
- pci_unmap_single(p->pdev, le32_to_cpu(scb->sg_list[0].address),
- le32_to_cpu(scb->sg_list[0].length));
- else if (scb->sg_count > 1)
+ if (cmd->use_sg > 1)
{
struct scatterlist *sg;
sg = (struct scatterlist *)cmd->request_buffer;
pci_unmap_sg(p->pdev, sg, cmd->use_sg);
}
+ else if (cmd->request_bufflen)
+ pci_unmap_single(p->pdev, le32_to_cpu(scb->sg_list[0].address),
+ le32_to_cpu(scb->sg_list[0].length));
if (scb->flags & SCB_RECOVERY_SCB)
{
p->flags &= ~AHC_ABORT_PENDING;
#endif
/* Print sense information */
-void print_sense(const char * devclass, Scsi_Cmnd * SCpnt)
+static
+void print_sense_internal(const char * devclass,
+ const unsigned char * sense_buffer,
+ kdev_t dev)
{
int i, s;
int sense_class, valid, code;
- unsigned char * sense_buffer = SCpnt->sense_buffer;
const char * error = NULL;
sense_class = (sense_buffer[0] >> 4) & 0x07;
if (sense_class == 7) { /* extended sense data */
s = sense_buffer[7] + 8;
- if(s > sizeof(SCpnt->sense_buffer))
- s = sizeof(SCpnt->sense_buffer);
+ if(s > SCSI_SENSE_BUFFERSIZE)
+ s = SCSI_SENSE_BUFFERSIZE;
if (!valid)
printk("[valid=0] ");
#if (CONSTANTS & CONST_SENSE)
printk( "%s%s: sense key %s\n", devclass,
- kdevname(SCpnt->request.rq_dev), snstext[sense_buffer[2] & 0x0f]);
+ kdevname(dev), snstext[sense_buffer[2] & 0x0f]);
#else
printk("%s%s: sns = %2x %2x\n", devclass,
- kdevname(SCpnt->request.rq_dev), sense_buffer[0], sense_buffer[2]);
+ kdevname(dev), sense_buffer[0], sense_buffer[2]);
#endif
/* Check to see if additional sense information is available */
#if (CONSTANTS & CONST_SENSE)
if (sense_buffer[0] < 15)
printk("%s%s: old sense key %s\n", devclass,
- kdevname(SCpnt->request.rq_dev), snstext[sense_buffer[0] & 0x0f]);
+ kdevname(dev), snstext[sense_buffer[0] & 0x0f]);
else
#endif
printk("%s%s: sns = %2x %2x\n", devclass,
- kdevname(SCpnt->request.rq_dev), sense_buffer[0], sense_buffer[2]);
+ kdevname(dev), sense_buffer[0], sense_buffer[2]);
printk("Non-extended sense class %d code 0x%0x ", sense_class, code);
s = 4;
return;
}
+/* Print decoded sense data for a command using the classic Scsi_Cmnd
+ * interface; forwards the command's sense buffer and device to the
+ * shared print_sense_internal() helper. */
+void print_sense(const char * devclass, Scsi_Cmnd * SCpnt)
+{
+	print_sense_internal(devclass, SCpnt->sense_buffer,
+			     SCpnt->request.rq_dev);
+}
+
+/* Scsi_Request flavour of print_sense(): same decoding, but reads the
+ * sense data and device from the request (sr_sense_buffer/sr_request)
+ * instead of a Scsi_Cmnd. */
+void print_req_sense(const char * devclass, Scsi_Request * SRpnt)
+{
+	print_sense_internal(devclass, SRpnt->sr_sense_buffer,
+			     SRpnt->sr_request.rq_dev);
+}
+
#if (CONSTANTS & CONST_MSG)
static const char *one_byte_msgs[] = {
/* 0x00 */ "Command Complete", NULL, "Save Pointers",
retval->max_id = 8;
retval->max_lun = 8;
+ /*
+ * All drivers right now should be able to handle 12 byte commands.
+ * Every so often there are requests for 16 byte commands, but individual
+ * low-level drivers need to certify that they actually do something
+ * sensible with such commands.
+ */
+ retval->max_cmd_len = 12;
+
retval->unique_id = 0;
retval->io_port = 0;
retval->hostt = tpnt;
retval->host_blocked = FALSE;
+ retval->host_self_blocked = FALSE;
#ifdef DEBUG
printk("Register %x %x: %d\n", (int)retval, (int)retval->hostt, j);
* initialized, as required.
*/
+ /*
+ * The maximum length of SCSI commands that this host can accept.
+ * Probably 12 for most host adapters, but could be 16 for others.
+ * For drivers that don't set this field, a value of 12 is
+ * assumed. I am leaving this as a number rather than a bit
+ * because you never know what subsequent SCSI standards might do
+ * (i.e. could there be a 20 byte or a 24-byte command a few years
+ * down the road?).
+ */
+ unsigned char max_cmd_len;
+
int this_id;
int can_queue;
short cmd_per_lun;
* Host has rejected a command because it was busy.
*/
unsigned host_blocked:1;
+
+ /*
+ * Host has requested that no further requests come through for the
+ * time being.
+ */
+ unsigned host_self_blocked:1;
/*
* Host uses correct SCSI ordering not PC ordering. The bit is
extern void scsi_free_host_dev(Scsi_Device * SDpnt);
extern Scsi_Device * scsi_get_host_dev(struct Scsi_Host * SHpnt);
+extern void scsi_unblock_requests(struct Scsi_Host * SHpnt);
+extern void scsi_block_requests(struct Scsi_Host * SHpnt);
+extern void scsi_report_bus_reset(struct Scsi_Host * SHpnt, int channel);
+
typedef struct SHN
{
struct SHN * next;
ds = cmd->dataseg;
sg_count = pci_map_sg(hostdata->pci_dev, sg, Cmnd->use_sg);
- Cmnd->use_sg = sg_count;
cmd->segment_cnt = cpu_to_le16(sg_count);
}
sg_count -= n;
}
- } else {
+ } else if (Cmnd->request_bufflen) {
Cmnd->SCp.ptr = (char *)(unsigned long)
pci_map_single(hostdata->pci_dev,
Cmnd->request_buffer,
cmd->dataseg[0].d_count =
cpu_to_le32((u32)Cmnd->request_bufflen);
cmd->segment_cnt = cpu_to_le16(1);
+ } else {
+ cmd->dataseg[0].d_base = 0;
+ cmd->dataseg[0].d_count = 0;
+ cmd->segment_cnt = cpu_to_le16(1); /* Shouldn't this be 0? */
}
/* Committed, record Scsi_Cmd so we can find it later. */
pci_unmap_sg(hostdata->pci_dev,
(struct scatterlist *)Cmnd->buffer,
Cmnd->use_sg);
- else
+ else if (Cmnd->request_bufflen)
pci_unmap_single(hostdata->pci_dev,
(u32)((long)Cmnd->SCp.ptr),
Cmnd->request_bufflen);
*/
static spinlock_t scsi_bhqueue_lock = SPIN_LOCK_UNLOCKED;
+/*
+ * Function:    scsi_allocate_request
+ *
+ * Purpose:     Allocate a request descriptor.
+ *
+ * Arguments:   device    - device for which we want a request
+ *
+ * Lock status: No locks assumed to be held.  This function is SMP-safe.
+ *
+ * Returns:     Pointer to request block, or NULL if the allocation
+ *              failed.
+ *
+ * Notes:       With the new queueing code, it becomes important
+ *              to track the difference between a command and a
+ *              request.  A request is a pending item in the queue that
+ *              has not yet reached the top of the queue.
+ */
+
+Scsi_Request *scsi_allocate_request(Scsi_Device * device)
+{
+	Scsi_Request *SRpnt = NULL;
+
+	if (!device)
+		panic("No device passed to scsi_allocate_request().\n");
+
+	/*
+	 * NOTE(review): GFP_ATOMIC is used although the header says no
+	 * locks are held - confirm whether callers may sleep and
+	 * GFP_KERNEL would be appropriate here.
+	 */
+	SRpnt = (Scsi_Request *) kmalloc(sizeof(Scsi_Request), GFP_ATOMIC);
+	if( SRpnt == NULL )
+	{
+		return NULL;
+	}
+
+	/* Start from a clean slate; data direction stays UNKNOWN until the
+	 * caller sets it explicitly. */
+	memset(SRpnt, 0, sizeof(Scsi_Request));
+	SRpnt->sr_device = device;
+	SRpnt->sr_host = device->host;
+	SRpnt->sr_magic = SCSI_REQ_MAGIC;
+	SRpnt->sr_data_direction = SCSI_DATA_UNKNOWN;
+
+	return SRpnt;
+}
+
+/*
+ * Function:    scsi_release_request
+ *
+ * Purpose:     Release a request descriptor.
+ *
+ * Arguments:   req       - request descriptor to be freed
+ *
+ * Lock status: No locks assumed to be held.  This function is SMP-safe.
+ *
+ * Returns:     Nothing.
+ *
+ * Notes:       With the new queueing code, it becomes important
+ *              to track the difference between a command and a
+ *              request.  A request is a pending item in the queue that
+ *              has not yet reached the top of the queue.  We still need
+ *              to free a request when we are done with it, of course.
+ */
+void scsi_release_request(Scsi_Request * req)
+{
+	/* Give back any Scsi_Cmnd still attached to this request before
+	 * freeing the request itself. */
+	if( req->sr_command != NULL )
+	{
+		scsi_release_command(req->sr_command);
+		req->sr_command = NULL;
+	}
+
+	kfree(req);
+}
+
/*
* Function: scsi_allocate_device
*
* command block, this function will interrupt and return
* NULL in the event that a signal arrives that needs to
* be handled.
+ *
+ * This function is deprecated, and drivers should be
+ * rewritten to use Scsi_Request instead of Scsi_Cmnd.
*/
Scsi_Cmnd *scsi_allocate_device(Scsi_Device * device, int wait,
SCpnt->transfersize = 0; /* No default transfer size */
SCpnt->cmd_len = 0;
+ SCpnt->sc_data_direction = SCSI_DATA_UNKNOWN;
+ SCpnt->sc_request = NULL;
+ SCpnt->sc_magic = SCSI_CMND_MAGIC;
+
SCpnt->result = 0;
SCpnt->underflow = 0; /* Do not flag underflow conditions */
SCpnt->resid = 0;
* gets hidden in this function. Upper level drivers don't
* have any chickens to wave in the air to get things to
* work reliably.
+ *
+ * This function is deprecated, and drivers should be
+ * rewritten to use Scsi_Request instead of Scsi_Cmnd.
*/
void scsi_release_command(Scsi_Cmnd * SCpnt)
{
* drivers go for the same host at the same time.
*/
+/*
+ * Synchronous wrapper around scsi_do_req(): queue the request, then
+ * sleep on an on-stack semaphore until the scsi_wait_done completion
+ * routine releases it.  Any Scsi_Cmnd the mid-layer attached to the
+ * request while it was in flight is released before returning; callers
+ * examine SRpnt->sr_result / sr_sense_buffer afterwards.
+ */
+void scsi_wait_req (Scsi_Request * SRpnt, const void *cmnd ,
+		    void *buffer, unsigned bufflen,
+		    int timeout, int retries)
+{
+	DECLARE_MUTEX_LOCKED(sem);
+
+	SRpnt->sr_request.sem = &sem;
+	SRpnt->sr_request.rq_status = RQ_SCSI_BUSY;
+	scsi_do_req (SRpnt, (void *) cmnd,
+		buffer, bufflen, scsi_wait_done, timeout, retries);
+	down (&sem);
+	/* The semaphore lives on our stack - clear the pointer so nothing
+	 * can touch it after we return. */
+	SRpnt->sr_request.sem = NULL;
+	if( SRpnt->sr_command != NULL )
+	{
+		scsi_release_command(SRpnt->sr_command);
+		SRpnt->sr_command = NULL;
+	}
+
+}
+
+/*
+ * Function: scsi_do_req
+ *
+ * Purpose: Queue a SCSI request
+ *
+ * Arguments: SRpnt - command descriptor.
+ * cmnd - actual SCSI command to be performed.
+ * buffer - data buffer.
+ * bufflen - size of data buffer.
+ * done - completion function to be run.
+ * timeout - how long to let it run before timeout.
+ * retries - number of retries we allow.
+ *
+ * Lock status: With the new queueing code, this is SMP-safe, and no locks
+ * need be held upon entry. The old queueing code the lock was
+ * assumed to be held upon entry.
+ *
+ * Returns: Nothing.
+ *
+ * Notes: Prior to the new queue code, this function was not SMP-safe.
+ * Also, this function is now only used for queueing requests
+ * for things like ioctls and character device requests - this
+ * is because we essentially just inject a request into the
+ * queue for the device. Normal block device handling manipulates
+ * the queue directly.
+ */
+void scsi_do_req(Scsi_Request * SRpnt, const void *cmnd,
+	      void *buffer, unsigned bufflen, void (*done) (Scsi_Cmnd *),
+	      int timeout, int retries)
+{
+	Scsi_Device * SDpnt = SRpnt->sr_device;
+	struct Scsi_Host *host = SDpnt->host;
+
+	ASSERT_LOCK(&io_request_lock, 0);
+
+	/*
+	 * Sanity-check the host *before* the debug logging below
+	 * dereferences it (the check used to come after the log).
+	 */
+	if (!host) {
+		panic("Invalid or not present host.\n");
+	}
+
+	SCSI_LOG_MLQUEUE(4,
+			 {
+			 int i;
+			 int target = SDpnt->id;
+			 printk("scsi_do_req (host = %d, channel = %d target = %d, "
+		"buffer =%p, bufflen = %d, done = %p, timeout = %d, "
+		"retries = %d)\n"
+		"command : ", host->host_no, SDpnt->channel, target, buffer,
+				bufflen, done, timeout, retries);
+			 for (i = 0; i < 10; ++i)
+			 printk("%02x ", ((unsigned char *) cmnd)[i]);
+			 printk("\n");
+			 });
+
+	/*
+	 * If the upper level driver is reusing these things, then
+	 * we should release the low-level block now.  Another one will
+	 * be allocated later when this request is getting queued.
+	 */
+	if( SRpnt->sr_command != NULL )
+	{
+		scsi_release_command(SRpnt->sr_command);
+		SRpnt->sr_command = NULL;
+	}
+
+	/*
+	 * We must prevent reentrancy to the lowlevel host driver.  This prevents
+	 * it - we enter a loop until the host we want to talk to is not busy.
+	 * Race conditions are prevented, as interrupts are disabled in between the
+	 * time we check for the host being not busy, and the time we mark it busy
+	 * ourselves.
+	 */
+
+	/*
+	 * Our own function scsi_done (which marks the host as not busy,
+	 * disables the timeout counter, etc) will be called by us or by the
+	 * scsi_hosts[host].queuecommand() function; it in turn calls the
+	 * completion function for the high level driver.
+	 */
+
+	/* Copy the CDB once (this used to be done twice, back to back). */
+	memcpy((void *) SRpnt->sr_cmnd, (const void *) cmnd,
+	       sizeof(SRpnt->sr_cmnd));
+	SRpnt->sr_bufflen = bufflen;
+	SRpnt->sr_buffer = buffer;
+	SRpnt->sr_allowed = retries;
+	SRpnt->sr_done = done;
+	SRpnt->sr_timeout_per_command = timeout;
+
+	/* A zero CDB length means "derive it from the opcode". */
+	if (SRpnt->sr_cmd_len == 0)
+		SRpnt->sr_cmd_len = COMMAND_SIZE(SRpnt->sr_cmnd[0]);
+
+	/*
+	 * At this point, we merely set up the command, stick it in the normal
+	 * request queue, and return.  Eventually that request will come to the
+	 * top of the list, and will be dispatched.
+	 */
+	scsi_insert_special_req(SRpnt, 0);
+
+	SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_do_req()\n"));
+}
+
+/*
+ * Function: scsi_init_cmd_from_req
+ *
+ * Purpose:     Initialize a Scsi_Cmnd from a Scsi_Request
+ *
+ * Arguments: SCpnt - command descriptor.
+ * SRpnt - Request from the queue.
+ *
+ * Lock status: None needed.
+ *
+ * Returns: Nothing.
+ *
+ * Notes: Mainly transfer data from the request structure to the
+ * command structure. The request structure is allocated
+ * using the normal memory allocator, and requests can pile
+ * up to more or less any depth. The command structure represents
+ * a consumable resource, as these are allocated into a pool
+ * when the SCSI subsystem initializes. The preallocation is
+ * required so that in low-memory situations a disk I/O request
+ * won't cause the memory manager to try and write out a page.
+ * The request structure is generally used by ioctls and character
+ * devices.
+ */
+void scsi_init_cmd_from_req(Scsi_Cmnd * SCpnt, Scsi_Request * SRpnt)
+{
+	struct Scsi_Host *host = SCpnt->host;
+
+	ASSERT_LOCK(&io_request_lock, 0);
+
+	SCpnt->owner = SCSI_OWNER_MIDLEVEL;
+	SRpnt->sr_command = SCpnt;
+
+	if (!host) {
+		panic("Invalid or not present host.\n");
+	}
+
+	SCpnt->cmd_len = SRpnt->sr_cmd_len;
+	SCpnt->use_sg = SRpnt->sr_use_sg;
+
+	memcpy((void *) &SCpnt->request, (const void *) &SRpnt->sr_request,
+	       sizeof(SRpnt->sr_request));
+	/* data_cmnd holds a pristine copy of the CDB; cmnd (copied below)
+	 * is the working copy. */
+	memcpy((void *) SCpnt->data_cmnd, (const void *) SRpnt->sr_cmnd,
+	       sizeof(SCpnt->data_cmnd));
+	SCpnt->reset_chain = NULL;
+	SCpnt->serial_number = 0;
+	SCpnt->serial_number_at_timeout = 0;
+	SCpnt->bufflen = SRpnt->sr_bufflen;
+	SCpnt->buffer = SRpnt->sr_buffer;
+	SCpnt->flags = 0;
+	SCpnt->retries = 0;
+	SCpnt->allowed = SRpnt->sr_allowed;
+	SCpnt->done = SRpnt->sr_done;
+	SCpnt->timeout_per_command = SRpnt->sr_timeout_per_command;
+
+	SCpnt->sc_data_direction = SRpnt->sr_data_direction;
+
+	SCpnt->sglist_len = SRpnt->sr_sglist_len;
+	SCpnt->underflow = SRpnt->sr_underflow;
+
+	SCpnt->sc_request = SRpnt;
+
+	memcpy((void *) SCpnt->cmnd, (const void *) SRpnt->sr_cmnd,
+	       sizeof(SCpnt->cmnd));
+	/* Zero the sense buffer.  Some host adapters automatically request
+	 * sense on error.  0 is not a valid sense code.
+	 */
+	memset((void *) SCpnt->sense_buffer, 0, sizeof SCpnt->sense_buffer);
+	SCpnt->request_buffer = SRpnt->sr_buffer;
+	SCpnt->request_bufflen = SRpnt->sr_bufflen;
+	SCpnt->old_use_sg = SCpnt->use_sg;
+	if (SCpnt->cmd_len == 0)
+		SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]);
+	SCpnt->old_cmd_len = SCpnt->cmd_len;
+	SCpnt->sc_old_data_direction = SCpnt->sc_data_direction;
+
+	/* Start the timer ticking.  */
+
+	SCpnt->internal_timeout = NORMAL_TIMEOUT;
+	SCpnt->abort_reason = 0;
+	SCpnt->result = 0;
+
+	/* Trace message used to misleadingly say "scsi_do_cmd". */
+	SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_init_cmd_from_req()\n"));
+}
+
/*
* Function: scsi_do_cmd
*
if (SCpnt->cmd_len == 0)
SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]);
SCpnt->old_cmd_len = SCpnt->cmd_len;
+ SCpnt->sc_old_data_direction = SCpnt->sc_data_direction;
/* Start the timer ticking. */
SCpnt->request_bufflen = SCpnt->bufflen;
SCpnt->use_sg = SCpnt->old_use_sg;
SCpnt->cmd_len = SCpnt->old_cmd_len;
+ SCpnt->sc_data_direction = SCpnt->sc_old_data_direction;
/*
* Zero the sense information from the last time we tried
{
struct Scsi_Host *host;
Scsi_Device *device;
+ Scsi_Request * SRpnt;
unsigned long flags;
ASSERT_LOCK(&io_request_lock, 0);
/* We can get here with use_sg=0, causing a panic in the upper level (DB) */
SCpnt->use_sg = SCpnt->old_use_sg;
+ /*
+ * If there is an associated request structure, copy the data over before we call the
+ * completion function.
+ */
+ SRpnt = SCpnt->sc_request;
+ if( SRpnt != NULL ) {
+ SRpnt->sr_result = SRpnt->sr_command->result;
+ if( SRpnt->sr_result != 0 ) {
+ memcpy(SRpnt->sr_sense_buffer,
+ SRpnt->sr_command->sense_buffer,
+ sizeof(SRpnt->sr_sense_buffer));
+ }
+ }
+
SCpnt->done(SCpnt);
}
kfree((char *) SCpnt);
}
SDpnt->has_cmdblocks = 0;
+ SDpnt->queue_depth = 0;
spin_unlock_irqrestore(&device_request_lock, flags);
}
printk("Dump of scsi host parameters:\n");
i = 0;
for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
- printk(" %d %d %d : %d\n",
+ printk(" %d %d %d : %d %d\n",
shpnt->host_failed,
shpnt->host_busy,
atomic_read(&shpnt->host_active),
- shpnt->host_blocked);
+ shpnt->host_blocked,
+ shpnt->host_self_blocked);
}
#include <asm/scatterlist.h>
#include <asm/io.h>
+/*
+ * These are the values that the SCpnt->sc_data_direction and
+ * SRpnt->sr_data_direction can take. These need to be set
+ * The SCSI_DATA_UNKNOWN value is essentially the default.
+ * In the event that the command creator didn't bother to
+ * set a value, you will see SCSI_DATA_UNKNOWN.
+ */
+#define SCSI_DATA_UNKNOWN 0
+#define SCSI_DATA_WRITE 1
+#define SCSI_DATA_READ 2
+#define SCSI_DATA_NONE 3
+
/*
* Some defs, in case these are not defined elsewhere.
*/
#define SUGGEST_MASK 0xf0
#define MAX_COMMAND_SIZE 12
+#define SCSI_SENSE_BUFFERSIZE 64
/*
* SCSI command sets
*/
typedef struct scsi_device Scsi_Device;
typedef struct scsi_cmnd Scsi_Cmnd;
+typedef struct scsi_request Scsi_Request;
+
+#define SCSI_CMND_MAGIC 0xE25C23A5
+#define SCSI_REQ_MAGIC 0x75F6D354
/*
* Here is where we prototype most of the mid-layer.
void (*done) (struct scsi_cmnd *),
int timeout, int retries);
extern void scsi_wait_cmd(Scsi_Cmnd *, const void *cmnd,
+ void *buffer, unsigned bufflen,
+ int timeout, int retries);
+extern int scsi_dev_init(void);
+
+/*
+ * Newer request-based interfaces.
+ */
+extern Scsi_Request *scsi_allocate_request(Scsi_Device *);
+extern void scsi_release_request(Scsi_Request *);
+extern void scsi_wait_req(Scsi_Request *, const void *cmnd,
void *buffer, unsigned bufflen,
int timeout, int retries);
-extern int scsi_dev_init(void);
+extern void scsi_do_req(Scsi_Request *, const void *cmnd,
+ void *buffer, unsigned bufflen,
+ void (*done) (struct scsi_cmnd *),
+ int timeout, int retries);
+extern int scsi_insert_special_req(Scsi_Request * SRpnt, int);
+extern void scsi_init_cmd_from_req(Scsi_Cmnd *, Scsi_Request *);
/*
*/
extern void print_command(unsigned char *);
extern void print_sense(const char *, Scsi_Cmnd *);
+extern void print_req_sense(const char *, Scsi_Request *);
extern void print_driverbyte(int scsiresult);
extern void print_hostbyte(int scsiresult);
extern void print_status (int status);
volatile int phase;
} Scsi_Pointer;
+/*
+ * This is essentially a slimmed down version of Scsi_Cmnd.  The point of
+ * having this is that requests that are injected into the queue as result
+ * of things like ioctls and character devices shouldn't be using a
+ * Scsi_Cmnd until such a time that the command is actually at the head
+ * of the queue and being sent to the driver.
+ */
+struct scsi_request {
+	int     sr_magic;		/* SCSI_REQ_MAGIC - marks a live request */
+	int     sr_result;	/* Status code from lower level driver */
+	unsigned char sr_sense_buffer[SCSI_SENSE_BUFFERSIZE];		/* obtained by REQUEST SENSE
+						 * when CHECK CONDITION is
+						 * received on original command
+						 * (auto-sense) */
+
+	struct Scsi_Host *sr_host;	/* host the request is bound to */
+	Scsi_Device *sr_device;		/* target device */
+	Scsi_Cmnd *sr_command;		/* Scsi_Cmnd carrying this request,
+					 * if one is attached (else NULL) */
+	struct request sr_request;	/* A copy of the command we are
+				   working on */
+	unsigned sr_bufflen;	/* Size of data buffer */
+	void *sr_buffer;		/* Data buffer */
+	int sr_allowed;			/* number of retries allowed */
+	unsigned char sr_data_direction;	/* one of the SCSI_DATA_* values */
+	unsigned char sr_cmd_len;	/* CDB length; 0 = derive from opcode */
+	unsigned char sr_cmnd[MAX_COMMAND_SIZE];	/* the CDB itself */
+	void (*sr_done) (struct scsi_cmnd *);	/* Mid-level done function */
+	int sr_timeout_per_command;	/* timeout for this request */
+	unsigned short sr_use_sg;	/* Number of pieces of scatter-gather */
+	unsigned short sr_sglist_len;	/* size of malloc'd scatter-gather list */
+	unsigned sr_underflow;	/* Return error if less than
+				   this amount is transfered */
+};
/*
* FIXME(eric) - one of the great regrets that I have is that I failed to define
* go back and retrofit at least some of the elements here with with the prefix.
*/
struct scsi_cmnd {
+ int sc_magic;
/* private: */
/*
* This information is private to the scsi mid-layer. Wrapping it in a
unsigned short state;
unsigned short owner;
Scsi_Device *device;
+ Scsi_Request *sc_request;
struct scsi_cmnd *next;
struct scsi_cmnd *reset_chain;
unsigned char channel;
unsigned char cmd_len;
unsigned char old_cmd_len;
+ unsigned char sc_data_direction;
+ unsigned char sc_old_data_direction;
/* These elements define the operation we are about to perform */
unsigned char cmnd[MAX_COMMAND_SIZE];
struct request request; /* A copy of the command we are
working on */
- unsigned char sense_buffer[64]; /* obtained by REQUEST SENSE
+ unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE]; /* obtained by REQUEST SENSE
* when CHECK CONDITION is
* received on original command
* (auto-sense) */
SCpnt->request_bufflen = SCpnt->bufflen;
SCpnt->use_sg = SCpnt->old_use_sg;
SCpnt->cmd_len = SCpnt->old_cmd_len;
+ SCpnt->sc_data_direction = SCpnt->sc_old_data_direction;
scsi_send_eh_cmnd(SCpnt, SCpnt->timeout_per_command);
SCpnt->request_bufflen = 256;
SCpnt->use_sg = 0;
SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]);
+ SCpnt->sc_data_direction = SCSI_DATA_READ;
scsi_send_eh_cmnd(SCpnt, SENSE_TIMEOUT);
SCpnt->request_bufflen = SCpnt->bufflen;
SCpnt->use_sg = SCpnt->old_use_sg;
SCpnt->cmd_len = SCpnt->old_cmd_len;
+ SCpnt->sc_data_direction = SCpnt->sc_old_data_direction;
/*
* Hey, we are done. Let's look to see what happened.
SCpnt->use_sg = 0;
SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]);
scsi_send_eh_cmnd(SCpnt, SENSE_TIMEOUT);
+ SCpnt->sc_data_direction = SCSI_DATA_NONE;
/* Last chance to have valid sense data */
if (!scsi_sense_valid(SCpnt))
SCpnt->request_bufflen = SCpnt->bufflen;
SCpnt->use_sg = SCpnt->old_use_sg;
SCpnt->cmd_len = SCpnt->old_cmd_len;
+ SCpnt->sc_data_direction = SCpnt->sc_old_data_direction;
/*
* Hey, we are done. Let's look to see what happened.
* things.
*/
SCpnt->use_sg = SCpnt->old_use_sg;
+ SCpnt->sc_data_direction = SCpnt->sc_old_data_direction;
*SClist = SCpnt;
}
request_queue_t *q;
if ((host->can_queue > 0 && (host->host_busy >= host->can_queue))
|| (host->host_blocked)
+ || (host->host_self_blocked)
|| (SDpnt->device_blocked)) {
break;
}
return -EINTR;
}
+ SCpnt->sc_data_direction = SCSI_DATA_NONE;
scsi_wait_cmd(SCpnt, cmd, NULL, 0, timeout, retries);
SCSI_LOG_IOCTL(2, printk("Ioctl returned 0x%x\n", SCpnt->result));
int inlen, outlen, cmdlen;
int needed, buf_needed;
int timeout, retries, result;
+ int data_direction;
if (!sic)
return -EINVAL;
if (!buf)
return -ENOMEM;
memset(buf, 0, buf_needed);
- } else
+ if( inlen == 0 ) {
+ data_direction = SCSI_DATA_WRITE;
+ } else if (outlen == 0 ) {
+ data_direction = SCSI_DATA_READ;
+ } else {
+ /*
+ * Can this ever happen?
+ */
+ data_direction = SCSI_DATA_UNKNOWN;
+ }
+
+ } else {
buf = NULL;
+ data_direction = SCSI_DATA_NONE;
+ }
/*
* Obtain the command from the user's address space.
return -EINTR;
}
+ SCpnt->sc_data_direction = data_direction;
scsi_wait_cmd(SCpnt, cmd, buf, needed, timeout, retries);
/*
return 0;
}
+/*
+ * Function:    scsi_insert_special_req()
+ *
+ * Purpose:     Insert pre-formed request into request queue.
+ *
+ * Arguments:   SRpnt     - request that is ready to be queued.
+ *              at_head   - boolean.  True if we should insert at head
+ *                          of queue, false if we should insert at tail.
+ *
+ * Lock status: Assumed that lock is not held upon entry.
+ *
+ * Returns:     0 (always, at present).
+ *
+ * Notes:       This function is called from character device and from
+ *              ioctl types of functions where the caller knows exactly
+ *              what SCSI command needs to be issued.   The idea is that
+ *              we merely inject the command into the queue (at the head
+ *              or the tail, as at_head dictates), and then call the
+ *              queue request function to actually process it.
+ */
+int scsi_insert_special_req(Scsi_Request * SRpnt, int at_head)
+{
+	unsigned long flags;
+	request_queue_t *q;
+
+	ASSERT_LOCK(&io_request_lock, 0);
+
+	/*
+	 * The SCpnt already contains a request structure - we will doctor the
+	 * thing up with the appropriate values and use that in the actual
+	 * request queue.
+	 */
+	q = &SRpnt->sr_device->request_queue;
+	SRpnt->sr_request.cmd = SPECIAL;
+	SRpnt->sr_request.special = (void *) SRpnt;
+
+	/*
+	 * We have the option of inserting the head or the tail of the queue.
+	 * Typically we use the tail for new ioctls and so forth.  We use the
+	 * head of the queue for things like a QUEUE_FULL message from a
+	 * device, or a host that is unable to accept a particular command.
+	 */
+	spin_lock_irqsave(&io_request_lock, flags);
+
+	if (at_head) {
+		SRpnt->sr_request.next = q->current_request;
+		q->current_request = &SRpnt->sr_request;
+	} else {
+		/*
+		 * FIXME(eric) - we always insert at the tail of the
+		 * list.  Otherwise ioctl commands would always take
+		 * precedence over normal I/O.  An ioctl on a busy
+		 * disk might be delayed indefinitely because the
+		 * request might not float high enough in the queue
+		 * to be scheduled.
+		 */
+		SRpnt->sr_request.next = NULL;
+		if (q->current_request == NULL) {
+			q->current_request = &SRpnt->sr_request;
+		} else {
+			struct request *req;
+
+			/* O(n) walk to the tail of the singly linked
+			 * request list, done under io_request_lock. */
+			for (req = q->current_request; req; req = req->next) {
+				if (req->next == NULL) {
+					req->next = &SRpnt->sr_request;
+					break;
+				}
+			}
+		}
+	}
+
+	/*
+	 * Now hit the requeue function for the queue.  If the host is
+	 * already busy, so be it - we have nothing special to do.  If
+	 * the host can queue it, then send it off.
+	 */
+	q->request_fn(q);
+	spin_unlock_irqrestore(&io_request_lock, flags);
+	return 0;
+}
+
/*
* Function: scsi_init_cmd_errh()
*
*/
SCpnt->old_use_sg = SCpnt->use_sg;
SCpnt->old_cmd_len = SCpnt->cmd_len;
+ SCpnt->sc_old_data_direction = SCpnt->sc_data_direction;
memcpy((void *) SCpnt->data_cmnd,
(const void *) SCpnt->cmnd, sizeof(SCpnt->cmnd));
SCpnt->buffer = SCpnt->request_buffer;
if (((SHpnt->can_queue > 0)
&& (SHpnt->host_busy >= SHpnt->can_queue))
|| (SHpnt->host_blocked)
+ || (SHpnt->host_self_blocked)
|| (SDpnt->device_blocked)) {
break;
}
for (SDpnt = SHpnt->host_queue; SDpnt; SDpnt = SDpnt->next) {
request_queue_t *q;
if ((SHpnt->can_queue > 0 && (SHpnt->host_busy >= SHpnt->can_queue))
- || (SHpnt->host_blocked)) {
+ || (SHpnt->host_blocked)
+ || (SHpnt->host_self_blocked)) {
break;
}
if (SDpnt->device_blocked || !SDpnt->starved) {
{
struct request *req;
Scsi_Cmnd *SCpnt;
+ Scsi_Request *SRpnt;
Scsi_Device *SDpnt;
struct Scsi_Host *SHpnt;
struct Scsi_Device_Template *STpnt;
* the host is no longer able to accept any more requests.
*/
while (1 == 1) {
+ /*
+ * Check this again - each time we loop through we will have
+ * released the lock and grabbed it again, so each time
+ * we need to check to see if the queue is plugged or not.
+ */
+ if (SHpnt->in_recovery
+ || q->plugged) {
+ return;
+ }
+
/*
* If the device cannot accept another request, then quit.
*/
break;
}
if ((SHpnt->can_queue > 0 && (SHpnt->host_busy >= SHpnt->can_queue))
- || (SHpnt->host_blocked)) {
+ || (SHpnt->host_blocked)
+ || (SHpnt->host_self_blocked)) {
/*
* If we are unable to process any commands at all for this
* device, then we consider it to be starved. What this means
if (req->cmd == SPECIAL) {
STpnt = NULL;
SCpnt = (Scsi_Cmnd *) req->special;
+ SRpnt = (Scsi_Request *) req->special;
+
+ if( SRpnt->sr_magic == SCSI_REQ_MAGIC ) {
+ SCpnt = scsi_allocate_device(SRpnt->sr_device,
+ FALSE, FALSE);
+ scsi_init_cmd_from_req(SCpnt, SRpnt);
+ }
+
} else {
STpnt = scsi_get_request_dev(req);
if (!STpnt) {
}
}
+/*
+ * Function:    scsi_block_requests()
+ *
+ * Purpose:     Utility function used by low-level drivers to prevent further
+ *              commands from being queued to the device.
+ *
+ * Arguments:   SHpnt       - Host in question
+ *
+ * Returns:     Nothing
+ *
+ * Lock status: No locks are assumed held.
+ *
+ * Notes:       There is no timer nor any other means by which the requests
+ *              get unblocked other than the low-level driver calling
+ *              scsi_unblock_requests().
+ */
+void scsi_block_requests(struct Scsi_Host * SHpnt)
+{
+	/* Plain flag write; the queueing paths test host_self_blocked and
+	 * simply stop dispatching new commands while it is set. */
+	SHpnt->host_self_blocked = TRUE;
+}
+
+/*
+ * Function:    scsi_unblock_requests()
+ *
+ * Purpose:     Utility function used by low-level drivers to allow further
+ *              commands from being queued to the device.
+ *
+ * Arguments:   SHpnt       - Host in question
+ *
+ * Returns:     Nothing
+ *
+ * Lock status: No locks are assumed held.
+ *
+ * Notes:       There is no timer nor any other means by which the requests
+ *              get unblocked other than the low-level driver calling
+ *              scsi_unblock_requests().
+ *
+ *              This is done as an API function so that changes to the
+ *              internals of the scsi mid-layer won't require wholesale
+ *              changes to drivers that use this feature.
+ */
+void scsi_unblock_requests(struct Scsi_Host * SHpnt)
+{
+	SHpnt->host_self_blocked = FALSE;
+	/* NOTE(review): nothing here kicks the request queues, so commands
+	 * that piled up while blocked appear to wait for the next queue
+	 * activity - confirm this is the intended behavior. */
+}
+
+
+/*
+ * Function:    scsi_report_bus_reset()
+ *
+ * Purpose:     Utility function used by low-level drivers to report that
+ *              they have observed a bus reset on the bus being handled.
+ *
+ * Arguments:   SHpnt       - Host in question
+ *              channel     - channel on which reset was observed.
+ *
+ * Returns:     Nothing
+ *
+ * Lock status: No locks are assumed held.
+ *
+ * Notes:       This only needs to be called if the reset is one which
+ *              originates from an unknown location.  Resets originated
+ *              by the mid-level itself don't need to call this, but there
+ *              should be no harm.
+ *
+ *              The main purpose of this is to make sure that a CHECK_CONDITION
+ *              is properly treated.
+ */
+void scsi_report_bus_reset(struct Scsi_Host * SHpnt, int channel)
+{
+	Scsi_Device *SDloop;
+	/* Flag every device on the reset channel so a subsequent
+	 * CHECK CONDITION / UNIT ATTENTION is treated as expected. */
+	for (SDloop = SHpnt->host_queue; SDloop; SDloop = SDloop->next) {
+		if (channel == SDloop->channel) {
+			SDloop->was_reset = 1;
+			SDloop->expecting_cc_ua = 1;
+		}
+	}
+}
+
/*
* FIXME(eric) - these are empty stubs for the moment. I need to re-implement
* host blocking from scratch. The theory is that hosts that wish to block
SCpnt->use_sg = 0;
SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]);
SCpnt->result = 0;
+ SCpnt->sc_data_direction = SCSI_DATA_NONE;
+
/*
* Ugly, ugly. The newer interfaces all assume that the lock
* isn't held. Mustn't disappoint, or we deadlock the system.
if (SCpnt->flags & WAS_SENSE) {
SCpnt->use_sg = SCpnt->old_use_sg;
SCpnt->cmd_len = SCpnt->old_cmd_len;
+ SCpnt->sc_data_direction = SCpnt->sc_old_data_direction;
}
switch (host_byte(result)) {
case DID_OK:
SCpnt->request_bufflen = SCpnt->bufflen;
SCpnt->use_sg = SCpnt->old_use_sg;
SCpnt->cmd_len = SCpnt->old_cmd_len;
+ SCpnt->sc_data_direction = SCpnt->sc_old_data_direction;
SCpnt->result = 0;
/*
* Ugly, ugly. The newer interfaces all
SCpnt->result = result | ((exit & 0xff) << 24);
SCpnt->use_sg = SCpnt->old_use_sg;
SCpnt->cmd_len = SCpnt->old_cmd_len;
+ SCpnt->sc_data_direction = SCpnt->sc_old_data_direction;
/*
* The upper layers assume the lock isn't held. We mustn't
* disappoint them. When the new error handling code is in
* it isn't an issue.
*/
spin_unlock_irq(&io_request_lock);
+ SRpnt = SCpnt->sc_request;
+ if( SRpnt != NULL ) {
+ SRpnt->sr_result = SRpnt->sr_command->result;
+ if( SRpnt->sr_result != 0 ) {
+ memcpy(SRpnt->sr_sense_buffer,
+ SRpnt->sr_command->sense_buffer,
+ sizeof(SRpnt->sr_sense_buffer));
+ }
+ }
+
SCpnt->done(SCpnt);
spin_lock_irq(&io_request_lock);
}
SCpnt->target = SDpnt->id;
SCpnt->lun = SDpnt->lun;
SCpnt->channel = SDpnt->channel;
+ SCpnt->sc_data_direction = SCSI_DATA_NONE;
scsi_wait_cmd (SCpnt, (void *) scsi_cmd,
(void *) NULL,
scsi_cmd[4] = 255;
scsi_cmd[5] = 0;
SCpnt->cmd_len = 0;
+ SCpnt->sc_data_direction = SCSI_DATA_READ;
scsi_wait_cmd (SCpnt, (void *) scsi_cmd,
(void *) scsi_result,
scsi_cmd[4] = 0x2a;
scsi_cmd[5] = 0;
SCpnt->cmd_len = 0;
+ SCpnt->sc_data_direction = SCSI_DATA_NONE;
scsi_wait_cmd (SCpnt, (void *) scsi_cmd,
(void *) scsi_result, 0x2a,
SCSI_TIMEOUT, 3);
#include "sd.h"
#include <scsi/scsicam.h>
+
/*
* This source file contains the symbol table used by scsi loadable
* modules.
*/
-
-extern void print_command(unsigned char *command);
-extern void print_sense(const char *devclass, Scsi_Cmnd * SCpnt);
-
-extern const char *const scsi_device_types[];
-
EXPORT_SYMBOL(scsi_register_module);
EXPORT_SYMBOL(scsi_unregister_module);
EXPORT_SYMBOL(scsi_free);
EXPORT_SYMBOL(scsi_ioctl);
EXPORT_SYMBOL(print_command);
EXPORT_SYMBOL(print_sense);
+EXPORT_SYMBOL(print_req_sense);
EXPORT_SYMBOL(print_msg);
EXPORT_SYMBOL(print_status);
EXPORT_SYMBOL(scsi_dma_free_sectors);
EXPORT_SYMBOL(scsi_logging_level);
#endif
+EXPORT_SYMBOL(scsi_allocate_request);
+EXPORT_SYMBOL(scsi_release_request);
+EXPORT_SYMBOL(scsi_wait_req);
+EXPORT_SYMBOL(scsi_do_req);
+
+EXPORT_SYMBOL(scsi_report_bus_reset);
+EXPORT_SYMBOL(scsi_block_requests);
+EXPORT_SYMBOL(scsi_unblock_requests);
+
EXPORT_SYMBOL(scsi_get_host_dev);
EXPORT_SYMBOL(scsi_free_host_dev);
return 0;
}
SCpnt->cmnd[0] = WRITE_6;
+ SCpnt->sc_data_direction = SCSI_DATA_WRITE;
break;
case READ:
SCpnt->cmnd[0] = READ_6;
+ SCpnt->sc_data_direction = SCSI_DATA_READ;
break;
default:
panic("Unknown sd command %d\n", SCpnt->request.cmd);
SCpnt->cmd_len = 0;
SCpnt->sense_buffer[0] = 0;
SCpnt->sense_buffer[2] = 0;
+ SCpnt->sc_data_direction = SCSI_DATA_NONE;
scsi_wait_cmd (SCpnt, (void *) cmd, (void *) buffer,
0/*512*/, SD_TIMEOUT, MAX_RETRIES);
break;
}
+ /*
+ * If the drive has indicated to us that it doesn't have
+ * any media in it, don't bother with any of the rest of
+ * this crap.
+ */
+ if( the_result != 0
+ && ((driver_byte(the_result) & DRIVER_SENSE) != 0)
+ && SCpnt->sense_buffer[2] == UNIT_ATTENTION
+ && SCpnt->sense_buffer[12] == 0x3A ) {
+ rscsi_disks[i].capacity = 0x1fffff;
+ sector_size = 512;
+ rscsi_disks[i].device->changed = 1;
+ rscsi_disks[i].ready = 0;
+ break;
+ }
+
/* Look for non-removable devices that return NOT_READY.
* Issue command to spin up drive for these cases. */
if (the_result && !rscsi_disks[i].device->removable &&
SCpnt->sense_buffer[0] = 0;
SCpnt->sense_buffer[2] = 0;
+ SCpnt->sc_data_direction = SCSI_DATA_NONE;
scsi_wait_cmd(SCpnt, (void *) cmd, (void *) buffer,
0/*512*/, SD_TIMEOUT, MAX_RETRIES);
}
SCpnt->sense_buffer[0] = 0;
SCpnt->sense_buffer[2] = 0;
+ SCpnt->sc_data_direction = SCSI_DATA_READ;
scsi_wait_cmd(SCpnt, (void *) cmd, (void *) buffer,
8, SD_TIMEOUT, MAX_RETRIES);
SCpnt->sense_buffer[2] = 0;
/* same code as READCAPA !! */
+ SCpnt->sc_data_direction = SCSI_DATA_READ;
scsi_wait_cmd(SCpnt, (void *) cmd, (void *) buffer,
512, SD_TIMEOUT, MAX_RETRIES);
void get_sectorsize(int);
void get_capabilities(int);
-void requeue_sr_request(Scsi_Cmnd * SCpnt);
static int sr_media_change(struct cdrom_device_info *, int);
static int sr_packet(struct cdrom_device_info *, struct cdrom_generic_command *);
return 0;
}
SCpnt->cmnd[0] = WRITE_10;
+ SCpnt->sc_data_direction = SCSI_DATA_WRITE;
break;
case READ:
SCpnt->cmnd[0] = READ_10;
+ SCpnt->sc_data_direction = SCSI_DATA_READ;
break;
default:
panic("Unknown sr command %d\n", SCpnt->request.cmd);
unsigned char *buffer;
int the_result, retries;
int sector_size;
- Scsi_Cmnd *SCpnt;
+ Scsi_Request *SRpnt;
buffer = (unsigned char *) scsi_malloc(512);
- SCpnt = scsi_allocate_device(scsi_CDs[i].device, 1, FALSE);
+ SRpnt = scsi_allocate_request(scsi_CDs[i].device);
retries = 3;
do {
cmd[0] = READ_CAPACITY;
cmd[1] = (scsi_CDs[i].device->lun << 5) & 0xe0;
memset((void *) &cmd[2], 0, 8);
- SCpnt->request.rq_status = RQ_SCSI_BUSY; /* Mark as really busy */
- SCpnt->cmd_len = 0;
+ SRpnt->sr_request.rq_status = RQ_SCSI_BUSY; /* Mark as really busy */
+ SRpnt->sr_cmd_len = 0;
memset(buffer, 0, 8);
/* Do the command and wait.. */
- scsi_wait_cmd(SCpnt, (void *) cmd, (void *) buffer,
+ SRpnt->sr_data_direction = SCSI_DATA_READ;
+ scsi_wait_req(SRpnt, (void *) cmd, (void *) buffer,
512, SR_TIMEOUT, MAX_RETRIES);
- the_result = SCpnt->result;
+ the_result = SRpnt->sr_result;
retries--;
} while (the_result && retries);
- scsi_release_command(SCpnt);
- SCpnt = NULL;
+ scsi_release_request(SRpnt);
+ SRpnt = NULL;
if (the_result) {
scsi_CDs[i].capacity = 0x1fffff;
cmd[2] = 0x2a;
cmd[4] = 128;
cmd[3] = cmd[5] = 0;
- rc = sr_do_ioctl(i, cmd, buffer, 128, 1);
+ rc = sr_do_ioctl(i, cmd, buffer, 128, 1, SCSI_DATA_READ);
if (-EINVAL == rc) {
/* failed, drive has'nt this mode page */
*/
static int sr_packet(struct cdrom_device_info *cdi, struct cdrom_generic_command *cgc)
{
- Scsi_Cmnd *SCpnt;
+ Scsi_Request *SRpnt;
Scsi_Device *device = scsi_CDs[MINOR(cdi->dev)].device;
unsigned char *buffer = cgc->buffer;
int buflen;
/* get the device */
- SCpnt = scsi_allocate_device(device, 1, FALSE);
- if (SCpnt == NULL)
+ SRpnt = scsi_allocate_request(device);
+ if (SRpnt == NULL)
return -ENODEV; /* this just doesn't seem right /axboe */
/* use buffer for ISA DMA */
buflen = (cgc->buflen + 511) & ~511;
- if (cgc->buffer && SCpnt->host->unchecked_isa_dma &&
+ if (cgc->buffer && SRpnt->sr_host->unchecked_isa_dma &&
(virt_to_phys(cgc->buffer) + cgc->buflen - 1 > ISA_DMA_THRESHOLD)) {
buffer = scsi_malloc(buflen);
if (buffer == NULL) {
cgc->cmd[1] |= device->lun << 5;
/* do the locking and issue the command */
- SCpnt->request.rq_dev = cdi->dev;
+ SRpnt->sr_request.rq_dev = cdi->dev;
/* scsi_wait_cmd sets the command length */
- SCpnt->cmd_len = 0;
+ SRpnt->sr_cmd_len = 0;
- scsi_wait_cmd(SCpnt, (void *) cgc->cmd, (void *) buffer, cgc->buflen,
+ /*
+ * FIXME(eric) - need to set the data direction here.
+ */
+ SRpnt->sr_data_direction = SCSI_DATA_UNKNOWN;
+
+ scsi_wait_req(SRpnt, (void *) cgc->cmd, (void *) buffer, cgc->buflen,
SR_TIMEOUT, MAX_RETRIES);
- if ((cgc->stat = SCpnt->result))
- cgc->sense = (struct request_sense *) SCpnt->sense_buffer;
+ if ((cgc->stat = SRpnt->sr_result))
+ cgc->sense = (struct request_sense *) SRpnt->sr_sense_buffer;
/* release */
- SCpnt->request.rq_dev = MKDEV(0, 0);
- scsi_release_command(SCpnt);
- SCpnt = NULL;
+ SRpnt->sr_request.rq_dev = MKDEV(0, 0);
+ scsi_release_request(SRpnt);
+ SRpnt = NULL;
/* write DMA buffer back if used */
if (buffer && (buffer != cgc->buffer)) {
extern Scsi_CD *scsi_CDs;
-int sr_do_ioctl(int, unsigned char *, void *, unsigned, int);
+int sr_do_ioctl(int, unsigned char *, void *, unsigned, int, int);
int sr_lock_door(struct cdrom_device_info *, int);
int sr_tray_move(struct cdrom_device_info *, int);
error code is. Normally the UNIT_ATTENTION code will automatically
clear after one error */
-int sr_do_ioctl(int target, unsigned char *sr_cmd, void *buffer, unsigned buflength, int quiet)
+int sr_do_ioctl(int target, unsigned char *sr_cmd, void *buffer, unsigned buflength, int quiet, int readwrite)
{
- Scsi_Cmnd *SCpnt;
+ Scsi_Request *SRpnt;
Scsi_Device *SDev;
struct request *req;
int result, err = 0, retries = 0;
char *bounce_buffer;
SDev = scsi_CDs[target].device;
- SCpnt = scsi_allocate_device(scsi_CDs[target].device, 1, FALSE);
+ SRpnt = scsi_allocate_request(scsi_CDs[target].device);
+ SRpnt->sr_data_direction = readwrite;
/* use ISA DMA buffer if necessary */
- SCpnt->request.buffer = buffer;
- if (buffer && SCpnt->host->unchecked_isa_dma &&
+ SRpnt->sr_request.buffer = buffer;
+ if (buffer && SRpnt->sr_host->unchecked_isa_dma &&
(virt_to_phys(buffer) + buflength - 1 > ISA_DMA_THRESHOLD)) {
bounce_buffer = (char *) scsi_malloc((buflength + 511) & ~511);
if (bounce_buffer == NULL) {
return -ENODEV;
- scsi_wait_cmd(SCpnt, (void *) sr_cmd, (void *) buffer, buflength,
+ scsi_wait_req(SRpnt, (void *) sr_cmd, (void *) buffer, buflength,
IOCTL_TIMEOUT, IOCTL_RETRIES);
- req = &SCpnt->request;
- if (SCpnt->buffer && req->buffer && SCpnt->buffer != req->buffer) {
- memcpy(req->buffer, SCpnt->buffer, SCpnt->bufflen);
- scsi_free(SCpnt->buffer, (SCpnt->bufflen + 511) & ~511);
- SCpnt->buffer = req->buffer;
+ req = &SRpnt->sr_request;
+ if (SRpnt->sr_buffer && req->buffer && SRpnt->sr_buffer != req->buffer) {
+ memcpy(req->buffer, SRpnt->sr_buffer, SRpnt->sr_bufflen);
+ scsi_free(SRpnt->sr_buffer, (SRpnt->sr_bufflen + 511) & ~511);
+ SRpnt->sr_buffer = req->buffer;
}
- result = SCpnt->result;
+ result = SRpnt->sr_result;
/* Minimal error checking. Ignore cases we know about, and report the rest. */
if (driver_byte(result) != 0) {
- switch (SCpnt->sense_buffer[2] & 0xf) {
+ switch (SRpnt->sr_sense_buffer[2] & 0xf) {
case UNIT_ATTENTION:
scsi_CDs[target].device->changed = 1;
if (!quiet)
err = -ENOMEDIUM;
break;
case NOT_READY: /* This happens if there is no disc in drive */
- if (SCpnt->sense_buffer[12] == 0x04 &&
- SCpnt->sense_buffer[13] == 0x01) {
+ if (SRpnt->sr_sense_buffer[12] == 0x04 &&
+ SRpnt->sr_sense_buffer[13] == 0x01) {
/* sense: Logical unit is in process of becoming ready */
if (!quiet)
printk(KERN_INFO "sr%d: CDROM not ready yet.\n", target);
if (!quiet)
printk(KERN_INFO "sr%d: CDROM not ready. Make sure there is a disc in the drive.\n", target);
#ifdef DEBUG
- print_sense("sr", SCpnt);
+ print_req_sense("sr", SRpnt);
#endif
err = -ENOMEDIUM;
break;
if (!quiet)
printk(KERN_ERR "sr%d: CDROM (ioctl) reports ILLEGAL "
"REQUEST.\n", target);
- if (SCpnt->sense_buffer[12] == 0x20 &&
- SCpnt->sense_buffer[13] == 0x00) {
+ if (SRpnt->sr_sense_buffer[12] == 0x20 &&
+ SRpnt->sr_sense_buffer[13] == 0x00) {
/* sense: Invalid command operation code */
err = -EDRIVE_CANT_DO_THIS;
} else {
}
#ifdef DEBUG
print_command(sr_cmd);
- print_sense("sr", SCpnt);
+ print_req_sense("sr", SRpnt);
#endif
break;
default:
printk(KERN_ERR "sr%d: CDROM (ioctl) error, command: ", target);
print_command(sr_cmd);
- print_sense("sr", SCpnt);
+ print_req_sense("sr", SRpnt);
err = -EIO;
}
}
- result = SCpnt->result;
+ result = SRpnt->sr_result;
/* Wake up a process waiting for device */
- scsi_release_command(SCpnt);
- SCpnt = NULL;
+ scsi_release_request(SRpnt);
+ SRpnt = NULL;
return err;
}
sr_cmd[0] = GPCMD_TEST_UNIT_READY;
sr_cmd[1] = ((scsi_CDs[minor].device->lun) << 5);
sr_cmd[2] = sr_cmd[3] = sr_cmd[4] = sr_cmd[5] = 0;
- return sr_do_ioctl(minor, sr_cmd, NULL, 255, 1);
+ return sr_do_ioctl(minor, sr_cmd, NULL, 255, 1, SCSI_DATA_NONE);
}
int sr_tray_move(struct cdrom_device_info *cdi, int pos)
sr_cmd[2] = sr_cmd[3] = sr_cmd[5] = 0;
sr_cmd[4] = (pos == 0) ? 0x03 /* close */ : 0x02 /* eject */ ;
- return sr_do_ioctl(MINOR(cdi->dev), sr_cmd, NULL, 255, 0);
+ return sr_do_ioctl(MINOR(cdi->dev), sr_cmd, NULL, 255, 0, SCSI_DATA_NONE);
}
int sr_lock_door(struct cdrom_device_info *cdi, int lock)
sr_cmd[8] = 24;
sr_cmd[9] = 0;
- result = sr_do_ioctl(MINOR(cdi->dev), sr_cmd, buffer, 24, 0);
+ result = sr_do_ioctl(MINOR(cdi->dev), sr_cmd, buffer, 24, 0, SCSI_DATA_READ);
memcpy(mcn->medium_catalog_number, buffer + 9, 13);
mcn->medium_catalog_number[13] = 0;
sr_cmd[2] = (speed >> 8) & 0xff; /* MSB for speed (in kbytes/sec) */
sr_cmd[3] = speed & 0xff; /* LSB */
- if (sr_do_ioctl(MINOR(cdi->dev), sr_cmd, NULL, 0, 0))
+ if (sr_do_ioctl(MINOR(cdi->dev), sr_cmd, NULL, 0, 0, SCSI_DATA_NONE))
return -EIO;
return 0;
}
sr_cmd[8] = 12; /* LSB of length */
sr_cmd[9] = 0;
- result = sr_do_ioctl(target, sr_cmd, buffer, 12, 1);
+ result = sr_do_ioctl(target, sr_cmd, buffer, 12, 1, SCSI_DATA_READ);
tochdr->cdth_trk0 = buffer[2];
tochdr->cdth_trk1 = buffer[3];
sr_cmd[8] = 12; /* LSB of length */
sr_cmd[9] = 0;
- result = sr_do_ioctl(target, sr_cmd, buffer, 12, 0);
+ result = sr_do_ioctl(target, sr_cmd, buffer, 12, 0, SCSI_DATA_READ);
tocentry->cdte_ctrl = buffer[5] & 0xf;
tocentry->cdte_adr = buffer[5] >> 4;
cmd[9] = 0x10;
break;
}
- return sr_do_ioctl(minor, cmd, dest, blksize, 0);
+ return sr_do_ioctl(minor, cmd, dest, blksize, 0, SCSI_DATA_READ);
}
/*
cmd[4] = (unsigned char) (lba >> 8) & 0xff;
cmd[5] = (unsigned char) lba & 0xff;
cmd[8] = 1;
- rc = sr_do_ioctl(minor, cmd, dest, blksize, 0);
+ rc = sr_do_ioctl(minor, cmd, dest, blksize, 0, SCSI_DATA_READ);
return rc;
}
modesel->density = density;
modesel->block_length_med = (blocklength >> 8) & 0xff;
modesel->block_length_lo = blocklength & 0xff;
- if (0 == (rc = sr_do_ioctl(minor, cmd, buffer, sizeof(*modesel), 0))) {
+ if (0 == (rc = sr_do_ioctl(minor, cmd, buffer, sizeof(*modesel), 0, SCSI_DATA_WRITE))) {
scsi_CDs[minor].device->sector_size = blocklength;
}
#ifdef DEBUG
cmd[1] = (scsi_CDs[minor].device->lun << 5);
cmd[8] = 12;
cmd[9] = 0x40;
- rc = sr_do_ioctl(minor, cmd, buffer, 12, 1);
+ rc = sr_do_ioctl(minor, cmd, buffer, 12, 1, SCSI_DATA_READ);
if (rc != 0)
break;
if ((buffer[0] << 8) + buffer[1] < 0x0a) {
cmd[0] = 0xde;
cmd[1] = (scsi_CDs[minor].device->lun << 5) | 0x03;
cmd[2] = 0xb0;
- rc = sr_do_ioctl(minor, cmd, buffer, 0x16, 1);
+ rc = sr_do_ioctl(minor, cmd, buffer, 0x16, 1, SCSI_DATA_READ);
if (rc != 0)
break;
if (buffer[14] != 0 && buffer[14] != 0xb0) {
memset(cmd, 0, MAX_COMMAND_SIZE);
cmd[0] = 0xc7;
cmd[1] = (scsi_CDs[minor].device->lun << 5) | 3;
- rc = sr_do_ioctl(minor, cmd, buffer, 4, 1);
+ rc = sr_do_ioctl(minor, cmd, buffer, 4, 1, SCSI_DATA_READ);
if (rc == -EINVAL) {
printk(KERN_INFO "sr%d: Hmm, seems the drive "
"doesn't support multisession CD's\n", minor);
cmd[1] = (scsi_CDs[minor].device->lun << 5);
cmd[8] = 0x04;
cmd[9] = 0x40;
- rc = sr_do_ioctl(minor, cmd, buffer, 0x04, 1);
+ rc = sr_do_ioctl(minor, cmd, buffer, 0x04, 1, SCSI_DATA_READ);
if (rc != 0) {
break;
}
cmd[6] = rc & 0x7f; /* number of last session */
cmd[8] = 0x0c;
cmd[9] = 0x40;
- rc = sr_do_ioctl(minor, cmd, buffer, 12, 1);
+ rc = sr_do_ioctl(minor, cmd, buffer, 12, 1, SCSI_DATA_READ);
if (rc != 0) {
break;
}
/* OSS interface to the ac97s.. */
#define AC97_STEREO_MASK (SOUND_MASK_VOLUME|SOUND_MASK_PCM|\
SOUND_MASK_LINE|SOUND_MASK_CD|\
- SOUND_MIXER_ALTPCM|SOUND_MASK_IGAIN|\
+ SOUND_MASK_ALTPCM|SOUND_MASK_IGAIN|\
SOUND_MASK_LINE1|SOUND_MASK_VIDEO)
#define AC97_SUPPORTED_MASK (AC97_STEREO_MASK | \
SOUND_MASK_BASS|SOUND_MASK_TREBLE|\
SOUND_MASK_SPEAKER|SOUND_MASK_MIC|\
- SOUND_MIXER_PHONEIN|SOUND_MIXER_PHONEOUT)
+ SOUND_MASK_PHONEIN|SOUND_MASK_PHONEOUT)
#define AC97_RECORD_MASK (SOUND_MASK_MIC|\
SOUND_MASK_CD|SOUND_MASK_VIDEO|\
static int __devinit solo1_probe(struct pci_dev *pcidev, const struct pci_device_id *pciid)
{
struct solo1_state *s;
+ struct pm_dev *pmdev;
if (!RSRCISIOREGION(pcidev, 0) ||
!RSRCISIOREGION(pcidev, 1) ||
if (error)
goto out;
- if (ia_valid & ATTR_SIZE) {
- inode->i_size = attr->ia_size;
+ if (ia_valid & ATTR_SIZE)
vmtruncate(inode, attr->ia_size);
- }
if (ia_valid & ATTR_MTIME) {
inode->i_mtime = attr->ia_mtime;
adfs_unix2adfs_time(inode, attr->ia_mtime);
inode->i_uid = attr->ia_uid;
if (ia_valid & ATTR_GID)
inode->i_gid = attr->ia_gid;
- if (ia_valid & ATTR_SIZE) {
- inode->i_size = attr->ia_size;
+ if (ia_valid & ATTR_SIZE)
vmtruncate(inode, attr->ia_size);
- }
if (ia_valid & ATTR_ATIME)
inode->i_atime = attr->ia_atime;
if (ia_valid & ATTR_MTIME)
int generic_commit_write(struct file *file, struct page *page,
unsigned from, unsigned to)
{
- __block_commit_write((struct inode*)page->mapping->host,page,from,to);
+ struct inode *inode = (struct inode*)page->mapping->host;
+ loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
+ __block_commit_write(inode,page,from,to);
kunmap(page);
+ if (pos > inode->i_size)
+ inode->i_size = pos;
return 0;
}
kaddr = (char*)page_address(page);
memcpy(kaddr, symname, len-1);
mapping->a_ops->commit_write(NULL, page, 0, len-1);
- inode->i_size = len-1;
/*
* Notice that we are _not_ going to block here - end of page is
* unmapped, so this will only try to map the rest of page, see
mark_inode_dirty(inode);
return 0;
fail_map:
- inode->i_size = len-1;
UnlockPage(page);
page_cache_release(page);
fail:
static int nfs_commit_write(struct file *file, struct page *page, unsigned offset, unsigned to)
{
long status;
+ loff_t pos = ((loff_t)page->index<<PAGE_CACHE_SHIFT) + to;
+ struct inode *inode = (struct inode*)page->mapping->host;
kunmap(page);
lock_kernel();
status = nfs_updatepage(file, page, offset, to-offset);
unlock_kernel();
+ /* most likely it's already done. CHECKME */
+ if (pos > inode->i_size)
+ inode->i_size = pos;
return status;
}
if (attr->ia_size != fattr.size)
printk("nfs_notify_change: attr=%Ld, fattr=%d??\n",
(long long) attr->ia_size, fattr.size);
- inode->i_size = attr->ia_size;
inode->i_mtime = fattr.mtime.seconds;
+ vmtruncate(inode, attr->ia_size);
}
if (attr->ia_valid & ATTR_MTIME)
inode->i_mtime = fattr.mtime.seconds;
error = nfs_refresh_inode(inode, &fattr);
- if (!error && (attr->ia_valid & ATTR_SIZE))
- vmtruncate(inode, attr->ia_size);
out:
return error;
}
attr->ia_size);
if (error)
goto out;
- /*
- * We don't implement an i_op->truncate operation,
- * so we have to update the page cache here.
- */
- if (attr->ia_size < inode->i_size)
- {
- /* must die */
- truncate_inode_pages(inode->i_mapping, attr->ia_size);
- inode->i_size = attr->ia_size;
- }
+ vmtruncate(inode, attr->ia_size);
refresh = 1;
}
out:
if (refresh)
smb_refresh_inode(dentry);
- if (!error && (attr->ia_valid & ATTR_SIZE))
- vmtruncate(inode, attr->ia_size);
return error;
}
brelse(bh);
kunmap(page);
SetPageUptodate(page);
+ /* only one page here */
+ if (to > inode->i_size)
+ inode->i_size = to;
return 0;
}
#ifndef __SMP__
extern int __local_irq_count;
#define local_irq_count(cpu) ((void)(cpu), __local_irq_count)
+extern unsigned long __irq_attempt[];
+#define irq_attempt(cpu, irq) ((void)(cpu), __irq_attempt[irq])
#else
#define local_irq_count(cpu) (cpu_data[cpu].irq_count)
+#define irq_attempt(cpu, irq) (cpu_data[cpu].irq_attempt[irq])
#endif
/*
outb(0, DMA1_CLR_MASK_REG); \
outb(0, DMA2_CLR_MASK_REG)
-extern unsigned long _alpha_irq_masks[2];
-#define alpha_irq_mask _alpha_irq_masks[0]
-
extern void common_ack_irq(unsigned long irq);
extern void isa_device_interrupt(unsigned long vector, struct pt_regs * regs);
extern void srm_device_interrupt(unsigned long vector, struct pt_regs * regs);
-extern void handle_irq(int irq, int ack, struct pt_regs * regs);
+extern void handle_irq(int irq, struct pt_regs * regs);
#define RTC_IRQ 8
+#if 0 /* on Alpha we want to use only the RTC as timer for SMP issues */
#ifdef CONFIG_RTC
#define TIMER_IRQ 0 /* timer is the pit */
#else
#define TIMER_IRQ RTC_IRQ /* timer is the rtc */
#endif
+#else
+#define TIMER_IRQ RTC_IRQ /* timer is the rtc */
+#endif
/*
* PROBE_MASK is the bitset of irqs that we consider for autoprobing.
#endif
-extern char _stext;
static inline void alpha_do_profile (unsigned long pc)
{
if (prof_buffer && current->pid) {
+ extern char _stext;
+
pc -= (unsigned long) &_stext;
pc >>= prof_shift;
/*
}
}
+static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {}
+extern void no_action(int cpl, void *dev_id, struct pt_regs *regs);
+extern void init_ISA_irqs(void);
+extern void init_RTC_irq(void);
+
#endif
/* Map a single buffer of the indicate size for PCI DMA in streaming
mode. The 32-bit PCI bus mastering address to use is returned.
Once the device is given the dma address, the device owns this memory
- until either pci_unmap_single or pci_sync_single is performed. */
+ until either pci_unmap_single or pci_dma_sync_single is performed. */
extern dma_addr_t pci_map_single(struct pci_dev *, void *, long);
again owns the buffer. */
extern inline void
-pci_sync_single(struct pci_dev *dev, dma_addr_t dma_addr, long size)
+pci_dma_sync_single(struct pci_dev *dev, dma_addr_t dma_addr, long size)
{
/* Nothing to do. */
}
for a scatter-gather list, same rules and usage. */
extern inline void
-pci_sync_sg(struct pci_dev *dev, struct scatterlist *sg, int size)
+pci_dma_sync_sg(struct pci_dev *dev, struct scatterlist *sg, int nents)
{
/* Nothing to do. */
}
#define flush_cache_page(vma, vmaddr) do { } while (0)
#define flush_page_to_ram(page) do { } while (0)
#define flush_icache_range(start, end) do { } while (0)
+#define flush_icache_page(vma, page) do { } while (0)
/*
* Use a few helper functions to hide the ugly broken ASN
#ifdef __SMP__
#include <linux/threads.h>
+#include <asm/irq.h>
struct cpuinfo_alpha {
unsigned long loops_per_sec;
unsigned long *pte_cache;
unsigned long pgtable_cache_sz;
unsigned long ipi_count;
+ unsigned long irq_attempt[NR_IRQS];
+ unsigned long smp_local_irq_count;
unsigned long prof_multiplier;
unsigned long prof_counter;
int irq_count, bh_count;
-/* $Id: page.h,v 1.47 2000/01/29 00:41:49 anton Exp $
+/* $Id: page.h,v 1.48 2000/02/16 07:34:51 davem Exp $
* page.h: Various defines and such for MMU operations on the Sparc for
* the Linux kernel.
*
-/* $Id: pgtable.h,v 1.88 2000/02/06 22:56:09 zaitcev Exp $ */
+/* $Id: pgtable.h,v 1.91 2000/02/16 08:44:52 anton Exp $ */
#ifndef _SPARC_PGTABLE_H
#define _SPARC_PGTABLE_H
pgprot_val(newprot));
}
-BTFIXUPDEF_CALL(pgd_t *, pgd_offset, struct mm_struct *, unsigned long)
-BTFIXUPDEF_CALL(pmd_t *, pmd_offset, pgd_t *, unsigned long)
-BTFIXUPDEF_CALL(pte_t *, pte_offset, pmd_t *, unsigned long)
+#define pgd_index(address) ((address) >> PGDIR_SHIFT)
+
+/* to find an entry in a page-table-directory */
+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-/* to find an entry in a page-table-directory */
-#define pgd_offset(mm,addr) BTFIXUP_CALL(pgd_offset)(mm,addr)
+BTFIXUPDEF_CALL(pmd_t *, pmd_offset, pgd_t *, unsigned long)
+BTFIXUPDEF_CALL(pte_t *, pte_offset, pmd_t *, unsigned long)
/* Find an entry in the second-level page table.. */
#define pmd_offset(dir,addr) BTFIXUP_CALL(pmd_offset)(dir,addr)
/* The permissions for pgprot_val to make a page mapped on the obio space */
extern unsigned int pg_iobits;
+#define flush_icache_page(vma, pg) do { } while(0)
+
/* Certain architectures need to do special things when pte's
* within a page table are directly modified. Thus, the following
* hook is made available.
-/* $Id: floppy.h,v 1.26 2000/02/12 23:32:35 davem Exp $
+/* $Id: floppy.h,v 1.27 2000/02/15 02:58:40 davem Exp $
* asm-sparc64/floppy.h: Sparc specific parts of the Floppy driver.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
#endif /* CONFIG_PCI */
-static struct linux_prom_registers fd_regs[2];
-
static unsigned long __init sun_floppy_init(void)
{
char state[128];
- int fd_node, num_regs;
struct sbus_bus *bus;
struct sbus_dev *sdev = NULL;
static int initialized = 0;
return 0;
#endif
}
- fd_node = sdev->prom_node;
- prom_getproperty(fd_node, "status", state, sizeof(state));
+ prom_getproperty(sdev->prom_node, "status", state, sizeof(state));
if(!strncmp(state, "disabled", 8))
return 0;
- num_regs = prom_getproperty(fd_node, "reg", (char *) fd_regs,
- sizeof(fd_regs));
- num_regs = (num_regs / sizeof(fd_regs[0]));
/*
- * We cannot do sparc_alloc_io here: it does request_region,
+ * We cannot do sbus_ioremap here: it does request_region,
* which the generic floppy driver tries to do once again.
+ * But we must use the sdev resource values as they have
+ * had parent ranges applied.
*/
sun_fdc = (struct sun_flpy_controller *)
- ((unsigned long)fd_regs[0].phys_addr +
- (((unsigned long)fd_regs[0].which_io) << 32UL));
+ (sdev->resource[0].start +
+ ((sdev->resource[0].flags & 0x1ffUL) << 32UL));
/* Last minute sanity check... */
if(sbus_readb(&sun_fdc->status1_82077) == 0xff) {
-/* $Id: io.h,v 1.31 2000/02/08 05:11:38 jj Exp $ */
+/* $Id: io.h,v 1.32 2000/02/15 10:04:54 jj Exp $ */
#ifndef __SPARC64_IO_H
#define __SPARC64_IO_H
#define inb_p inb
#define outb_p outb
+#define inw_p inw
+#define outw_p outw
+#define inl_p inl
+#define outl_p outl
extern void outsb(unsigned long addr, const void *src, unsigned long count);
extern void outsw(unsigned long addr, const void *src, unsigned long count);
-/* $Id: page.h,v 1.29 1999/12/09 10:32:43 davem Exp $ */
+/* $Id: page.h,v 1.30 2000/02/16 07:34:54 davem Exp $ */
#ifndef _SPARC64_PAGE_H
#define _SPARC64_PAGE_H
-/* $Id: pgtable.h,v 1.119 2000/02/14 02:53:44 davem Exp $
+/* $Id: pgtable.h,v 1.120 2000/02/16 07:34:54 davem Exp $
* pgtable.h: SpitFire page table operations.
*
* Copyright 1996,1997 David S. Miller (davem@caip.rutgers.edu)
#define AUTOFS_IOC_SETTIMEOUT _IOWR(0x93,0x64,unsigned long)
#define AUTOFS_IOC_EXPIRE _IOR(0x93,0x65,struct autofs_packet_expire)
-#ifdef __KERNEL__
-
-/* Init function */
-int init_autofs_fs(void);
-
-#endif /* __KERNEL__ */
-
#endif /* _LINUX_AUTO_FS_H */
extern void grok_partitions(struct gendisk *dev, int drive, unsigned minors, long size);
extern void register_disk(struct gendisk *dev, kdev_t first, unsigned minors, struct block_device_operations *ops, long size);
extern void generic_unplug_device(void * data);
-extern void generic_plug_device (request_queue_t *q, kdev_t dev);
extern void generic_make_request(int rw, struct buffer_head * bh);
extern request_queue_t * blk_get_queue(kdev_t dev);
#define __constant_ntohl(x) ((__u32)(x))
#define __constant_htons(x) ((__u16)(x))
#define __constant_ntohs(x) ((__u16)(x))
+#define __constant_cpu_to_le64(x) ___swab64((x))
+#define __constant_le64_to_cpu(x) ___swab64((x))
+#define __constant_cpu_to_le32(x) ___swab32((x))
+#define __constant_le32_to_cpu(x) ___swab32((x))
+#define __constant_cpu_to_le16(x) ___swab16((x))
+#define __constant_le16_to_cpu(x) ___swab16((x))
+#define __constant_cpu_to_be64(x) ((__u64)(x))
+#define __constant_be64_to_cpu(x) ((__u64)(x))
+#define __constant_cpu_to_be32(x) ((__u32)(x))
+#define __constant_be32_to_cpu(x) ((__u32)(x))
+#define __constant_cpu_to_be16(x) ((__u16)(x))
+#define __constant_be16_to_cpu(x) ((__u16)(x))
#define __cpu_to_le64(x) __swab64((x))
#define __le64_to_cpu(x) __swab64((x))
#define __cpu_to_le32(x) __swab32((x))
#define __constant_ntohl(x) ___swab32((x))
#define __constant_htons(x) ___swab16((x))
#define __constant_ntohs(x) ___swab16((x))
+#define __constant_cpu_to_le64(x) ((__u64)(x))
+#define __constant_le64_to_cpu(x) ((__u64)(x))
+#define __constant_cpu_to_le32(x) ((__u32)(x))
+#define __constant_le32_to_cpu(x) ((__u32)(x))
+#define __constant_cpu_to_le16(x) ((__u16)(x))
+#define __constant_le16_to_cpu(x) ((__u16)(x))
+#define __constant_cpu_to_be64(x) ___swab64((x))
+#define __constant_be64_to_cpu(x) ___swab64((x))
+#define __constant_cpu_to_be32(x) ___swab32((x))
+#define __constant_be32_to_cpu(x) ___swab32((x))
+#define __constant_cpu_to_be16(x) ___swab16((x))
+#define __constant_be16_to_cpu(x) ___swab16((x))
#define __cpu_to_le64(x) ((__u64)(x))
#define __le64_to_cpu(x) ((__u64)(x))
#define __cpu_to_le32(x) ((__u32)(x))
#define __constant_ntohl(x) ___swahb32((x))
#define __constant_htons(x) ___swab16((x))
#define __constant_ntohs(x) ___swab16((x))
+#define __constant_cpu_to_le64(x) I DON'T KNOW
+#define __constant_le64_to_cpu(x) I DON'T KNOW
+#define __constant_cpu_to_le32(x) ___swahw32((x))
+#define __constant_le32_to_cpu(x) ___swahw32((x))
+#define __constant_cpu_to_le16(x) ((__u16)(x)
+#define __constant_le16_to_cpu(x) ((__u16)(x)
+#define __constant_cpu_to_be64(x) I DON'T KNOW
+#define __constant_be64_to_cpu(x) I DON'T KNOW
+#define __constant_cpu_to_be32(x) ___swahb32((x))
+#define __constant_be32_to_cpu(x) ___swahb32((x))
+#define __constant_cpu_to_be16(x) ___swab16((x))
+#define __constant_be16_to_cpu(x) ___swab16((x))
#define __cpu_to_le64(x) I DON'T KNOW
#define __le64_to_cpu(x) I DON'T KNOW
#define __cpu_to_le32(x) ___swahw32((x))
#define IRQ_REPLAY 8 /* IRQ has been replayed but not acked yet */
#define IRQ_AUTODETECT 16 /* IRQ is being autodetected */
#define IRQ_WAITING 32 /* IRQ not yet seen - for autodetection */
+#define IRQ_LEVEL 64 /* IRQ level triggered */
/*
* Interrupt controller descriptor. This is all we need
unsigned long hh_data[16/sizeof(unsigned long)];
};
+/* These flag bits are private to the generic network queueing
+ * layer, they may not be explicitly referenced by any other
+ * code.
+ */
+
enum netdev_state_t
{
- LINK_STATE_XOFF=0,
- LINK_STATE_DOWN,
- LINK_STATE_START,
- LINK_STATE_RXSEM,
- LINK_STATE_TXSEM,
- LINK_STATE_SCHED
+ __LINK_STATE_XOFF=0,
+ __LINK_STATE_START,
+ __LINK_STATE_PRESENT,
+ __LINK_STATE_SCHED
};
extern __inline__ void __netif_schedule(struct net_device *dev)
{
- if (!test_and_set_bit(LINK_STATE_SCHED, &dev->state)) {
+ if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
unsigned long flags;
int cpu = smp_processor_id();
extern __inline__ void netif_schedule(struct net_device *dev)
{
- if (!test_bit(LINK_STATE_XOFF, &dev->state))
+ if (!test_bit(__LINK_STATE_XOFF, &dev->state))
__netif_schedule(dev);
}
extern __inline__ void netif_start_queue(struct net_device *dev)
{
- clear_bit(LINK_STATE_XOFF, &dev->state);
+ clear_bit(__LINK_STATE_XOFF, &dev->state);
}
extern __inline__ void netif_wake_queue(struct net_device *dev)
{
- if (test_and_clear_bit(LINK_STATE_XOFF, &dev->state))
+ if (test_and_clear_bit(__LINK_STATE_XOFF, &dev->state))
__netif_schedule(dev);
}
extern __inline__ void netif_stop_queue(struct net_device *dev)
{
- set_bit(LINK_STATE_XOFF, &dev->state);
+ set_bit(__LINK_STATE_XOFF, &dev->state);
+}
+
+extern __inline__ int netif_queue_stopped(struct net_device *dev)
+{
+ return test_bit(__LINK_STATE_XOFF, &dev->state);
+}
+
+extern __inline__ int netif_running(struct net_device *dev)
+{
+ return test_bit(__LINK_STATE_START, &dev->state);
+}
+
+/* Hot-plugging. */
+extern __inline__ int netif_device_present(struct net_device *dev)
+{
+ return test_bit(__LINK_STATE_PRESENT, &dev->state);
+}
+
+extern __inline__ void netif_device_detach(struct net_device *dev)
+{
+ if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
+ netif_running(dev))
+ netif_stop_queue(dev);
+}
+
+extern __inline__ void netif_device_attach(struct net_device *dev)
+{
+ if (test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
+ netif_running(dev))
+ netif_wake_queue(dev);
}
extern __inline__ void dev_kfree_skb_irq(struct sk_buff *skb)
extern __inline__ void qdisc_run(struct net_device *dev)
{
- while (!test_bit(LINK_STATE_XOFF, &dev->state) &&
+ while (!netif_queue_stopped(dev) &&
qdisc_restart(dev)<0)
/* NOTHING */;
}
count -= status;
pos += status;
buf += status;
- if (pos > inode->i_size)
- inode->i_size = pos;
}
unlock:
/* Mark it unlocked again and drop the page.. */
struct vm_area_struct * mpnt;
struct address_space *mapping = inode->i_mapping;
+ inode->i_size = offset;
truncate_inode_pages(mapping, offset);
spin_lock(&mapping->i_shared_lock);
if (!mapping->i_mmap)
#include <net/br.h>
#include <linux/proc_fs.h>
#include <linux/delay.h>
+#include <net/pkt_sched.h>
#ifndef min
#define min(a, b) (((a) <= (b)) ? (a) : (b))
port_info[port_no].config_pending = FALSE; /* (4.6.1.3.2(10)) */
start_hold_timer(port_no); /* (4.6.1.3.2(11)) */
}
-/* JRP: we want the frame to be xmitted even if no other traffic.
- * net_bh() will do a dev_transmit() that kicks all devices
- */
- mark_bh(NET_BH);
}
static int root_bridge(void)
break;
if (memcmp(port_info[i].ifmac.BRIDGE_ID_ULA, dev->dev_addr, 6) != 0)
break; /* Don't worry about a change of hardware broadcast address! */
- if (dev->start) {
+ if (netif_running(dev)) {
printk(KERN_CRIT "br_device_event: NETDEV_CHANGEADDR on busy device %s - FIX DRIVER!\n",
dev->name);
/* return NOTIFY_BAD; It SHOULD be this, but I want to be friendly... */
if (memcmp(dev->dev_addr, "\x00\x00\x00\x00\x00\x00", ETH_ALEN) == 0)
return -EFAULT;
- dev->start = 1;
- dev->tbusy = 0;
+ netif_start_queue(dev);
return 0;
}
if (br_stats.flags & BR_DEBUG)
printk(KERN_DEBUG "%s: Shutting down.\n", dev->name);
- dev->tbusy = 1;
- dev->start = 0;
+ netif_stop_queue(dev);
return 0;
}
dev_clear_fastroute(pt->dev);
}
#endif
- if(pt->type==htons(ETH_P_ALL))
- {
+ if (pt->type == htons(ETH_P_ALL)) {
netdev_nit++;
pt->next=ptype_all;
ptype_all=pt;
- }
- else
- {
+ } else {
hash=ntohs(pt->type)&15;
pt->next = ptype_base[hash];
ptype_base[hash] = pt;
write_lock_bh(&ptype_lock);
- if(pt->type==htons(ETH_P_ALL))
- {
+ if (pt->type == htons(ETH_P_ALL)) {
netdev_nit--;
pt1=&ptype_all;
- }
- else
+ } else {
pt1=&ptype_base[ntohs(pt->type)&15];
+ }
- for(; (*pt1)!=NULL; pt1=&((*pt1)->next))
- {
- if(pt==(*pt1))
- {
- *pt1=pt->next;
+ for (; (*pt1) != NULL; pt1 = &((*pt1)->next)) {
+ if (pt == (*pt1)) {
+ *pt1 = pt->next;
#ifdef CONFIG_NET_FASTROUTE
if (pt->data)
netdev_fastroute_obstacles--;
{
int i;
char buf[32];
+
/*
* If you need over 100 please also fix the algorithm...
*/
- for(i=0;i<100;i++)
- {
+ for (i = 0; i < 100; i++) {
sprintf(buf,name,i);
- if(__dev_get_by_name(buf)==NULL)
- {
+ if (__dev_get_by_name(buf) == NULL) {
strcpy(dev->name, buf);
return i;
}
struct net_device *dev_alloc(const char *name, int *err)
{
struct net_device *dev=kmalloc(sizeof(struct net_device)+16, GFP_KERNEL);
- if(dev==NULL)
- {
- *err=-ENOBUFS;
+ if (dev == NULL) {
+ *err = -ENOBUFS;
return NULL;
}
memset(dev, 0, sizeof(struct net_device));
- dev->name=(char *)(dev+1); /* Name string space */
- *err=dev_alloc_name(dev,name);
- if(*err<0)
- {
+ dev->name = (char *)(dev + 1); /* Name string space */
+ *err = dev_alloc_name(dev, name);
+ if (*err < 0) {
kfree(dev);
return NULL;
}
void dev_load(const char *name)
{
- if(!__dev_get_by_name(name) && capable(CAP_SYS_MODULE))
+ if (!__dev_get_by_name(name) && capable(CAP_SYS_MODULE))
request_module(name);
}
if (dev->flags&IFF_UP)
return 0;
+ /*
+ * Is it even present?
+ */
+ if (!netif_device_present(dev))
+ return -ENODEV;
+
/*
* Call device private open method
*/
*/
dev->flags |= IFF_UP;
- set_bit(LINK_STATE_START, &dev->state);
+ set_bit(__LINK_STATE_START, &dev->state);
/*
* Initialize multicasting status
dev_deactivate(dev);
- clear_bit(LINK_STATE_START, &dev->state);
+ clear_bit(__LINK_STATE_START, &dev->state);
/*
* Call the device specific close. This cannot fail.
* Only if device is UP
+ *
+ * We allow it to be called even after a DETACH hot-plug
+ * event.
*/
if (dev->stop)
spin_lock(&dev->xmit_lock);
dev->xmit_lock_owner = cpu;
- if (!test_bit(LINK_STATE_XOFF, &dev->state)) {
+ if (!netif_queue_stopped(dev)) {
if (netdev_nit)
dev_queue_xmit_nit(skb,dev);
struct softnet_data *queue;
unsigned long flags;
- if(skb->stamp.tv_sec==0)
+ if (skb->stamp.tv_sec == 0)
get_fast_time(&skb->stamp);
/* The code is rearranged so that the path is the most
int offset;
skb=skb_clone(skb, GFP_ATOMIC);
- if(skb==NULL)
+ if (skb == NULL)
return;
offset=skb->data-skb->mac.raw;
skb_push(skb,offset); /* Put header back on for bridge */
- if(br_receive_frame(skb))
+ if (br_receive_frame(skb))
return;
kfree_skb(skb);
}
struct net_device *dev = head;
head = head->next_sched;
- clear_bit(LINK_STATE_SCHED, &dev->state);
+ clear_bit(__LINK_STATE_SCHED, &dev->state);
if (spin_trylock(&dev->queue_lock)) {
qdisc_run(dev);
static int dev_get_info(char *buffer, char **start, off_t offset, int length)
{
- int len=0;
- off_t begin=0;
- off_t pos=0;
+ int len = 0;
+ off_t begin = 0;
+ off_t pos = 0;
int size;
-
struct net_device *dev;
"Inter-| Receive | Transmit\n"
" face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed\n");
- pos+=size;
- len+=size;
+ pos += size;
+ len += size;
read_lock(&dev_base_lock);
for (dev = dev_base; dev != NULL; dev = dev->next) {
size = sprintf_stats(buffer+len, dev);
- len+=size;
- pos=begin+len;
+ len += size;
+ pos = begin + len;
- if(pos<offset) {
- len=0;
- begin=pos;
+ if (pos < offset) {
+ len = 0;
+ begin = pos;
}
- if(pos>offset+length)
+ if (pos > offset + length)
break;
}
read_unlock(&dev_base_lock);
- *start=buffer+(offset-begin); /* Start of wanted data */
- len-=(offset-begin); /* Start slop */
- if(len>length)
- len=length; /* Ending slop */
- if (len<0)
- len=0;
+ *start = buffer + (offset - begin); /* Start of wanted data */
+ len -= (offset - begin); /* Start slop */
+ if (len > length)
+ len = length; /* Ending slop */
+ if (len < 0)
+ len = 0;
return len;
}
if (len > length)
len = length;
- if(len < 0)
+ if (len < 0)
len = 0;
*start = buffer + offset;
(struct iw_statistics *) NULL);
int size;
- if(stats != (struct iw_statistics *) NULL)
- {
+ if (stats != (struct iw_statistics *) NULL) {
size = sprintf(buffer,
"%6s: %04x %3d%c %3d%c %3d%c %6d %6d %6d\n",
dev->name,
" face | tus | link level noise | nwid crypt misc\n"
);
- pos+=size;
- len+=size;
+ pos += size;
+ len += size;
read_lock(&dev_base_lock);
- for(dev = dev_base; dev != NULL; dev = dev->next) {
- size = sprintf_wireless_stats(buffer+len, dev);
- len+=size;
- pos=begin+len;
-
- if(pos < offset) {
- len=0;
- begin=pos;
+ for (dev = dev_base; dev != NULL; dev = dev->next) {
+ size = sprintf_wireless_stats(buffer + len, dev);
+ len += size;
+ pos = begin + len;
+
+ if (pos < offset) {
+ len = 0;
+ begin = pos;
}
- if(pos > offset + length)
+ if (pos > offset + length)
break;
}
read_unlock(&dev_base_lock);
*start = buffer + (offset - begin); /* Start of wanted data */
len -= (offset - begin); /* Start slop */
- if(len > length)
- len = length; /* Ending slop */
- if (len<0)
- len=0;
+ if (len > length)
+ len = length; /* Ending slop */
+ if (len < 0)
+ len = 0;
return len;
}
case SIOCGIFFLAGS: /* Get interface flags */
ifr->ifr_flags = (dev->flags&~(IFF_PROMISC|IFF_ALLMULTI|IFF_RUNNING))
|(dev->gflags&(IFF_PROMISC|IFF_ALLMULTI));
- if (!test_bit(LINK_STATE_DOWN, &dev->state))
+ if (netif_running(dev))
ifr->ifr_flags |= IFF_RUNNING;
return 0;
if (ifr->ifr_mtu<0)
return -EINVAL;
+ if (!netif_device_present(dev))
+ return -ENODEV;
+
if (dev->change_mtu)
err = dev->change_mtu(dev, ifr->ifr_mtu);
else {
return 0;
case SIOCSIFHWADDR:
- if(dev->set_mac_address==NULL)
+ if (dev->set_mac_address == NULL)
return -EOPNOTSUPP;
- if(ifr->ifr_hwaddr.sa_family!=dev->type)
+ if (ifr->ifr_hwaddr.sa_family!=dev->type)
return -EINVAL;
- err=dev->set_mac_address(dev,&ifr->ifr_hwaddr);
+ if (!netif_device_present(dev))
+ return -ENODEV;
+ err = dev->set_mac_address(dev, &ifr->ifr_hwaddr);
if (!err)
notifier_call_chain(&netdev_chain, NETDEV_CHANGEADDR, dev);
return err;
case SIOCSIFHWBROADCAST:
- if(ifr->ifr_hwaddr.sa_family!=dev->type)
+ if (ifr->ifr_hwaddr.sa_family!=dev->type)
return -EINVAL;
memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data, MAX_ADDR_LEN);
notifier_call_chain(&netdev_chain, NETDEV_CHANGEADDR, dev);
return 0;
case SIOCSIFMAP:
- if (dev->set_config)
+ if (dev->set_config) {
+ if (!netif_device_present(dev))
+ return -ENODEV;
return dev->set_config(dev,&ifr->ifr_map);
+ }
return -EOPNOTSUPP;
case SIOCADDMULTI:
- if(dev->set_multicast_list==NULL ||
- ifr->ifr_hwaddr.sa_family!=AF_UNSPEC)
+ if (dev->set_multicast_list == NULL ||
+ ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
return -EINVAL;
+ if (!netif_device_present(dev))
+ return -ENODEV;
dev_mc_add(dev,ifr->ifr_hwaddr.sa_data, dev->addr_len, 1);
return 0;
case SIOCDELMULTI:
- if(dev->set_multicast_list==NULL ||
- ifr->ifr_hwaddr.sa_family!=AF_UNSPEC)
+ if (dev->set_multicast_list == NULL ||
+ ifr->ifr_hwaddr.sa_family!=AF_UNSPEC)
return -EINVAL;
+ if (!netif_device_present(dev))
+ return -ENODEV;
dev_mc_delete(dev,ifr->ifr_hwaddr.sa_data,dev->addr_len, 1);
return 0;
return 0;
case SIOCSIFTXQLEN:
- if(ifr->ifr_qlen<0)
+ if (ifr->ifr_qlen<0)
return -EINVAL;
dev->tx_queue_len = ifr->ifr_qlen;
return 0;
*/
default:
- if(cmd >= SIOCDEVPRIVATE &&
- cmd <= SIOCDEVPRIVATE + 15) {
- if (dev->do_ioctl)
+ if (cmd >= SIOCDEVPRIVATE &&
+ cmd <= SIOCDEVPRIVATE + 15) {
+ if (dev->do_ioctl) {
+ if (!netif_device_present(dev))
+ return -ENODEV;
return dev->do_ioctl(dev, ifr, cmd);
+ }
return -EOPNOTSUPP;
}
#ifdef WIRELESS_EXT
- if(cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
- if (dev->do_ioctl)
+ if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
+ if (dev->do_ioctl) {
+ if (!netif_device_present(dev))
+ return -ENODEV;
return dev->do_ioctl(dev, ifr, cmd);
+ }
return -EOPNOTSUPP;
}
#endif /* WIRELESS_EXT */
if (dev->rebuild_header == NULL)
dev->rebuild_header = default_rebuild_header;
+ /*
+ * Default initial state at registry is that the
+ * device is present.
+ */
+
+ set_bit(__LINK_STATE_PRESENT, &dev->state);
+
dev->next = NULL;
dev_init_scheduler(dev);
write_lock_bh(&dev_base_lock);
void dev_mc_upload(struct net_device *dev)
{
/* Don't do anything till we up the interface
- [dev_open will call this function so the list will
- stay sane] */
+ * [dev_open will call this function so the list will
+ * stay sane]
+ */
- if(!(dev->flags&IFF_UP))
+ if (!(dev->flags&IFF_UP))
return;
/*
- * Devices with no set multicast don't get set
+ * Devices with no set multicast or which have been
+ * detached don't get set.
*/
- if(dev->set_multicast_list==NULL)
+ if (dev->set_multicast_list == NULL ||
+ !netif_device_present(dev))
return;
read_lock_bh(&dev_mc_lock);
struct dev_mc_list *dmi, **dmip;
write_lock_bh(&dev_mc_lock);
- for (dmip=&dev->mc_list; (dmi=*dmip)!=NULL; dmip=&dmi->next) {
+ for (dmip = &dev->mc_list; (dmi = *dmip) != NULL; dmip = &dmi->next) {
/*
* Find the entry we want to delete. The device could
* have variable length entries so check these too.
*/
- if (memcmp(dmi->dmi_addr,addr,dmi->dmi_addrlen)==0 && alen==dmi->dmi_addrlen) {
+ if (memcmp(dmi->dmi_addr, addr, dmi->dmi_addrlen) == 0 &&
+ alen == dmi->dmi_addrlen) {
if (glbl) {
int old_glbl = dmi->dmi_gusers;
dmi->dmi_gusers = 0;
if (old_glbl == 0)
break;
}
- if(--dmi->dmi_users)
+ if (--dmi->dmi_users)
goto done;
/*
dev->mc_count--;
write_unlock_bh(&dev_mc_lock);
- kfree_s(dmi,sizeof(*dmi));
+ kfree_s(dmi, sizeof(*dmi));
/*
* We have altered the list, so the card
dmi1 = (struct dev_mc_list *)kmalloc(sizeof(*dmi), GFP_ATOMIC);
write_lock_bh(&dev_mc_lock);
- for(dmi=dev->mc_list; dmi!=NULL; dmi=dmi->next) {
- if (memcmp(dmi->dmi_addr,addr,dmi->dmi_addrlen)==0 && dmi->dmi_addrlen==alen) {
+ for (dmi = dev->mc_list; dmi != NULL; dmi = dmi->next) {
+ if (memcmp(dmi->dmi_addr, addr, dmi->dmi_addrlen) == 0 &&
+ dmi->dmi_addrlen == alen) {
if (glbl) {
int old_glbl = dmi->dmi_gusers;
dmi->dmi_gusers = 1;
}
}
- if ((dmi=dmi1)==NULL) {
+ if ((dmi = dmi1) == NULL) {
write_unlock_bh(&dev_mc_lock);
return -ENOMEM;
}
memcpy(dmi->dmi_addr, addr, alen);
- dmi->dmi_addrlen=alen;
- dmi->next=dev->mc_list;
- dmi->dmi_users=1;
- dmi->dmi_gusers=glbl ? 1 : 0;
- dev->mc_list=dmi;
+ dmi->dmi_addrlen = alen;
+ dmi->next = dev->mc_list;
+ dmi->dmi_users = 1;
+ dmi->dmi_gusers = glbl ? 1 : 0;
+ dev->mc_list = dmi;
dev->mc_count++;
write_unlock_bh(&dev_mc_lock);
dev_mc_upload(dev);
void dev_mc_discard(struct net_device *dev)
{
write_lock_bh(&dev_mc_lock);
- while (dev->mc_list!=NULL) {
- struct dev_mc_list *tmp=dev->mc_list;
- dev->mc_list=tmp->next;
+ while (dev->mc_list != NULL) {
+ struct dev_mc_list *tmp = dev->mc_list;
+ dev->mc_list = tmp->next;
if (tmp->dmi_users > tmp->dmi_gusers)
printk("dev_mc_discard: multicast leakage! dmi_users=%d\n", tmp->dmi_users);
- kfree_s(tmp,sizeof(*tmp));
+ kfree_s(tmp, sizeof(*tmp));
}
- dev->mc_count=0;
+ dev->mc_count = 0;
write_unlock_bh(&dev_mc_lock);
}
static int dev_mc_read_proc(char *buffer, char **start, off_t offset,
int length, int *eof, void *data)
{
- off_t pos=0, begin=0;
+ off_t pos = 0, begin = 0;
struct dev_mc_list *m;
- int len=0;
+ int len = 0;
struct net_device *dev;
read_lock(&dev_base_lock);
for (m = dev->mc_list; m; m = m->next) {
int i;
- len += sprintf(buffer+len,"%-4d %-15s %-5d %-5d ", dev->ifindex, dev->name,
- m->dmi_users, m->dmi_gusers);
+ len += sprintf(buffer+len,"%-4d %-15s %-5d %-5d ", dev->ifindex,
+ dev->name, m->dmi_users, m->dmi_gusers);
- for (i=0; i<m->dmi_addrlen; i++)
+ for (i = 0; i < m->dmi_addrlen; i++)
len += sprintf(buffer+len, "%02x", m->dmi_addr[i]);
- len+=sprintf(buffer+len, "\n");
+ len += sprintf(buffer+len, "\n");
- pos=begin+len;
+ pos = begin + len;
if (pos < offset) {
- len=0;
- begin=pos;
+ len = 0;
+ begin = pos;
}
- if (pos > offset+length) {
+ if (pos > offset + length) {
read_unlock_bh(&dev_mc_lock);
goto done;
}
done:
read_unlock(&dev_base_lock);
- *start=buffer+(offset-begin);
- len-=(offset-begin);
- if(len>length)
- len=length;
- if(len<0)
- len=0;
+ *start = buffer + (offset - begin);
+ len -= (offset - begin);
+ if (len > length)
+ len = length;
+ if (len < 0)
+ len = 0;
return len;
}
#endif
r->ifi_flags = dev->flags;
r->ifi_change = change;
- if (test_bit(LINK_STATE_DOWN, &dev->state))
+ if (! netif_running(dev))
r->ifi_flags &= ~IFF_RUNNING;
else
r->ifi_flags |= IFF_RUNNING;
* is up, that means that the "user" really wants to connect. If not
* we notify the user about the possibility of an IrLAN connection
*/
- if (test_bit(LINK_STATE_START, &self->dev.state)) {
+ if (netif_running(&self->dev)) {
/* Open TSAPs */
irlan_client_open_ctrl_tsap(self);
irlan_open_data_tsap(self);
ASSERT(self->magic == IRLAN_MAGIC, return;);
/* Check if device still configured */
- if (test_bit(LINK_STATE_START, &self->dev.state)) {
+ if (netif_running(&self->dev)) {
IRDA_DEBUG(0, __FUNCTION__
"(), notifying irmanager to stop irlan!\n");
mgr_event.event = EVENT_IRLAN_STOP;
ASSERT(self->magic == IRLAN_MAGIC, return;);
/* Check if device is still configured */
- if (test_bit(LINK_STATE_START, &self->dev.state)) {
+ if (netif_running(&self->dev)) {
IRDA_DEBUG(0, __FUNCTION__
"(), Device still configured, closing later!\n");
buf+len);
len += sprintf(buf+len, "tx busy: %s\n",
- test_bit(LINK_STATE_XOFF, &self->dev.state) ? "TRUE" : "FALSE");
+ netif_queue_stopped(&self->dev) ? "TRUE" : "FALSE");
len += sprintf(buf+len, "\n");
}
{
struct net_device_stats *stats = (struct net_device_stats *)dev->priv;
- if (!test_bit(LINK_STATE_START, &dev->state)) {
+ if (!netif_running(dev)) {
stats->rx_errors++;
return 0;
}
struct net_device_stats *stats = (struct net_device_stats *)dev->priv;
#ifdef CONFIG_INET
- if (!test_bit(LINK_STATE_START, &dev->state)) {
+ if (!netif_running(dev)) {
stats->rx_errors++;
return 0;
}
{
struct net_device_stats *stats = (struct net_device_stats *)dev->priv;
- if (!test_bit(LINK_STATE_START, &dev->state)) {
+ if (!netif_running(dev)) {
printk(KERN_ERR "ROSE: rose_xmit - called when iface is down\n");
return 1;
}
if (sch->q.qlen) {
sch->stats.overlimits++;
- if (q->wd_expires && !test_bit(LINK_STATE_XOFF, &sch->dev->state)) {
+ if (q->wd_expires && !netif_queue_stopped(sch->dev)) {
long delay = PSCHED_US2JIFFIE(q->wd_expires);
del_timer(&q->wd_timer);
if (delay <= 0)
/* And release queue */
spin_unlock(&dev->queue_lock);
- if (!test_bit(LINK_STATE_XOFF, &dev->state)) {
+ if (!netif_queue_stopped(dev)) {
if (netdev_nit)
dev_queue_xmit_nit(skb, dev);
spin_lock(&dev->xmit_lock);
if (dev->qdisc != &noop_qdisc) {
- if (test_bit(LINK_STATE_XOFF, &dev->state) &&
+ if (netif_queue_stopped(dev) &&
(jiffies - dev->trans_start) > dev->watchdog_timeo) {
printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n", dev->name);
dev->tx_timeout(dev);
dev_watchdog_down(dev);
- if (test_bit(LINK_STATE_SCHED, &dev->state)) {
+ if (test_bit(__LINK_STATE_SCHED, &dev->state)) {
current->policy |= SCHED_YIELD;
schedule();
}
return skb;
}
- if (!test_bit(LINK_STATE_XOFF, &sch->dev->state)) {
+ if (!netif_queue_stopped(sch->dev)) {
long delay = PSCHED_US2JIFFIE(max(-toks, -ptoks));
if (delay == 0)
if (slave->qdisc_sleeping != q)
continue;
- if (test_bit(LINK_STATE_XOFF, &slave->state) ||
- test_bit(LINK_STATE_DOWN, &slave->state)) {
+ if (netif_queue_stopped(slave) || ! netif_running(slave)) {
busy = 1;
continue;
}
case 0:
if (spin_trylock(&slave->xmit_lock)) {
slave->xmit_lock_owner = smp_processor_id();
- if (!test_bit(LINK_STATE_XOFF, &slave->state) &&
+ if (!netif_queue_stopped(slave) &&
slave->hard_start_xmit(skb, slave) == 0) {
slave->xmit_lock_owner = -1;
spin_unlock(&slave->xmit_lock);
slave->xmit_lock_owner = -1;
spin_unlock(&slave->xmit_lock);
}
- if (test_bit(LINK_STATE_XOFF, &dev->state))
+ if (netif_queue_stopped(dev))
busy = 1;
break;
case 1:
if (dev == NULL)
return -ENODEV; /* interface not found */
- if (test_bit(LINK_STATE_START, &dev->state)) {
+ if (netif_running(dev)) {
if (force) {
printk(KERN_WARNING
"%s: deleting opened interface %s!\n",