say M here and read Documentation/modules.txt. The module will be
called aic7xxx.o.
-Override driver defaults for commands per LUN
-CONFIG_OVERRIDE_CMDS
- Say Y here if you want to override the default maximum number of
- commands that a single device on the aic7xxx controller is allowed
- to have active at one time. This option only affects tagged queueing
- capable devices. The driver uses a value of 24 by default.
- If you say Y here, you can adjust the number of commands per LUN
- with the following configuration option.
+Enable or Disable Tagged Command Queueing by default
+CONFIG_AIC7XXX_TCQ_ON_BY_DEFAULT
+ This option causes the aic7xxx driver to attempt to use tagged command
+ queueing on any devices that claim to support it. If this is set to yes,
+ you can still turn off TCQ on troublesome devices with the use of the
+ tag_info boot parameter. See /usr/src/linux/drivers/scsi/README.aic7xxx
+ for more information on that and other aic7xxx setup commands. If this
+ option is turned off, you may still enable TCQ on known good devices by
+ use of the tag_info boot parameter.
- If unsure, say N.
-
-Maximum number of commands per LUN
-CONFIG_AIC7XXX_CMDS_PER_LUN
- Specify the maximum number of commands you would like to allocate
- per LUN (a LUN is a Logical Unit Number -- some physical SCSI
- devices, e.g. CD jukeboxes, act logically as several separate units,
- each of which gets its own number).
-
- Reasonable figures are in the range of 14 to 32 commands per device,
+ If you are unsure about your devices then it is safest to say N here.
+
+ However, TCQ can increase performance on some hard drives by as much
+ as 50% or more, so I would recommend that if you say N here, you
+ at least read the README.aic7xxx file so you will know how to enable
+ this option manually should your drives prove to be safe in regards
+ to TCQ.
+
+ Conversely, certain drives are known to lock up or cause bus resets when
+ TCQ is enabled on them. If you have a Western Digital Enterprise SCSI
+ drive for instance, then don't even bother to enable TCQ on it as the
+ drive will become unreliable, and it will actually reduce performance.
+
+Default number of TCQ commands per device
+CONFIG_AIC7XXX_CMDS_PER_DEVICE
+ Specify the number of commands you would like to allocate per SCSI
+ device when Tagged Command Queueing (TCQ) is enabled on that device.
+
+ Reasonable figures are in the range of 8 to 24 commands per device,
but depending on hardware could be increased or decreased from that
figure. If the number is too high for any particular device, the
driver will automatically compensate usually after only 10 minutes
- of uptime and will issue a message to alert you to the fact that the
- number of commands for that device has been reduced. It will not
- hinder performance if some of your devices eventually have their
- commands per LUN reduced, but is a waste of memory if all of your
- devices end up reducing this number down to a more reasonable
- figure. Default: 24
+ of uptime. It will not hinder performance if some of your devices
+ eventually have their command depth reduced, but is a waste of memory
+ if all of your devices end up reducing this number down to a more
+ reasonable figure.
+
+ NOTE: Certain very broken drives are known to lock up when given more
+ commands than they like to deal with. Quantum Fireball drives are the
+ most common in this category. For the Quantum Fireball drives I would
+ suggest no more than 8 commands per device.
+
+ Default: 8
Collect statistics to report in /proc
CONFIG_AIC7XXX_PROC_STATS
Zilog serial support
CONFIG_SUN_ZS
- This driver does not exist at this point, so you might as well
- say N.
+ If you are asked this question, something is wrong with the config scripts.
+ The Zilog serial driver is always enabled on the sparc architecture.
Double Talk PC internal speech card support
CONFIG_DTLK
inserted in and removed from the running kernel whenever you want),
say M and read Documentation/modules.txt. If unsure, say Y.
-#Mostek real time clock support
-#CONFIG_SUN_MOSTEK_RTC
+Mostek real time clock support
+CONFIG_SUN_MOSTEK_RTC
+ The Mostek RTC chip is used on all known Sun computers except
+ some JavaStations. For a JavaStation you need to say Y both here
+ and to CONFIG_RTC.
+
+ Say Y here unless you are building a special purpose kernel.
+
#
#Siemens SAB82532 serial support
#CONFIG_SAB82532
-# $Id: Makefile,v 1.39 1998/09/16 12:31:31 jj Exp $
+# $Id: Makefile,v 1.41 1999/06/04 13:29:05 jj Exp $
# sparc/Makefile
#
# Makefile for the architecture dependent flags and dependencies on the
# Uncomment the first CFLAGS if you are doing kgdb source level
# debugging of the kernel to get the proper debugging information.
-IS_EGCS := $(shell if $(CC) --version 2>&1 | grep 'egcs' > /dev/null; then echo y; else echo n; fi)
+IS_EGCS := $(shell if $(CC) -c -m32 -o _tmp.o arch/sparc/math-emu/fnegs.c >/dev/null 2>&1; then echo y; else echo n; fi; rm -f _tmp.o)
NEW_GAS := $(shell if $(LD) --version 2>&1 | grep 'elf64_sparc' > /dev/null; then echo y; else echo n; fi)
ifeq ($(NEW_GAS),y)
-/* $Id: ebus.c,v 1.2 1998/10/07 11:35:16 jj Exp $
+/* $Id: ebus.c,v 1.3 1999/06/03 15:02:09 davem Exp $
* ebus.c: PCI to EBus bridge device.
*
* Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
*
* Adopted for sparc by V. Roganov and G. Raiko.
+ * Fixes for different platforms by Pete Zaitcev.
*/
#include <linux/config.h>
#undef DEBUG_FILL_EBUS_DEV
#ifdef PROM_DEBUG
-#define dprintf prom_printf
+#define dprintk prom_printf
#else
-#define dprintf printk
+#define dprintk printk
#endif
struct linux_ebus *ebus_chain = 0;
extern int envctrl_init(void);
#endif
+/* We are together with pcic.c under CONFIG_PCI. */
+extern unsigned int pcic_pin_to_irq(unsigned int, char *name);
+
static inline unsigned long ebus_alloc(size_t size)
{
return (unsigned long)kmalloc(size, GFP_ATOMIC);
strcpy(dev->prom_name, lbuf);
len = prom_getproperty(node, "reg", (void *)regs, sizeof(regs));
+ if (len == -1) len = 0;
dev->num_addrs = len / sizeof(regs[0]);
for (i = 0; i < dev->num_addrs; i++) {
dev->base_address[i] = dev->parent->base_address[regs[i]];
}
+ /*
+ * Houston, we have a problem...
+ * Sometimes PROM supplies absolutely meaningless properties.
+ * Still, we take what it gives since we have nothing better.
+ * Children of ebus may be wired on any input pin of PCIC.
+ */
len = prom_getproperty(node, "interrupts", (char *)&irqs, sizeof(irqs));
if ((len == -1) || (len == 0)) {
dev->num_irqs = 0;
- /*
- * Oh, well, some PROMs don't export interrupts
- * property to children of EBus devices...
- *
- * Be smart about PS/2 keyboard and mouse.
- */
- if (!strcmp(dev->parent->prom_name, "8042")) {
+ dev->irqs[0] = 0;
+ if (dev->parent->num_irqs != 0) {
dev->num_irqs = 1;
dev->irqs[0] = dev->parent->irqs[0];
+/* P3 remove */ printk("EBUS: dev %s irq %d from parent\n", dev->prom_name, dev->irqs[0]);
}
} else {
dev->num_irqs = len / sizeof(irqs[0]);
- printk("FIXME: %s irq(%d)\n", dev->prom_name, irqs[0]);
+ if (irqs[0] == 0 || irqs[0] >= 8) {
+ /*
+ * XXX Zero is a valid pin number...
+ * This works as long as Ebus is not wired to INTA#.
+ */
+ printk("EBUS: %s got bad irq %d from PROM\n",
+ dev->prom_name, irqs[0]);
+ dev->num_irqs = 0;
+ dev->irqs[0] = 0;
+ } else {
+ dev->irqs[0] = pcic_pin_to_irq(irqs[0], dev->prom_name);
+/* P3 remove */ printk("EBUS: dev %s irq %d from PROM\n", dev->prom_name, dev->irqs[0]);
+ }
}
#ifdef DEBUG_FILL_EBUS_DEV
dev->num_addrs = len / sizeof(struct linux_prom_registers);
for (i = 0; i < dev->num_addrs; i++) {
- n = (regs[i].which_io - 0x10) >> 2;
+ /*
+ * XXX Collect JE-1 PROM
+ *
+ * Example - JS-E with 3.11:
+ * /ebus
+ * regs
+ * 0x00000000, 0x0, 0x00000000, 0x0, 0x00000000,
+ * 0x82000010, 0x0, 0xf0000000, 0x0, 0x01000000,
+ * 0x82000014, 0x0, 0x38800000, 0x0, 0x00800000,
+ * ranges
+ * 0x00, 0x00000000, 0x02000010, 0x0, 0x0, 0x01000000,
+ * 0x01, 0x01000000, 0x02000014, 0x0, 0x0, 0x00800000,
+ * /ebus/8042
+ * regs
+ * 0x00000001, 0x00300060, 0x00000008,
+ * 0x00000001, 0x00300060, 0x00000008,
+ */
+ n = regs[i].which_io;
+ if (n >= 4) {
+ /* XXX This is copied from old JE-1 by Gleb. */
+ n = (regs[i].which_io - 0x10) >> 2;
+ } else {
+ ;
+ }
dev->base_address[i] = dev->bus->self->base_address[n];
dev->base_address[i] += regs[i].phys_addr;
(unsigned long)sparc_alloc_io (dev->base_address[i], 0,
regs[i].reg_size,
dev->prom_name, 0, 0);
+#if 0
+/*
+ * This release_region() screws those who do sparc_alloc_io().
+ * Change drivers which do check_region(). See drivers/block/floppy.c.
+ */
/* Some drivers call 'check_region', so we release it */
release_region(dev->base_address[i] & PAGE_MASK, PAGE_SIZE);
+#endif
if (dev->base_address[i] == 0 ) {
panic("ebus: unable sparc_alloc_io for dev %s",
len = prom_getproperty(node, "interrupts", (char *)&irqs, sizeof(irqs));
if ((len == -1) || (len == 0)) {
dev->num_irqs = 0;
+ if ((dev->irqs[0] = dev->bus->self->irq) != 0) {
+ dev->num_irqs = 1;
+/* P3 remove */ printk("EBUS: child %s irq %d from parent\n", dev->prom_name, dev->irqs[0]);
+ }
} else {
- dev->num_irqs = len / sizeof(irqs[0]);
-
-#define IRQ_8042 7
- if (irqs[0] == 4) dev->irqs[0] = IRQ_8042;
- printk("FIXME: %s irq(%d)\n", dev->prom_name, irqs[0]);
+ dev->num_irqs = 1; /* dev->num_irqs = len / sizeof(irqs[0]); */
+ if (irqs[0] == 0 || irqs[0] >= 8) {
+ /* See above for the parent. XXX */
+ printk("EBUS: %s got bad irq %d from PROM\n",
+ dev->prom_name, irqs[0]);
+ dev->num_irqs = 0;
+ dev->irqs[0] = 0;
+ } else {
+ dev->irqs[0] = pcic_pin_to_irq(irqs[0], dev->prom_name);
+/* P3 remove */ printk("EBUS: child %s irq %d from PROM\n", dev->prom_name, dev->irqs[0]);
+ }
}
#ifdef DEBUG_FILL_EBUS_DEV
-/* $Id: entry.S,v 1.159 1999/05/08 03:00:03 davem Exp $
+/* $Id: entry.S,v 1.160 1999/06/03 15:02:11 davem Exp $
* arch/sparc/kernel/entry.S: Sparc trap low-level entry points.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
retl
nop
+#ifdef CONFIG_PCI
+#include <asm/pcic.h>
+
+ .align 4
+ .globl linux_trap_ipi15_pcic
+linux_trap_ipi15_pcic:
+ rd %wim, %l3
+ SAVE_ALL
+
+ /*
+ * First deactivate NMI
+ * or we cannot drop ET, cannot get window spill traps.
+ * The busy loop is necessary because the PIO error
+ * sometimes does not go away quickly and we trap again.
+ */
+ sethi %hi(C_LABEL(pcic_regs)), %o1
+ ld [%o1 + %lo(C_LABEL(pcic_regs))], %o2
+
+ ! Get pending status for printouts later.
+ ld [%o2 + PCI_SYS_INT_PENDING], %o0
+
+ mov PCI_SYS_INT_PENDING_CLEAR_ALL, %o1
+ stb %o1, [%o2 + PCI_SYS_INT_PENDING_CLEAR]
+1:
+ ld [%o2 + PCI_SYS_INT_PENDING], %o1
+ andcc %o1, ((PCI_SYS_INT_PENDING_PIO|PCI_SYS_INT_PENDING_PCI)>>24), %g0
+ bne 1b
+ nop
+
+ or %l0, PSR_PIL, %l4
+ wr %l4, 0x0, %psr
+ WRITE_PAUSE
+ wr %l4, PSR_ET, %psr
+ WRITE_PAUSE
+
+ call C_LABEL(pcic_nmi)
+ add %sp, REGWIN_SZ, %o1 ! struct pt_regs *regs
+ RESTORE_ALL
+
+ .globl C_LABEL(pcic_nmi_trap_patch)
+C_LABEL(pcic_nmi_trap_patch):
+ sethi %hi(linux_trap_ipi15_pcic), %l3
+ jmpl %l3 + %lo(linux_trap_ipi15_pcic), %g0
+ rd %psr, %l0
+ .word 0
+
+#endif /* CONFIG_PCI */
+
/* End of entry.S */
-/* $Id: head.S,v 1.95 1999/04/13 07:40:34 anton Exp $
+/* $Id: head.S,v 1.96 1999/06/03 15:02:15 davem Exp $
* head.S: The initial boot code for the Sparc port of Linux.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 1995 Peter Zaitcev (Zaitcev@ipmce.su)
+ * Copyright (C) 1995,1999 Pete Zaitcev (zaitcev@metabyte.com)
* Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
* Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
* Copyright (C) 1997 Michael A. Griffith (grif@acm.org)
+ *
+ * CompactPCI platform by Eric Brower, 1999.
*/
#include <linux/version.h>
t_irq12:TRAP_ENTRY_INTERRUPT(12) /* IRQ Zilog serial chip */
t_irq13:TRAP_ENTRY_INTERRUPT(13) /* IRQ Audio Intr. */
t_irq14:TRAP_ENTRY_INTERRUPT(14) /* IRQ Timer #2 */
+ .globl t_nmi
#ifndef __SMP__
t_nmi: NMI_TRAP /* Level 15 (NMI) */
#else
- .globl t_nmi
t_nmi: TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m)
#endif
t_racc: TRAP_ENTRY(0x20, do_reg_access) /* General Register Access Error */
cmp %l1, 'c'
be 1f
cmp %l1, 'm'
+ be 1f
+ cmp %l1, 's'
be 1f
cmp %l1, 'd'
be 1f
1: set C_LABEL(cputypval), %l1
ldub [%l1 + 0x4], %l1
cmp %l1, 'm' ! Test for sun4d, sun4e ?
+ be sun4m_init
+ cmp %l1, 's' ! Treat sun4s as sun4m
be sun4m_init
cmp %l1, 'd' ! Let us see how the beast will die
be sun4d_init
-/* $Id: pcic.c,v 1.5 1999/03/16 00:15:20 davem Exp $
+/* $Id: pcic.c,v 1.6 1999/06/03 15:02:18 davem Exp $
* pcic.c: Sparc/PCI controller support
*
* Copyright (C) 1998 V. Roganov and G. Raiko
*
* Code is derived from Ultra/PCI PSYCHO controller support, see that
* for author info.
+ *
+ * Support for diverse IIep based platforms by Pete Zaitcev.
+ * CP-1200 by Eric Brower.
*/
#include <linux/config.h>
#include <asm/ebus.h>
#include <asm/sbus.h> /* for sanity check... */
+#include <asm/swift.h> /* for cache flushing. */
#include <asm/io.h>
#else
+unsigned int pcic_pin_to_irq(unsigned int pin, char *name);
+
+/*
+ * I studied different documents and many live PROMs both from 2.30
+ * family and 3.xx versions. I came to the amazing conclusion: there is
+ * absolutely no way to route interrupts in IIep systems relying on
+ * information which PROM presents. We must hardcode interrupt routing
+ * schematics. And this actually sucks. -- zaitcev 1999/05/12
+ *
+ * To find irq for a device we determine which routing map
+ * is in effect or, in other words, on which machine we are running.
+ * We use PROM name for this although other techniques may be used
+ * in special cases (Gleb reports a PROMless IIep based system).
+ * Once we know the map we take device configuration address and
+ * find PCIC pin number where INT line goes. Then we may either program
+ * preferred irq into the PCIC or supply the preexisting irq to the device.
+ *
+ * XXX Entries for JE-1 are completely bogus. Gleb, Vladimir, please fill them.
+ */
+struct pcic_ca2irq {
+ unsigned char busno; /* PCI bus number */
+ unsigned char devfn; /* Configuration address */
+ unsigned char pin; /* PCIC external interrupt pin */
+ unsigned char irq; /* Preferred IRQ (mappable in PCIC) */
+ unsigned int force; /* Enforce preferred IRQ */
+};
+
+struct pcic_sn2list {
+ char *sysname;
+ struct pcic_ca2irq *intmap;
+ int mapdim;
+};
+
+/*
+ * XXX JE-1 is a little known beast.
+ * One rumor has the map this way: pin 0 - parallel, audio;
+ * pin 1 - Ethernet; pin 2 - su; pin 3 - PS/2 kbd and mouse.
+ * All other comparable systems tie serial and keyboard together,
+ * so we do not code this rumor just yet.
+ */
+static struct pcic_ca2irq pcic_i_je1[] = {
+ { 0, 0x01, 1, 6, 1 }, /* Happy Meal */
+};
+
+/* XXX JS-E entry is incomplete - PCI Slot 2 address (pin 7)? */
+static struct pcic_ca2irq pcic_i_jse[] = {
+ { 0, 0x00, 0, 13, 0 }, /* Ebus - serial and keyboard */
+ { 0, 0x01, 1, 6, 0 }, /* hme */
+ { 0, 0x08, 2, 9, 0 }, /* VGA - we hope not used :) */
+ { 0, 0x18, 6, 8, 0 }, /* PCI INTA# in Slot 1 */
+ { 0, 0x38, 4, 9, 0 }, /* All ISA devices. Read 8259. */
+ { 0, 0x80, 5, 11, 0 }, /* EIDE */
+ /* {0,0x88, 0,0,0} - unknown device... PMU? Probably no interrupt. */
+ { 0, 0xA0, 4, 9, 0 }, /* USB */
+ /*
+ * Some pins belong to non-PCI devices, we hardcode them in drivers.
+ * sun4m timers - irq 10, 14
+ * PC style RTC - pin 7, irq 4 ?
+ * Smart card, Parallel - pin 4 shared with USB, ISA
+ * audio - pin 3, irq 5 ?
+ */
+};
+
+/* SPARCengine-6 was the original release name of CP1200.
+ * The documentation differs between the two versions
+ */
+static struct pcic_ca2irq pcic_i_se6[] = {
+ { 0, 0x08, 0, 2, 0 }, /* SCSI */
+ { 0, 0x01, 1, 6, 0 }, /* HME */
+ { 0, 0x00, 3, 13, 0 }, /* EBus */
+};
+
+/*
+ * Several entries in this list may point to the same routing map
+ * as several PROMs may be installed on the same physical board.
+ */
+#define SN2L_INIT(name, map) \
+ { name, map, sizeof(map)/sizeof(struct pcic_ca2irq) }
+
+static struct pcic_sn2list pcic_known_sysnames[] = {
+ SN2L_INIT("JE-1-name", pcic_i_je1), /* XXX Gleb, put name here, pls */
+ SN2L_INIT("SUNW,JS-E", pcic_i_jse), /* PROLL JavaStation-E */
+ SN2L_INIT("SUNW,SPARCengine-6", pcic_i_se6), /* SPARCengine-6/CP-1200 */
+ { NULL, NULL, 0 }
+};
+
static struct linux_pcic PCIC;
static struct linux_pcic *pcic = NULL;
+unsigned int pcic_regs;
+volatile int pcic_speculative;
+volatile int pcic_trapped;
+
static void pci_do_gettimeofday(struct timeval *tv);
static void pci_do_settimeofday(struct timeval *tv);
pbm->prom_node = node;
prom_getstring(node, "name", namebuf, sizeof(namebuf));
strcpy(pbm->prom_name, namebuf);
+
+ {
+ extern volatile int t_nmi[1];
+ extern int pcic_nmi_trap_patch[1];
+
+ t_nmi[0] = pcic_nmi_trap_patch[0];
+ t_nmi[1] = pcic_nmi_trap_patch[1];
+ t_nmi[2] = pcic_nmi_trap_patch[2];
+ t_nmi[3] = pcic_nmi_trap_patch[3];
+ swift_flush_dcache();
+ pcic_regs = pcic->pcic_regs;
+ }
+
+ prom_getstring(prom_root_node, "name", namebuf, sizeof(namebuf));
+ {
+ struct pcic_sn2list *p;
+
+ for (p = pcic_known_sysnames; p->sysname != NULL; p++) {
+ if (strcmp(namebuf, p->sysname) == 0)
+ break;
+ }
+ pcic->pcic_imap = p->intmap;
+ pcic->pcic_imdim = p->mapdim;
+ }
+ if (pcic->pcic_imap == NULL) {
+ /*
+ * We do not panic here for the sake of embedded systems.
+ */
+ printk("PCIC: System %s is unknown, cannot route interrupts\n",
+ namebuf);
+ }
}
__initfunc(void pcibios_init(void))
pcic->pcic_regs, pcic->pcic_io);
/*
- * FIXME:
* Switch off IOTLB translation.
- * It'll be great to use IOMMU to handle HME's rings
- * but we couldn't. Thus, we have to flush CPU cache
- * in HME.
*/
writeb(PCI_DVMA_CONTROL_IOTLB_DISABLE,
pcic->pcic_regs+PCI_DVMA_CONTROL);
/*
- * FIXME:
* Increase mapped size for PCI memory space (DMA access).
* Should be done in that order (size first, address second).
- * Why we couldn't set up 4GB and forget about it ?
+ * Why we couldn't set up 4GB and forget about it? XXX
*/
writel(0xF0000000UL, pcic->pcic_regs+PCI_SIZE_0);
writel(0+PCI_BASE_ADDRESS_SPACE_MEMORY,
if(err != 0 && err != -1) {
unsigned long devfn = (regs[0].which_io >> 8) & 0xff;
if(devfn == pdev->devfn)
- return node; /* Match */
+ return node;
}
node = prom_getsibling(node);
}
return kmalloc(sizeof(struct pcidev_cookie), GFP_ATOMIC);
}
-
-static void pcic_map_pci_device (struct pci_dev *dev) {
- int node, pcinode;
+static void pcic_map_pci_device (struct pci_dev *dev, int node) {
+ struct linux_prom_pci_assigned_addresses addrs[6];
+ int addrlen;
int i, j;
/* Is any valid address present ? */
if (dev->base_address[j]) i++;
if (!i) return; /* nothing to do */
+ if (node == 0 || node == -1) {
+ printk("PCIC: no prom node for device ID (%x,%x)\n",
+ dev->device, dev->vendor);
+ return;
+ }
+
/*
* find related address and get it's window length
*/
- pcinode = prom_getchild(prom_root_node);
- pcinode = prom_searchsiblings(pcinode, "pci");
- if (!pcinode)
- panic("PCIC: failed to locate 'pci' node");
-
-
- for (node = prom_getchild(pcinode); node;
- node = prom_getsibling(node)) {
- struct linux_prom_pci_assigned_addresses addrs[6];
- int addrlen = prom_getproperty(node,"assigned-addresses",
+ addrlen = prom_getproperty(node,"assigned-addresses",
(char*)addrs, sizeof(addrs));
- if (addrlen == -1)
- continue;
+ if (addrlen == -1) {
+ printk("PCIC: no \"assigned-addresses\" for device (%x,%x)\n",
+ dev->device, dev->vendor);
+ return;
+ }
- addrlen /= sizeof(struct linux_prom_pci_assigned_addresses);
- for (i = 0; i < addrlen; i++ )
- for (j = 0; j < 6; j++) {
- if (!dev->base_address[j] || !addrs[i].phys_lo)
- continue;
- if (addrs[i].phys_lo == dev->base_address[j]) {
- unsigned long address = dev->base_address[j];
- int length = addrs[i].size_lo;
- char namebuf[128] = { 0, };
- unsigned long mapaddr, addrflags;
-
- prom_getstring(node, "name",
- namebuf, sizeof(namebuf));
-
- /* FIXME:
- * failure in allocation too large space
- */
- if (length > 0x200000) {
+ addrlen /= sizeof(struct linux_prom_pci_assigned_addresses);
+ for (i = 0; i < addrlen; i++ )
+ for (j = 0; j < 6; j++) {
+ if (!dev->base_address[j] || !addrs[i].phys_lo)
+ continue;
+ if (addrs[i].phys_lo == dev->base_address[j]) {
+ unsigned long address = dev->base_address[j];
+ int length = addrs[i].size_lo;
+ char namebuf[128] = { 0, };
+ unsigned long mapaddr, addrflags;
+
+ prom_getstring(node, "name", namebuf, sizeof(namebuf));
+
+ /*
+ * Guard against allocating too large a space.
+ */
+ if (length > 0x200000) {
length = 0x200000;
prom_printf("PCIC: map window for device '%s' "
"reduced to 2MB !\n", namebuf);
- }
+ }
- /*
- * Be careful with MEM/IO address flags
- */
- if ((address & PCI_BASE_ADDRESS_SPACE) ==
+ /*
+ * Be careful with MEM/IO address flags
+ */
+ if ((address & PCI_BASE_ADDRESS_SPACE) ==
PCI_BASE_ADDRESS_SPACE_IO) {
mapaddr = address & PCI_BASE_ADDRESS_IO_MASK;
- } else {
+ } else {
mapaddr = address & PCI_BASE_ADDRESS_MEM_MASK;
- }
- addrflags = address ^ mapaddr;
+ }
+ addrflags = address ^ mapaddr;
- dev->base_address[j] =
+ dev->base_address[j] =
(unsigned long)sparc_alloc_io(address, 0,
length,
namebuf, 0, 0);
- if ( dev->base_address[j] == 0 )
+ if ( dev->base_address[j] == 0 )
panic("PCIC: failed make mapping for "
"pci device '%s' with address %lx\n",
namebuf, address);
- dev->base_address[j] ^= addrflags;
- return;
- }
+ dev->base_address[j] ^= addrflags;
+ return;
}
+ }
+
+ printk("PCIC: unable to match addresses for device (%x,%x)\n",
+ dev->device, dev->vendor);
+}
+
+static void pcic_fill_irq(struct pci_dev *dev, int node) {
+ struct pcic_ca2irq *p;
+ int i, ivec;
+ char namebuf[64]; /* P3 remove */
+
+ if (node == -1) {
+ strcpy(namebuf, "???");
+ } else {
+ prom_getstring(node, "name", namebuf, sizeof(namebuf)); /* P3 remove */
}
- panic("PCIC: unable to locate prom node for pci device (%x,%x) \n",
- dev->device, dev->vendor);
+ if ((p = pcic->pcic_imap) == 0) {
+ dev->irq = 0;
+ return;
+ }
+ for (i = 0; i < pcic->pcic_imdim; i++) {
+ if (p->busno == dev->bus->number && p->devfn == dev->devfn)
+ break;
+ p++;
+ }
+ if (i >= pcic->pcic_imdim) {
+ printk("PCIC: device %s devfn %02x:%02x not found in %d\n",
+ namebuf, dev->bus->number, dev->devfn, pcic->pcic_imdim);
+ dev->irq = 0;
+ return;
+ }
+
+ i = p->pin;
+ if (i >= 0 && i < 4) {
+ ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_LO);
+ dev->irq = ivec >> (i << 2) & 0xF;
+ } else if (i >= 4 && i < 8) {
+ ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_HI);
+ dev->irq = ivec >> ((i-4) << 2) & 0xF;
+ } else { /* Corrupted map */
+ printk("PCIC: BAD PIN %d\n", i); for (;;) {}
+ }
+/* P3 remove later */ printk("PCIC: device %s pin %d ivec 0x%x irq %x\n", namebuf, i, ivec, dev->irq);
+
+ /*
+ * dev->irq=0 means PROM did not bother to program the upper
+ * half of PCIC. This happens on JS-E with PROM 3.11, for instance.
+ */
+ if (dev->irq == 0 || p->force) {
+ if (p->irq == 0 || p->irq >= 15) { /* Corrupted map */
+ printk("PCIC: BAD IRQ %d\n", p->irq); for (;;) {}
+ }
+ printk("PCIC: setting irq %x for device (%x,%x)\n",
+ p->irq, dev->device, dev->vendor);
+ dev->irq = p->irq;
+
+ ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_HI);
+ ivec &= ~(0xF << ((p->pin - 4) << 2));
+ ivec |= p->irq << ((p->pin - 4) << 2);
+ writew(ivec, pcic->pcic_regs+PCI_INT_SELECT_HI);
+ }
+
+ return;
}
/*
writeb((pcic->pcic_io_phys>>24) & PCI_SIBAR_ADDRESS_MASK,
pcic->pcic_regs+PCI_SIBAR);
writeb(PCI_ISIZE_16M, pcic->pcic_regs+PCI_ISIZE);
+
}
if(paddr < pcic->pcic_mapped_io ||
- paddr > pcic->pcic_mapped_io + PCI_SPACE_SIZE)
+ paddr >= pcic->pcic_mapped_io + 0x10000)
return 0;
offset = paddr - pcic->pcic_mapped_io;
*addr = pcic->pcic_io_phys + offset;
struct pci_dev *dev;
int i, has_io, has_mem;
unsigned short cmd;
+ struct linux_pbm_info* pbm = &pcic->pbm;
+ int node;
+ struct pcidev_cookie *pcp;
if(pcic == NULL) {
prom_printf("PCI: Error, PCIC not found.\n");
}
pci_read_config_word(dev, PCI_COMMAND, &cmd);
if (has_io && !(cmd & PCI_COMMAND_IO)) {
- printk("PCI: Enabling I/O for device %02x:%02x\n",
+ printk("PCIC: Enabling I/O for device %02x:%02x\n",
dev->bus->number, dev->devfn);
cmd |= PCI_COMMAND_IO;
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
if (has_mem && !(cmd & PCI_COMMAND_MEMORY)) {
- printk("PCI: Enabling memory for device %02x:%02x\n",
+ printk("PCIC: Enabling memory for device %02x:%02x\n",
dev->bus->number, dev->devfn);
cmd |= PCI_COMMAND_MEMORY;
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
+ node = pdev_to_pnode(pbm, dev);
+ if(node == 0)
+ node = -1;
+
/* cookies */
- {
- struct pcidev_cookie *pcp;
- struct linux_pbm_info* pbm = &pcic->pbm;
- int node = pdev_to_pnode(pbm, dev);
-
- if(node == 0)
- node = -1;
- pcp = pci_devcookie_alloc();
- pcp->pbm = pbm;
- pcp->prom_node = node;
- dev->sysdata = pcp;
- }
+ pcp = pci_devcookie_alloc();
+ pcp->pbm = pbm;
+ pcp->prom_node = node;
+ dev->sysdata = pcp;
/* memory mapping */
- if (!(dev->vendor == PCI_VENDOR_ID_SUN &&
- dev->device == PCI_DEVICE_ID_SUN_EBUS)) {
- pcic_map_pci_device(dev);
- }
-
- /* irq */
-#define SETIRQ(vend,devid,irqn) \
- if (dev->vendor==vend && dev->device==devid) dev->irq = irqn;
+ if ((dev->class>>16) != PCI_BASE_CLASS_BRIDGE)
+ pcic_map_pci_device(dev, node);
- SETIRQ(PCI_VENDOR_ID_SUN,PCI_DEVICE_ID_SUN_HAPPYMEAL,3);
+ pcic_fill_irq(dev, node);
}
+
ebus_init();
}
+/*
+ * pcic_pin_to_irq() is exported to ebus.c.
+ */
+unsigned int
+pcic_pin_to_irq(unsigned int pin, char *name)
+{
+ unsigned int irq;
+ unsigned int ivec;
+
+ if (pin < 4) {
+ ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_LO);
+ irq = ivec >> (pin << 2) & 0xF;
+ } else if (pin < 8) {
+ ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_HI);
+ irq = ivec >> ((pin-4) << 2) & 0xF;
+ } else { /* Corrupted map */
+ printk("PCIC: BAD PIN %d FOR %s\n", pin, name);
+ for (;;) {} /* XXX Cannot panic properly in case of PROLL */
+ }
+/* P3 remove later */ printk("PCIC: dev %s pin %d ivec 0x%x irq %x\n", name, pin, ivec, irq);
+ return irq;
+}
+
/* Makes compiler happy */
static volatile int pcic_timer_dummy;
unsigned char where, unsigned int *value)
{
unsigned long flags;
- if (where&3) return PCIBIOS_BAD_REGISTER_NUMBER;
- if (bus != 0 ||
- (device_fn != 0 && device_fn != 1 && device_fn != 0x80)) {
- *value = 0xffffffff;
- return PCIBIOS_SUCCESSFUL;
- }
- /* FIXME: IGA haven't got high config memory addresses !!! */
- if (device_fn == 0x80 && where > PCI_INTERRUPT_LINE) {
- *value = 0xffffffff;
- return PCIBIOS_SUCCESSFUL;
- }
+ if (where&3) return PCIBIOS_BAD_REGISTER_NUMBER;
save_and_cli(flags);
+#if 0
+ pcic_speculative = 1;
+ pcic_trapped = 0;
+#endif
writel(CONFIG_CMD(bus,device_fn,where), pcic->pcic_config_space_addr);
+#if 0
+ nop();
+ if (pcic_trapped) {
+ restore_flags(flags);
+ *value = ~0;
+ return PCIBIOS_SUCCESSFUL;
+ }
+#endif
+ pcic_speculative = 2;
+ pcic_trapped = 0;
*value = readl(pcic->pcic_config_space_data + (where&4));
+ nop();
+ if (pcic_trapped) {
+ pcic_speculative = 0;
+ restore_flags(flags);
+ *value = ~0;
+ return PCIBIOS_SUCCESSFUL;
+ }
+ pcic_speculative = 0;
restore_flags(flags);
return PCIBIOS_SUCCESSFUL;
}
-
+
int pcibios_write_config_byte (unsigned char bus, unsigned char devfn,
unsigned char where, unsigned char value)
{
unsigned char where, unsigned int value)
{
unsigned long flags;
- if ((where&3) || bus != 0 || (devfn != 0 && devfn != 1 && devfn != 0x80))
- return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ if (where&3) return PCIBIOS_BAD_REGISTER_NUMBER;
save_and_cli(flags);
writel(CONFIG_CMD(bus,devfn,where),pcic->pcic_config_space_addr);
return str;
}
+/*
+ * NMI
+ */
+void pcic_nmi(unsigned int pend, struct pt_regs *regs)
+{
+
+ pend = flip_dword(pend);
+
+ if (!pcic_speculative || (pend & PCI_SYS_INT_PENDING_PIO) == 0) {
+ /*
+ * XXX On CP-1200 PCI #SERR may happen, we do not know
+ * what to do about it yet.
+ */
+ printk("Aiee, NMI pend 0x%x pc 0x%x spec %d, hanging\n",
+ pend, (int)regs->pc, pcic_speculative);
+ for (;;) { }
+ }
+ pcic_speculative = 0;
+ pcic_trapped = 1;
+ regs->pc = regs->npc;
+ regs->npc += 4;
+}
+
/*
* Following code added to handle extra PCI-related system calls
*/
-/* $Id: setup.c,v 1.106 1999/05/28 16:03:18 anton Exp $
+/* $Id: setup.c,v 1.107 1999/06/03 15:02:20 davem Exp $
* linux/arch/sparc/kernel/setup.c
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
if(!strcmp(&cputypval,"sun4 ")) { sparc_cpu_model=sun4; }
if(!strcmp(&cputypval,"sun4c")) { sparc_cpu_model=sun4c; }
if(!strcmp(&cputypval,"sun4m")) { sparc_cpu_model=sun4m; }
+ if(!strcmp(&cputypval,"sun4s")) { sparc_cpu_model=sun4m; } /* CP-1200 with PROM 2.30 -E */
if(!strcmp(&cputypval,"sun4d")) { sparc_cpu_model=sun4d; }
if(!strcmp(&cputypval,"sun4e")) { sparc_cpu_model=sun4e; }
if(!strcmp(&cputypval,"sun4u")) { sparc_cpu_model=sun4u; }
-/* $Id: sys_sunos.c,v 1.97 1999/05/24 19:40:39 davem Exp $
+/* $Id: sys_sunos.c,v 1.98 1999/06/09 08:23:39 davem Exp $
* sys_sunos.c: SunOS specific syscall compatibility support.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
-# $Id: Makefile,v 1.35 1999/01/02 16:45:50 davem Exp $
+# $Id: Makefile,v 1.37 1999/06/04 13:29:10 jj Exp $
# sparc64/Makefile
#
# Makefile for the architecture dependent flags and dependencies on the
CC := sparc64-linux-gcc -D__KERNEL__ -I$(TOPDIR)/include
CC_HAS_ARGS := $(shell if echo "$(CC)" | grep '\(__KERNEL__\| \)' > /dev/null; then echo y; else echo n; fi)
-IS_EGCS := $(shell if $(CC) --version 2>&1 | grep 'egcs' > /dev/null; then echo y; else echo n; fi)
+IS_EGCS := $(shell if $(CC) -c -m64 -mcmodel=medlow -o _tmp.o arch/sparc64/math-emu/fnegq.c >/dev/null 2>&1; then echo y; else echo n; fi; rm -f _tmp.o)
NEW_GAS := $(shell if $(LD) --version 2>&1 | grep 'elf64_sparc' > /dev/null; then echo y; else echo n; fi)
ifneq ($(CC_HAS_ARGS),y)
# CONFIG_SUN_BPP is not set
# CONFIG_SUN_VIDEOPIX is not set
CONFIG_SUN_AURORA=m
+CONFIG_APM_RTC_IS_GMT=y
+# CONFIG_RTC is not set
#
# Linux/SPARC audio subsystem (EXPERIMENTAL)
set_brk(current->mm->start_brk, current->mm->brk);
- p = setup_arg_pages(p, bprm);
+ retval = setup_arg_pages(bprm);
+ if (retval < 0) {
+ /* Someone check-me: is this error path enough? */
+ send_sig(SIGKILL, current, 0);
+ return retval;
+ }
- p = (unsigned long) create_aout32_tables((char *)p, bprm);
- current->mm->start_stack = p;
- start_thread32(regs, ex.a_entry, p);
+ current->mm->start_stack =
+ (unsigned long) create_aout32_tables((char *)bprm->p, bprm);
+ start_thread32(regs, ex.a_entry, current->mm->start_stack);
if (current->flags & PF_PTRACED)
send_sig(SIGTRAP, current, 0);
return 0;
-/* $Id: ioctl32.c,v 1.62.2.1 1999/06/09 04:53:03 davem Exp $
+/* $Id: ioctl32.c,v 1.63 1999/06/09 04:56:14 davem Exp $
* ioctl32.c: Conversion between 32bit and 64bit native ioctls.
*
* Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
-/* $Id: signal.c,v 1.38 1998/10/16 03:19:04 davem Exp $
+/* $Id: signal.c,v 1.40 1999/06/02 19:19:52 jj Exp $
* arch/sparc64/kernel/signal.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
/* Checks if the fp is valid */
static int invalid_frame_pointer(void *fp, int fplen)
{
- if ((((unsigned long) fp) & 7) || ((unsigned long)fp) > 0x80000000000ULL - fplen)
+ if (((unsigned long) fp) & 7)
return 1;
return 0;
}
goto sigill;
if (current->tss.w_saved != 0) {
+#ifdef DEBUG_SIGNALS
printk ("%s[%d]: Invalid user stack frame for "
"signal delivery.\n", current->comm, current->pid);
+#endif
goto sigill;
}
regs->tnpc = (regs->tpc + 4);
/* 4. return to kernel instructions */
- if (ka->ka_restorer)
- regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
- else {
- /* Flush instruction space. */
- unsigned long address = ((unsigned long)&(sf->insns[0]));
- pgd_t *pgdp = pgd_offset(current->mm, address);
- pmd_t *pmdp = pmd_offset(pgdp, address);
- pte_t *ptep = pte_offset(pmdp, address);
-
- regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);
-
- /* mov __NR_sigreturn, %g1 */
- err |= __put_user(0x821020d8, &sf->insns[0]);
-
- /* t 0x6d */
- err |= __put_user(0x91d0206d, &sf->insns[1]);
- if (err)
- goto sigsegv;
-
- if(pte_present(*ptep)) {
- unsigned long page = pte_page(*ptep);
-
- __asm__ __volatile__("
- membar #StoreStore
- flush %0 + %1"
- : : "r" (page), "r" (address & (PAGE_SIZE - 1))
- : "memory");
- }
- }
+ regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
return;
sigill:
goto sigill;
if (current->tss.w_saved != 0) {
+#ifdef DEBUG_SIGNALS
printk ("%s[%d]: Invalid user stack frame for "
"signal delivery.\n", current->comm, current->pid);
+#endif
goto sigill;
}
regs->tnpc = (regs->tpc + 4);
/* 4. return to kernel instructions */
- if (ka->ka_restorer)
- regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
- else {
- /* Flush instruction space. */
- unsigned long address = ((unsigned long)&(sf->insns[0]));
- pgd_t *pgdp = pgd_offset(current->mm, address);
- pmd_t *pmdp = pmd_offset(pgdp, address);
- pte_t *ptep = pte_offset(pmdp, address);
-
- regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);
-
- /* mov __NR_rt_sigreturn, %g1 */
- err |= __put_user(0x82102065, &sf->insns[0]);
-
- /* t 0x6d */
- err |= __put_user(0x91d0206d, &sf->insns[1]);
- if (err)
- goto sigsegv;
-
- if(pte_present(*ptep)) {
- unsigned long page = pte_page(*ptep);
-
- __asm__ __volatile__("
- membar #StoreStore
- flush %0 + %1"
- : : "r" (page), "r" (address & (PAGE_SIZE - 1))
- : "memory");
- }
- }
+ regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
return;
sigill:
-/* $Id: sys_sparc.c,v 1.26 1999/01/07 19:07:01 jj Exp $
+/* $Id: sys_sparc.c,v 1.27 1999/06/02 12:06:34 jj Exp $
* linux/arch/sparc64/kernel/sys_sparc.c
*
* This file contains various random system calls that
return 0;
}
-asmlinkage int
-sys_sigaction(int sig, const struct old_sigaction *act,
- struct old_sigaction *oact)
-{
- struct k_sigaction new_ka, old_ka;
- int ret;
-
- if (act) {
- old_sigset_t mask;
- if (verify_area(VERIFY_READ, act, sizeof(*act)) ||
- __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
- __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
- return -EFAULT;
- __get_user(new_ka.sa.sa_flags, &act->sa_flags);
- __get_user(mask, &act->sa_mask);
- siginitset(&new_ka.sa.sa_mask, mask);
- new_ka.ka_restorer = NULL;
- }
-
- ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
-
- if (!ret && oact) {
- if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) ||
- __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
- __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
- return -EFAULT;
- __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
- __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
- }
-
- return ret;
-}
-
asmlinkage int
sys_rt_sigaction(int sig, const struct sigaction *act, struct sigaction *oact,
void *restorer, size_t sigsetsize)
-/* $Id: sys_sparc32.c,v 1.108 1999/05/16 10:50:32 davem Exp $
+/* $Id: sys_sparc32.c,v 1.109 1999/06/03 07:11:31 davem Exp $
* sys_sparc32.c: Conversion between 32bit and 64bit native syscalls.
*
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
* memory to free pages in kernel mem. These are in a format ready
* to be put directly into the top of new user memory.
*/
-static unsigned long
-copy_strings32(int argc,u32 * argv,unsigned long *page,
- unsigned long p)
+static int copy_strings32(int argc, u32 * argv, struct linux_binprm *bprm)
{
- u32 str;
-
- if (!p) return 0; /* bullet-proofing */
while (argc-- > 0) {
+ u32 str;
int len;
unsigned long pos;
- get_user(str, argv+argc);
- if (!str) panic("VFS: argc is wrong");
- len = strlen_user((char *)A(str)); /* includes the '\0' */
- if (p < len) /* this shouldn't happen - 128kB */
- return 0;
- p -= len; pos = p;
+ if (get_user(str, argv + argc) ||
+ !str ||
+ !(len = strlen_user((char *)A(str))))
+ return -EFAULT;
+ if (bprm->p < len)
+ return -E2BIG;
+
+ bprm->p -= len;
+
+ pos = bprm->p;
while (len) {
char *pag;
int offset, bytes_to_copy;
offset = pos % PAGE_SIZE;
- if (!(pag = (char *) page[pos/PAGE_SIZE]) &&
- !(pag = (char *) page[pos/PAGE_SIZE] =
+ if (!(pag = (char *) bprm->page[pos/PAGE_SIZE]) &&
+ !(pag = (char *) bprm->page[pos/PAGE_SIZE] =
(unsigned long *) get_free_page(GFP_USER)))
- return 0;
+ return -ENOMEM;
+
bytes_to_copy = PAGE_SIZE - offset;
if (bytes_to_copy > len)
bytes_to_copy = len;
- copy_from_user(pag + offset, (char *)A(str), bytes_to_copy);
+
+ if (copy_from_user(pag + offset, (char *)A(str), bytes_to_copy))
+ return -EFAULT;
+
pos += bytes_to_copy;
str += bytes_to_copy;
len -= bytes_to_copy;
}
}
- return p;
+ return 0;
}
/*
}
retval = prepare_binprm(&bprm);
+ if (retval < 0)
+ goto out;
- if(retval>=0) {
- bprm.p = copy_strings(1, &bprm.filename, bprm.page, bprm.p, 2);
- bprm.exec = bprm.p;
- bprm.p = copy_strings32(bprm.envc,envp,bprm.page,bprm.p);
- bprm.p = copy_strings32(bprm.argc,argv,bprm.page,bprm.p);
- if (!bprm.p)
- retval = -E2BIG;
- }
-
- if(retval>=0)
- retval = search_binary_handler(&bprm,regs);
- if(retval>=0)
+ retval = copy_strings_kernel(1, &bprm.filename, &bprm);
+ if (retval < 0)
+ goto out;
+
+ bprm.exec = bprm.p;
+ retval = copy_strings32(bprm.envc, envp, &bprm);
+ if (retval < 0)
+ goto out;
+
+ retval = copy_strings32(bprm.argc, argv, &bprm);
+ if (retval < 0)
+ goto out;
+
+ retval = search_binary_handler(&bprm, regs);
+ if (retval >= 0)
/* execve success */
return retval;
+out:
/* Something went wrong, return the inode and free the argument pages*/
- if(bprm.dentry)
+ if (bprm.dentry)
dput(bprm.dentry);
for (i=0 ; i<MAX_ARG_PAGES ; i++)
free_page(bprm.page[i]);
- return(retval);
+
+ return retval;
}
/*
-/* $Id: sys_sunos32.c,v 1.25 1999/05/24 19:40:44 davem Exp $
+/* $Id: sys_sunos32.c,v 1.26 1999/06/09 08:23:54 davem Exp $
* sys_sunos32.c: SunOS binary compatability layer on sparc64.
*
* Copyright (C) 1995, 1996, 1997 David S. Miller (davem@caip.rutgers.edu)
-/* $Id: systbls.S,v 1.53 1999/04/07 17:14:11 davem Exp $
+/* $Id: systbls.S,v 1.54 1999/06/02 12:06:31 jj Exp $
* systbls.S: System call entry point tables for OS compatibility.
* The native Linux system call table lives here also.
*
/*180*/ .word sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_sigpending, sys_query_module
.word sys_setpgid, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_newuname
/*190*/ .word sys_init_module, sys_personality, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
- .word sys_nis_syscall, sys_nis_syscall, sys_getppid, sys_sigaction, sys_sgetmask
+ .word sys_nis_syscall, sys_nis_syscall, sys_getppid, sys_nis_syscall, sys_sgetmask
/*200*/ .word sys_ssetmask, sys_sigsuspend, sys_newlstat, sys_uselib, sys_nis_syscall
.word sys_nis_syscall, sys_socketcall, sys_syslog, sys_nis_syscall, sys_nis_syscall
/*210*/ .word sys_idle, sys_nis_syscall, sys_waitpid, sys_swapoff, sys_sysinfo
-/* $Id: traps.c,v 1.59 1999/05/18 16:57:10 jj Exp $
+/* $Id: traps.c,v 1.60 1999/06/02 19:19:55 jj Exp $
* arch/sparc64/kernel/traps.c
*
* Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
unsigned long sfsr, unsigned long sfar)
{
lock_kernel();
+ if (regs->tstate & TSTATE_PRIV) {
#if 1
- printk("instruction_access_exception: Shit SFSR[%016lx] SFAR[%016lx], going.\n",
- sfsr, sfar);
+ printk("instruction_access_exception: Shit SFSR[%016lx] SFAR[%016lx], going.\n",
+ sfsr, sfar);
#endif
- die_if_kernel("Iax", regs);
+ die_if_kernel("Iax", regs);
+ }
+ current->tss.sig_desc = SUBSIG_ILLINST;
+ current->tss.sig_address = regs->tpc;
+ force_sig(SIGILL, current);
unlock_kernel();
}
-/* $Id: sfp-util.h,v 1.1 1999/05/28 13:43:07 jj Exp $
+/* $Id: sfp-util.h,v 1.2 1999/06/07 18:24:15 jj Exp $
* arch/sparc64/math-emu/sfp-util.h
*
* Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz)
srlx %7,32,%5
mulx %3,%5,%3
mulx %2,%5,%5
- sethi 0x80000000,%2
+ sethi %%hi(0x80000000),%2
addcc %4,%3,%4
srlx %4,32,%4
add %2,%2,%2
static void floppy_ready(void)
{
- unsigned long flags;
-
CHECK_RESET;
if (start_motor(floppy_ready)) return;
if (fdc_dtr()) return;
if ((raw_cmd->flags & FD_RAW_READ) ||
(raw_cmd->flags & FD_RAW_WRITE))
{
- flags=claim_dma_lock();
+ unsigned long flags = claim_dma_lock();
fd_chose_dma_mode(raw_cmd->kernel_data,
raw_cmd->length);
release_dma_lock(flags);
extern void dtlk_init(void);
extern void pcwatchdog_init(void);
extern int rtc_init(void);
+extern int rtc_sun_init(void); /* Combines MK48T02 and MK48T08 */
extern int rtc_DP8570A_init(void);
extern int rtc_MK48T08_init(void);
extern int dsp56k_init(void);
#ifdef CONFIG_BVME6000
rtc_DP8570A_init();
#endif
-#if defined(CONFIG_RTC) || defined(CONFIG_SUN_MOSTEK_RTC)
+#if defined(CONFIG_SUN_MOSTEK_RTC)
+ rtc_sun_init();
+#endif
+#if defined(CONFIG_RTC)
rtc_init();
#endif
#ifdef CONFIG_ATARI_DSP56K
* 1.08 Miquel van Smoorenburg: disallow certain things on the
* DEC Alpha as the CMOS clock is also used for other things.
* 1.09 Nikita Schmidt: epoch support and some Alpha cleanup.
+ * 1.09a Pete Zaitcev: Sun SPARC
*
*/
-#define RTC_VERSION "1.09"
+#define RTC_VERSION "1.09a"
#define RTC_IRQ 8 /* Can't see this changing soon. */
#define RTC_IO_EXTENT 0x10 /* Only really two ports, but... */
#include <asm/uaccess.h>
#include <asm/system.h>
+#ifdef __sparc__
+#include <asm/ebus.h>
+
+static unsigned long rtc_port;
+#endif
+
/*
* We sponge a minor off of the misc major. No need slurping
* up another valuable major dev number for this. If you add
static unsigned int rtc_poll(struct file *file, poll_table *wait);
-void get_rtc_time (struct rtc_time *rtc_tm);
-void get_rtc_alm_time (struct rtc_time *alm_tm);
-void rtc_dropped_irq(unsigned long data);
+static void get_rtc_time (struct rtc_time *rtc_tm);
+static void get_rtc_alm_time (struct rtc_time *alm_tm);
+static void rtc_dropped_irq(unsigned long data);
-void set_rtc_irq_bit(unsigned char bit);
-void mask_rtc_irq_bit(unsigned char bit);
+static void set_rtc_irq_bit(unsigned char bit);
+static void mask_rtc_irq_bit(unsigned char bit);
static inline unsigned char rtc_is_updating(void);
unsigned long uip_watchdog;
char *guess = NULL;
#endif
+#ifdef __sparc__
+ struct linux_ebus *ebus;
+ struct linux_ebus_device *edev;
+ int rtc_irq;
+#endif
+
printk(KERN_INFO "Real Time Clock Driver v%s\n", RTC_VERSION);
+#ifdef __sparc__
+ for_each_ebus(ebus) {
+ for_each_ebusdev(edev, ebus) {
+ if(strcmp(edev->prom_name, "rtc") == 0) {
+ goto found;
+ }
+ }
+ }
+ printk("rtc_init: no PC rtc found\n");
+ return -EIO;
+
+found:
+ rtc_port = edev->base_address[0];
+ rtc_irq = edev->irqs[0];
+ /*
+ * XXX Interrupt pin #7 in Espresso is shared between RTC and
+ * PCI Slot 2 INTA# (and some INTx# in Slot 1). SA_INTERRUPT here
+ * is asking for trouble with add-on boards. Change to SA_SHIRQ.
+ */
+ if(request_irq(rtc_irq, rtc_interrupt, SA_INTERRUPT, "rtc", (void *)&rtc_port)) {
+ /*
+ * Standard way for sparc to print irq's is to use
+ * __irq_itoa(). I think for EBus it's ok to use %d.
+ */
+ printk("rtc: cannot register IRQ %d\n", rtc_irq);
+ return -EIO;
+ }
+ misc_register(&rtc_dev);
+#else
if(request_irq(RTC_IRQ, rtc_interrupt, SA_INTERRUPT, "rtc", NULL))
{
/* Yeah right, seeing as irq 8 doesn't even hit the bus. */
misc_register(&rtc_dev);
/* Check region? Naaah! Just snarf it up. */
request_region(RTC_PORT(0), RTC_IO_EXTENT, "rtc");
+#endif /* __sparc__ vs. others */
#ifdef __alpha__
rtc_freq = HZ;
* for something that requires a steady > 1KHz signal anyways.)
*/
-void rtc_dropped_irq(unsigned long data)
+static void rtc_dropped_irq(unsigned long data)
{
unsigned long flags;
return uip;
}
-void get_rtc_time(struct rtc_time *rtc_tm)
+static void get_rtc_time(struct rtc_time *rtc_tm)
{
unsigned long flags, uip_watchdog = jiffies;
rtc_tm->tm_mon--;
}
-void get_rtc_alm_time(struct rtc_time *alm_tm)
+static void get_rtc_alm_time(struct rtc_time *alm_tm)
{
unsigned long flags;
unsigned char ctrl;
rtc_irq_data = 0;
}
-void set_rtc_irq_bit(unsigned char bit)
+static void set_rtc_irq_bit(unsigned char bit)
{
unsigned char val;
unsigned long flags;
-/* $Id: parport_ax.c,v 1.17 1999/01/20 06:18:54 davem Exp $
+/* $Id: parport_ax.c,v 1.19 1999/06/09 08:24:40 davem Exp $
* Parallel-port routines for Sun Ultra/AX architecture
*
* Author: Eddie C. Dost <ecd@skynet.be>
eth->h_proto = type;
memcpy(eth->h_source, dev->dev_addr, dev->addr_len);
memcpy(eth->h_dest, neigh->ha, dev->addr_len);
+ hh->hh_len = 16;
return 0;
}
-/* $Id: ptifddi.c,v 1.5 1997/04/16 10:27:27 jj Exp $
+/* $Id: ptifddi.c,v 1.7 1999/06/09 08:19:01 davem Exp $
* ptifddi.c: Network driver for Performance Technologies single-attach
* and dual-attach FDDI sbus cards.
*
DEVICE( INTEL, INTEL_82450GX, "82450GX Orion P6"),
DEVICE( KTI, KTI_ET32P2, "ET32P2"),
DEVICE( ADAPTEC, ADAPTEC_7810, "AIC-7810 RAID"),
+ DEVICE( ADAPTEC, ADAPTEC_7821, "AIC-7860"),
DEVICE( ADAPTEC, ADAPTEC_7850, "AIC-7850"),
DEVICE( ADAPTEC, ADAPTEC_7855, "AIC-7855"),
DEVICE( ADAPTEC, ADAPTEC_5800, "AIC-5800"),
+ DEVICE( ADAPTEC, ADAPTEC_3860, "AIC-7860"),
DEVICE( ADAPTEC, ADAPTEC_7860, "AIC-7860"),
DEVICE( ADAPTEC, ADAPTEC_7861, "AIC-7861"),
DEVICE( ADAPTEC, ADAPTEC_7870, "AIC-7870"),
DEVICE( ADAPTEC, ADAPTEC_7882, "AIC-7882U"),
DEVICE( ADAPTEC, ADAPTEC_7883, "AIC-7883U"),
DEVICE( ADAPTEC, ADAPTEC_7884, "AIC-7884U"),
+ DEVICE( ADAPTEC, ADAPTEC_7885, "AIC-7885U"),
+ DEVICE( ADAPTEC, ADAPTEC_7886, "AIC-7886U"),
+ DEVICE( ADAPTEC, ADAPTEC_7887, "AIC-7887U"),
+ DEVICE( ADAPTEC, ADAPTEC_7888, "AIC-7888U"),
DEVICE( ADAPTEC, ADAPTEC_1030, "ABA-1030 DVB receiver"),
DEVICE( ADAPTEC2, ADAPTEC2_2940U2,"AHA-2940U2"),
- DEVICE( ADAPTEC2, ADAPTEC2_78902, "AIC-7890/1"),
+ DEVICE( ADAPTEC2, ADAPTEC2_2930U2,"AHA-2930U2"),
+ DEVICE( ADAPTEC2, ADAPTEC2_7890B, "AIC-7890/1"),
DEVICE( ADAPTEC2, ADAPTEC2_7890, "AIC-7890/1"),
DEVICE( ADAPTEC2, ADAPTEC2_3940U2,"AHA-3940U2"),
DEVICE( ADAPTEC2, ADAPTEC2_3950U2D,"AHA-3950U2D"),
DEVICE( ADAPTEC2, ADAPTEC2_7896, "AIC-7896/7"),
+ DEVICE( ADAPTEC2, ADAPTEC2_7892A, "AIC-7892"),
+ DEVICE( ADAPTEC2, ADAPTEC2_7892B, "AIC-7892"),
+ DEVICE( ADAPTEC2, ADAPTEC2_7892D, "AIC-7892"),
+ DEVICE( ADAPTEC2, ADAPTEC2_7892P, "AIC-7892"),
+ DEVICE( ADAPTEC2, ADAPTEC2_7899A, "AIC-7899"),
+ DEVICE( ADAPTEC2, ADAPTEC2_7899B, "AIC-7899"),
+ DEVICE( ADAPTEC2, ADAPTEC2_7899D, "AIC-7899"),
+ DEVICE( ADAPTEC2, ADAPTEC2_7899P, "AIC-7899"),
DEVICE( ATRONICS, ATRONICS_2015, "IDE-2015PL"),
DEVICE( TIGERJET, TIGERJET_300, "Tiger300 ISDN"),
DEVICE( ARK, ARK_STING, "Stingray"),
tristate 'Bidirectional parallel port support (EXPERIMENTAL)' CONFIG_SUN_BPP
tristate 'Videopix Frame Grabber (EXPERIMENTAL)' CONFIG_SUN_VIDEOPIX
tristate 'Aurora Multiboard 1600se (EXPERIMENTAL)' CONFIG_SUN_AURORA
+
+ # XXX Why don't we do "source drivers/char/Config.in" somewhere?
+ if [ "$CONFIG_PCI" = "y" ]; then
+ define_bool CONFIG_APM_RTC_IS_GMT y # no shit
+ bool 'PC-style RTC' CONFIG_RTC
+ fi
fi
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/tqueue.h>
+#include <linux/delay.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/segment.h>
#include <asm/bitops.h>
-#include <asm/delay.h>
#include <asm/kdebug.h>
#include <asm/sbus.h>
#include <asm/uaccess.h>
instances[idx].enhanced = 0;
instances[idx].direction = 0;
instances[idx].mode = COMPATIBILITY;
- instances[idx].wait_queue = 0;
+ init_waitqueue_head(&instances[idx].wait_queue);
instances[idx].run_length = 0;
instances[idx].run_flag = 0;
init_timer(&instances[idx].timer_list);
-/* $Id: pcikbd.c,v 1.29 1999/05/16 13:47:53 ecd Exp $
+/* $Id: pcikbd.c,v 1.30 1999/06/03 15:02:36 davem Exp $
* pcikbd.c: Ultra/AX PC keyboard support.
*
* Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
- * JavaStation(MrCoffee) support by Pete A. Zaitcev.
+ * JavaStation support by Pete A. Zaitcev.
*
* This code is mainly put together from various places in
* drivers/char, please refer to these sources for credits
#include <asm/io.h>
#include <asm/uaccess.h>
-#ifdef __sparc_v9__
-#define PCI_KB_NAME "kb_ps2"
-#define PCI_MS_NAME "kdmouse"
-#else
-#define PCI_KB_NAME "keyboard"
-#define PCI_MS_NAME "mouse"
-#endif
+/*
+ * Different platforms provide different permutations of names.
+ * AXi - kb_ps2, kdmouse.
+ * MrCoffee - keyboard, mouse.
+ * Espresso - keyboard, kdmouse.
+ */
+#define PCI_KB_NAME1 "kb_ps2"
+#define PCI_KB_NAME2 "keyboard"
+#define PCI_MS_NAME1 "kdmouse"
+#define PCI_MS_NAME2 "mouse"
#include "pcikbd.h"
#include "sunserial.h"
for_each_ebusdev(edev, ebus) {
if(!strcmp(edev->prom_name, "8042")) {
for_each_edevchild(edev, child) {
- if (!strcmp(child->prom_name, PCI_KB_NAME))
+ if (strcmp(child->prom_name, PCI_KB_NAME1) == 0 ||
+ strcmp(child->prom_name, PCI_KB_NAME2) == 0)
goto found;
}
}
found:
pcikbd_iobase = child->base_address[0];
+#ifdef __sparc_v9__
if (check_region(pcikbd_iobase, sizeof(unsigned long))) {
printk("8042: can't get region %lx, %d\n",
pcikbd_iobase, (int)sizeof(unsigned long));
return;
}
request_region(pcikbd_iobase, sizeof(unsigned long), "8042 controller");
+#endif
pcikbd_irq = child->irqs[0];
if (request_irq(pcikbd_irq, &pcikbd_interrupt,
* XXX: my 3.1.3 PROM does not give me the beeper node for the audio
* auxio register, though I know it is there... (ecd)
*
- * Both JE1 & MrCoffe have no beeper. How about Krups? --zaitcev
+ * JavaStations appear not to have beeper. --zaitcev
*/
if (!edev)
pcibeep_iobase = (pcikbd_iobase & ~(0xffffff)) | 0x722000;
}
-
/*
* Here begins the Mouse Driver.
*/
for_each_ebusdev(edev, ebus) {
if(!strcmp(edev->prom_name, "8042")) {
for_each_edevchild(edev, child) {
- if (!strcmp(child->prom_name, PCI_MS_NAME))
+ if (strcmp(child->prom_name, PCI_MS_NAME1) == 0 ||
+ strcmp(child->prom_name, PCI_MS_NAME2) == 0)
goto found;
}
}
__initfunc(int ps2kbd_probe(unsigned long *memory_start))
{
- int pnode, enode, node, dnode;
+ int pnode, enode, node, dnode, xnode;
int kbnode = 0, msnode = 0, bnode = 0;
int devices = 0;
char prop[128];
* For each '8042' on this EBus...
*/
while (node) {
+ dnode = prom_getchild(node);
+
/*
* Does it match?
*/
- dnode = prom_getchild(node);
- dnode = prom_searchsiblings(dnode, PCI_KB_NAME);
- if (dnode == kbnode) {
+ if ((xnode = prom_searchsiblings(dnode, PCI_KB_NAME1)) == kbnode) {
+ ++devices;
+ } else if ((xnode = prom_searchsiblings(dnode, PCI_KB_NAME2)) == kbnode) {
++devices;
}
- dnode = prom_getchild(node);
- dnode = prom_searchsiblings(dnode, PCI_MS_NAME);
- if (dnode == msnode) {
+ if ((xnode = prom_searchsiblings(dnode, PCI_MS_NAME1)) == msnode) {
+ ++devices;
+ } else if ((xnode = prom_searchsiblings(dnode, PCI_MS_NAME2)) == msnode) {
++devices;
}
-/* $Id: rtc.c,v 1.13 1998/08/26 10:29:44 davem Exp $
+/* $Id: rtc.c,v 1.14 1999/06/03 15:02:38 davem Exp $
*
* Linux/SPARC Real Time Clock Driver
* Copyright (C) 1996 Thomas K. Dyas (tdyas@eden.rutgers.edu)
static int rtc_open(struct inode *inode, struct file *file)
{
+
if (rtc_busy)
return -EBUSY;
#ifdef MODULE
int init_module(void)
#else
-__initfunc(int rtc_init(void))
+__initfunc(int rtc_sun_init(void))
#endif
{
int error;
+ if (mstk48t02_regs == 0) {
+ /* This diagnostic is a debugging aid... But a useful one. */
+ printk(KERN_ERR "rtc: no Mostek in this computer\n");
+ return -ENODEV;
+ }
+
error = misc_register(&rtc_dev);
if (error) {
- printk(KERN_ERR "rtc: unable to get misc minor\n");
+ printk(KERN_ERR "rtc: unable to get misc minor for Mostek\n");
return error;
}
-/* $Id: su.c,v 1.19 1999/05/12 11:15:14 davem Exp $
+/* $Id: su.c,v 1.20 1999/06/03 15:02:40 davem Exp $
* su.c: Small serial driver for keyboard/mouse interface on sparc32/PCI
*
* Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
- * Coypright (C) 1998 Pete Zaitcev (zaitcev@metabyte.com)
+ * Copyright (C) 1998-1999 Pete Zaitcev (zaitcev@metabyte.com)
*
* This is mainly a variation of drivers/char/serial.c,
* credits go to authors mentioned therein.
int su_serial_console_init(void);
#endif
+enum su_type { SU_PORT_NONE, SU_PORT_MS, SU_PORT_KBD, SU_PORT_PORT };
+static char *su_typev[] = { "???", "mouse", "kbd", "serial" };
+
+#define SU_PROPSIZE 128
+
/*
* serial.c saves memory when it allocates async_info upon first open.
* We have parts of state structure together because we do call startup
int line;
int cflag;
- int kbd_node;
- int ms_node;
+ enum su_type port_type; /* Hookup type: e.g. mouse */
int port_node;
char name[16];
unsigned long last_active; /* For async_struct, to be */
};
+/*
+ * Scan status structure.
+ * "prop" is a local variable but it eats stack to keep it in each
+ * stack frame of a recursive procedure.
+ */
+struct su_probe_scan {
+ int msnode, kbnode; /* PROM nodes for mouse and keyboard */
+ int msx, kbx; /* minors for mouse and keyboard */
+ int devices; /* scan index */
+ char prop[SU_PROPSIZE];
+};
+
static char *serial_name = "PCIO serial driver";
static char serial_version[16];
return 0;
}
-#ifdef __sparc_v9__
-
static inline
unsigned int su_inb(struct su_struct *info, unsigned long offset)
{
static inline void
su_outb(struct su_struct *info, unsigned long offset, int value)
{
- outb(value, info->port + offset);
-}
-
-#else
-
-static inline
-unsigned int su_inb(struct su_struct *info, unsigned long offset)
-{
- return (unsigned int)(*(volatile unsigned char *)(info->port + offset));
-}
-
-static inline void
-su_outb(struct su_struct *info, unsigned long offset, int value)
-{
+#ifndef __sparc_v9__
/*
* MrCoffee has weird schematics: IRQ4 & P10(?) pins of SuperIO are
* connected with a gate then go to SlavIO. When IRQ4 goes tristated
* This problem is similar to what Alpha people suffer, see serial.c.
*/
if (offset == UART_MCR) value |= UART_MCR_OUT2;
- *(volatile unsigned char *)(info->port + offset) = value;
-}
-
#endif
+ outb(value, info->port + offset);
+}
#define serial_in(info, off) su_inb(info, off)
#define serial_inp(info, off) su_inb(info, off)
do {
ch = serial_inp(info, UART_RX);
- if (info->kbd_node) {
+ if (info->port_type == SU_PORT_KBD) {
if(ch == SUNKBD_RESET) {
l1a_state.kbd_id = 1;
l1a_state.l1_down = 0;
(status & UART_MSR_DCD))
hardpps();
#endif
- }
+ }
if (status & UART_MSR_DCTS)
icount->cts++;
wake_up_interruptible(&info->delta_msr_wait);
/*
* Allocate the IRQ if necessary
*/
- if (info->kbd_node || info->ms_node) {
+ if (info->port_type != SU_PORT_PORT) {
retval = request_irq(info->irq, su_kbd_ms_interrupt,
SA_SHIRQ, info->name, info);
} else {
int bits;
unsigned long flags;
- if (!info->kbd_node && !info->ms_node) {
+ if (info->port_type == SU_PORT_PORT) {
if (!info->tty || !info->tty->termios)
return;
if (!info->port)
struct su_struct *info = su_table;
int lsr;
- if (!info->kbd_node)
+ if (!info->port_type != SU_PORT_KBD)
++info;
- if (!info)
+ if (!info->port_type != SU_PORT_KBD)
return;
do {
{
struct su_struct *info = su_table;
- if (!info->ms_node)
+ if (!info->port_type != SU_PORT_MS)
++info;
- if (!info)
+ if (!info->port_type != SU_PORT_MS)
return;
info->cflag &= ~(CBAUDEX | CBAUD);
/*
* ---------------------------------------------------------------------
- * su_init() and friends
+ * su_XXX_init() and friends
*
- * su_init() is called at boot-time to initialize the serial driver.
+ * su_XXX_init() is called at boot-time to initialize the serial driver.
* ---------------------------------------------------------------------
*/
*/
__initfunc(static __inline__ void show_su_version(void))
{
- char *revision = "$Revision: 1.19 $";
+ char *revision = "$Revision: 1.20 $";
char *version, *p;
version = strchr(revision, ' ');
}
/*
- * This routine is called by su_init() to initialize a specific serial
- * port. It determines what type of UART chip this serial port is
+ * This routine is called by su_{serial|kbd_ms}_init() to initialize a specific
+ * serial port. It determines what type of UART chip this serial port is
* using: 8250, 16450, 16550, 16550A. The important question is
* whether or not this UART is a 16550A, since this will determine
* whether or not we can use its FIFO features.
autoconfig(struct su_struct *info)
{
unsigned char status1, status2, scratch, scratch2;
-#ifdef __sparc_v9__
struct linux_ebus_device *dev = 0;
struct linux_ebus *ebus;
-#else
+#ifndef __sparc_v9__
struct linux_prom_registers reg0;
#endif
unsigned long flags;
-#ifdef __sparc_v9__
+ if (!info->port_node || !info->port_type)
+ return;
+
+ /*
+ * First we look for Ebus-bases su's
+ */
for_each_ebus(ebus) {
for_each_ebusdev(dev, ebus) {
- if (!strncmp(dev->prom_name, "su", 2)) {
- if (dev->prom_node == info->kbd_node)
- goto ebus_done;
- if (dev->prom_node == info->ms_node)
- goto ebus_done;
+ if (dev->prom_node == info->port_node) {
+ info->port = dev->base_address[0];
+#ifdef __sparc_v9__
+ if (check_region(info->port, 8))
+ return;
+#endif
+ info->irq = dev->irqs[0];
+ goto ebus_done;
}
}
}
-ebus_done:
- if (!dev)
- return;
-
- info->port = dev->base_address[0];
- if (check_region(info->port, 8))
- return;
- info->irq = dev->irqs[0];
+#ifdef __sparc_v9__
+ /*
+ * Not on Ebus, bailing.
+ */
+ return;
#else
- if (!info->port_node)
- return;
-
+ /*
+ * Not on Ebus, must be OBIO.
+ */
if (prom_getproperty(info->port_node, "reg",
(char *)®0, sizeof(reg0)) == -1) {
prom_printf("su: no \"reg\" property\n");
prom_printf("su: cannot map\n");
return;
}
+
/*
- * There is no intr property on MrCoffee, so hardwire it. Krups?
+ * There is no intr property on MrCoffee, so hardwire it.
*/
info->irq = IRQ_4M(13);
#endif
-#ifdef DEBUG_SERIAL_OPEN
- printk("Found 'su' at %016lx IRQ %s\n", dev->base_address[0],
- __irq_itoa(dev->irqs[0]));
+ebus_done:
+
+#ifdef SERIAL_DEBUG_OPEN
+ printk("Found 'su' at %016lx IRQ %s\n", info->port,
+ __irq_itoa(info->irq));
#endif
info->magic = SERIAL_MAGIC;
save_flags(flags); cli();
-
+
/*
* Do a simple existence test first; if we fail this, there's
* no point trying anything else.
return; /* We failed; there's nothing here */
}
-#if 0 /* P3: This does not work on MrCoffee. OUT2 is 0x80 - should work... */
scratch = serial_inp(info, UART_MCR);
serial_outp(info, UART_MCR, UART_MCR_LOOP | scratch);
serial_outp(info, UART_MCR, UART_MCR_LOOP | 0x0A);
status1 = serial_inp(info, UART_MSR) & 0xF0;
serial_outp(info, UART_MCR, scratch);
if (status1 != 0x90) {
+ /*
+ * This code fragment used to fail, now it fixed itself.
+ * We keep the printout for a case.
+ */
+ printk("su: loopback returned status 0x%02x\n", status1);
restore_flags(flags);
return;
}
-#endif
scratch2 = serial_in(info, UART_LCR);
serial_outp(info, UART_LCR, 0xBF); /* set up for StarTech test */
return;
}
- if (info->kbd_node || info->ms_node)
- sprintf(info->name, "su(%s)", info->ms_node ? "mouse" : "kbd");
- else
- strcpy(info->name, "su(serial)");
+ sprintf(info->name, "su(%s)", su_typev[info->port_type]);
#ifdef __sparc_v9__
request_region(info->port, 8, info->name);
info->tqueue.routine = do_softint;
info->tqueue.data = info;
info->cflag = serial_driver.init_termios.c_cflag;
+ init_waitqueue_head(&info->open_wait);
+ init_waitqueue_head(&info->close_wait);
+ init_waitqueue_head(&info->delta_msr_wait);
autoconfig(info);
if (info->type == PORT_UNKNOWN)
continue;
- printk(KERN_INFO "%s at %16lx (irq = %s) is a %s\n",
- info->name, info->port, __irq_itoa(info->irq),
+ printk(KERN_INFO "%s at 0x%lx (tty %d irq %s) is a %s\n",
+ info->name, (long)info->port, i, __irq_itoa(info->irq),
uart_config[info->type].name);
}
info->type = PORT_UNKNOWN;
info->baud_base = BAUD_BASE;
- if (info->kbd_node)
+ if (info->port_type == SU_PORT_KBD)
info->cflag = B1200 | CS8 | CLOCAL | CREAD;
else
info->cflag = B4800 | CS8 | CLOCAL | CREAD;
+ init_waitqueue_head(&info->open_wait);
+ init_waitqueue_head(&info->close_wait);
+ init_waitqueue_head(&info->delta_msr_wait);
+
autoconfig(info);
if (info->type == PORT_UNKNOWN)
continue;
uart_config[info->type].name);
startup(info);
- if (info->kbd_node)
+ if (info->port_type == SU_PORT_KBD)
keyboard_zsinit(su_put_char_kbd);
else
sun_mouse_zsinit();
return 0;
}
-__initfunc(int su_probe (unsigned long *memory_start))
+/*
+ * We got several platforms which present 'su' in different parts
+ * of device tree. 'su' may be found under obio, ebus, isa and pci.
+ * We walk over the tree and find them wherever PROM hides them.
+ */
+__initfunc(void su_probe_any(struct su_probe_scan *t, int sunode))
{
- struct su_struct *info = su_table;
- int node, enode, tnode, sunode;
- int kbnode = 0, msnode = 0;
- int devices = 0;
- char prop[128];
+ struct su_struct *info;
int len;
- /*
- * Find su on MrCoffee. We return OK code if find any.
- * Then su_init finds every one and initializes them.
- * We do this early because MrCoffee got no aliases.
- */
- node = prom_getchild(prom_root_node);
- if ((node = prom_searchsiblings(node, "obio")) != 0) {
- if ((sunode = prom_getchild(node)) != 0) {
- if ((sunode = prom_searchsiblings(sunode, "su")) != 0) {
- info->port_node = sunode;
-#ifdef CONFIG_SERIAL_CONSOLE
- /*
- * Console must be initiated after the generic
- * initialization.
- * sunserial_setinitfunc inverts order, so
- * call this before next one.
- */
- sunserial_setinitfunc(memory_start,
- su_serial_console_init);
-#endif
- sunserial_setinitfunc(memory_start,
- su_serial_init);
- return 0;
+ if (t->devices >= NR_PORTS) return;
+
+ for (; sunode != 0; sunode = prom_getsibling(sunode)) {
+ len = prom_getproperty(sunode, "name", t->prop, SU_PROPSIZE);
+ if (len <= 1) continue; /* Broken PROM node */
+ if (strncmp(t->prop, "su", len) == 0 ||
+ strncmp(t->prop, "serial", len) == 0 ||
+ strncmp(t->prop, "su_pnp", len) == 0) {
+ info = &su_table[t->devices];
+ if (t->kbnode != 0 && sunode == t->kbnode) {
+ t->kbx = t->devices;
+ info->port_type = SU_PORT_KBD;
+ } else if (t->msnode != 0 && sunode == t->msnode) {
+ t->msx = t->devices;
+ info->port_type = SU_PORT_MS;
+ } else {
+ info->port_type = SU_PORT_PORT;
}
+ info->port_node = sunode;
+ ++t->devices;
+ } else {
+ su_probe_any(t, prom_getchild(sunode));
}
}
+}
+
+__initfunc(int su_probe (unsigned long *memory_start))
+{
+ int node;
+ int len;
+ struct su_probe_scan scan;
+
+ /*
+ * First, we scan the tree.
+ */
+ scan.devices = 0;
+ scan.msx = -1;
+ scan.kbx = -1;
+ scan.kbnode = 0;
+ scan.msnode = 0;
/*
* Get the nodes for keyboard and mouse from 'aliases'...
*/
node = prom_getchild(prom_root_node);
node = prom_searchsiblings(node, "aliases");
- if (!node)
- return -ENODEV;
+ if (node != 0) {
- len = prom_getproperty(node, "keyboard", prop, sizeof(prop));
- if (len > 0) {
- prop[len] = 0;
- kbnode = prom_finddevice(prop);
- }
- if (!kbnode)
- return -ENODEV;
+ len = prom_getproperty(node, "keyboard", scan.prop,SU_PROPSIZE);
+ if (len > 0) {
+ scan.prop[len] = 0;
+ scan.kbnode = prom_finddevice(scan.prop);
+ }
- len = prom_getproperty(node, "mouse", prop, sizeof(prop));
- if (len > 0) {
- prop[len] = 0;
- msnode = prom_finddevice(prop);
+ len = prom_getproperty(node, "mouse", scan.prop, SU_PROPSIZE);
+ if (len > 0) {
+ scan.prop[len] = 0;
+ scan.msnode = prom_finddevice(scan.prop);
+ }
}
- if (!msnode)
- return -ENODEV;
- /*
- * Find matching EBus nodes...
- */
- node = prom_getchild(prom_root_node);
- if ((node = prom_searchsiblings(node, "pci")) == 0) {
- return -ENODEV; /* Plain sparc */
- }
+ su_probe_any(&scan, prom_getchild(prom_root_node));
/*
- * Check for SUNW,sabre on Ultra 5/10/AXi.
+ * Second, we process the special case of keyboard and mouse.
+ *
+ * Currently if we got keyboard and mouse hooked to "su" ports
+ * we do not use any possible remaining "su" as a serial port.
+ * Thus, we ignore values of .msx and .kbx, then compact ports.
+ * Those who want to address this issue need to merge
+ * su_serial_init() and su_ms_kbd_init().
*/
- len = prom_getproperty(node, "model", prop, sizeof(prop));
- if ((len > 0) && !strncmp(prop, "SUNW,sabre", len)) {
- node = prom_getchild(node);
- node = prom_searchsiblings(node, "pci");
+ if (scan.msx != -1 && scan.kbx != -1) {
+ su_table[0].port_type = SU_PORT_MS;
+ su_table[0].port_node = scan.msnode;
+ su_table[1].port_type = SU_PORT_KBD;
+ su_table[1].port_node = scan.kbnode;
+
+ sunserial_setinitfunc(memory_start, su_kbd_ms_init);
+ rs_ops.rs_change_mouse_baud = su_change_mouse_baud;
+ sunkbd_setinitfunc(memory_start, sun_kbd_init);
+ kbd_ops.compute_shiftstate = sun_compute_shiftstate;
+ kbd_ops.setledstate = sun_setledstate;
+ kbd_ops.getledstate = sun_getledstate;
+ kbd_ops.setkeycode = sun_setkeycode;
+ kbd_ops.getkeycode = sun_getkeycode;
+#ifdef CONFIG_PCI
+ sunkbd_install_keymaps(memory_start, sun_key_maps,
+ sun_keymap_count, sun_func_buf, sun_func_table,
+ sun_funcbufsize, sun_funcbufleft,
+ sun_accent_table, sun_accent_table_size);
+#endif
+ return 0;
}
+ if (scan.msx != -1 || scan.kbx != -1) {
+ printk("su_probe: cannot match keyboard and mouse, confused\n");
+ return -ENODEV;
+ }
+
+ if (scan.devices == 0)
+ return -ENODEV;
+#ifdef CONFIG_SERIAL_CONSOLE
/*
- * For each PCI bus...
+ * Console must be initiated after the generic initialization.
+ * sunserial_setinitfunc inverts order, so call this before next one.
*/
- while (node) {
- enode = prom_getchild(node);
- enode = prom_searchsiblings(enode, "ebus");
-
- /*
- * For each EBus on this PCI...
- */
- while (enode) {
- sunode = prom_getchild(enode);
- tnode = prom_searchsiblings(sunode, "su");
- if (!tnode)
- tnode = prom_searchsiblings(sunode, "su_pnp");
- sunode = tnode;
-
- /*
- * For each 'su' on this EBus...
- */
- while (sunode) {
- /*
- * Does it match?
- */
- if (sunode == kbnode) {
- info->kbd_node = sunode;
- ++info;
- ++devices;
- }
- if (sunode == msnode) {
- info->ms_node = sunode;
- ++info;
- ++devices;
- }
-
- /*
- * Found everything we need?
- */
- if (devices == 2)
- goto found;
-
- sunode = prom_getsibling(sunode);
- tnode = prom_searchsiblings(sunode, "su");
- if (!tnode)
- tnode = prom_searchsiblings(sunode,
- "su_pnp");
- sunode = tnode;
- }
- enode = prom_getsibling(enode);
- enode = prom_searchsiblings(enode, "ebus");
- }
- node = prom_getsibling(node);
- node = prom_searchsiblings(node, "pci");
- }
- return -ENODEV;
-
-found:
- sunserial_setinitfunc(memory_start, su_kbd_ms_init);
- rs_ops.rs_change_mouse_baud = su_change_mouse_baud;
- sunkbd_setinitfunc(memory_start, sun_kbd_init);
- kbd_ops.compute_shiftstate = sun_compute_shiftstate;
- kbd_ops.setledstate = sun_setledstate;
- kbd_ops.getledstate = sun_getledstate;
- kbd_ops.setkeycode = sun_setkeycode;
- kbd_ops.getkeycode = sun_getkeycode;
-#ifdef CONFIG_PCI
- sunkbd_install_keymaps(memory_start, sun_key_maps, sun_keymap_count,
- sun_func_buf, sun_func_table,
- sun_funcbufsize, sun_funcbufleft,
- sun_accent_table, sun_accent_table_size);
+ sunserial_setinitfunc(memory_start, su_serial_console_init);
#endif
+ sunserial_setinitfunc(memory_start, su_serial_init);
return 0;
}
int init_vfc_devstruct(struct vfc_dev *dev, int instance)
{
dev->instance=instance;
- dev->device_lock_sem=MUTEX;
+ init_MUTEX(&dev->device_lock_sem);
dev->control_reg=0;
- dev->poll_wait=NULL;
+ init_waitqueue_head(&dev->poll_wait);
dev->busy=0;
return 0;
}
{
struct vfc_debug_inout inout;
unsigned char *buffer;
- int ret;
if(!capable(CAP_SYS_ADMIN)) return -EPERM;
dep_tristate 'Adaptec AHA1740 support' CONFIG_SCSI_AHA1740 $CONFIG_SCSI
dep_tristate 'Adaptec AIC7xxx support' CONFIG_SCSI_AIC7XXX $CONFIG_SCSI
if [ "$CONFIG_SCSI_AIC7XXX" != "n" ]; then
- bool ' Override driver defaults for commands per LUN' CONFIG_OVERRIDE_CMDS N
- if [ "$CONFIG_OVERRIDE_CMDS" != "n" ]; then
- int ' Maximum number of commands per LUN' CONFIG_AIC7XXX_CMDS_PER_LUN 24
- fi
+ bool ' Enable Tagged Command Queueing (TCQ) by default' CONFIG_AIC7XXX_TCQ_ON_BY_DEFAULT
+ int ' Maximum number of TCQ commands per device' CONFIG_AIC7XXX_CMDS_PER_DEVICE 8
bool ' Collect statistics to report in /proc' CONFIG_AIC7XXX_PROC_STATS N
int ' Delay in seconds after SCSI bus reset' CONFIG_AIC7XXX_RESET_DELAY 5
fi
AHA-274xT
AHA-2842
AHA-2910B
+ AHA-2920C
+ AHA-2930
+ AHA-2930U
+ AHA-2930U2
AHA-2940
AHA-2940W
AHA-2940U
Adaptec Cards
----------------------------
AHA-2920 (Only the cards that use the Future Domain chipset are not
- supported, any 2920 cards based on Adaptec AIC chipsets are
- supported)
+ supported, any 2920 cards based on Adaptec AIC chipsets,
+ such as the 2920C, are supported)
AAA-13x Raid Adapters
AAA-113x Raid Port Card
Jess Johnson jester@frenzy.com
(AIC7xxx FAQ author)
Doug Ledford dledford@redhat.com
- (Current Linux aic7xxx-5.x.x Driver/Patch/FTP/FAQ maintainer)
+ (Current Linux aic7xxx-5.x.x Driver/Patch/FTP maintainer)
Special thanks go to John Aycock (aycock@cpsc.ucalgary.ca), the original
author of the driver. John has since retired from the project. Thanks
list and someone can help you out.
"aic7xxx=tag_info:{{8,8..},{8,8..},..}" - This option is used to disable
- tagged queueing on specific devices. As of driver version 5.1.8, we
- now globally enable tagged queueing by default. In order to
- disable tagged queueing for certian devices at boot time, a user may
- use this boot param. The driver will then parse this message out
- and disable the specific device entries that are present based upon
+ or enable Tagged Command Queueing (TCQ) on specific devices. As of
+ driver version 5.1.11, TCQ is now either on or off by default
+ according to the setting you choose during the make config process.
+ In order to en/disable TCQ for certain devices at boot time, a user
+ may use this boot param. The driver will then parse this message out
+ and en/disable the specific device entries that are present based upon
the value given. The param line is parsed in the following manner:
{ - first instance indicates the start of this parameter values
see this documentation, you need to use one of the advanced configuration
programs (menuconfig and xconfig). If you are using the "make menuconfig"
method of configuring your kernel, then you would simply highlight the
- option in question and hit the F1 key. If you are using the "make xconfig"
- method of configuring your kernel, then simply click on the help button next
- to the option you have questions about. The help information from the
- Configure.help file will then get automatically displayed.
+ option in question and hit the ? key. If you are using the "make xconfig"
+ method of configuring your kernel, then simply click on the help button
+ next to the option you have questions about. The help information from
+ the Configure.help file will then get automatically displayed.
/proc support
------------------------------
* under normal conditions.
*/
-/*
- * AIC7XXX_FAKE_NEGOTIATION_CMDS
- * We now have two distinctly different methods of device negotiation
- * in this code. The two methods are selected by either defining or not
- * defining this option. The difference is as follows:
- *
- * With AIC7XXX_FAKE_NEGOTIATION_CMDS not set (commented out)
- * When the driver is in need of issuing a negotiation command for any
- * given device, it will add the negotiation message on to part of a
- * regular SCSI command for the device. In the process, if the device
- * is configured for and using tagged queueing, then the code will
- * also issue that single command as a non-tagged command, attach the
- * negotiation message to that one command, and use a temporary
- * queue depth of one to keep the untagged and tagged commands from
- * overlapping.
- * Pros: This doesn't use any extra SCB structures, it's simple, it
- * works most of the time (if not all of the time now), and
- * since we get the device capability info frmo the INQUIRY data
- * now, shouldn't cause any problems.
- * Cons: When we need to send a negotiation command to a device, we
- * must use a command that is being sent to LUN 0 of the device.
- * If we try sending one to high LUN numbers, then some devices
- * get noticeably upset. Since we have to wait for a command with
- * LUN == 0 to come along, we may not be able to renegotiate when
- * we want if the user is actually using say LUN 1 of a CD Changer
- * instead of using LUN 0 for an extended period of time.
- *
- * With AIC7XXX_FAKE_NEGOTIATION_CMDS defined
- * When we need to negotiate with a device, instead of attaching our
- * negotiation message to an existing command, we insert our own
- * fictional Scsi_Cmnd into the chain that has the negotiation message
- * attached to it. We send this one command as untagged regardless
- * of the device type, and we fiddle with the queue depth the same as
- * we would with the option unset to avoid overlapping commands. The
- * primary difference between this and the unset option is that the
- * negotiation message is no longer attached to a specific command,
- * instead it is its own command and is merely triggered by a
- * combination of both A) We need to negotiate and B) The mid level
- * SCSI code has sent us a command. We still don't do any negotiation
- * unless there is a valid SCSI command to be processed.
- * Pros: This fixes the problem above in the Cons section. Since we
- * issue our own fake command, we can set the LUN to 0 regardless
- * of what the LUN is in the real command. It also means that if
- * the device get's nasty over negotiation issues, it won't be
- * showing up on a regular command, so we won't get any SENSE buffer
- * data or STATUS_BYTE returns to the mid level code that are caused
- * by snits in the negotiation code.
- * Cons: We add more code, and more complexity. This means more ways
- * in which things could break. It means a larger driver. It means
- * more resource consumption for the fake commands. However, the
- * biggest problem is this. Take a system where there is a CD-ROM
- * on the SCSI bus. Someone has a CD in the CD-ROM and is using it.
- * For some reason the SCSI bus gets reset. We don't touch the
- * CD-ROM again for quite a period of time (so we don't renegotiate
- * after the reset until we do touch the CD-ROM again). In the
- * time while we aren't using the CD-ROM, the current disc is
- * removed and a new one put in. When we go to check that disc, we
- * will first have to renegotiate. In so doing, we issue our fake
- * SCSI command, which happens to be TEST_UNIT_READY. The CD-ROM
- * negotiates with us, then responds to our fake command with a
- * CHECK_CONDITION status. We REQUEST_SENSE from the CD-ROM, it
- * then sends the SENSE data to our fake command to tell it that
- * it has been through a disc change. There, now we've cleared out
- * the SENSE data along with our negotiation command, and when the
- * real command executes, it won't pick up that the CD was changed.
- * That's the biggest Con to this approach. In the future, I could
- * probably code around this problem though, so this option is still
- * viable.
- *
- * So, which command style should you use? I would appreciate it if people
- * could try out both types. I want to know about any cases where one
- * method works and the other doesn't. If one method works on significantly
- * more systems than another, then it will become the default. If the second
- * option turns out to work best, then I'll find a way to work around that
- * big con I listed.
- *
- * -- July 7, 02:33
- * OK...I just added some code that should make the Con listed for the
- * fake commands a non issue now. However, it needs testing. For now,
- * I'm going to make the default to use the fake commands, we'll see how
- * it goes.
- */
-
-#define AIC7XXX_FAKE_NEGOTIATION_CMDS
-
/*
* AIC7XXX_STRICT_PCI_SETUP
* Should we assume the PCI config options on our controllers are set with
#include "aic7xxx/sequencer.h"
#include "aic7xxx/scsi_message.h"
#include "aic7xxx_reg.h"
+#include <scsi/scsicam.h>
#include <linux/stat.h>
#include <linux/malloc.h> /* for kmalloc() */
0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL
};
-#define AIC7XXX_C_VERSION "5.1.10"
+#define AIC7XXX_C_VERSION "5.1.17"
#define NUMBER(arr) (sizeof(arr) / sizeof(arr[0]))
#define MIN(a,b) (((a) < (b)) ? (a) : (b))
* You can try raising me if tagged queueing is enabled, or lowering
* me if you only have 4 SCBs.
*/
-#ifdef CONFIG_AIC7XXX_CMDS_PER_LUN
-#define AIC7XXX_CMDS_PER_LUN CONFIG_AIC7XXX_CMDS_PER_LUN
+#ifdef CONFIG_AIC7XXX_CMDS_PER_DEVICE
+#define AIC7XXX_CMDS_PER_DEVICE CONFIG_AIC7XXX_CMDS_PER_DEVICE
#else
-#define AIC7XXX_CMDS_PER_LUN 24
+#define AIC7XXX_CMDS_PER_DEVICE 8
#endif
/* Set this to the delay in seconds after SCSI bus reset. */
*
* *** Determining commands per LUN ***
*
- * When AIC7XXX_CMDS_PER_LUN is not defined, the driver will use its
+ * When AIC7XXX_CMDS_PER_DEVICE is not defined, the driver will use its
* own algorithm to determine the commands/LUN. If SCB paging is
* enabled, which is always now, the default is 8 commands per lun
* that indicates it supports tagged queueing. All non-tagged devices
* Make a define that will tell the driver not to use tagged queueing
* by default.
*/
+#ifdef CONFIG_AIC7XXX_TCQ_ON_BY_DEFAULT
+#define DEFAULT_TAG_COMMANDS {0, 0, 0, 0, 0, 0, 0, 0,\
+ 0, 0, 0, 0, 0, 0, 0, 0}
+#else
#define DEFAULT_TAG_COMMANDS {255, 255, 255, 255, 255, 255, 255, 255,\
255, 255, 255, 255, 255, 255, 255, 255}
+#endif
/*
* Modify this as you see fit for your system. By setting tag_commands
};
*/
+static adapter_tag_info_t aic7xxx_tag_info[] =
+{
+ {DEFAULT_TAG_COMMANDS},
+ {DEFAULT_TAG_COMMANDS},
+ {DEFAULT_TAG_COMMANDS},
+ {DEFAULT_TAG_COMMANDS},
+ {DEFAULT_TAG_COMMANDS},
+ {DEFAULT_TAG_COMMANDS},
+ {DEFAULT_TAG_COMMANDS},
+ {DEFAULT_TAG_COMMANDS},
+ {DEFAULT_TAG_COMMANDS},
+ {DEFAULT_TAG_COMMANDS},
+ {DEFAULT_TAG_COMMANDS},
+ {DEFAULT_TAG_COMMANDS},
+ {DEFAULT_TAG_COMMANDS},
+ {DEFAULT_TAG_COMMANDS},
+ {DEFAULT_TAG_COMMANDS},
+ {DEFAULT_TAG_COMMANDS}
+};
+
+
/*
* Define an array of board names that can be indexed by aha_type.
* Don't forget to change this when changing the types!
"Adaptec AHA-2944 Ultra SCSI host adapter", /* AIC_7884 */
"Adaptec AIC-7895 Ultra SCSI host adapter", /* AIC_7895 */
"Adaptec AIC-7890/1 Ultra2 SCSI host adapter", /* AIC_7890 */
+ "Adaptec AHA-293X Ultra2 SCSI host adapter", /* AIC_7890 */
"Adaptec AHA-294X Ultra2 SCSI host adapter", /* AIC_7890 */
"Adaptec AIC-7896/7 Ultra2 SCSI host adapter", /* AIC_7896 */
"Adaptec AHA-394X Ultra2 SCSI host adapter", /* AIC_7897 */
"Adaptec AHA-395X Ultra2 SCSI host adapter", /* AIC_7897 */
"Adaptec PCMCIA SCSI controller", /* card bus stuff */
+ "Adaptec AIC-7892 Ultra 160/m SCSI host adapter", /* AIC_7892 */
+ "Adaptec AIC-7899 Ultra 160/m SCSI host adapter", /* AIC_7899 */
};
/*
SCB_DEVICE_RESET = 0x0020,
SCB_RESET = 0x0040,
SCB_RECOVERY_SCB = 0x0080,
- SCB_WAS_BUSY = 0x0100,
+ SCB_MSGOUT_PPR = 0x0100,
SCB_MSGOUT_SENT = 0x0200,
SCB_MSGOUT_SDTR = 0x0400,
SCB_MSGOUT_WDTR = 0x0800,
- SCB_MSGOUT_BITS = SCB_MSGOUT_SENT |
+ SCB_MSGOUT_BITS = SCB_MSGOUT_PPR |
+ SCB_MSGOUT_SENT |
SCB_MSGOUT_SDTR |
SCB_MSGOUT_WDTR,
SCB_QUEUED_ABORT = 0x1000,
- SCB_QUEUED_FOR_DONE = 0x2000
+ SCB_QUEUED_FOR_DONE = 0x2000,
+ SCB_WAS_BUSY = 0x4000
} scb_flag_type;
typedef enum {
AHC_AIC7890 = 0x0006,
AHC_AIC7895 = 0x0007,
AHC_AIC7896 = 0x0008,
+ AHC_AIC7892 = 0x0009,
+ AHC_AIC7899 = 0x000a,
AHC_VL = 0x0100,
AHC_EISA = 0x0200,
AHC_PCI = 0x0400,
AHC_QUEUE_REGS = 0x0040,
AHC_SG_PRELOAD = 0x0080,
AHC_SPIOCAP = 0x0100,
+ AHC_ULTRA3 = 0x0200,
AHC_AIC7770_FE = AHC_FENONE,
AHC_AIC7850_FE = AHC_SPIOCAP,
AHC_AIC7860_FE = AHC_ULTRA|AHC_SPIOCAP,
AHC_QUEUE_REGS|AHC_SG_PRELOAD,
AHC_AIC7895_FE = AHC_MORE_SRAM|AHC_CMD_CHAN|AHC_ULTRA,
AHC_AIC7896_FE = AHC_AIC7890_FE,
+ AHC_AIC7892_FE = AHC_AIC7890_FE|AHC_ULTRA3,
+ AHC_AIC7899_FE = AHC_AIC7890_FE|AHC_ULTRA3,
} ahc_feature;
struct aic7xxx_scb {
unsigned char goal_period;
unsigned char cur_offset;
unsigned char goal_offset;
+ unsigned char cur_options;
+ unsigned char goal_options;
unsigned char user_width;
unsigned char user_period;
unsigned char user_offset;
+ unsigned char user_options;
} transinfo_type;
/*
unsigned long isr_count; /* Interrupt count */
unsigned long spurious_int;
scb_data_type *scb_data;
+ volatile unsigned short needdv;
+ volatile unsigned short needppr;
volatile unsigned short needsdtr;
- volatile unsigned short sdtr_pending;
volatile unsigned short needwdtr;
- volatile unsigned short wdtr_pending;
+ volatile unsigned short dtr_pending;
struct aic7xxx_cmd_queue {
Scsi_Cmnd *head;
Scsi_Cmnd *tail;
#define DEVICE_PRESENT 0x01
#define BUS_DEVICE_RESET_PENDING 0x02
#define DEVICE_RESET_DELAY 0x04
-#define DEVICE_PRINT_SDTR 0x08
-#define DEVICE_PRINT_WDTR 0x10
+#define DEVICE_PRINT_DTR 0x08
+#define DEVICE_PARITY_ERROR 0x10
#define DEVICE_WAS_BUSY 0x20
+#define DEVICE_SCSI_3 0x40
#define DEVICE_SCANNED 0x80
volatile unsigned char dev_flags[MAX_TARGETS];
volatile unsigned char dev_active_cmds[MAX_TARGETS];
#endif
-#ifdef AIC7XXX_FAKE_NEGOTIATION_CMDS
- Scsi_Cmnd *dev_wdtr_cmnd[MAX_TARGETS];
- Scsi_Cmnd *dev_sdtr_cmnd[MAX_TARGETS];
-#endif
+ Scsi_Cmnd *dev_dtr_cmnd[MAX_TARGETS];
+ unsigned int dev_checksum[MAX_TARGETS];
+
unsigned char dev_last_queue_full[MAX_TARGETS];
unsigned char dev_last_queue_full_count[MAX_TARGETS];
unsigned char dev_max_queue_depth[MAX_TARGETS];
volatile scb_queue_type delayed_scbs[MAX_TARGETS];
- unsigned char msg_buf[9]; /* The message for the target */
+ unsigned char msg_buf[13]; /* The message for the target */
unsigned char msg_type;
#define MSG_TYPE_NONE 0x00
#define MSG_TYPE_INITIATOR_MSGOUT 0x01
int scsi_id_b; /* channel B for twin adapters */
unsigned int bios_address;
int board_name_index;
+ unsigned short needppr_copy; /* default config */
unsigned short needsdtr_copy; /* default config */
unsigned short needwdtr_copy; /* default config */
unsigned short ultraenb; /* Ultra mode target list */
* Provides a mapping of transfer periods in ns/4 to the proper value to
* stick in the SCSIRATE reg to use that transfer rate.
*/
-#define AHC_SYNCRATE_ULTRA2 0
-#define AHC_SYNCRATE_ULTRA 2
-#define AHC_SYNCRATE_FAST 5
+#define AHC_SYNCRATE_ULTRA3 0
+#define AHC_SYNCRATE_ULTRA2 1
+#define AHC_SYNCRATE_ULTRA 3
+#define AHC_SYNCRATE_FAST 6
+#define AHC_SYNCRATE_CRC 0x40
+#define AHC_SYNCRATE_SE 0x10
static struct aic7xxx_syncrate {
/* Rates in Ultra mode have bit 8 of sxfr set */
#define ULTRA_SXFR 0x100
unsigned char period;
const char *rate[2];
} aic7xxx_syncrates[] = {
+ { 0x42, 0x000, 9, {"80.0", "160.0"} },
{ 0x13, 0x000, 10, {"40.0", "80.0"} },
{ 0x14, 0x000, 11, {"33.0", "66.6"} },
{ 0x15, 0x100, 12, {"20.0", "40.0"} },
#endif
-/*
- * See the comments earlier in the file for what this item is all about
- * If you have more than 4 controllers, you will need to increase the
- * the number of items in the array below. Additionally, if you don't
- * want to have lilo pass a humongous config line to the aic7xxx driver,
- * then you can get in and manually adjust these instead of leaving them
- * at the default. Pay attention to the comments earlier in this file
- * concerning this array if you are going to hand modify these values.
- */
-static adapter_tag_info_t aic7xxx_tag_info[] =
-{
- {DEFAULT_TAG_COMMANDS},
- {DEFAULT_TAG_COMMANDS},
- {DEFAULT_TAG_COMMANDS},
- {DEFAULT_TAG_COMMANDS},
- {DEFAULT_TAG_COMMANDS},
- {DEFAULT_TAG_COMMANDS},
- {DEFAULT_TAG_COMMANDS},
- {DEFAULT_TAG_COMMANDS},
- {DEFAULT_TAG_COMMANDS},
- {DEFAULT_TAG_COMMANDS},
- {DEFAULT_TAG_COMMANDS},
- {DEFAULT_TAG_COMMANDS},
- {DEFAULT_TAG_COMMANDS},
- {DEFAULT_TAG_COMMANDS},
- {DEFAULT_TAG_COMMANDS},
- {DEFAULT_TAG_COMMANDS}
-};
-
#define VERBOSE_NORMAL 0x0000
#define VERBOSE_NEGOTIATION 0x0001
#define VERBOSE_SEQINT 0x0002
aic_outb(p, ((instr.integer >> 8) & 0xff), SEQRAM);
aic_outb(p, ((instr.integer >> 16) & 0xff), SEQRAM);
aic_outb(p, ((instr.integer >> 24) & 0xff), SEQRAM);
+ udelay(15);
break;
default:
*-F*************************************************************************/
static struct aic7xxx_syncrate *
aic7xxx_find_syncrate(struct aic7xxx_host *p, unsigned int *period,
- unsigned int maxsync)
+ unsigned int maxsync, unsigned char *options)
{
struct aic7xxx_syncrate *syncrate;
+ int done = FALSE;
+ switch(*options)
+ {
+ case MSG_EXT_PPR_OPTION_DT_CRC:
+ case MSG_EXT_PPR_OPTION_DT_UNITS:
+ if(!(p->features & AHC_ULTRA3))
+ {
+ *options = 0;
+ maxsync = MAX(maxsync, AHC_SYNCRATE_ULTRA2);
+ }
+ break;
+ case MSG_EXT_PPR_OPTION_DT_CRC_QUICK:
+ case MSG_EXT_PPR_OPTION_DT_UNITS_QUICK:
+ if(!(p->features & AHC_ULTRA3))
+ {
+ *options = 0;
+ maxsync = MAX(maxsync, AHC_SYNCRATE_ULTRA2);
+ }
+ else
+ {
+ /*
+ * we don't support the Quick Arbitration variants of dual edge
+ * clocking. As it turns out, we want to send back the
+ * same basic option, but without the QA attribute.
+ * We know that we are responding because we would never set
+ * these options ourself, we would only respond to them.
+ */
+ switch(*options)
+ {
+ case MSG_EXT_PPR_OPTION_DT_CRC_QUICK:
+ *options = MSG_EXT_PPR_OPTION_DT_CRC;
+ break;
+ case MSG_EXT_PPR_OPTION_DT_UNITS_QUICK:
+ *options = MSG_EXT_PPR_OPTION_DT_UNITS;
+ break;
+ }
+ }
+ break;
+ default:
+ *options = 0;
+ maxsync = MAX(maxsync, AHC_SYNCRATE_ULTRA2);
+ break;
+ }
syncrate = &aic7xxx_syncrates[maxsync];
while ( (syncrate->rate[0] != NULL) &&
(!(p->features & AHC_ULTRA2) || syncrate->sxfr_ultra2) )
{
- if ( *period <= syncrate->period )
+ if (*period <= syncrate->period)
{
- /*
- * When responding to a target that requests sync, the requested rate
- * may fall between two rates that we can output, but still be a rate
- * that we can receive. Because of this, we want to respond with the
- * same rate that it sent to us even if the persiod we use to send
- * data to it is lower. Only lower the response period if we must.
- */
- if(syncrate == &aic7xxx_syncrates[maxsync])
+ switch(*options)
+ {
+ case MSG_EXT_PPR_OPTION_DT_CRC:
+ case MSG_EXT_PPR_OPTION_DT_UNITS:
+ if(!(syncrate->sxfr_ultra2 & AHC_SYNCRATE_CRC))
+ {
+ done = TRUE;
+ /*
+ * oops, we went too low for the CRC/DualEdge signalling, so
+ * clear the options byte
+ */
+ *options = 0;
+ /*
+ * We'll be sending a reply to this packet to set the options
+ * properly, so unilaterally set the period as well.
+ */
+ *period = syncrate->period;
+ }
+ else
+ {
+ done = TRUE;
+ if(syncrate == &aic7xxx_syncrates[maxsync])
+ {
+ *period = syncrate->period;
+ }
+ }
+ break;
+ default:
+ if(!(syncrate->sxfr_ultra2 & AHC_SYNCRATE_CRC))
+ {
+ done = TRUE;
+ if(syncrate == &aic7xxx_syncrates[maxsync])
+ {
+ *period = syncrate->period;
+ }
+ }
+ break;
+ }
+ if(done)
{
- *period = syncrate->period;
+ break;
}
- break;
}
syncrate++;
}
/*
* Use async transfers for this target
*/
+ *options = 0;
*period = 0;
syncrate = NULL;
}
{
struct aic7xxx_syncrate *syncrate;
- if ((p->features & AHC_ULTRA2) != 0)
+ if (p->features & AHC_ULTRA2)
{
scsirate &= SXFR_ULTRA2;
}
syncrate = &aic7xxx_syncrates[maxsync];
while (syncrate->rate[0] != NULL)
{
- if ((p->features & AHC_ULTRA2) != 0)
+ if (p->features & AHC_ULTRA2)
{
if (syncrate->sxfr_ultra2 == 0)
break;
else if (scsirate == syncrate->sxfr_ultra2)
return (syncrate->period);
+ else if (scsirate == (syncrate->sxfr_ultra2 & ~AHC_SYNCRATE_CRC))
+ return (syncrate->period);
}
else if (scsirate == (syncrate->sxfr & ~ULTRA_SXFR))
{
static void
aic7xxx_set_syncrate(struct aic7xxx_host *p, struct aic7xxx_syncrate *syncrate,
int target, int channel, unsigned int period, unsigned int offset,
- unsigned int type)
+ unsigned char options, unsigned int type)
{
unsigned char tindex;
unsigned short target_mask;
- unsigned char lun;
+ unsigned char lun, old_options;
unsigned int old_period, old_offset;
tindex = target | (channel << 3);
old_period = p->transinfo[tindex].cur_period;
old_offset = p->transinfo[tindex].cur_offset;
+ old_options = p->transinfo[tindex].cur_options;
if (type & AHC_TRANS_CUR)
scsirate &= ~SXFR_ULTRA2;
if (syncrate != NULL)
{
- scsirate |= syncrate->sxfr_ultra2;
+ switch(options)
+ {
+ case MSG_EXT_PPR_OPTION_DT_UNITS:
+ /*
+ * mask off the CRC bit in the xfer settings
+ */
+ scsirate |= (syncrate->sxfr_ultra2 & ~AHC_SYNCRATE_CRC);
+ break;
+ default:
+ scsirate |= syncrate->sxfr_ultra2;
+ break;
+ }
}
if (type & AHC_TRANS_ACTIVE)
{
aic_outb(p, scsirate, TARG_SCSIRATE + tindex);
p->transinfo[tindex].cur_period = period;
p->transinfo[tindex].cur_offset = offset;
+ p->transinfo[tindex].cur_options = options;
if ( !(type & AHC_TRANS_QUITE) &&
(aic7xxx_verbose & VERBOSE_NEGOTIATION) &&
- (p->dev_flags[tindex] & DEVICE_PRINT_SDTR) )
+ (p->dev_flags[tindex] & DEVICE_PRINT_DTR) )
{
if (offset)
{
printk(INFO_LEAD "Using asynchronous transfers.\n",
p->host_no, channel, target, lun);
}
- p->dev_flags[tindex] &= ~DEVICE_PRINT_SDTR;
+ p->dev_flags[tindex] &= ~DEVICE_PRINT_DTR;
}
}
{
p->transinfo[tindex].goal_period = period;
p->transinfo[tindex].goal_offset = offset;
+ p->transinfo[tindex].goal_options = options;
}
if (type & AHC_TRANS_USER)
{
p->transinfo[tindex].user_period = period;
p->transinfo[tindex].user_offset = offset;
+ p->transinfo[tindex].user_options = options;
}
}
{
unsigned char tindex;
unsigned short target_mask;
- unsigned int old_width, new_offset;
+ unsigned int old_width;
tindex = target | (channel << 3);
target_mask = 1 << tindex;
old_width = p->transinfo[tindex].cur_width;
- if (p->features & AHC_ULTRA2)
- new_offset = MAX_OFFSET_ULTRA2;
- else if (width == MSG_EXT_WDTR_BUS_16_BIT)
- new_offset = MAX_OFFSET_16BIT;
- else
- new_offset = MAX_OFFSET_8BIT;
-
if (type & AHC_TRANS_CUR)
{
unsigned char scsirate;
p->transinfo[tindex].cur_width = width;
- if ((aic7xxx_verbose & VERBOSE_NEGOTIATION2) &&
- (p->dev_flags[tindex] & DEVICE_PRINT_WDTR))
+ if ( !(type & AHC_TRANS_QUITE) &&
+ (aic7xxx_verbose & VERBOSE_NEGOTIATION2) &&
+ (p->dev_flags[tindex] & DEVICE_PRINT_DTR) )
{
printk(INFO_LEAD "Using %s transfers\n", p->host_no, channel, target,
lun, (scsirate & WIDEXFER) ? "Wide(16bit)" : "Narrow(8bit)" );
- p->dev_flags[tindex] &= ~DEVICE_PRINT_WDTR;
}
}
if (type & AHC_TRANS_USER)
p->transinfo[tindex].user_width = width;
- /*
- * Having just set the width, the SDTR should come next, and we need a valid
- * offset for the SDTR. So, we make sure we put a valid one in here now as
- * the goal_offset.
- */
if (p->transinfo[tindex].goal_offset)
- p->transinfo[tindex].goal_offset = new_offset;
-
+ {
+ if (p->features & AHC_ULTRA2)
+ {
+ p->transinfo[tindex].goal_offset = MAX_OFFSET_ULTRA2;
+ }
+ else if (width == MSG_EXT_WDTR_BUS_16_BIT)
+ {
+ p->transinfo[tindex].goal_offset = MAX_OFFSET_16BIT;
+ }
+ else
+ {
+ p->transinfo[tindex].goal_offset = MAX_OFFSET_8BIT;
+ }
+ }
}
/*+F*************************************************************************
if (match != 0)
match = ((tag == scb->hscb->tag) || (tag == SCB_LIST_NULL));
- if (aic7xxx_verbose & (VERBOSE_ABORT_PROCESS | VERBOSE_RESET_PROCESS))
- {
- printk(KERN_INFO "(scsi%d:%d:%d:%d:tag%d) %s search criteria"
- " (scsi%d:%d:%d:%d:tag%d)\n", p->host_no, CTL_OF_SCB(scb),
- scb->hscb->tag, (match) ? "matches" : "doesn't match",
- p->host_no, channel, target, lun, tag);
- }
-
return (match);
}
* to the free list.
*-F*************************************************************************/
static unsigned char
-aic7xxx_rem_scb_from_disc_list(struct aic7xxx_host *p, unsigned char scbptr)
+aic7xxx_rem_scb_from_disc_list(struct aic7xxx_host *p, unsigned char scbptr,
+ unsigned char prev)
{
unsigned char next;
- unsigned char prev;
aic_outb(p, scbptr, SCBPTR);
next = aic_inb(p, SCB_NEXT);
- prev = aic_inb(p, SCB_PREV);
aic7xxx_add_curscb_to_free_list(p);
if (prev != SCB_LIST_NULL)
aic_outb(p, next, DISCONNECTED_SCBH);
}
- if (next != SCB_LIST_NULL)
- {
- aic_outb(p, next, SCBPTR);
- aic_outb(p, prev, SCB_PREV);
- }
return next;
}
* Place in the scb array; never is removed
*/
p->scb_data->scb_array[p->scb_data->numscbs++] = scbp;
- scbq_insert_head(&p->scb_data->free_scbs, scbp);
+ scbq_insert_tail(&p->scb_data->free_scbs, scbp);
}
scbp->kmalloc_ptr = scb_ap;
}
Scsi_Cmnd *cmd;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,95)
unsigned int cpu_flags = 0;
+#endif
DRIVER_LOCK
while (p->completeq.head != NULL)
cmd = p->completeq.head;
p->completeq.head = (Scsi_Cmnd *)cmd->host_scribble;
cmd->host_scribble = NULL;
- sti();
cmd->scsi_done(cmd);
- cli();
}
DRIVER_UNLOCK
-#else
- while (p->completeq.head != NULL)
- {
- cmd = p->completeq.head;
- p->completeq.head = (Scsi_Cmnd *)cmd->host_scribble;
- cmd->host_scribble = NULL;
- cmd->scsi_done(cmd);
- }
-#endif
}
/*+F*************************************************************************
}
#define WIDE_INQUIRY_BITS 0x60
#define SYNC_INQUIRY_BITS 0x10
+#define SCSI_VERSION_BITS 0x07
if ( (buffer[7] & WIDE_INQUIRY_BITS) &&
(p->features & AHC_WIDE) )
{
p->needwdtr |= (1<<tindex);
p->needwdtr_copy |= (1<<tindex);
- if ( (p->flags & AHC_SEEPROM_FOUND) &&
- (p->transinfo[tindex].user_width != MSG_EXT_WDTR_BUS_16_BIT) )
- p->transinfo[tindex].goal_width = MSG_EXT_WDTR_BUS_8_BIT;
- else
- p->transinfo[tindex].goal_width = MSG_EXT_WDTR_BUS_16_BIT;
+ p->transinfo[tindex].goal_width = p->transinfo[tindex].user_width;
}
else
{
p->needsdtr |= (1<<tindex);
p->needsdtr_copy |= (1<<tindex);
- if (p->flags & AHC_SEEPROM_FOUND)
+ p->transinfo[tindex].goal_period = p->transinfo[tindex].user_period;
+ p->transinfo[tindex].goal_options = p->transinfo[tindex].user_options;
+ if (p->transinfo[tindex].user_offset)
{
- p->transinfo[tindex].goal_period = p->transinfo[tindex].user_period;
- p->transinfo[tindex].goal_offset = p->transinfo[tindex].user_offset;
- }
- else
- {
- if (p->features & AHC_ULTRA2)
- {
- p->transinfo[tindex].goal_period =
- aic7xxx_syncrates[AHC_SYNCRATE_ULTRA2].period;
- }
- else if (p->features & AHC_ULTRA)
- {
- p->transinfo[tindex].goal_period =
- aic7xxx_syncrates[AHC_SYNCRATE_ULTRA].period;
- }
- else
- {
- p->transinfo[tindex].goal_period =
- aic7xxx_syncrates[AHC_SYNCRATE_FAST].period;
- }
if (p->features & AHC_ULTRA2)
p->transinfo[tindex].goal_offset = MAX_OFFSET_ULTRA2;
else if (p->transinfo[tindex].goal_width == MSG_EXT_WDTR_BUS_16_BIT)
p->needsdtr_copy &= ~(1<<tindex);
p->transinfo[tindex].goal_period = 0;
p->transinfo[tindex].goal_offset = 0;
+ p->transinfo[tindex].goal_options = 0;
+ }
+ if ( (buffer[2] & SCSI_VERSION_BITS) == 3 )
+ {
+ p->dev_flags[tindex] |= DEVICE_SCSI_3;
+ /*
+ * OK, we are a SCSI 3 device and we are in need of negotiation.
+ * Use PPR messages instead of WDTR and SDTR messages.
+ */
+ if ( (p->needsdtr & (1<<tindex)) ||
+ (p->needwdtr & (1<<tindex)) )
+ {
+ p->needppr |= (1<<tindex);
+ p->needppr_copy |= (1<<tindex);
+ }
+ p->needwdtr &= ~(1<<tindex);
+ p->needwdtr_copy &= ~(1<<tindex);
+ p->needsdtr &= ~(1<<tindex);
+ p->needsdtr_copy &= ~(1<<tindex);
+ }
+ /*
+ * Get the INQUIRY checksum. We use this on Ultra 160/m
+ * and older devices both. It allows us to drop speed on any bus type
+ * while at the same time giving us the needed domain validation for
+ * Ultra 160/m
+ *
+ * Note: We only get the checksum and set the SCANNED bit if this is
+ * one of our dtr commands. If we don't do this, then we end up
+ * getting bad checksum results on the mid-level SCSI code's INQUIRY
+ * commands.
+ */
+ if(p->dev_dtr_cmnd[tindex] == cmd) {
+ unsigned int checksum = 0;
+ int *ibuffer;
+ int i=0;
+
+ ibuffer = (int *)buffer;
+ for( i = 0; i < (cmd->request_bufflen >> 2); i++)
+ {
+ checksum += ibuffer[i];
+ }
+ p->dev_checksum[tindex] = checksum;
+ p->dev_flags[tindex] |= DEVICE_SCANNED;
+ p->dev_flags[tindex] |= DEVICE_PRINT_DTR;
}
- p->dev_flags[tindex] |= DEVICE_SCANNED;
- p->dev_flags[tindex] |= DEVICE_PRINT_WDTR | DEVICE_PRINT_SDTR;
#undef WIDE_INQUIRY_BITS
#undef SYNC_INQUIRY_BITS
+#undef SCSI_VERSION_BITS
}
}
- else if ((scb->flags & (SCB_MSGOUT_WDTR | SCB_MSGOUT_SDTR)) != 0)
+ else if ((scb->flags & SCB_MSGOUT_BITS) != 0)
{
unsigned short mask;
int message_error = FALSE;
if (scb->flags & SCB_MSGOUT_WDTR)
{
- p->wdtr_pending &= ~mask;
+ p->dtr_pending &= ~mask;
if (message_error)
{
if ( (aic7xxx_verbose & VERBOSE_NEGOTIATION2) &&
- (p->dev_flags[tindex] & DEVICE_PRINT_WDTR) )
+ (p->dev_flags[tindex] & DEVICE_PRINT_DTR) )
{
printk(INFO_LEAD "Device failed to complete Wide Negotiation "
"processing and\n", p->host_no, CTL_OF_SCB(scb));
"disabling future\n", p->host_no, CTL_OF_SCB(scb));
printk(INFO_LEAD "Wide negotiation to this device.\n", p->host_no,
CTL_OF_SCB(scb));
- p->dev_flags[tindex] &= ~DEVICE_PRINT_WDTR;
}
p->needwdtr &= ~mask;
p->needwdtr_copy &= ~mask;
}
if (scb->flags & SCB_MSGOUT_SDTR)
{
- p->sdtr_pending &= ~mask;
+ p->dtr_pending &= ~mask;
if (message_error)
{
if ( (aic7xxx_verbose & VERBOSE_NEGOTIATION2) &&
- (p->dev_flags[tindex] & DEVICE_PRINT_SDTR) )
+ (p->dev_flags[tindex] & DEVICE_PRINT_DTR) )
{
printk(INFO_LEAD "Device failed to complete Sync Negotiation "
"processing and\n", p->host_no, CTL_OF_SCB(scb));
"disabling future\n", p->host_no, CTL_OF_SCB(scb));
printk(INFO_LEAD "Sync negotiation to this device.\n", p->host_no,
CTL_OF_SCB(scb));
- p->dev_flags[tindex] &= ~DEVICE_PRINT_SDTR;
+ p->dev_flags[tindex] &= ~DEVICE_PRINT_DTR;
}
p->needsdtr &= ~mask;
p->needsdtr_copy &= ~mask;
}
}
+ if (scb->flags & SCB_MSGOUT_PPR)
+ {
+ p->dtr_pending &= ~mask;
+ if(message_error)
+ {
+ if ( (aic7xxx_verbose & VERBOSE_NEGOTIATION2) &&
+ (p->dev_flags[tindex] & DEVICE_PRINT_DTR) )
+ {
+ printk(INFO_LEAD "Device failed to complete Parallel Protocol "
+ "Request processing and\n", p->host_no, CTL_OF_SCB(scb));
+ printk(INFO_LEAD "returned a sense error code for invalid message, "
+ "disabling future\n", p->host_no, CTL_OF_SCB(scb));
+ printk(INFO_LEAD "Parallel Protocol Request negotiation to this "
+ "device.\n", p->host_no, CTL_OF_SCB(scb));
+ }
+ /*
+ * Disable PPR negotiation and revert back to WDTR and SDTR setup
+ */
+ p->needppr &= ~mask;
+ p->needppr_copy &= ~mask;
+ p->needsdtr |= mask;
+ p->needsdtr_copy |= mask;
+ p->needwdtr |= mask;
+ p->needwdtr_copy |= mask;
+ }
+ }
}
queue_depth = p->dev_temp_queue_depth[tindex];
if (queue_depth >= p->dev_active_cmds[tindex])
p->dev_active_cmds[tindex]--;
p->activescbs--;
- /*
- * If this was an untagged I/O, unbusy the target so the sequencer won't
- * mistake things later
- */
- if (aic7xxx_index_busy_target(p, scb->hscb->target_channel_lun, FALSE) ==
- scb->hscb->tag)
- {
- aic7xxx_index_busy_target(p, scb->hscb->target_channel_lun, TRUE);
- }
-
{
int actual;
#endif /* AIC7XXX_PROC_STATS */
}
#ifdef AIC7XXX_PROC_STATS
- x = -10;
+ x = -11;
while(actual)
{
actual >>= 1;
if (aic7xxx_verbose & (VERBOSE_ABORT_PROCESS | VERBOSE_RESET_PROCESS))
printk(INFO_LEAD "Cleaning up status information "
"and delayed_scbs.\n", p->host_no, channel, i, lun);
- p->dev_flags[i] &= ~BUS_DEVICE_RESET_PENDING;
+ p->dev_flags[i] &= ~(BUS_DEVICE_RESET_PENDING | DEVICE_PARITY_ERROR);
if ( tag == SCB_LIST_NULL )
{
- p->dev_flags[i] |= DEVICE_PRINT_WDTR | DEVICE_PRINT_SDTR |
- DEVICE_RESET_DELAY;
+ p->dev_flags[i] |= DEVICE_PRINT_DTR | DEVICE_RESET_DELAY;
p->dev_expires[i] = jiffies + (4 * HZ);
p->dev_timer_active |= (0x01 << i);
p->dev_last_queue_full_count[i] = 0;
if (aic7xxx_verbose & (VERBOSE_ABORT_PROCESS | VERBOSE_RESET_PROCESS))
printk(INFO_LEAD "Cleaning disconnected scbs "
"list.\n", p->host_no, channel, target, lun);
- if (p->features & AHC_PAGESCBS)
+ if (p->flags & AHC_PAGESCBS)
{
unsigned char next, prev, scb_index;
printk(WARN_LEAD "Disconnected List inconsistency; SCB index=%d, "
"numscbs=%d\n", p->host_no, channel, target, lun, scb_index,
p->scb_data->numscbs);
- next = aic7xxx_rem_scb_from_disc_list(p, next);
+ next = aic7xxx_rem_scb_from_disc_list(p, next, prev);
}
else
{
scbp = p->scb_data->scb_array[scb_index];
if (aic7xxx_match_scb(p, scbp, target, channel, lun, tag))
{
- next = aic7xxx_rem_scb_from_disc_list(p, next);
+ next = aic7xxx_rem_scb_from_disc_list(p, next, prev);
if (scbp->flags & SCB_WAITINGQ)
{
p->dev_active_cmds[TARGET_INDEX(scbp->cmd)]++;
* Walk the free list making sure no entries on the free list have
* a valid SCB_TAG value or SCB_CONTROL byte.
*/
- if (p->features & AHC_PAGESCBS)
+ if (p->flags & AHC_PAGESCBS)
{
unsigned char next;
{
aic_outb(p, SCB_LIST_NULL, SCB_TAG);
aic_outb(p, SCB_LIST_NULL, SCB_NEXT);
- aic_outb(p, SCB_LIST_NULL, SCB_PREV);
aic_outb(p, 0, SCB_CONTROL);
aic7xxx_add_curscb_to_free_list(p);
}
if (channel == 1)
{
p->needsdtr |= (p->needsdtr_copy & 0xFF00);
- p->sdtr_pending &= 0x00FF;
+ p->dtr_pending &= 0x00FF;
offset_min = 8;
offset_max = 16;
}
else
{
- if (p->features & AHC_WIDE)
+ if (p->features & AHC_TWIN)
{
- p->needsdtr = p->needsdtr_copy;
- p->needwdtr = p->needwdtr_copy;
- p->sdtr_pending = 0x0;
- p->wdtr_pending = 0x0;
+ /* Channel A */
+ p->needsdtr |= (p->needsdtr_copy & 0x00FF);
+ p->dtr_pending &= 0xFF00;
offset_min = 0;
- offset_max = 16;
+ offset_max = 8;
}
else
{
- /* Channel A */
- p->needsdtr |= (p->needsdtr_copy & 0x00FF);
- p->sdtr_pending &= 0xFF00;
+ p->needppr = p->needppr_copy;
+ p->needsdtr = p->needsdtr_copy;
+ p->needwdtr = p->needwdtr_copy;
+ p->dtr_pending = 0x0;
offset_min = 0;
- offset_max = 8;
+ if (p->features & AHC_WIDE)
+ {
+ offset_max = 16;
+ }
+ else
+ {
+ offset_max = 8;
+ }
}
}
#endif
}
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_construct_ppr
+ *
+ * Description:
+ * Build up a Parallel Protocol Request message for use with SCSI-3
+ * devices.
+ *-F*************************************************************************/
+static void
+aic7xxx_construct_ppr(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
+{
+ int tindex = TARGET_INDEX(scb->cmd);
+
+ p->msg_buf[p->msg_index++] = MSG_EXTENDED;
+ p->msg_buf[p->msg_index++] = MSG_EXT_PPR_LEN;
+ p->msg_buf[p->msg_index++] = MSG_EXT_PPR;
+ p->msg_buf[p->msg_index++] = p->transinfo[tindex].goal_period;
+ p->msg_buf[p->msg_index++] = 0;
+ p->msg_buf[p->msg_index++] = p->transinfo[tindex].goal_offset;
+ p->msg_buf[p->msg_index++] = p->transinfo[tindex].goal_width;
+ p->msg_buf[p->msg_index++] = p->transinfo[tindex].goal_options;
+ p->msg_len += 8;
+}
+
/*+F*************************************************************************
* Function:
* aic7xxx_construct_sdtr
/*
* Go back to async/narrow transfers and renegotiate.
*/
+ p->needppr |= (p->needppr_copy & targ_mask);
p->needsdtr |= (p->needsdtr_copy & targ_mask);
p->needwdtr |= (p->needwdtr_copy & targ_mask);
- p->sdtr_pending &= ~targ_mask;
- p->wdtr_pending &= ~targ_mask;
+ p->dtr_pending &= ~targ_mask;
aic_outb(p, 0, TARG_SCSIRATE + tindex);
if (p->features & AHC_ULTRA2)
aic_outb(p, 0, TARG_OFFSET + tindex);
if (aic7xxx_verbose & VERBOSE_RESET_PROCESS)
printk(INFO_LEAD "Bus Device Reset delivered.\n", p->host_no, channel,
target, -1);
- aic7xxx_run_done_queue(p, /*complete*/ FALSE);
+ aic7xxx_run_done_queue(p, /*complete*/ TRUE);
}
/*+F*************************************************************************
p->host_no, channel, target, lun,
aic_inb(p, SAVED_TCL), aic_inb(p, ARG_1),
(aic_inb(p, SEQADDR1) << 8) | aic_inb(p, SEQADDR0));
+ if (aic7xxx_panic_on_abort)
+ aic7xxx_panic_abort(p, NULL);
}
break;
lun, aic_inb(p, LASTPHASE), aic_inb(p, SAVED_TCL));
aic7xxx_reset_channel(p, channel, /*initiate reset*/ TRUE);
- aic7xxx_run_done_queue(p, FALSE);
+ aic7xxx_run_done_queue(p, TRUE);
}
break;
aic7xxx_reset_device(p, target, channel, lun, i);
reset++;
}
- aic7xxx_run_done_queue(p, FALSE);
+ aic7xxx_run_done_queue(p, TRUE);
}
}
aic7xxx_verbose = old_verbose;
aic_outb(p, aic_inb(p, SCSISIGI) | ATNO, SCSISIGO);
}
}
+ else if (scb->flags & SCB_MSGOUT_PPR)
+ {
+ /*
+ * As per the draft specs, any device capable of supporting any of
+ * the option values other than 0 are not allowed to reject the
+ * PPR message. Instead, they must negotiate out what they do
+ * support instead of rejecting our offering.
+ */
+ p->needppr &= ~target_mask;
+ p->needppr_copy &= ~target_mask;
+ aic7xxx_set_width(p, target, channel, lun, MSG_EXT_WDTR_BUS_8_BIT,
+ (AHC_TRANS_ACTIVE|AHC_TRANS_CUR|AHC_TRANS_QUITE));
+ aic7xxx_set_syncrate(p, NULL, target, channel, 0, 0, 0,
+ AHC_TRANS_ACTIVE|AHC_TRANS_CUR|AHC_TRANS_QUITE);
+ p->transinfo[tindex].goal_options = 0;
+ p->dtr_pending &= ~target_mask;
+ scb->flags &= ~SCB_MSGOUT_BITS;
+ if(aic7xxx_verbose & VERBOSE_NEGOTIATION2)
+ {
+ printk(INFO_LEAD "Device is rejecting PPR messages, falling "
+ "back.\n", p->host_no, channel, target, lun);
+ }
+ if ( p->transinfo[tindex].goal_width )
+ {
+ p->needwdtr |= target_mask;
+ p->needwdtr_copy |= target_mask;
+ p->dtr_pending |= target_mask;
+ scb->flags |= SCB_MSGOUT_WDTR;
+ }
+ if ( p->transinfo[tindex].goal_offset )
+ {
+ p->needsdtr |= target_mask;
+ p->needsdtr_copy |= target_mask;
+ if( !(p->dtr_pending & target_mask) )
+ {
+ p->dtr_pending |= target_mask;
+ scb->flags |= SCB_MSGOUT_SDTR;
+ }
+ }
+ if ( p->dtr_pending & target_mask )
+ {
+ aic_outb(p, HOST_MSG, MSG_OUT);
+ aic_outb(p, aic_inb(p, SCSISIGI) | ATNO, SCSISIGO);
+ }
+ }
else if (scb->flags & SCB_MSGOUT_WDTR)
{
/*
*/
p->needwdtr &= ~target_mask;
p->needwdtr_copy &= ~target_mask;
- p->wdtr_pending &= ~target_mask;
+ p->dtr_pending &= ~target_mask;
scb->flags &= ~SCB_MSGOUT_BITS;
aic7xxx_set_width(p, target, channel, lun, MSG_EXT_WDTR_BUS_8_BIT,
(AHC_TRANS_ACTIVE|AHC_TRANS_GOAL|AHC_TRANS_CUR));
- aic7xxx_set_syncrate(p, NULL, target, channel, 0, 0,
+ aic7xxx_set_syncrate(p, NULL, target, channel, 0, 0, 0,
AHC_TRANS_ACTIVE|AHC_TRANS_CUR|AHC_TRANS_QUITE);
- if ( (p->needsdtr_copy & target_mask) &&
- !(p->sdtr_pending & target_mask) )
+ if(aic7xxx_verbose & VERBOSE_NEGOTIATION2)
{
- p->sdtr_pending |= target_mask;
- scb->flags |= SCB_MSGOUT_SDTR;
- aic_outb(p, HOST_MSG, MSG_OUT);
- aic_outb(p, aic_inb(p, SCSISIGO) | ATNO, SCSISIGO);
+ printk(INFO_LEAD "Device is rejecting WDTR messages, using "
+ "narrow transfers.\n", p->host_no, channel, target, lun);
}
+ p->needsdtr |= (p->needsdtr_copy & target_mask);
}
else if (scb->flags & SCB_MSGOUT_SDTR)
{
*/
p->needsdtr &= ~target_mask;
p->needsdtr_copy &= ~target_mask;
- p->sdtr_pending &= ~target_mask;
+ p->dtr_pending &= ~target_mask;
scb->flags &= ~SCB_MSGOUT_SDTR;
- aic7xxx_set_syncrate(p, NULL, target, channel, 0, 0,
+ aic7xxx_set_syncrate(p, NULL, target, channel, 0, 0, 0,
(AHC_TRANS_CUR|AHC_TRANS_ACTIVE|AHC_TRANS_GOAL));
+ if(aic7xxx_verbose & VERBOSE_NEGOTIATION2)
+ {
+ printk(INFO_LEAD "Device is rejecting SDTR messages, using "
+ "async transfers.\n", p->host_no, channel, target, lun);
+ }
}
else if (aic7xxx_verbose & VERBOSE_SEQINT)
{
* However, if this SCB already was attempting to negotiate,
* then we assume this isn't the problem and skip this part.
*/
-#ifdef AIC7XXX_FAKE_NEGOTIATION_CMDS
if ( (scb->cmd->cmnd[0] != TEST_UNIT_READY) &&
(p->dev_flags[tindex] & DEVICE_SCANNED) &&
- !(p->wdtr_pending & target_mask) &&
- !(p->sdtr_pending & target_mask) )
+ !(p->dtr_pending & target_mask) )
{
+ p->needppr |= (p->needppr_copy & target_mask);
p->needwdtr |= (p->needwdtr_copy & target_mask);
p->needsdtr |= (p->needsdtr_copy & target_mask);
}
- else if ( (scb->cmd == p->dev_wdtr_cmnd[tindex]) ||
- (scb->cmd == p->dev_sdtr_cmnd[tindex]) )
+ else if ( scb->cmd == p->dev_dtr_cmnd[tindex] )
{
/*
* This is already a negotiation command, so we must have
- * already done either WDTR or SDTR (or maybe both). So
- * we simply check sdtr_pending and needsdtr to see if we
- * should throw out SDTR on this command.
- *
- * Note: Don't check the needsdtr_copy here, instead just
- * check to see if WDTR wiped out our SDTR and set needsdtr.
- * Even if WDTR did wipe out SDTR and set needsdtr, if
- * parse_msg() then turned around and started our SDTR
- * in back to back fasion, then conclusion of that should
- * have negated any needsdtr setting. That's why we only
- * check needsdtr and sdtr_pending.
+ * already done PPR, WDTR or SDTR. Since our negotiation
+ * could have gotten rejected, we don't really know the
+ * full state of things. Don't do anything here, and allow
+ * the negotiation_complete() handler to do the right
+ * thing.
*/
- scb->flags &= ~SCB_MSGOUT_BITS;
- if ( (scb->cmd == p->dev_wdtr_cmnd[tindex]) &&
- !(p->sdtr_pending & target_mask) &&
- (p->needsdtr & target_mask) )
- {
- p->sdtr_pending |= target_mask;
- hscb->control |= MK_MESSAGE;
- scb->flags |= SCB_MSGOUT_SDTR;
- }
/*
* This is the important part though. We are getting sense
hscb->data_pointer = scb->sg_list[0].address;
}
}
-#else
- if ( (scb->cmd->cmnd[0] != TEST_UNIT_READY) &&
- !(scb->flags & SCB_MSGOUT_BITS) &&
- (scb->cmd->lun == 0) &&
- (p->dev_flags[TARGET_INDEX(scb->cmd)] & DEVICE_SCANNED) )
- {
- if ( (p->needwdtr_copy & target_mask) &&
- !(p->wdtr_pending & target_mask) &&
- !(p->sdtr_pending & target_mask) )
- {
- p->needwdtr |= target_mask;
- p->wdtr_pending |= target_mask;
- hscb->control |= MK_MESSAGE;
- scb->flags |= SCB_MSGOUT_WDTR;
- }
- if ( p->needsdtr_copy & target_mask )
- {
- p->needsdtr |= target_mask;
- if ( !(p->wdtr_pending & target_mask) &&
- !(p->sdtr_pending & target_mask) )
- {
- p->sdtr_pending |= target_mask;
- hscb->control |= MK_MESSAGE;
- scb->flags |= SCB_MSGOUT_SDTR;
- }
- }
- }
- else
- scb->flags &= ~SCB_MSGOUT_BITS;
-#endif /* AIC7XXX_FAKE_NEGOTIATION_CMDS */
scb->flags |= SCB_SENSE;
/*
* Ensure the target is busy since this will be an
* an untagged request.
*/
#ifdef AIC7XXX_VERBOSE_DEBUGGING
- if (aic7xxx_verbose > 0xffff)
+ if (aic7xxx_verbose & VERBOSE_NEGOTIATION2)
{
if (scb->flags & SCB_MSGOUT_BITS)
printk(INFO_LEAD "Requesting SENSE with %s\n", p->host_no,
}
}
#ifdef AIC7XXX_VERBOSE_DEBUGGING
- if (aic7xxx_verbose & VERBOSE_MINOR_ERROR)
+ if( (aic7xxx_verbose & VERBOSE_MINOR_ERROR) ||
+ (aic7xxx_verbose > 0xffff) )
{
if (queue_flag)
printk(INFO_LEAD "Queue full received; queue depth %d, "
#endif
if (queue_flag)
{
- p->dev_temp_queue_depth[tindex] =
- p->dev_active_cmds[tindex];
if ( p->dev_last_queue_full[tindex] !=
p->dev_active_cmds[tindex] )
{
p->dev_active_cmds[tindex];
p->dev_last_queue_full[tindex] = 0;
p->dev_last_queue_full_count[tindex] = 0;
+ p->dev_temp_queue_depth[tindex] =
+ p->dev_active_cmds[tindex];
}
- else
+ else if (p->dev_active_cmds[tindex] == 0)
{
- p->dev_flags[tindex] |= DEVICE_WAS_BUSY;
- }
+ if (aic7xxx_verbose & VERBOSE_NEGOTIATION)
+ {
+ printk(INFO_LEAD "QUEUE_FULL status received with 0 "
+ "commands active.\n", p->host_no, CTL_OF_SCB(scb));
+ printk(INFO_LEAD "Tagged Command Queueing disabled\n",
+ p->host_no, CTL_OF_SCB(scb));
+ }
+ p->dev_max_queue_depth[tindex] = 1;
+ p->dev_temp_queue_depth[tindex] = 1;
+ scb->tag_action = 0;
+ scb->hscb->control &= ~(MSG_ORDERED_Q_TAG|MSG_SIMPLE_Q_TAG);
+ }
+ else
+ {
+ p->dev_flags[tindex] |= DEVICE_WAS_BUSY;
+ p->dev_temp_queue_depth[tindex] =
+ p->dev_active_cmds[tindex];
+ }
}
break;
}
*/
if ( !(scb->flags & SCB_DEVICE_RESET) &&
- (aic_inb(p, MSG_OUT) == MSG_IDENTIFYFLAG) &&
+ (msg_out == MSG_IDENTIFYFLAG) &&
(scb->hscb->control & TAG_ENB) )
{
p->msg_buf[p->msg_index++] = scb->tag_action;
printk(INFO_LEAD "Abort message mailed.\n", p->host_no,
CTL_OF_SCB(scb));
}
+ else if (scb->flags & SCB_MSGOUT_PPR)
+ {
+ unsigned int max_sync, period;
+ unsigned char options = p->transinfo[tindex].goal_options;
+
+ if (p->features & AHC_ULTRA2)
+ {
+ if ( (aic_inb(p, SBLKCTL) & ENAB40) &&
+ !(aic_inb(p, SSTAT2) & EXP_ACTIVE) )
+ {
+ if( (p->features & AHC_ULTRA3) &&
+ (p->dev_flags[tindex] & DEVICE_SCSI_3) &&
+ (p->transinfo[tindex].goal_width ==
+ MSG_EXT_WDTR_BUS_16_BIT) &&
+ (options != 0) )
+ {
+ max_sync = AHC_SYNCRATE_ULTRA3;
+ }
+ else
+ {
+ max_sync = AHC_SYNCRATE_ULTRA2;
+ }
+ }
+ else
+ {
+ max_sync = AHC_SYNCRATE_ULTRA;
+ }
+ }
+ else if (p->features & AHC_ULTRA)
+ {
+ max_sync = AHC_SYNCRATE_ULTRA;
+ }
+ else
+ {
+ max_sync = AHC_SYNCRATE_FAST;
+ }
+ period = p->transinfo[tindex].goal_period;
+ aic7xxx_find_syncrate(p, &period, max_sync, &options);
+ p->transinfo[tindex].goal_period = period;
+ p->transinfo[tindex].goal_options = options;
+ if (aic7xxx_verbose & VERBOSE_NEGOTIATION2)
+ {
+ printk(INFO_LEAD "Sending PPR (%d/%d/%d/%d) message.\n",
+ p->host_no, CTL_OF_SCB(scb), period,
+ p->transinfo[tindex].goal_offset,
+ p->transinfo[tindex].goal_width, options);
+ }
+ aic7xxx_construct_ppr(p, scb);
+ }
else if (scb->flags & SCB_MSGOUT_WDTR)
{
-#ifdef AIC7XXX_VERBOSE_DEBUGGING
- if (aic7xxx_verbose > 0xffff)
+ if (aic7xxx_verbose & VERBOSE_NEGOTIATION2)
+ {
printk(INFO_LEAD "Sending WDTR message.\n", p->host_no,
CTL_OF_SCB(scb));
-#endif
- aic7xxx_construct_wdtr(p,
- p->transinfo[TARGET_INDEX(scb->cmd)].goal_width);
+ }
+ aic7xxx_construct_wdtr(p, p->transinfo[tindex].goal_width);
}
else if (scb->flags & SCB_MSGOUT_SDTR)
{
unsigned int max_sync, period;
- /*
- * We need to set an accurate goal_offset instead of
- * the ridiculously high one we default to. We should
- * now know if we are wide. Plus, the WDTR code will
- * set our goal_offset for us as well.
- */
- if (p->transinfo[tindex].goal_offset)
- {
- if (p->features & AHC_ULTRA2)
- p->transinfo[tindex].goal_offset = MAX_OFFSET_ULTRA2;
- else if (p->transinfo[tindex].cur_width == MSG_EXT_WDTR_BUS_16_BIT)
- p->transinfo[tindex].goal_offset = MAX_OFFSET_16BIT;
- else
- p->transinfo[tindex].goal_offset = MAX_OFFSET_8BIT;
- }
+ unsigned char options = 0;
/*
* Now that the device is selected, use the bits in SBLKCTL and
* SSTAT2 to determine the max sync rate for this device.
max_sync = AHC_SYNCRATE_FAST;
}
period = p->transinfo[tindex].goal_period;
- aic7xxx_find_syncrate(p, &period, max_sync);
-#ifdef AIC7XXX_VERBOSE_DEBUGGING
- if (aic7xxx_verbose > 0xffff)
+ aic7xxx_find_syncrate(p, &period, max_sync, &options);
+ if (aic7xxx_verbose & VERBOSE_NEGOTIATION2)
+ {
printk(INFO_LEAD "Sending SDTR %d/%d message.\n", p->host_no,
CTL_OF_SCB(scb),
p->transinfo[tindex].goal_period,
p->transinfo[tindex].goal_offset);
-#endif
+ }
aic7xxx_construct_sdtr(p, period,
p->transinfo[tindex].goal_offset);
}
#if AIC7XXX_NOT_YET
case TRACEPOINT:
{
- printk(INFO_LEAD "Tracepoint #1 reached.\n", p->host_no, channel,
- target, lun);
+ printk(INFO_LEAD "Tracepoint #1 reached.\n", p->host_no,
+ channel, target, lun);
}
break;
case TRACEPOINT2:
{
- printk(INFO_LEAD "Tracepoint #2 reached.\n", p->host_no, channel,
- target, lun);
+ printk(INFO_LEAD "Tracepoint #2 reached.\n", p->host_no,
+ channel, target, lun);
}
break;
case MSG_EXT_SDTR:
{
unsigned int period, offset;
- unsigned char maxsync, saved_offset;
+ unsigned char maxsync, saved_offset, options;
struct aic7xxx_syncrate *syncrate;
if (p->msg_buf[1] != MSG_EXT_SDTR_LEN)
period = p->msg_buf[3];
saved_offset = offset = p->msg_buf[4];
+ options = 0;
+ /*
+ * Even if we are an Ultra3 card, don't allow Ultra3 sync rates when
+ * using the SDTR messages. We need the PPR messages to enable the
+ * higher speeds that include things like Dual Edge clocking.
+ */
if (p->features & AHC_ULTRA2)
{
if ( (aic_inb(p, SBLKCTL) & ENAB40) &&
if ( (scb->flags & (SCB_MSGOUT_SENT|SCB_MSGOUT_SDTR)) !=
(SCB_MSGOUT_SENT|SCB_MSGOUT_SDTR) )
{
- if (!(p->dev_flags[tindex] & DEVICE_SCANNED))
+ if (!(p->dev_flags[tindex] & DEVICE_SCANNED) &&
+ !(p->needsdtr_copy & target_mask) &&
+ (p->transinfo[tindex].user_offset) )
{
/*
* Not only is the device starting this up, but it also hasn't
*/
p->transinfo[tindex].goal_period =
p->transinfo[tindex].user_period;
- p->transinfo[tindex].goal_offset =
- p->transinfo[tindex].user_offset;
+ if(p->features & AHC_ULTRA2)
+ {
+ p->transinfo[tindex].goal_offset = MAX_OFFSET_ULTRA2;
+ }
+ else if (p->transinfo[tindex].cur_width)
+ {
+ p->transinfo[tindex].goal_offset = MAX_OFFSET_16BIT;
+ }
+ else
+ {
+ p->transinfo[tindex].goal_offset = MAX_OFFSET_8BIT;
+ }
p->needsdtr_copy |= target_mask;
}
+ if (aic7xxx_verbose & VERBOSE_NEGOTIATION2)
+ {
+ printk(INFO_LEAD "Received pre-emptive SDTR message from "
+ "target.\n", p->host_no, CTL_OF_SCB(scb));
+ }
if ( !p->transinfo[tindex].goal_offset )
period = 255;
if ( p->transinfo[tindex].goal_period > period )
period = p->transinfo[tindex].goal_period;
}
- syncrate = aic7xxx_find_syncrate(p, &period, maxsync);
+ syncrate = aic7xxx_find_syncrate(p, &period, maxsync, &options);
aic7xxx_validate_offset(p, syncrate, &offset,
target_scsirate & WIDEXFER);
aic7xxx_set_syncrate(p, syncrate, target, channel, period,
- offset, AHC_TRANS_ACTIVE|AHC_TRANS_CUR);
+ offset, options, AHC_TRANS_ACTIVE|AHC_TRANS_CUR);
/*
- * Did we drop to async? If so, are we sending a reply? If we are,
+ * Did we drop to async? Or are we sending a reply? If we are,
* then we have to make sure that the reply value reflects the proper
* settings so we need to set the goal values according to what
* we need to send.
*/
- if ( (offset == 0) || (offset != saved_offset) ||
+ if ( (offset != saved_offset) ||
((scb->flags & (SCB_MSGOUT_SENT|SCB_MSGOUT_SDTR)) !=
(SCB_MSGOUT_SENT|SCB_MSGOUT_SDTR) ) )
{
- aic7xxx_set_syncrate(p, syncrate, target, channel, period,
- offset, AHC_TRANS_GOAL|AHC_TRANS_QUITE);
- if ( offset == 0 )
- {
- p->needsdtr_copy &= ~target_mask;
- }
+ aic7xxx_set_syncrate(p, syncrate, target, channel, period, offset,
+ options, AHC_TRANS_GOAL|AHC_TRANS_QUITE);
}
/*
* go async, then send an SDTR back to the target
*/
p->needsdtr &= ~target_mask;
- p->sdtr_pending &= ~target_mask;
- if ( ((scb->flags & (SCB_MSGOUT_SENT|SCB_MSGOUT_SDTR)) ==
- (SCB_MSGOUT_SENT|SCB_MSGOUT_SDTR)) &&
- (offset == saved_offset) )
- {
- scb->flags &= ~SCB_MSGOUT_BITS;
- }
- else
+ p->dtr_pending &= ~target_mask;
+ if ( ((scb->flags & (SCB_MSGOUT_SENT|SCB_MSGOUT_SDTR)) !=
+ (SCB_MSGOUT_SENT|SCB_MSGOUT_SDTR)) ||
+ (offset != saved_offset) )
{
+ reply = TRUE;
+ p->dtr_pending |= target_mask;
scb->flags &= ~SCB_MSGOUT_BITS;
scb->flags |= SCB_MSGOUT_SDTR;
aic_outb(p, HOST_MSG, MSG_OUT);
{
reject = TRUE;
if ( (aic7xxx_verbose & VERBOSE_NEGOTIATION2) &&
- ((p->dev_flags[tindex] & DEVICE_PRINT_WDTR) ||
+ ((p->dev_flags[tindex] & DEVICE_PRINT_DTR) ||
(aic7xxx_verbose > 0xffff)) )
{
printk(INFO_LEAD "Requesting %d bit transfers, rejecting.\n",
p->host_no, CTL_OF_SCB(scb), 8 * (0x01 << bus_width));
- p->dev_flags[tindex] &= ~DEVICE_PRINT_WDTR;
}
} /* We fall through on purpose */
case MSG_EXT_WDTR_BUS_8_BIT:
break;
}
}
- scb->flags &= ~SCB_MSGOUT_BITS;
- p->wdtr_pending &= ~target_mask;
+ p->dtr_pending &= ~target_mask;
p->needwdtr &= ~target_mask;
}
else
{
- scb->flags &= ~SCB_MSGOUT_BITS;
- scb->flags |= SCB_MSGOUT_WDTR;
- reply = TRUE;
if ( !(p->dev_flags[tindex] & DEVICE_SCANNED) )
{
/*
*/
p->transinfo[tindex].goal_period =
p->transinfo[tindex].user_period;
- p->transinfo[tindex].goal_offset =
- p->transinfo[tindex].user_offset;
+ if(p->transinfo[tindex].user_offset)
+ {
+ if(p->features & AHC_ULTRA2)
+ {
+ p->transinfo[tindex].goal_offset = MAX_OFFSET_ULTRA2;
+ }
+ else if( p->transinfo[tindex].user_width &&
+ (bus_width == MSG_EXT_WDTR_BUS_16_BIT) &&
+ p->features & AHC_WIDE )
+ {
+ p->transinfo[tindex].goal_offset = MAX_OFFSET_16BIT;
+ }
+ else
+ {
+ p->transinfo[tindex].goal_offset = MAX_OFFSET_8BIT;
+ }
+ }
p->transinfo[tindex].goal_width =
p->transinfo[tindex].user_width;
p->needwdtr_copy |= target_mask;
p->needsdtr_copy |= target_mask;
}
+ if (aic7xxx_verbose & VERBOSE_NEGOTIATION2)
+ {
+ printk(INFO_LEAD "Received pre-emptive WDTR message from "
+ "target.\n", p->host_no, CTL_OF_SCB(scb));
+ }
switch(bus_width)
{
default:
break;
}
}
+ reply = TRUE;
+ scb->flags &= ~SCB_MSGOUT_BITS;
+ scb->flags |= SCB_MSGOUT_WDTR;
p->needwdtr &= ~target_mask;
- p->wdtr_pending &= ~target_mask;
+ p->dtr_pending |= target_mask;
aic_outb(p, HOST_MSG, MSG_OUT);
aic_outb(p, aic_inb(p, SCSISIGO) | ATNO, SCSISIGO);
}
* supports SDTR at all. Therefore, we check needsdtr_copy instead
* of needstr.
*/
- aic7xxx_set_syncrate(p, NULL, target, channel, 0, 0,
+ aic7xxx_set_syncrate(p, NULL, target, channel, 0, 0, 0,
AHC_TRANS_ACTIVE|AHC_TRANS_CUR|AHC_TRANS_QUITE);
- if ( (p->needsdtr_copy & target_mask) &&
- !(p->sdtr_pending & target_mask))
+ p->needsdtr |= (p->needsdtr_copy & target_mask);
+ done = TRUE;
+ break;
+ }
+ case MSG_EXT_PPR:
+ {
+ unsigned char bus_width, trans_options, new_trans_options;
+ unsigned int period, offset;
+ unsigned char maxsync, saved_offset;
+ struct aic7xxx_syncrate *syncrate;
+
+ if (p->msg_buf[1] != MSG_EXT_PPR_LEN)
+ {
+ reject = TRUE;
+ break;
+ }
+
+ /*
+ * If we aren't on one of the new Ultra3 cards, then reject any PPR
+ * message since we can't support any option field other than 0
+ */
+ if( !(p->features & AHC_ULTRA3) )
+ {
+ reject = TRUE;
+ break;
+ }
+
+ if (p->msg_len < (MSG_EXT_PPR_LEN + 2))
+ {
+ break;
+ }
+
+ period = p->msg_buf[3];
+ offset = saved_offset = p->msg_buf[5];
+ bus_width = p->msg_buf[6];
+ trans_options = new_trans_options = p->msg_buf[7] & 0xf;
+
+ if(aic7xxx_verbose & VERBOSE_NEGOTIATION2)
+ {
+ printk(INFO_LEAD "Parsing PPR message (%d/%d/%d/%d)\n",
+ p->host_no, CTL_OF_SCB(scb), period, offset, bus_width,
+ trans_options);
+ }
+
+ if ( (aic_inb(p, SBLKCTL) & ENAB40) &&
+ !(aic_inb(p, SSTAT2) & EXP_ACTIVE) )
+ {
+ if(p->features & AHC_ULTRA3)
+ {
+ maxsync = AHC_SYNCRATE_ULTRA3;
+ }
+ else
+ {
+ maxsync = AHC_SYNCRATE_ULTRA2;
+ }
+ }
+ else
{
- p->needsdtr |= target_mask;
- if ( !reject && !reply )
+ maxsync = AHC_SYNCRATE_ULTRA;
+ }
+ /*
+ * We might have a device that is starting negotiation with us
+ * before we can start up negotiation with it....be prepared to
+ * have a device ask for a higher speed than we want to give it
+ * in that case
+ */
+ if ( (scb->flags & (SCB_MSGOUT_SENT|SCB_MSGOUT_PPR)) !=
+ (SCB_MSGOUT_SENT|SCB_MSGOUT_PPR) )
+ {
+ reply = TRUE;
+ scb->flags &= ~SCB_MSGOUT_BITS;
+ scb->flags |= SCB_MSGOUT_PPR;
+ if (!(p->dev_flags[tindex] & DEVICE_SCANNED))
{
- scb->flags &= ~SCB_MSGOUT_WDTR;
- if (p->transinfo[tindex].goal_period)
+ /*
+ * Not only is the device starting this up, but it also hasn't
+ * been scanned yet, so this would likely be our TUR or our
+ * INQUIRY command at scan time, so we need to use the
+ * settings from the SEEPROM if they existed. Of course, even
+ * if we didn't find a SEEPROM, we stuffed default values into
+ * the user settings anyway, so use those in all cases.
+ */
+ p->transinfo[tindex].goal_period =
+ p->transinfo[tindex].user_period;
+ if(p->transinfo[tindex].user_offset)
{
- p->sdtr_pending |= target_mask;
- scb->flags |= SCB_MSGOUT_SDTR;
- aic_outb(p, HOST_MSG, MSG_OUT);
- aic_outb(p, aic_inb(p, SCSISIGO) | ATNO, SCSISIGO);
+ if(p->features & AHC_ULTRA2)
+ {
+ p->transinfo[tindex].goal_offset = MAX_OFFSET_ULTRA2;
+ }
+ else if( p->transinfo[tindex].user_width &&
+ (bus_width == MSG_EXT_WDTR_BUS_16_BIT) &&
+ p->features & AHC_WIDE )
+ {
+ p->transinfo[tindex].goal_offset = MAX_OFFSET_16BIT;
+ }
+ else
+ {
+ p->transinfo[tindex].goal_offset = MAX_OFFSET_8BIT;
+ }
+ }
+ p->transinfo[tindex].goal_width =
+ p->transinfo[tindex].user_width;
+ p->transinfo[tindex].goal_options =
+ p->transinfo[tindex].user_options;
+ p->needppr_copy |= target_mask;
+ }
+ if (aic7xxx_verbose & VERBOSE_NEGOTIATION2)
+ {
+ printk(INFO_LEAD "Received pre-emptive PPR message from "
+ "target.\n", p->host_no, CTL_OF_SCB(scb));
+ }
+ if ( !p->transinfo[tindex].goal_offset )
+ period = 255;
+ if ( p->transinfo[tindex].goal_period > period )
+ period = p->transinfo[tindex].goal_period;
+ if ( p->transinfo[tindex].goal_options == 0 )
+ new_trans_options = 0;
+ switch(bus_width)
+ {
+ default:
+ {
+ if ( (p->features & AHC_WIDE) &&
+ (p->transinfo[tindex].goal_width ==
+ MSG_EXT_WDTR_BUS_16_BIT) )
+ {
+ bus_width = MSG_EXT_WDTR_BUS_16_BIT;
+ break;
+ }
+ } /* Fall through if we aren't a wide card */
+ case MSG_EXT_WDTR_BUS_8_BIT:
+ {
+ p->needwdtr_copy &= ~target_mask;
+ bus_width = MSG_EXT_WDTR_BUS_8_BIT;
+ aic7xxx_set_width(p, target, channel, lun, bus_width,
+ AHC_TRANS_GOAL|AHC_TRANS_QUITE);
+ break;
}
}
}
+ else
+ {
+ switch(bus_width)
+ {
+ default:
+ {
+ reply = TRUE;
+ if ( (aic7xxx_verbose & VERBOSE_NEGOTIATION2) &&
+ ((p->dev_flags[tindex] & DEVICE_PRINT_DTR) ||
+ (aic7xxx_verbose > 0xffff)) )
+ {
+ printk(INFO_LEAD "Requesting %d bit transfers, rejecting.\n",
+ p->host_no, CTL_OF_SCB(scb), 8 * (0x01 << bus_width));
+ }
+ } /* We fall through on purpose */
+ case MSG_EXT_WDTR_BUS_8_BIT:
+ {
+ /*
+ * According to the spec, if we aren't wide, we also can't be
+ * Dual Edge so clear the options byte
+ */
+ new_trans_options = 0;
+ bus_width = MSG_EXT_WDTR_BUS_8_BIT;
+ break;
+ }
+ case MSG_EXT_WDTR_BUS_16_BIT:
+ {
+ break;
+ }
+ }
+ }
+
+ aic7xxx_set_width(p, target, channel, lun, bus_width,
+ AHC_TRANS_ACTIVE|AHC_TRANS_CUR);
+ syncrate = aic7xxx_find_syncrate(p, &period, maxsync,
+ &new_trans_options);
+ aic7xxx_validate_offset(p, syncrate, &offset, bus_width);
+ aic7xxx_set_syncrate(p, syncrate, target, channel, period,
+ offset, new_trans_options,
+ AHC_TRANS_ACTIVE|AHC_TRANS_CUR);
+
+ if( (offset != saved_offset) ||
+ (trans_options != new_trans_options) ||
+ ((scb->flags & (SCB_MSGOUT_SENT|SCB_MSGOUT_PPR)) !=
+ (SCB_MSGOUT_SENT|SCB_MSGOUT_PPR)) )
+ {
+ aic7xxx_set_width(p, target, channel, lun, bus_width,
+ AHC_TRANS_GOAL|AHC_TRANS_QUITE);
+ aic7xxx_set_syncrate(p, syncrate, target, channel, period,
+ offset, new_trans_options,
+ AHC_TRANS_GOAL|AHC_TRANS_QUITE);
+ reply = TRUE;
+ }
+ p->dtr_pending &= ~target_mask;
+ p->needppr &= ~target_mask;
+ if(reply)
+ {
+ p->dtr_pending |= target_mask;
+ scb->flags &= ~SCB_MSGOUT_BITS;
+ scb->flags |= SCB_MSGOUT_PPR;
+ aic_outb(p, HOST_MSG, MSG_OUT);
+ aic_outb(p, aic_inb(p, SCSISIGO) | ATNO, SCSISIGO);
+ }
done = TRUE;
break;
}
} /* end of switch(p->msg_type) */
} /* end of if (!reject && (p->msg_len > 2)) */
- if (reject)
+ if (!reply && reject)
{
aic_outb(p, MSG_MESSAGE_REJECT, MSG_OUT);
aic_outb(p, aic_inb(p, SCSISIGO) | ATNO, SCSISIGO);
if (aic7xxx_verbose & VERBOSE_RESET)
printk(WARN_LEAD "Someone else reset the channel!!\n",
p->host_no, channel, -1, -1);
+ if (aic7xxx_panic_on_abort)
+ aic7xxx_panic_abort(p, NULL);
/*
* Go through and abort all commands for the channel, but do not
* reset the channel again.
*/
aic7xxx_reset_channel(p, channel, /* Initiate Reset */ FALSE);
- aic7xxx_run_done_queue(p, FALSE);
+ aic7xxx_run_done_queue(p, TRUE);
scb = NULL;
}
else if ( ((status & BUSFREE) != 0) && ((status & SELTO) == 0) )
CTL_OF_SCB(scb), scb->hscb->tag);
aic7xxx_reset_device(p, target, channel, ALL_LUNS,
(message == MSG_ABORT) ? SCB_LIST_NULL : scb->hscb->tag );
- aic7xxx_run_done_queue(p, FALSE);
+ aic7xxx_run_done_queue(p, TRUE);
scb = NULL;
printerror = 0;
}
printerror = 0;
}
}
+ if ( (scb != NULL) &&
+ (scb->cmd == p->dev_dtr_cmnd[TARGET_INDEX(scb->cmd)]) )
+ {
+ /*
+ * This might be a SCSI-3 device that is dropping the bus due to
+ * errors and signalling that we should reduce the transfer speed.
+ * All we have to do is complete this command (since it's a negotiation
+ * command already) and the checksum routine should flag an error and
+ * reduce the speed setting and renegotiate. We call the reset routine
+ * just to clean out the hardware from this scb.
+ */
+ printerror = 0;
+ aic7xxx_reset_device(p, target, channel, ALL_LUNS, scb->hscb->tag);
+ aic7xxx_run_done_queue(p, TRUE);
+ scb = NULL;
+ }
if (printerror != 0)
{
if (scb != NULL)
tag = SCB_LIST_NULL;
}
aic7xxx_reset_device(p, target, channel, ALL_LUNS, tag);
- aic7xxx_run_done_queue(p, FALSE);
+ aic7xxx_run_done_queue(p, TRUE);
+ }
+ else
+ {
+ aic7xxx_reset_device(p, target, channel, ALL_LUNS, SCB_LIST_NULL);
+ aic7xxx_run_done_queue(p, TRUE);
}
printk(INFO_LEAD "Unexpected busfree, LASTPHASE = 0x%x, "
"SEQADDR = 0x%x\n", p->host_no, channel, target, -1, lastphase,
cmd->result = 0;
scb = NULL;
}
+ if (scb->cmd == p->dev_dtr_cmnd[TARGET_INDEX(scb->cmd)])
+ {
+ /*
+ * Turn off the needsdtr, needwdtr, and needppr bits since this device
+ * doesn't seem to exist.
+ */
+ p->needppr &= ~(0x01 << TARGET_INDEX(scb->cmd));
+ p->needppr_copy &= ~(0x01 << TARGET_INDEX(scb->cmd));
+ p->needsdtr &= ~(0x01 << TARGET_INDEX(scb->cmd));
+ p->needsdtr_copy &= ~(0x01 << TARGET_INDEX(scb->cmd));
+ p->needwdtr &= ~(0x01 << TARGET_INDEX(scb->cmd));
+ p->needwdtr_copy &= ~(0x01 << TARGET_INDEX(scb->cmd));
+ }
}
/*
* Restarting the sequencer will stop the selection and make sure devices
* are allowed to reselect in.
*/
aic_outb(p, 0, SCSISEQ);
+ aic_outb(p, CLRSELINGO, CLRSINT0);
aic_outb(p, aic_inb(p, SIMODE1) & ~(ENREQINIT|ENBUSFREE), SIMODE1);
p->flags &= ~AHC_HANDLING_REQINITS;
aic_outb(p, CLRSELTIMEO | CLRBUSFREE, CLRSINT1);
Scsi_Cmnd *cmd;
unsigned char mesg_out = MSG_NOOP;
unsigned char lastphase = aic_inb(p, LASTPHASE);
+ unsigned char sstat2 = aic_inb(p, SSTAT2);
+ unsigned char tindex = TARGET_INDEX(scb->cmd);
cmd = scb->cmd;
switch (lastphase)
break;
}
- /*
- * A parity error has occurred during a data
- * transfer phase. Flag it and continue.
- */
- printk(WARN_LEAD "Parity error during %s phase.\n",
- p->host_no, CTL_OF_SCB(scb), phase);
+ /*
+ * A parity error has occurred during a data
+ * transfer phase. Flag it and continue.
+ */
+ if( (aic_inb(p, SCSIRATE) & AHC_SYNCRATE_CRC) && (lastphase == P_DATAIN) )
+ {
+ printk(WARN_LEAD "CRC error during %s phase.\n",
+ p->host_no, CTL_OF_SCB(scb), phase);
+ if(sstat2 & CRCVALERR)
+ {
+ printk(WARN_LEAD " CRC error in intermediate CRC packet.\n",
+ p->host_no, CTL_OF_SCB(scb));
+ }
+ if(sstat2 & CRCENDERR)
+ {
+ printk(WARN_LEAD " CRC error in ending CRC packet.\n",
+ p->host_no, CTL_OF_SCB(scb));
+ }
+ if(sstat2 & CRCREQERR)
+ {
+ printk(WARN_LEAD " Target incorrectly requested a CRC packet.\n",
+ p->host_no, CTL_OF_SCB(scb));
+ }
+ if(sstat2 & DUAL_EDGE_ERROR)
+ {
+ printk(WARN_LEAD " Dual Edge transmission error.\n",
+ p->host_no, CTL_OF_SCB(scb));
+ }
+ }
+ else
+ {
+ printk(WARN_LEAD "Parity error during %s phase.\n",
+ p->host_no, CTL_OF_SCB(scb), phase);
+ }
+
+ if(p->dev_flags[tindex] & DEVICE_PARITY_ERROR)
+ {
+ struct aic7xxx_syncrate *syncrate;
+ unsigned int period = p->transinfo[tindex].cur_period;
+ unsigned char options = p->transinfo[tindex].cur_options;
+ /*
+ * oops, we had a failure, lower the transfer rate and try again. It's
+ * worth noting here that it might be wise to also check for typical
+ * wide setting on narrow cable type problems and try disabling wide
+ * instead of slowing down if those exist. That's hard to do with simple
+ * checksums though.
+ */
+ if((syncrate = aic7xxx_find_syncrate(p, &period, 0, &options)) != NULL)
+ {
+ syncrate++;
+ if( (syncrate->rate[0] != NULL) &&
+ (!(p->features & AHC_ULTRA2) || (syncrate->sxfr_ultra2 == 0)) )
+ {
+ p->transinfo[tindex].goal_period = syncrate->period;
+ if( !(syncrate->sxfr_ultra2 & 0x40) )
+ {
+ p->transinfo[tindex].goal_options = 0;
+ }
+ }
+ else
+ {
+ p->transinfo[tindex].goal_offset = 0;
+ p->transinfo[tindex].goal_period = 0;
+ p->transinfo[tindex].goal_options = 0;
+ }
+ p->needppr |= (p->needppr_copy & (1<<tindex));
+ p->needsdtr |= (p->needsdtr_copy & (1<<tindex));
+ p->needwdtr |= (p->needwdtr_copy & (1<<tindex));
+ }
+ p->dev_flags[tindex] &= ~DEVICE_PARITY_ERROR;
+ }
+ else
+ {
+ p->dev_flags[tindex] |= DEVICE_PARITY_ERROR;
+ }
/*
* We've set the hardware to assert ATN if we get a parity
printk("HSCB %d bad, SCB_NEXT points to self.\n", i);
bogus = TRUE;
}
- temp = aic_inb(p, SCB_PREV);
- if ((temp != SCB_LIST_NULL) &&
- (temp >= p->scb_data->maxhscbs))
- {
- printk("HSCB %d bad, SCB_PREV invalid(%d).\n", i, temp);
- bogus = TRUE;
- }
if (scb_status[i] == 0)
lost++;
if (lost > 1)
unsigned char scb_index;
#ifdef AIC7XXX_VERBOSE_DEBUGGING
- if(aic7xxx_verbose > 0xffff)
+ if( (p->isr_count < 16) && (aic7xxx_verbose > 0xffff) )
printk(INFO_LEAD "Command Complete Int.\n", p->host_no, -1, -1, -1);
#endif
* Determines the queue depth for a given device. There are two ways
* a queue depth can be obtained for a tagged queueing device. One
* way is the default queue depth which is determined by whether
- * AIC7XXX_CMDS_PER_LUN is defined. If it is defined, then it is used
+ * AIC7XXX_CMDS_PER_DEVICE is defined. If it is defined, then it is used
* as the default queue depth. Otherwise, we use either 4 or 8 as the
* default queue depth (dependent on the number of hardware SCBs).
* The other way we determine queue depth is through the use of the
{
int tag_enabled = TRUE;
- default_depth = AIC7XXX_CMDS_PER_LUN;
+ default_depth = AIC7XXX_CMDS_PER_DEVICE;
if (!(p->discenable & target_mask))
{
}
printk("\n");
#endif
- if (checksum != scarray[len - 1])
+ if ( (checksum != scarray[len - 1]) || (checksum == 0) )
{
return (0);
}
aic_outb(p, i, SCBPTR);
aic_outb(p, 0, SCB_CONTROL); /* Clear the control byte. */
aic_outb(p, i + 1, SCB_NEXT); /* Set the next pointer. */
- aic_outb(p, i - 1, SCB_PREV); /* Set the prev pointer. */
aic_outb(p, SCB_LIST_NULL, SCB_TAG); /* Make the tag invalid. */
aic_outb(p, SCB_LIST_NULL, SCB_BUSYTARGETS); /* no busy untagged */
aic_outb(p, SCB_LIST_NULL, SCB_BUSYTARGETS+1);/* targets active yet */
*/
aic7xxx_loadseq(p);
+ /*
+ * Make sure the AUTOFLUSHDIS bit is *not* set in the SBLKCTL register
+ */
+ aic_outb(p, aic_inb(p, SBLKCTL) & ~AUTOFLUSHDIS, SBLKCTL);
+
if ( (p->chip & AHC_CHIPID_MASK) == AHC_AIC7770 )
{
aic_outb(p, ENABLE, BCTL); /* Enable the boards BUS drivers. */
{
p->transinfo[i].goal_period = 0;
p->transinfo[i].goal_offset = 0;
+ p->transinfo[i].goal_options = 0;
p->transinfo[i].goal_width = MSG_EXT_WDTR_BUS_8_BIT;
}
DRIVER_LOCK_INIT
*/
for (i = 0; i < MAX_TARGETS; i++)
{
- if(p->dev_wdtr_cmnd[i])
- kfree(p->dev_wdtr_cmnd[i]);
- if(p->dev_sdtr_cmnd[i])
- kfree(p->dev_sdtr_cmnd[i]);
+ if(p->dev_dtr_cmnd[i])
+ {
+ if(p->dev_dtr_cmnd[i]->request_buffer)
+ {
+ kfree(p->dev_dtr_cmnd[i]->request_buffer);
+ }
+ kfree(p->dev_dtr_cmnd[i]);
+ }
}
}
{
printk("aic7xxx: Using leftover BIOS values.\n");
}
- if ( *sxfrctl1 & STPWEN )
+ if ( ((p->chip & ~AHC_CHIPID_MASK) == AHC_PCI) && (*sxfrctl1 & STPWEN) )
{
p->flags |= AHC_TERM_ENB_SE_LOW | AHC_TERM_ENB_SE_HIGH;
sc->adapter_control &= ~CFAUTOTERM;
sc->adapter_control |= CFSTERM | CFWSTERM | CFLVDSTERM;
}
if (aic7xxx_extended)
- p->flags |= AHC_EXTEND_TRANS_A | AHC_EXTEND_TRANS_B;
+ p->flags |= (AHC_EXTEND_TRANS_A | AHC_EXTEND_TRANS_B);
+ else
+ p->flags &= ~(AHC_EXTEND_TRANS_A | AHC_EXTEND_TRANS_B);
}
else
{
* Limit to 16 targets just in case. The 2842 for one is known to
* blow the max_targets setting, future cards might also.
*/
- max_targets = MIN(sc->max_targets & CFMAXTARG,
- ((p->features & (AHC_TWIN | AHC_WIDE)) ? 16 : 8));
+ max_targets = ((p->features & (AHC_TWIN | AHC_WIDE)) ? 16 : 8);
if (have_seeprom)
{
mask = (0x01 << i);
if (!have_seeprom)
{
- if(aic_inb(p, SCSISEQ) != 0)
+ if (aic_inb(p, SCSISEQ) != 0)
{
/*
* OK...the BIOS set things up and left behind the settings we need.
sc->device_flags[i] = CFDISC;
if (p->features & AHC_WIDE)
sc->device_flags[i] |= CFWIDEB;
- if (p->features & AHC_ULTRA2)
+ if (p->features & AHC_ULTRA3)
+ sc->device_flags[i] |= 2;
+ else if (p->features & AHC_ULTRA2)
sc->device_flags[i] |= 3;
else if (p->features & AHC_ULTRA)
sc->device_flags[i] |= CFSYNCHISULTRA;
}
if (p->flags & AHC_NEWEEPROM_FMT)
{
- if (sc->device_flags[i] & CFSYNCHISULTRA)
- {
- p->ultraenb |= mask;
- }
- else if (sc->device_flags[i] & CFNEWULTRAFORMAT)
+ if ( (sc->device_flags[i] & CFNEWULTRAFORMAT) &&
+ !(p->features & AHC_ULTRA2) )
{
- if ( ((sc->device_flags[i] & (CFSYNCHISULTRA | CFXFER)) == 0x03) &&
- !(p->features & AHC_ULTRA2) )
+ /*
+ * I know of two different Ultra BIOSes that do this differently.
+ * One on the Gigabyte 6BXU mb that wants flags[i] & CFXFER to
+ * be == to 0x03 and SYNCISULTRA to be true to mean 40MByte/s
+ * while on the IBM Netfinity 5000 they want the same thing
+ * to be something else, while flags[i] & CFXFER == 0x03 and
+ * SYNCISULTRA false should be 40MByte/s. So, we set both to
+ * 40MByte/s and the lower speeds be damned. People will have
+ * to select around the conversely mapped lower speeds in order
+ * to select lower speeds on these boards.
+ */
+ if ((sc->device_flags[i] & (CFXFER)) == 0x03)
{
sc->device_flags[i] &= ~CFXFER;
sc->device_flags[i] |= CFSYNCHISULTRA;
- p->ultraenb |= mask;
}
}
+ if (sc->device_flags[i] & CFSYNCHISULTRA)
+ {
+ p->ultraenb |= mask;
+ }
}
else if (sc->adapter_control & CFULTRAEN)
{
p->ultraenb &= ~mask;
p->transinfo[i].user_offset = 0;
p->transinfo[i].user_period = 0;
+ p->transinfo[i].user_options = 0;
p->transinfo[i].cur_offset = 0;
p->transinfo[i].cur_period = 0;
+ p->transinfo[i].cur_options = 0;
p->needsdtr_copy &= ~mask;
}
else
{
- if (p->features & AHC_ULTRA2)
+ if (p->features & AHC_ULTRA3)
+ {
+ p->transinfo[i].user_offset = MAX_OFFSET_ULTRA2;
+ p->transinfo[i].cur_offset = aic_inb(p, TARG_OFFSET + i);
+ if( (sc->device_flags[i] & CFXFER) < 0x03 )
+ {
+ scsirate = (sc->device_flags[i] & CFXFER);
+ p->transinfo[i].user_options = MSG_EXT_PPR_OPTION_DT_CRC;
+ if( (aic_inb(p, TARG_SCSIRATE + i) & CFXFER) < 0x03 )
+ {
+ p->transinfo[i].cur_options =
+ ((aic_inb(p, TARG_SCSIRATE + i) & 0x40) ?
+ MSG_EXT_PPR_OPTION_DT_CRC : MSG_EXT_PPR_OPTION_DT_UNITS);
+ }
+ else
+ {
+ p->transinfo[i].cur_options = 0;
+ }
+ }
+ else
+ {
+ scsirate = (sc->device_flags[i] & CFXFER) |
+ ((p->ultraenb & mask) ? 0x18 : 0x10);
+ p->transinfo[i].user_options = 0;
+ p->transinfo[i].cur_options = 0;
+ }
+ p->transinfo[i].user_period = aic7xxx_find_period(p, scsirate,
+ AHC_SYNCRATE_ULTRA3);
+ p->transinfo[i].cur_period = aic7xxx_find_period(p,
+ aic_inb(p, TARG_SCSIRATE + i),
+ AHC_SYNCRATE_ULTRA3);
+ }
+ else if (p->features & AHC_ULTRA2)
{
p->transinfo[i].user_offset = MAX_OFFSET_ULTRA2;
p->transinfo[i].cur_offset = aic_inb(p, TARG_OFFSET + i);
scsirate = (sc->device_flags[i] & CFXFER) |
((p->ultraenb & mask) ? 0x18 : 0x10);
+ p->transinfo[i].user_options = 0;
+ p->transinfo[i].cur_options = 0;
p->transinfo[i].user_period = aic7xxx_find_period(p, scsirate,
AHC_SYNCRATE_ULTRA2);
p->transinfo[i].cur_period = aic7xxx_find_period(p,
else
{
scsirate = (sc->device_flags[i] & CFXFER) << 4;
- if (sc->device_flags[i] & CFWIDEB)
- p->transinfo[i].user_offset = MAX_OFFSET_16BIT;
- else
- p->transinfo[i].user_offset = MAX_OFFSET_8BIT;
+ p->transinfo[i].user_options = 0;
+ p->transinfo[i].cur_options = 0;
+ p->transinfo[i].user_offset = MAX_OFFSET_8BIT;
if (p->features & AHC_ULTRA)
{
short ultraenb;
}
aic_outb(p, ~(p->discenable & 0xFF), DISC_DSB);
aic_outb(p, ~((p->discenable >> 8) & 0xFF), DISC_DSB + 1);
+ p->needppr = p->needppr_copy = p->needdv = 0;
p->needwdtr = p->needwdtr_copy;
p->needsdtr = p->needsdtr_copy;
- p->wdtr_pending = p->sdtr_pending = 0;
+ p->dtr_pending = 0;
/*
* We set the p->ultraenb from the SEEPROM to begin with, but now we make
{
case AHC_AIC7895:
case AHC_AIC7896:
+ case AHC_AIC7899:
if (p->adapter_control & CFBPRIMARY)
p->flags |= AHC_CHANNEL_B_PRIMARY;
default:
{PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7855, AHC_AIC7850,
AHC_PAGESCBS, AHC_AIC7850_FE, 6,
32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7821, AHC_AIC7860,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
+ AHC_AIC7860_FE, 7,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_3860, AHC_AIC7860,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
+ AHC_AIC7860_FE, 7,
+ 32, C46 },
{PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7860, AHC_AIC7860,
AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
AHC_AIC7860_FE, 7,
{PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7884, AHC_AIC7880,
AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7880_FE, 18,
32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7885, AHC_AIC7880,
+ AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7880_FE, 18,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7886, AHC_AIC7880,
+ AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7880_FE, 18,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7887, AHC_AIC7880,
+ AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7880_FE, 18,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7888, AHC_AIC7880,
+ AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7880_FE, 18,
+ 32, C46 },
{PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7895, AHC_AIC7895,
AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL,
AHC_AIC7895_FE, 19,
AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
AHC_AIC7890_FE, 20,
32, C46 },
- {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_78902, AHC_AIC7890,
+ {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7890B, AHC_AIC7890,
AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
AHC_AIC7890_FE, 20,
32, C46 },
- {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_2940U2, AHC_AIC7890,
+ {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_2930U2, AHC_AIC7890,
AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
AHC_AIC7890_FE, 21,
32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_2940U2, AHC_AIC7890,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
+ AHC_AIC7890_FE, 22,
+ 32, C46 },
{PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7896, AHC_AIC7896,
AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL,
- AHC_AIC7896_FE, 22,
+ AHC_AIC7896_FE, 23,
32, C56_66 },
{PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_3940U2, AHC_AIC7896,
AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL,
- AHC_AIC7896_FE, 23,
+ AHC_AIC7896_FE, 24,
32, C56_66 },
{PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_3950U2D, AHC_AIC7896,
AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL,
- AHC_AIC7896_FE, 24,
+ AHC_AIC7896_FE, 25,
32, C56_66 },
{PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_1480A, AHC_AIC7860,
AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
- AHC_AIC7860_FE, 25,
+ AHC_AIC7860_FE, 26,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7892A, AHC_AIC7892,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
+ AHC_AIC7892_FE, 27,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7892B, AHC_AIC7892,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
+ AHC_AIC7892_FE, 27,
32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7892D, AHC_AIC7892,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
+ AHC_AIC7892_FE, 27,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7892P, AHC_AIC7892,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
+ AHC_AIC7892_FE, 27,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7899A, AHC_AIC7899,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL,
+ AHC_AIC7899_FE, 28,
+ 32, C56_66 },
+ {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7899B, AHC_AIC7899,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL,
+ AHC_AIC7899_FE, 28,
+ 32, C56_66 },
+ {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7899D, AHC_AIC7899,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL,
+ AHC_AIC7899_FE, 28,
+ 32, C56_66 },
+ {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7899P, AHC_AIC7899,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL,
+ AHC_AIC7899_FE, 28,
+ 32, C56_66 },
};
unsigned short command;
}
#ifdef AIC7XXX_STRICT_PCI_SETUP
command |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY |
- PCI_COMMAND_INVALIDATE | PCI_COMMAND_MASTER |
- PCI_COMMAND_MEMORY | PCI_COMMAND_IO;
+ PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | PCI_COMMAND_IO;
#else
command |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | PCI_COMMAND_IO;
#endif
+ command &= ~PCI_COMMAND_INVALIDATE;
if (aic7xxx_pci_parity == 0)
command &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
pci_write_config_word(pdev, PCI_COMMAND, command);
{
printk("aic7xxx: Initial DEVCONFIG value was 0x%x\n", devconfig);
}
- devconfig |= 0x80000000;
- if ((aic7xxx_pci_parity == 0) || (aic7xxx_pci_parity == -1))
- {
- devconfig &= ~(0x00000008);
- }
- else
- {
- devconfig |= 0x00000008;
- }
+ devconfig |= 0x80000040;
pci_write_config_dword(pdev, DEVCONFIG, devconfig);
#endif /* AIC7XXX_STRICT_PCI_SETUP */
#else /* LINUX_VERSION_CODE > KERNEL_VERSION(2,1,92) */
}
#ifdef AIC7XXX_STRICT_PCI_SETUP
command |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY |
- PCI_COMMAND_INVALIDATE | PCI_COMMAND_MASTER |
- PCI_COMMAND_MEMORY | PCI_COMMAND_IO;
+ PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | PCI_COMMAND_IO;
#else
command |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | PCI_COMMAND_IO;
#endif
+ command &= ~PCI_COMMAND_INVALIDATE;
if (aic7xxx_pci_parity == 0)
command &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
pcibios_write_config_word(pci_bus, pci_devfn, PCI_COMMAND, command);
{
printk("aic7xxx: Initial DEVCONFIG value was 0x%x\n", devconfig);
}
- devconfig |= 0x80000000;
- if ((aic7xxx_pci_parity == 0) || (aic7xxx_pci_parity == -1))
- {
- devconfig &= ~(0x00000008);
- }
- else
- {
- devconfig |= 0x00000008;
- }
+ devconfig |= 0x80000040;
pcibios_write_config_dword(pci_bus, pci_devfn, DEVCONFIG, devconfig);
#endif /* AIC7XXX_STRICT_PCI_SETUP */
#endif /* LINUIX_VERSION_CODE > KERNEL_VERSION(2,1,92) */
case AHC_AIC7895: /* 7895 */
case AHC_AIC7896: /* 7896/7 */
+ case AHC_AIC7899: /* 7899 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,1,92)
if (PCI_FUNC(temp_p->pdev->devfn) != 0)
{
*/
switch (temp_p->chip & AHC_CHIPID_MASK)
{
- case AHC_AIC7890:
- case AHC_AIC7896:
+ case AHC_AIC7892:
+ case AHC_AIC7899:
aic_outb(temp_p, 0, SCAMCTL);
/*
- * We used to set DPARCKEN in this register, but after talking
- * to a tech from Adaptec, I found out they don't use that
- * particular bit in their own register settings, and when you
- * combine that with the fact that I determined that we were
- * seeing Data-Path Parity Errors on things we shouldn't see
- * them on, I think there is a bug in the silicon and the way
- * to work around it is to disable this particular check. Also
- * This bug only showed up on certain commands, so it seems to
- * be pattern related or some such. The commands we would
- * typically send as a linux TEST_UNIT_READY or INQUIRY command
- * could cause it to be triggered, while regular commands that
- * actually made reasonable use of the SG array capabilities
- * seemed not to cause the problem.
+ * Switch to the alt mode of the chip...
*/
+ aic_outb(temp_p, aic_inb(temp_p, SFUNCT) | ALT_MODE, SFUNCT);
/*
- aic_outb(temp_p, aic_inb(temp_p, DSCOMMAND0) |
- CACHETHEN | DPARCKEN | MPARCKEN |
- USCBSIZE32 | CIOPARCKEN,
- DSCOMMAND0);
+ * Set our options...the last two items set our CRC after x byte
+ * count in target mode...
*/
+ aic_outb(temp_p, AUTO_MSGOUT_DE | DIS_MSGIN_DUALEDGE, OPTIONMODE);
+ aic_outb(temp_p, 0x00, 0x0b);
+ aic_outb(temp_p, 0x10, 0x0a);
+ /*
+ * switch back to normal mode...
+ */
+ aic_outb(temp_p, aic_inb(temp_p, SFUNCT) & ~ALT_MODE, SFUNCT);
+ aic_outb(temp_p, CRCVALCHKEN | CRCENDCHKEN | CRCREQCHKEN |
+ TARGCRCENDEN | TARGCRCCNTEN,
+ CRCCONTROL1);
+ aic_outb(temp_p, ((aic_inb(temp_p, DSCOMMAND0) | USCBSIZE32 |
+ MPARCKEN | CIOPARCKEN | CACHETHEN) &
+ ~DPARCKEN), DSCOMMAND0);
+ aic7xxx_load_seeprom(temp_p, &sxfrctl1);
+ break;
+ case AHC_AIC7890:
+ case AHC_AIC7896:
+ aic_outb(temp_p, 0, SCAMCTL);
aic_outb(temp_p, (aic_inb(temp_p, DSCOMMAND0) |
CACHETHEN | MPARCKEN | USCBSIZE32 |
CIOPARCKEN) & ~DPARCKEN, DSCOMMAND0);
- /* FALLTHROUGH */
- default:
- /*
- * We attempt to read a SEEPROM on *everything*. If we fail,
- * then we fail, but this covers things like 2910c cards that
- * now have SEEPROMs with their 7856 chipset that we would
- * otherwise ignore. They still don't have a BIOS, but they
- * have a SEEPROM that the SCSISelect utility on the Adaptec
- * diskettes can configure.
- */
aic7xxx_load_seeprom(temp_p, &sxfrctl1);
break;
case AHC_AIC7850:
aic_outb(temp_p, (aic_inb(temp_p, DSCOMMAND0) |
CACHETHEN | MPARCKEN) & ~DPARCKEN,
DSCOMMAND0);
+ /* FALLTHROUGH */
+ default:
aic7xxx_load_seeprom(temp_p, &sxfrctl1);
break;
case AHC_AIC7880:
/*
- * Only set the DSCOMMAND0 register if this is a Rev B.
- * chipset. For those, we also enable Ultra mode by
- * force due to brain-damage on the part of some BIOSes
- * We overload the devconfig variable here since we can.
+ * Check the rev of the chipset before we change DSCOMMAND0
*/
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,1,92)
pci_read_config_dword(pdev, DEVCONFIG, &devconfig);
{
case AHC_AIC7895:
case AHC_AIC7896:
+ case AHC_AIC7899:
current_p = list_p;
while(current_p != NULL)
{
break;
case AHC_AIC7895:
case AHC_AIC7896:
+ case AHC_AIC7899:
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,1,92)
pci_read_config_dword(pdev, DEVCONFIG, &devconfig);
#else
*/
if (temp_p->features & AHC_ULTRA2)
{
- aic_outb(temp_p, RD_DFTHRSH_75 | WR_DFTHRSH_75, DFF_THRSH);
+ aic_outb(temp_p, RD_DFTHRSH_MAX | WR_DFTHRSH_MAX, DFF_THRSH);
}
else
{
}
}
/*
- * Are we dealing with a 7985 where we need to sort the
+ * Are we dealing with a 7895/6/7/9 where we need to sort the
* channels as well, if so, the bios_address values should
* be the same
*/
return (found);
}
-#ifdef AIC7XXX_FAKE_NEGOTIATION_CMDS
+static void aic7xxx_build_negotiation_cmnd(struct aic7xxx_host *p,
+ Scsi_Cmnd *old_cmd, int tindex);
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_allocate_negotiation_command
+ *
+ * Description:
+ * allocate the actual command struct and fill in the gaps...
+ *-F*************************************************************************/
+static Scsi_Cmnd *
+aic7xxx_allocate_negotiation_command(struct aic7xxx_host *p,
+ Scsi_Cmnd *old_cmd, int tindex)
+{
+ Scsi_Cmnd *cmd;
+ char *buffer;
+
+ if (!(p->dev_dtr_cmnd[tindex] = kmalloc(sizeof(Scsi_Cmnd), GFP_ATOMIC)) )
+ {
+ return(NULL);
+ }
+ if (!(buffer = kmalloc(256, GFP_ATOMIC)))
+ {
+ kfree(p->dev_dtr_cmnd[tindex]);
+ p->dev_dtr_cmnd[tindex] = NULL;
+ return(NULL);
+ }
+ cmd = p->dev_dtr_cmnd[tindex];
+ memset(cmd, 0, sizeof(Scsi_Cmnd));
+ memcpy(cmd, old_cmd, sizeof(Scsi_Cmnd));
+ memset(&cmd->cmnd[0], 0, sizeof(cmd->cmnd));
+ memset(&cmd->data_cmnd[0], 0, sizeof(cmd->data_cmnd));
+ cmd->lun = 0;
+ cmd->request_bufflen = 255;
+ cmd->request_buffer = buffer;
+ cmd->use_sg = cmd->old_use_sg = cmd->sglist_len = 0;
+ cmd->bufflen = 0;
+ cmd->buffer = NULL;
+ cmd->underflow = 0;
+ cmd->cmd_len = 6;
+ cmd->cmnd[0] = cmd->data_cmnd[0] = INQUIRY;
+ cmd->cmnd[1] = cmd->data_cmnd[1] = 0;
+ cmd->cmnd[2] = cmd->data_cmnd[2] = 0;
+ cmd->cmnd[3] = cmd->data_cmnd[3] = 0;
+ cmd->cmnd[4] = cmd->data_cmnd[4] = 255; /* match what scsi.c does here */
+ cmd->cmnd[5] = cmd->data_cmnd[5] = 0;
+ return(cmd);
+}
/*+F*************************************************************************
* Function:
static void
aic7xxx_negotiation_complete(Scsi_Cmnd *cmd)
{
+ unsigned int checksum;
+ int i;
+ int *ibuffer;
+ struct aic7xxx_host *p = (struct aic7xxx_host *)cmd->host->hostdata;
+ int tindex = TARGET_INDEX(cmd);
+ struct aic7xxx_syncrate *syncrate;
+
+ /*
+ * perform our minimalistic domain validation
+ */
+ if(p->dev_flags[tindex] & DEVICE_SCANNED)
+ {
+ ibuffer = (int *)cmd->request_buffer;
+ checksum = 0;
+ for(i = 0; i < (cmd->request_bufflen >> 2); i++)
+ {
+ checksum += ibuffer[i];
+ }
+ if( (checksum != p->dev_checksum[tindex]) &&
+ (p->transinfo[tindex].cur_offset != 0) )
+ {
+ unsigned int period = p->transinfo[tindex].cur_period;
+ unsigned char options = p->transinfo[tindex].cur_options;
+
+ if (p->needdv & (1<<tindex))
+ {
+ /*
+ * oops, we had a failure, lower the transfer rate and try again. It's
+ * worth noting here that it might be wise to also check for typical
+ * wide setting on narrow cable type problems and try disabling wide
+ * instead of slowing down if those exist. That's hard to do with simple
+ * checksums though.
+ */
+ if(aic7xxx_verbose & VERBOSE_NEGOTIATION)
+ {
+ printk(INFO_LEAD "reducing SCSI transfer speed due to Domain "
+ "validation failure.\n", p->host_no, CTL_OF_CMD(cmd));
+ }
+ if((syncrate = aic7xxx_find_syncrate(p, &period, 0, &options)) != NULL)
+ {
+ syncrate++;
+ if( (syncrate->rate[0] != NULL) &&
+ (!(p->features & AHC_ULTRA2) || (syncrate->sxfr_ultra2 == 0)) )
+ {
+ p->transinfo[tindex].goal_period = syncrate->period;
+ if( !(syncrate->sxfr_ultra2 & 0x40) )
+ {
+ p->transinfo[tindex].goal_options = 0;
+ }
+ }
+ else
+ {
+ p->transinfo[tindex].goal_offset = 0;
+ p->transinfo[tindex].goal_period = 0;
+ p->transinfo[tindex].goal_options = 0;
+ }
+ p->needppr |= (p->needppr_copy & (1<<tindex));
+ p->needsdtr |= (p->needsdtr_copy & (1<<tindex));
+ p->needwdtr |= (p->needwdtr_copy & (1<<tindex));
+ }
+ p->needdv &= ~(1<<tindex);
+ }
+ else
+ {
+ if(aic7xxx_verbose & VERBOSE_NEGOTIATION)
+ {
+ printk(INFO_LEAD "Performing Domain validation.\n",
+ p->host_no, CTL_OF_CMD(cmd));
+ }
+ /*
+ * Update the checksum in case the INQUIRY data has changed, maybe
+ * in relation to a change in the mode pages, or whatever.
+ */
+ p->dev_checksum[tindex] = checksum;
+ /*
+ * Signal that we are trying out the domain validation
+ */
+ p->needdv |= (1<<tindex);
+ /*
+ * Signal that we need to re-negotiate things, this also gets us our
+ * INQUIRY command to re-checksum off of.
+ */
+ p->needppr |= (p->needppr_copy & (1<<tindex));
+ p->needsdtr |= (p->needsdtr_copy & (1<<tindex));
+ p->needwdtr |= (p->needwdtr_copy & (1<<tindex));
+ }
+ }
+ else
+ {
+ if( (aic7xxx_verbose & VERBOSE_NEGOTIATION) &&
+ (p->needdv & (1<<tindex)) )
+ {
+ printk(INFO_LEAD "Successfully completed Domain validation.\n",
+ p->host_no, CTL_OF_CMD(cmd));
+ }
+ /*
+ * We successfully did our checksum, so don't leave the needdv flag set
+ * in case we might have set it last time through.
+ */
+ p->needdv &= ~(1<<tindex);
+ }
+ }
+
+ p->dtr_pending &= ~(0x01 << tindex);
+ /*
+ * This looks recursive in the extreme, but if this was a WDTR negotiation
+ * and we didn't follow up with SDTR yet, then this will get it started.
+ * For all other cases, this should work out to be a no-op, unless we are
+ * doing domain validation and happen to need a new negotiation command.
+ */
+ aic7xxx_build_negotiation_cmnd(p, cmd->next, tindex);
return;
}
int tindex)
{
- if ( (p->needwdtr & (1<<tindex)) && !(p->wdtr_pending & (1<<tindex)) )
+ if ( !(p->dtr_pending & (1<<tindex)) &&
+ ( (p->needppr & (1<<tindex)) ||
+ (p->needwdtr & (1<<tindex)) ||
+ (p->needsdtr & (1<<tindex)) ) )
{
- if(p->dev_wdtr_cmnd[tindex] == NULL)
+ if ( (p->dev_dtr_cmnd[tindex] == NULL) &&
+ (aic7xxx_allocate_negotiation_command(p, old_cmd, tindex) == NULL) )
{
- Scsi_Cmnd *cmd;
-
- if (!(p->dev_wdtr_cmnd[tindex] = kmalloc(sizeof(Scsi_Cmnd), GFP_ATOMIC)) )
- {
- return;
- }
- cmd = p->dev_wdtr_cmnd[tindex];
- memset(cmd, 0, sizeof(Scsi_Cmnd));
- memcpy(cmd, old_cmd, sizeof(Scsi_Cmnd));
- memset(&cmd->cmnd[0], 0, sizeof(cmd->cmnd));
- memset(&cmd->data_cmnd[0], 0, sizeof(cmd->data_cmnd));
- cmd->lun = 0;
- cmd->request_bufflen = 0;
- cmd->request_buffer = NULL;
- cmd->use_sg = cmd->old_use_sg = cmd->sglist_len = 0;
- cmd->bufflen = 0;
- cmd->buffer = NULL;
- cmd->underflow = 0;
- cmd->cmd_len = 6;
+ return;
}
/*
- * Before sending this thing out, we also amke the cmd->next pointer
+ * Before sending this thing out, we also make the cmd->next pointer
* point to the real command so we can stuff any possible SENSE data
- * intp the real command instead of this fake command. This has to be
+ * into the real command instead of this fake command. This has to be
* done each time the command is built, not just the first time, hence
* it's outside of the above if()...
*/
- p->dev_wdtr_cmnd[tindex]->next = old_cmd;
- aic7xxx_queue(p->dev_wdtr_cmnd[tindex],
- aic7xxx_negotiation_complete);
- }
- else if ( (p->needsdtr & (1<<tindex)) && !(p->sdtr_pending & (1<<tindex)) &&
- !(p->wdtr_pending & (1<<tindex)) )
- {
- if(p->dev_sdtr_cmnd[tindex] == NULL)
+ p->dev_dtr_cmnd[tindex]->next = old_cmd;
+ /*
+ * Clear the buffer so checksums come out right....
+ */
+ memset(p->dev_dtr_cmnd[tindex]->request_buffer, 0,
+ p->dev_dtr_cmnd[tindex]->request_bufflen);
+ /*
+ * Remove any commands for this particular device that might be on the
+ * waiting_scbs queue or qinfifo so that this command goes out first.
+ * This is vital for our implementation of domain validation.
+ */
+ pause_sequencer(p);
+ aic7xxx_search_qinfifo(p, old_cmd->target, old_cmd->channel, ALL_LUNS,
+ SCB_LIST_NULL, 0, TRUE, &p->delayed_scbs[tindex]);
+ unpause_sequencer(p, FALSE);
{
- Scsi_Cmnd *cmd;
+ struct aic7xxx_scb *scb, *next;
- if (!(p->dev_sdtr_cmnd[tindex] = kmalloc(sizeof(Scsi_Cmnd), GFP_ATOMIC)) )
+ scb = p->waiting_scbs.head;
+ while(scb != NULL)
{
- return;
+ if( aic7xxx_match_scb(p, scb, old_cmd->target, old_cmd->channel,
+ ALL_LUNS, SCB_LIST_NULL) )
+ {
+ next = scb->q_next;
+ scbq_remove(&p->waiting_scbs, scb);
+ scbq_insert_tail(&p->delayed_scbs[tindex], scb);
+ scb = next;
+ }
+ else
+ {
+ scb = scb->q_next;
+ }
}
- cmd = p->dev_sdtr_cmnd[tindex];
- memset(cmd, 0, sizeof(Scsi_Cmnd));
- memcpy(cmd, old_cmd, sizeof(Scsi_Cmnd));
- memset(&cmd->cmnd[0], 0, sizeof(cmd->cmnd));
- memset(&cmd->data_cmnd[0], 0, sizeof(cmd->data_cmnd));
- cmd->lun = 0;
- cmd->request_bufflen = 0;
- cmd->request_buffer = NULL;
- cmd->use_sg = cmd->old_use_sg = cmd->sglist_len = 0;
- cmd->bufflen = 0;
- cmd->buffer = NULL;
- cmd->underflow = 0;
- cmd->cmd_len = 6;
}
- /*
- * Before sending this thing out, we also amke the cmd->next pointer
- * point to the real command so we can stuff any possible SENSE data
- * intp the real command instead of this fake command. This has to be
- * done each time the command is built, not just the first time, hence
- * it's outside of the above if()...
- */
- p->dev_sdtr_cmnd[tindex]->next = old_cmd;
- aic7xxx_queue(p->dev_sdtr_cmnd[tindex],
+ aic7xxx_queue(p->dev_dtr_cmnd[tindex],
aic7xxx_negotiation_complete);
}
}
-#endif
-
#ifdef AIC7XXX_VERBOSE_DEBUGGING
/*+F*************************************************************************
* Function:
{
unsigned short mask;
struct aic7xxx_hwscb *hscb;
+ unsigned char tindex = TARGET_INDEX(cmd);
- mask = (0x01 << TARGET_INDEX(cmd));
+ mask = (0x01 << tindex);
hscb = scb->hscb;
/*
if (p->discenable & mask)
{
hscb->control |= DISCENB;
- if (p->tagenable & mask)
+ if ( (p->tagenable & mask) &&
+ (cmd->cmnd[0] != TEST_UNIT_READY) )
{
cmd->tag = hscb->tag;
- p->dev_commands_sent[TARGET_INDEX(cmd)]++;
- if (p->dev_commands_sent[TARGET_INDEX(cmd)] < 200)
+ p->dev_commands_sent[tindex]++;
+ if (p->dev_commands_sent[tindex] < 200)
{
hscb->control |= MSG_SIMPLE_Q_TAG;
scb->tag_action = MSG_SIMPLE_Q_TAG;
hscb->control |= MSG_SIMPLE_Q_TAG;
scb->tag_action = MSG_SIMPLE_Q_TAG;
}
- p->dev_commands_sent[TARGET_INDEX(cmd)] = 0;
+ p->dev_commands_sent[tindex] = 0;
}
}
}
- if (p->dev_flags[TARGET_INDEX(cmd)] & DEVICE_SCANNED)
+ if ( cmd == p->dev_dtr_cmnd[tindex] )
{
-#ifdef AIC7XXX_FAKE_NEGOTIATION_CMDS
- if ( (p->needwdtr & mask) && !(p->wdtr_pending & mask) )
+ p->dtr_pending |= mask;
+ scb->tag_action = 0;
+ if (p->dev_flags[tindex] & DEVICE_SCANNED)
{
- if (cmd == p->dev_wdtr_cmnd[TARGET_INDEX(cmd)])
+ hscb->control &= DISCENB;
+ hscb->control |= MK_MESSAGE;
+ if(p->needppr & mask)
{
- p->wdtr_pending |= mask;
- scb->flags |= SCB_MSGOUT_WDTR;
- hscb->control &= DISCENB;
- hscb->control |= MK_MESSAGE;
- scb->tag_action = 0;
+ scb->flags |= SCB_MSGOUT_PPR;
}
- else
+ else if(p->needwdtr & mask)
{
- aic7xxx_build_negotiation_cmnd(p, cmd, TARGET_INDEX(cmd));
+ scb->flags |= SCB_MSGOUT_WDTR;
}
- }
- else if ( (p->needsdtr & mask) && !(p->sdtr_pending & mask) &&
- !(p->wdtr_pending & mask) )
- {
- if (cmd == p->dev_sdtr_cmnd[TARGET_INDEX(cmd)])
+ else if(p->needsdtr & mask)
{
- p->sdtr_pending |= mask;
scb->flags |= SCB_MSGOUT_SDTR;
- hscb->control &= DISCENB;
- hscb->control |= MK_MESSAGE;
- scb->tag_action = 0;
}
- else if (cmd != p->dev_wdtr_cmnd[TARGET_INDEX(cmd)])
- {
- aic7xxx_build_negotiation_cmnd(p, cmd, TARGET_INDEX(cmd));
- }
- }
-#else
- if ( (p->needwdtr & mask) && !(p->wdtr_pending & mask) &&
- !(p->sdtr_pending & mask) && (cmd->lun == 0) )
- {
- p->wdtr_pending |= mask;
- scb->flags |= SCB_MSGOUT_WDTR;
- hscb->control &= DISCENB;
- hscb->control |= MK_MESSAGE;
- scb->tag_action = 0;
-#ifdef AIC7XXX_VERBOSE_DEBUGGING
- if (aic7xxx_verbose > 0xffff)
- printk(INFO_LEAD "Building WDTR command.\n", p->host_no,
- CTL_OF_CMD(cmd));
-#endif
- }
- else if ( (p->needsdtr & mask) && !(p->wdtr_pending & mask) &&
- !(p->sdtr_pending & mask) && (cmd->lun == 0) )
- {
- p->sdtr_pending |= mask;
- scb->flags |= SCB_MSGOUT_SDTR;
- hscb->control &= DISCENB;
- hscb->control |= MK_MESSAGE;
- scb->tag_action = 0;
-#ifdef AIC7XXX_VERBOSE_DEBUGGING
- if (aic7xxx_verbose > 0xffff)
- printk(INFO_LEAD "Building SDTR command.\n", p->host_no,
- CTL_OF_CMD(cmd));
-#endif
}
-#endif
+ }
+ if ( !(p->dtr_pending & mask) &&
+ ( (p->needppr & mask) ||
+ (p->needwdtr & mask) ||
+ (p->needsdtr & mask) ) )
+ {
+ aic7xxx_build_negotiation_cmnd(p, cmd, tindex);
}
hscb->target_channel_lun = ((cmd->target << 4) & 0xF0) |
((cmd->channel & 0x01) << 3) | (cmd->lun & 0x07);
scb->sg_count = cmd->use_sg;
hscb->SG_segment_count = cmd->use_sg;
hscb->SG_list_pointer = cpu_to_le32(VIRT_TO_BUS(&scb->sg_list[1]));
-
}
else
{
hscb->data_pointer = 0;
}
}
-#ifdef AIC7XXX_VERBOSE_DEBUGGING
- if((cmd->cmnd[0] == TEST_UNIT_READY) && (aic7xxx_verbose & VERBOSE_PROBE2))
- {
- aic7xxx_print_scb(p, scb);
- }
-#endif
}
/*+F*************************************************************************
if(p->dev_flags[i] & DEVICE_PRESENT)
{
mask = (0x01 << i);
- printk(INFO_LEAD "dev_flags=0x%x, WDTR:%c/%c/%c, SDTR:%c/%c/%c,"
- " q_depth=%d:%d\n",
+ printk(INFO_LEAD "dev_flags=0x%x, Pending:%c, PPR:%c/%c, WDTR:%c/%c, "
+ "SDTR:%c/%c, q_depth=%d:%d\n",
p->host_no, 0, i, 0, p->dev_flags[i],
- (p->wdtr_pending & mask) ? 'Y' : 'N',
+ (p->dtr_pending & mask) ? 'Y' : 'N',
+ (p->needppr & mask) ? 'Y' : 'N',
+ (p->needppr_copy & mask) ? 'Y' : 'N',
(p->needwdtr & mask) ? 'Y' : 'N',
(p->needwdtr_copy & mask) ? 'Y' : 'N',
- (p->sdtr_pending & mask) ? 'Y' : 'N',
(p->needsdtr & mask) ? 'Y' : 'N',
(p->needsdtr_copy & mask) ? 'Y' : 'N',
p->dev_active_cmds[i],
* We haven't found the offending SCB yet, and it should be around
* somewhere, so go look for it in the cards SCBs.
*/
- printk("SCBPTR CONTROL TAG PREV NEXT\n");
+ printk("SCBPTR CONTROL TAG NEXT\n");
for(i=0; i<p->scb_data->maxhscbs; i++)
{
aic_outb(p, i, SCBPTR);
- printk(" %3d %02x %02x %02x %02x\n", i,
+ printk(" %3d %02x %02x %02x\n", i,
aic_inb(p, SCB_CONTROL), aic_inb(p, SCB_TAG),
- aic_inb(p, SCB_PREV), aic_inb(p, SCB_NEXT));
+ aic_inb(p, SCB_NEXT));
}
}
if ((found == 0) && (scb->flags & SCB_WAITINGQ))
{
int tindex = TARGET_INDEX(cmd);
-#ifdef AIC7XXX_FAKE_NEGOTIATION_CMDS
unsigned short mask;
mask = (1 << tindex);
- if (p->wdtr_pending & mask)
- {
- if (p->dev_wdtr_cmnd[tindex]->next != cmd)
- found = 1;
- else
- found = 0;
- }
- else if (p->sdtr_pending & mask)
+ if (p->dtr_pending & mask)
{
- if (p->dev_sdtr_cmnd[tindex]->next != cmd)
+ if (p->dev_dtr_cmnd[tindex]->next != cmd)
found = 1;
else
found = 0;
DRIVER_UNLOCK
return(SCSI_ABORT_PENDING);
}
-#endif
if (aic7xxx_verbose & VERBOSE_ABORT_PROCESS)
printk(INFO_LEAD "SCB found on waiting list and "
"aborted.\n", p->host_no, CTL_OF_SCB(scb));
if(aic7xxx_verbose & VERBOSE_RESET_RETURN)
printk(INFO_LEAD "SCB on qoutfifo, returning.\n", p->host_no,
CTL_OF_SCB(scb));
+ aic7xxx_run_done_queue(p, TRUE);
+ aic7xxx_run_waiting_queues(p);
unpause_sequencer(p, FALSE);
DRIVER_UNLOCK
return(SCSI_RESET_NOT_RUNNING);
int
aic7xxx_biosparam(Disk *disk, kdev_t dev, int geom[])
{
- int heads, sectors, cylinders;
+ int heads, sectors, cylinders, ret;
struct aic7xxx_host *p;
+ struct buffer_head *bh;
p = (struct aic7xxx_host *) disk->device->host->hostdata;
+ bh = bread(MKDEV(MAJOR(dev), MINOR(dev)&~0xf), 0, 1024);
- /*
- * XXX - if I could portably find the card's configuration
- * information, then this could be autodetected instead
- * of left to a boot-time switch.
- */
+ if ( bh )
+ {
+ ret = scsi_partsize(bh, disk->capacity, &geom[2], &geom[0], &geom[1]);
+ brelse(bh);
+ if ( ret != -1 )
+ return(ret);
+ }
+
heads = 64;
sectors = 32;
cylinders = disk->capacity / (heads * sectors);
0x84, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9a, 0x9a, 0x9f, 0x9f,
0xe0, 0xf1, 0xf4, 0xf4, 0xf6, 0xf6, 0xf8, 0xf8, 0xfa, 0xfc,
0xfe, 0xff} },
+ {12, {0x00, 0x05, 0x08, 0x11, 0x18, 0x1f, 0x60, 0x60, 0x62, 0x66, /*7892*/
+ 0x84, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9a, 0x9a, 0x9c, 0x9f,
+ 0xe0, 0xf1, 0xf4, 0xfc} },
+ {12, {0x00, 0x05, 0x08, 0x11, 0x18, 0x1f, 0x60, 0x60, 0x62, 0x66, /*7899*/
+ 0x84, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9a, 0x9a, 0x9c, 0x9f,
+ 0xe0, 0xf1, 0xf4, 0xfc} },
};
#ifdef CONFIG_PCI
static struct register_ranges cards_ns[] = {
{ 5, {0x04, 0x08, 0x0c, 0x1b, 0x30, 0x34, 0x3c, 0x43, 0xdc, 0xe3} },
{ 6, {0x04, 0x08, 0x0c, 0x0e, 0x10, 0x17, 0x30, 0x34, 0x3c, 0x47,
0xdc, 0xe3} },
+ { 6, {0x04, 0x08, 0x0c, 0x1b, 0x30, 0x34, 0x3c, 0x43, 0xdc, 0xe3,
+ 0xff, 0xff} },
+ { 6, {0x04, 0x08, 0x0c, 0x1b, 0x30, 0x34, 0x3c, 0x43, 0xdc, 0xe3,
+ 0xff, 0xff} },
{ 6, {0x04, 0x08, 0x0c, 0x1b, 0x30, 0x34, 0x3c, 0x43, 0xdc, 0xe3,
0xff, 0xff} }
};
access_mode RW
}
+/*
+ * Option Mode Register (Alternate Mode) (p. 5-198)
+ * This register is used to set certain options on Ultra3 based chips.
+ * The chip must be in alternate mode (bit ALT_MODE in SFUNCT must be set)
+ */
+register OPTIONMODE {
+ address 0x008
+ access_mode RW
+ bit AUTORATEEN 0x80
+ bit AUTOACKEN 0x40
+ bit ATNMGMNTEN 0x20
+ bit BUSFREEREV 0x10
+ bit EXPPHASEDIS 0x08
+ bit SCSIDATL_IMGEN 0x04
+ bit AUTO_MSGOUT_DE 0x02
+ bit DIS_MSGIN_DUALEDGE 0x01
+}
+
+
/*
* Clear SCSI Interrupt 0 (p. 3-20)
* Writing a 1 to a bit clears the associated SCSI Interrupt in SSTAT0.
address 0x00d
access_mode RO
bit OVERRUN 0x80
+ bit SHVALID 0x40
+ bit WIDE_RES 0x20
bit EXP_ACTIVE 0x10 /* SCSI Expander Active */
+ bit CRCVALERR 0x08 /* CRC Value Error */
+ bit CRCENDERR 0x04 /* CRC End Error */
+ bit CRCREQERR 0x02 /* CRC REQ Error */
+ bit DUAL_EDGE_ERROR 0x01 /* Invalid pins for Dual Edge phase */
mask SFCNT 0x1f
}
bit SQPARERR 0x08
bit ILLOPCODE 0x04
bit ILLSADDR 0x02
+ bit DSCTMOUT 0x02 /* Ultra3 only */
bit ILLHADDR 0x01
}
access_mode RO
}
+/*
+ * SCSIDATL IMAGE Register (p. 5-104)
+ * Writes to this register also go to SCSIDATL but this register will preserve
+ * the data for later reading as long as the SCSIDATL_IMGEN bit in the
+ * OPTIONMODE register is set.
+ */
+register SCSIDATL_IMG {
+ address 0x09c
+ access_mode RW
+}
+
/*
* Queue Out FIFO (p. 3-61)
* Queue of SCBs that have completed and await the host
access_mode WO
}
+/*
+ * CRC Control 1 Register (p. 5-105)
+ * Control bits for the Ultra 160/m CRC facilities
+ */
+register CRCCONTROL1 {
+ address 0x09d
+ access_mode RW
+ bit CRCONSEEN 0x80 /* CRC ON Single Edge ENable */
+ bit CRCVALCHKEN 0x40 /* CRC Value Check Enable */
+ bit CRCENDCHKEN 0x20 /* CRC End Check Enable */
+ bit CRCREQCHKEN 0x10
+ bit TARGCRCENDEN 0x08 /* Enable End CRC transfer when target */
+ bit TARGCRCCNTEN 0x40 /* Enable CRC transfer when target */
+}
+
/*
* Queue Out Count (p. 3-61)
* Number of queued SCBs in the Out FIFO
access_mode RO
}
+/*
+ * SCSI Phase Register (p. 5-106)
+ * Current bus phase
+ */
+register SCSIPHASE {
+ address 0x09e
+ access_mode RO
+ bit SP_STATUS 0x20
+ bit SP_COMMAND 0x10
+ bit SP_MSG_IN 0x08
+ bit SP_MSG_OUT 0x04
+ bit SP_DATA_IN 0x02
+ bit SP_DATA_OUT 0x01
+}
+
/*
* Special Function
*/
register SFUNCT {
address 0x09f
access_mode RW
+ bit ALT_MODE 0x80
}
/*
address 0x0F4
}
+register HESCB_QOFF {
+ address 0x0F5
+}
+
register SNSCB_QOFF {
address 0x0F6
}
+register SESCB_QOFF {
+ address 0x0F7
+}
+
register SDSCB_QOFF {
address 0x0F8
}
register QOFF_CTLSTA {
address 0x0FA
+ bit ESTABLISH_SCB_AVAIL 0x80
bit SCB_AVAIL 0x40
bit SNSCB_ROLLOVER 0x20
bit SDSCB_ROLLOVER 0x10
+ bit SESCB_ROLLOVER 0x08
mask SCB_QSIZE 0x07
mask SCB_QSIZE_256 0x06
}
/*
* Adaptec 274x/284x/294x device driver firmware for Linux and FreeBSD.
*
- * Copyright (c) 1994-1998 Justin Gibbs.
+ * Copyright (c) 1994-1999 Justin Gibbs.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
reset:
clr SCSISIGO; /* De-assert BSY */
+ and SXFRCTL1, ~BITBUCKET;
/* Always allow reselection */
if ((p->flags & AHC_TARGETMODE) != 0) {
mvi SCSISEQ, ENSELI|ENRSELI|ENAUTOATNP;
}
call clear_target_state;
- and SXFRCTL0, ~SPIOEN;
poll_for_work:
+ and SXFRCTL0, ~SPIOEN;
if ((p->features & AHC_QUEUE_REGS) == 0) {
mov A, QINPOS;
}
mvi DMAPARAMS, HDMAEN|DIRECTION|FIFORESET;
mov RETURN_2 call dma_scb;
+/*
+ * Preset the residual fields in case we never go through a data phase.
+ * This isn't done by the host so we can avoid a DMA to clear these
+ * fields for the normal case of I/O that completes without underrun
+ * or overrun conditions.
+ */
+ if ((p->features & AHC_CMD_CHAN) != 0) {
+ bmov SCB_RESID_DCNT, SCB_DATACNT, 3;
+ } else {
+ mov SCB_RESID_DCNT[0],SCB_DATACNT[0];
+ mov SCB_RESID_DCNT[1],SCB_DATACNT[1];
+ mov SCB_RESID_DCNT[2],SCB_DATACNT[2];
+ }
+ mov SCB_RESID_SGCNT, SCB_SGCOUNT;
+
start_scb:
/*
* Place us on the waiting list in case our selection
* set in SXFRCTL0.
*/
initialize_channel:
- or A, CLRSTCNT|CLRCHN, SINDEX;
- or SXFRCTL0, A;
+ or SXFRCTL0, CLRSTCNT|CLRCHN, SINDEX;
if ((p->features & AHC_ULTRA) != 0) {
ultra:
mvi SINDEX, ULTRA_ENB+1;
mvi INTSTAT, BAD_PHASE;
clear_target_state:
- clr DFCNTRL; /*
- * We assume that the kernel driver
- * may reset us at any time, even
- * in the middle of a DMA, so clear
- * DFCNTRL too.
- */
- clr SCSIRATE; /*
- * We don't know the target we will
- * connect to, so default to narrow
- * transfers to avoid parity problems.
- */
- and SXFRCTL0, ~(FAST20);
+ /*
+ * We assume that the kernel driver may reset us
+ * at any time, even in the middle of a DMA, so
+ * clear DFCNTRL too.
+ */
+ clr DFCNTRL;
+
+ /*
+ * We don't know the target we will connect to,
+ * so default to narrow transfers to avoid
+ * parity problems.
+ */
+ if ((p->features & AHC_ULTRA2) != 0) {
+ bmov SCSIRATE, ALLZEROS, 2;
+ } else {
+ clr SCSIRATE;
+ and SXFRCTL0, ~(FAST20);
+ }
mvi LASTPHASE, P_BUSFREE;
/* clear target specific flags */
- and SEQ_FLAGS, (WIDE_BUS|TWIN_BUS) ret;
+ clr SEQ_FLAGS ret;
/*
* If we re-enter the data phase after going through another phase, the
* STCNT may have been cleared, so restore it from the residual field.
*/
data_phase_reinit:
- mvi DINDEX, STCNT;
- mvi SCB_RESID_DCNT call bcopy_3;
+ if ((p->features & AHC_CMD_CHAN) != 0) {
+ if ((p->features & AHC_ULTRA2) != 0) {
+ bmov HCNT, SCB_RESID_DCNT, 3;
+ }
+ bmov STCNT, SCB_RESID_DCNT, 3;
+ } else {
+ mvi DINDEX, STCNT;
+ mvi SCB_RESID_DCNT call bcopy_3;
+ }
jmp data_phase_loop;
p_data:
*/
if ((p->features & AHC_CMD_CHAN) != 0) {
bmov HADDR, SCB_DATAPTR, 7;
+ bmov STCNT, HCNT, 3;
+ bmov SG_COUNT, SCB_SGCOUNT, 5;
} else {
mvi DINDEX, HADDR;
mvi SCB_DATAPTR call bcopy_7;
- }
-
- if ((p->features & AHC_ULTRA2) == 0) {
call set_stcnt_from_hcnt;
+ mvi DINDEX, SG_COUNT;
+ mvi SCB_SGCOUNT call bcopy_5;
}
- mov SG_COUNT,SCB_SGCOUNT;
-
- mvi DINDEX, SG_NEXT;
- mvi SCB_SGPTR call bcopy_4;
-
data_phase_loop:
/* Guard against overruns */
test SG_COUNT, 0xff jnz data_phase_inbounds;
*/
or SXFRCTL1,BITBUCKET;
and DMAPARAMS, ~(HDMAEN|SDMAEN);
- if ((p->features & AHC_ULTRA2) != 0) {
- bmov HCNT, ALLONES, 3;
+ if ((p->features & AHC_CMD_CHAN) != 0) {
+ if ((p->features & AHC_ULTRA2) != 0) {
+ bmov HCNT, ALLONES, 3;
+ }
+ bmov STCNT, ALLONES, 3;
} else {
mvi STCNT[0], 0xFF;
mvi STCNT[1], 0xFF;
}
data_phase_inbounds:
/* If we are the last SG block, tell the hardware. */
+if ((p->features & AHC_ULTRA2) == 0) {
cmp SG_COUNT,0x01 jne data_phase_wideodd;
- if ((p->features & AHC_ULTRA2) != 0) {
- or SG_CACHEPTR, LAST_SEG;
- } else {
- and DMAPARAMS, ~WIDEODD;
- }
+ and DMAPARAMS, ~WIDEODD;
+}
data_phase_wideodd:
if ((p->features & AHC_ULTRA2) != 0) {
mov SINDEX, ALLONES;
mov DFCNTRL, DMAPARAMS;
- test SSTAT0, SDONE jnz .;/* Wait for preload to complete */
+ test SSTAT0, SDONE jnz .;
data_phase_dma_loop:
- test SSTAT0, SDONE jnz data_phase_dma_done;
+ test SSTAT0, SDONE jnz data_phase_dma_done;
test SSTAT1,PHASEMIS jz data_phase_dma_loop; /* ie. underrun */
data_phase_dma_phasemis:
test SSTAT0,SDONE jnz . + 2;
- mov SINDEX,ALLZEROS; /* Remeber the phasemiss */
+ clr SINDEX; /* Remember the phasemiss */
} else {
mov DMAPARAMS call dma;
}
mvi CCSGCTL, CCSGRESET;
prefetched_segs_avail:
bmov HADDR, CCSGRAM, 8;
+ if ((p->features & AHC_ULTRA2) == 0) {
+ bmov STCNT, HCNT, 3;
+ }
} else {
mvi DINDEX, HADDR;
mvi SG_NEXT call bcopy_4;
* };
*/
mvi HADDR call dfdat_in_7;
- }
-
- if ((p->features & AHC_ULTRA2) == 0) {
- /* Load STCNT as well. It is a mirror of HCNT */
call set_stcnt_from_hcnt;
}
add SG_NEXT[0],SG_SIZEOF;
adc SG_NEXT[1],A;
+ test SSTAT1, REQINIT jz .;
test SSTAT1,PHASEMIS jz data_phase_loop;
- /* Ensure the last seg is visable at the shaddow layer */
+
if ((p->features & AHC_ULTRA2) != 0) {
- or DFCNTRL, PRELOADEN;
+ mov DFCNTRL, DMAPARAMS;
+ test SSTAT0, SDONE jnz .;
}
data_phase_finish:
- if ((p->features & AHC_ULTRA2) != 0) {
- call ultra2_dmafinish;
- }
/*
* After a DMA finishes, save the SG and STCNT residuals back into the SCB
* We use STCNT instead of HCNT, since it's a reflection of how many bytes
* were transferred on the SCSI (as opposed to the host) bus.
*/
- mov SCB_RESID_DCNT[0],STCNT[0];
- mov SCB_RESID_DCNT[1],STCNT[1];
- mov SCB_RESID_DCNT[2],STCNT[2];
- mov SCB_RESID_SGCNT, SG_COUNT;
-
if ((p->features & AHC_ULTRA2) != 0) {
- or SXFRCTL0, CLRSTCNT|CLRCHN;
+ call ultra2_dmafinish;
+ }
+ if ((p->features & AHC_ULTRA2) == 0) {
+ if ((p->features & AHC_CMD_CHAN) != 0) {
+ bmov SCB_RESID_DCNT, STCNT, 3;
+ mov SCB_RESID_SGCNT, SG_COUNT;
+ } else {
+ mov SCB_RESID_DCNT[0],STCNT[0];
+ mov SCB_RESID_DCNT[1],STCNT[1];
+ mov SCB_RESID_DCNT[2],STCNT[2];
+ mov SCB_RESID_SGCNT, SG_COUNT;
+ }
}
jmp ITloop;
data_phase_overrun:
if ((p->features & AHC_ULTRA2) != 0) {
call ultra2_dmafinish;
- or SXFRCTL0, CLRSTCNT|CLRCHN;
}
/*
* Turn off BITBUCKET mode and notify the host
ultra2_dmahalt:
and DFCNTRL, ~(SCSIEN|HDMAEN);
test DFCNTRL, HDMAEN jnz .;
+ bmov SCB_RESID_DCNT, STCNT, 3;
+ mov SCB_RESID_SGCNT, SG_COUNT;
+ or SXFRCTL0, CLRSTCNT|CLRCHN;
ret;
}
/*
* Load HADDR and HCNT.
*/
- if ((p->features & AHC_ULTRA2) != 0) {
- or SG_CACHEPTR, LAST_SEG;
- }
-
if ((p->features & AHC_CMD_CHAN) != 0) {
bmov HADDR, SCB_CMDPTR, 5;
bmov HCNT[1], ALLZEROS, 2;
+ if ((p->features & AHC_ULTRA2) == 0) {
+ bmov STCNT, HCNT, 3;
+ }
} else {
mvi DINDEX, HADDR;
mvi SCB_CMDPTR call bcopy_5;
clr HCNT[1];
clr HCNT[2];
+ call set_stcnt_from_hcnt;
}
if ((p->features & AHC_ULTRA2) == 0) {
- call set_stcnt_from_hcnt;
mvi (SCSIEN|SDMAEN|HDMAEN|DIRECTION|FIFORESET) call dma;
} else {
- mvi (PRELOADEN|SCSIEN|HDMAEN|DIRECTION) call dma;
+ mvi DFCNTRL, (PRELOADEN|SCSIEN|HDMAEN|DIRECTION);
+ test SSTAT0, SDONE jnz .;
+p_command_dma_loop:
+ test SSTAT0, DMADONE jnz p_command_ultra2_dma_done;
+ test SSTAT1,PHASEMIS jz p_command_dma_loop; /* ie. underrun */
+p_command_ultra2_dma_done:
+ and DFCNTRL, ~(SCSIEN|HDMAEN);
+ test DFCNTRL, HDMAEN jnz .;
+ or SXFRCTL0, CLRSTCNT|CLRCHN;
}
jmp ITloop;
* in case the target decides to put us in this phase for some strange
* reason.
*/
+p_mesgout_retry:
+ or SCSISIGO,ATNO,LASTPHASE;/* turn on ATN for the retry */
p_mesgout:
mov SINDEX, MSG_OUT;
cmp SINDEX, MSG_IDENTIFYFLAG jne p_mesgout_from_host;
* that the target is requesting that the last message(s) be resent.
*/
call phase_lock;
- cmp LASTPHASE, P_MESGOUT jne p_mesgout_done;
- or SCSISIGO,ATNO,LASTPHASE;/* turn on ATN for the retry */
- jmp p_mesgout;
+ cmp LASTPHASE, P_MESGOUT je p_mesgout_retry;
p_mesgout_done:
mvi CLRSINT1,CLRATNO; /* Be sure to turn ATNO off */
*/
mesgin_sdptrs:
test SEQ_FLAGS, DPHASE jz mesgin_done;
- mov SCB_SGCOUNT,SG_COUNT;
-
- /* The SCB SGPTR becomes the next one we'll download */
- mvi DINDEX, SCB_SGPTR;
- mvi SG_NEXT call bcopy_4;
-
- /* The SCB DATAPTR0 becomes the current SHADDR */
- mvi DINDEX, SCB_DATAPTR;
- mvi SHADDR call bcopy_4;
-
-/*
- * Use the residual number since STCNT is corrupted by any message transfer.
- */
- mvi SCB_RESID_DCNT call bcopy_3;
-
+ /*
+ * The SCB SGPTR becomes the next one we'll download,
+ * and the SCB DATAPTR becomes the current SHADDR.
+ * Use the residual number since STCNT is corrupted by
+ * any message transfer.
+ */
+ if ((p->features & AHC_CMD_CHAN) != 0) {
+ bmov SCB_SGCOUNT, SG_COUNT, 5;
+ bmov SCB_DATAPTR, SHADDR, 4;
+ bmov SCB_DATACNT, SCB_RESID_DCNT, 3;
+ } else {
+ mvi DINDEX, SCB_SGCOUNT;
+ mvi SG_COUNT call bcopy_5;
+ mvi DINDEX, SCB_DATAPTR;
+ mvi SHADDR call bcopy_4;
+ mvi SCB_RESID_DCNT call bcopy_3;
+ }
jmp mesgin_done;
/*
}
or SAVED_TCL,A; /* SAVED_TCL should be complete now */
+ mvi ARG_2, SCB_LIST_NULL; /* SCBID of prev SCB in disc List */
call get_untagged_SCBID;
cmp ARG_1, SCB_LIST_NULL je snoop_tag;
if ((p->flags & AHC_PAGESCBS) != 0) {
get_tag:
mvi ARG_1 call inb_next; /* tag value */
- if ((p->flags & AHC_PAGESCBS) == 0) {
-index_by_tag:
- mov SCBPTR,ARG_1;
- test SCB_CONTROL,TAG_ENB jz not_found;
- mov SCBPTR call rem_scb_from_disc_list;
- } else {
- /*
- * Ensure that the SCB the tag points to is for
- * an SCB transaction to the reconnecting target.
- */
use_retrieveSCB:
- call retrieveSCB;
- }
+ call retrieveSCB;
setup_SCB:
mov A, SAVED_TCL;
cmp SCB_TCL, A jne not_found_cleanup_scb;
* host->scsi, or 0x39 for scsi->host. The SCSI channel is cleared
* during initialization.
*/
+if ((p->features & AHC_ULTRA2) == 0) {
dma:
mov DFCNTRL,SINDEX;
dma_loop:
* to drain the data fifo until there is space for the input
* latch to drain and HDMAEN de-asserts.
*/
- if ((p->features & AHC_ULTRA2) == 0) {
- mov NONE, DFDAT;
- }
- test DFCNTRL, HDMAEN jnz dma_halt;
+ mov NONE, DFDAT;
+ test DFCNTRL, (SCSIEN|SDMAEN|HDMAEN) jnz dma_halt;
+}
return:
ret;
mov A, ARG_1; /* Tag passed in ARG_1 */
mvi SCB_TAG jmp findSCB_loop; /* &SCB_TAG -> SINDEX */
findSCB_next:
+ mov ARG_2, SCBPTR;
cmp SCB_NEXT, SCB_LIST_NULL je notFound;
mov SCBPTR,SCB_NEXT;
dec SINDEX; /* Last comparison moved us too far */
/*
* This routine expects SINDEX to contain the index of the SCB to be
- * removed and SCBPTR to be pointing to that SCB.
+ * removed, SCBPTR to be pointing to that SCB, and ARG_2 to be the
+ * SCBID of the SCB just previous to this one in the list or SCB_LIST_NULL
+ * if it is at the head.
*/
rem_scb_from_disc_list:
/* Remove this SCB from the disconnection list */
- cmp SCB_NEXT,SCB_LIST_NULL je unlink_prev;
- mov DINDEX, SCB_PREV;
- mov SCBPTR, SCB_NEXT;
- mov SCB_PREV, DINDEX;
- mov SCBPTR, SINDEX;
-unlink_prev:
- cmp SCB_PREV,SCB_LIST_NULL je rHead;/* At the head of the list */
+ cmp ARG_2, SCB_LIST_NULL je rHead;
mov DINDEX, SCB_NEXT;
- mov SCBPTR, SCB_PREV;
+ mov SCBPTR, ARG_2;
mov SCB_NEXT, DINDEX;
mov SCBPTR, SINDEX ret;
rHead:
phase_lock:
test SSTAT1, REQINIT jz phase_lock;
test SSTAT1, SCSIPERR jnz phase_lock;
- and LASTPHASE, PHASE_MASK, SCSISIGI;
- mov SCSISIGO, LASTPHASE ret;
+ and SCSISIGO, PHASE_MASK, SCSISIGI;
+ and LASTPHASE, PHASE_MASK, SCSISIGI ret;
+if ((p->features & AHC_CMD_CHAN) == 0) {
set_stcnt_from_hcnt:
mov STCNT[0], HCNT[0];
mov STCNT[1], HCNT[1];
mov DINDIR, SINDIR;
mov DINDIR, SINDIR;
mov DINDIR, SINDIR ret;
+}
/*
* Setup addr assuming that A is an index into
* Wait for DMA from host memory to data FIFO to complete, then disable
* DMA and wait for it to acknowledge that it's off.
*/
+if ((p->features & AHC_CMD_CHAN) == 0) {
dma_finish:
test DFSTATUS,HDONE jz dma_finish;
/* Turn off DMA */
and DFCNTRL, ~HDMAEN;
test DFCNTRL, HDMAEN jnz .;
ret;
+}
add_scb_to_free_list:
if ((p->flags & AHC_PAGESCBS) != 0) {
mvi DMAPARAMS, FIFORESET;
mov SCB_TAG call dma_scb;
unlink_disc_scb:
- /* jmp instead of call since we want to return anyway */
- mov SCBPTR jmp rem_scb_from_disc_list;
+ mov DISCONNECTED_SCBH, SCB_NEXT ret;
dequeue_free_scb:
mov SCBPTR, FREE_SCBH;
mov FREE_SCBH, SCB_NEXT ret;
* candidates for paging out an SCB if one is needed for a new command.
* Modifying the disconnected list is a critical(pause dissabled) section.
*/
- mvi SCB_PREV, SCB_LIST_NULL;
mov SCB_NEXT, DISCONNECTED_SCBH;
- mov DISCONNECTED_SCBH, SCBPTR;
- cmp SCB_NEXT,SCB_LIST_NULL je return;
- mov SCBPTR,SCB_NEXT;
- mov SCB_PREV,DISCONNECTED_SCBH;
- mov SCBPTR,DISCONNECTED_SCBH ret;
+ mov DISCONNECTED_SCBH, SCBPTR ret;
#define MSG_EXT_WDTR_LEN 0x02
#define MSG_EXT_WDTR_BUS_8_BIT 0x00
#define MSG_EXT_WDTR_BUS_16_BIT 0x01
-#define MSG_EXT_WDTR_BUS_32_BIT 0x02
+#define MSG_EXT_WDTR_BUS_32_BIT 0x02
+
+#define MSG_EXT_PPR 0x04
+#define MSG_EXT_PPR_LEN 0x06
+#define MSG_EXT_PPR_OPTION_ST 0x00
+#define MSG_EXT_PPR_OPTION_DT_CRC 0x02
+#define MSG_EXT_PPR_OPTION_DT_UNITS 0x03
+#define MSG_EXT_PPR_OPTION_DT_CRC_QUICK 0x04
+#define MSG_EXT_PPR_OPTION_DT_UNITS_QUICK 0x05
size += sprintf(BLS, "%s", AIC7XXX_H_VERSION);
size += sprintf(BLS, "\n");
size += sprintf(BLS, "Compile Options:\n");
-#ifdef AIC7XXX_RESET_DELAY
- size += sprintf(BLS, " AIC7XXX_RESET_DELAY : %d\n", AIC7XXX_RESET_DELAY);
+#ifdef CONFIG_AIC7XXX_TCQ_ON_BY_DEFAULT
+ size += sprintf(BLS, " TCQ Enabled By Default : Enabled\n");
+#else
+ size += sprintf(BLS, " TCQ Enabled By Default : Disabled\n");
#endif
- size += sprintf(BLS, " AIC7XXX_TAGGED_QUEUEING: Adapter Support Enabled\n");
- size += sprintf(BLS, " Check below to see "
- "which\n"
- " devices use tagged "
- "queueing\n");
- size += sprintf(BLS, " AIC7XXX_PAGE_ENABLE : Enabled (This is no longer "
- "an option)\n");
#ifdef AIC7XXX_PROC_STATS
size += sprintf(BLS, " AIC7XXX_PROC_STATS : Enabled\n");
#else
size += sprintf(BLS, " AIC7XXX_PROC_STATS : Disabled\n");
#endif
+ size += sprintf(BLS, " AIC7XXX_RESET_DELAY : %d\n", AIC7XXX_RESET_DELAY);
size += sprintf(BLS, "\n");
size += sprintf(BLS, "Adapter Configuration:\n");
size += sprintf(BLS, " SCSI Adapter: %s\n",
}
if (p->features & AHC_WIDE)
wide = "Wide ";
- if (p->features & AHC_ULTRA2)
- ultra = "Ultra2-LVD/SE ";
+ if (p->features & AHC_ULTRA3)
+ {
+ switch(p->chip & AHC_CHIPID_MASK)
+ {
+ case AHC_AIC7892:
+ case AHC_AIC7899:
+ ultra = "Ultra-160/m LVD/SE ";
+ break;
+ default:
+ ultra = "Ultra-3 LVD/SE ";
+ break;
+ }
+ }
+ else if (p->features & AHC_ULTRA2)
+ ultra = "Ultra-2 LVD/SE ";
else if (p->features & AHC_ULTRA)
ultra = "Ultra ";
size += sprintf(BLS, " %s%sController%s\n",
}
size += sprintf(BLS, " Tag Queue Enable Flags: 0x%04x\n", p->tagenable);
size += sprintf(BLS, "Ordered Queue Tag Flags: 0x%04x\n", p->orderedtag);
-#ifdef AIC7XXX_CMDS_PER_LUN
- size += sprintf(BLS, "Default Tag Queue Depth: %d\n", AIC7XXX_CMDS_PER_LUN);
-#else
- size += sprintf(BLS, "Default Tag Queue Depth: %d\n", 8);
-#endif
+ size += sprintf(BLS, "Default Tag Queue Depth: %d\n", AIC7XXX_CMDS_PER_DEVICE);
size += sprintf(BLS, " Tagged Queue By Device array for aic7xxx host "
"instance %d:\n", p->instance);
size += sprintf(BLS, " {");
if (p->transinfo[target].cur_offset != 0)
{
struct aic7xxx_syncrate *sync_rate;
+ unsigned char options = p->transinfo[target].cur_options;
int period = p->transinfo[target].cur_period;
int rate = (p->transinfo[target].cur_width ==
MSG_EXT_WDTR_BUS_16_BIT) ? 1 : 0;
- sync_rate = aic7xxx_find_syncrate(p, &period, AHC_SYNCRATE_ULTRA2);
+ sync_rate = aic7xxx_find_syncrate(p, &period, 0, &options);
if (sync_rate != NULL)
{
size += sprintf(BLS, "%s MByte/sec, offset %d\n",
}
}
size += sprintf(BLS, " Transinfo settings: ");
- size += sprintf(BLS, "current(%d/%d/%d), ",
+ size += sprintf(BLS, "current(%d/%d/%d/%d), ",
p->transinfo[target].cur_period,
p->transinfo[target].cur_offset,
- p->transinfo[target].cur_width);
- size += sprintf(BLS, "goal(%d/%d/%d), ",
+ p->transinfo[target].cur_width,
+ p->transinfo[target].cur_options);
+ size += sprintf(BLS, "goal(%d/%d/%d/%d), ",
p->transinfo[target].goal_period,
p->transinfo[target].goal_offset,
- p->transinfo[target].goal_width);
- size += sprintf(BLS, "user(%d/%d/%d)\n",
+ p->transinfo[target].goal_width,
+ p->transinfo[target].goal_options);
+ size += sprintf(BLS, "user(%d/%d/%d/%d)\n",
p->transinfo[target].user_period,
p->transinfo[target].user_offset,
- p->transinfo[target].user_width);
+ p->transinfo[target].user_width,
+ p->transinfo[target].user_options);
#ifdef AIC7XXX_PROC_STATS
size += sprintf(BLS, " Total transfers %ld (%ld reads and %ld writes)\n",
sp->r_total + sp->w_total, sp->r_total, sp->w_total);
#define STCNT 0x08
+#define OPTIONMODE 0x08
+#define AUTORATEEN 0x80
+#define AUTOACKEN 0x40
+#define ATNMGMNTEN 0x20
+#define BUSFREEREV 0x10
+#define EXPPHASEDIS 0x08
+#define SCSIDATL_IMGEN 0x04
+#define AUTO_MSGOUT_DE 0x02
+#define DIS_MSGIN_DUALEDGE 0x01
+
#define CLRSINT0 0x0b
#define CLRSELDO 0x40
#define CLRSELDI 0x20
#define SSTAT2 0x0d
#define OVERRUN 0x80
+#define SHVALID 0x40
+#define WIDE_RES 0x20
#define SFCNT 0x1f
#define EXP_ACTIVE 0x10
+#define CRCVALERR 0x08
+#define CRCENDERR 0x04
+#define CRCREQERR 0x02
+#define DUAL_EDGE_ERROR 0x01
#define SSTAT3 0x0e
#define SCSICNT 0xf0
#define DPARERR 0x10
#define SQPARERR 0x08
#define ILLOPCODE 0x04
+#define DSCTMOUT 0x02
#define ILLSADDR 0x02
#define ILLHADDR 0x01
#define QINCNT 0x9c
+#define SCSIDATL_IMG 0x9c
+
#define QOUTFIFO 0x9d
+#define CRCCONTROL1 0x9d
+#define CRCONSEEN 0x80
+#define TARGCRCCNTEN 0x40
+#define CRCVALCHKEN 0x40
+#define CRCENDCHKEN 0x20
+#define CRCREQCHKEN 0x10
+#define TARGCRCENDEN 0x08
+
+#define SCSIPHASE 0x9e
+#define SP_STATUS 0x20
+#define SP_COMMAND 0x10
+#define SP_MSG_IN 0x08
+#define SP_MSG_OUT 0x04
+#define SP_DATA_IN 0x02
+#define SP_DATA_OUT 0x01
+
#define QOUTCNT 0x9e
#define SFUNCT 0x9f
+#define ALT_MODE 0x80
#define SCB_CONTROL 0xa0
#define MK_MESSAGE 0x80
#define HNSCB_QOFF 0xf4
+#define HESCB_QOFF 0xf5
+
#define SNSCB_QOFF 0xf6
+#define SESCB_QOFF 0xf7
+
#define SDSCB_QOFF 0xf8
#define QOFF_CTLSTA 0xfa
+#define ESTABLISH_SCB_AVAIL 0x80
#define SCB_AVAIL 0x40
#define SNSCB_ROLLOVER 0x20
#define SDSCB_ROLLOVER 0x10
+#define SESCB_ROLLOVER 0x08
#define SCB_QSIZE 0x07
#define SCB_QSIZE_256 0x06
*/
static unsigned char seqprog[] = {
0xff, 0x6a, 0x06, 0x08,
+ 0x7f, 0x02, 0x04, 0x08,
0x32, 0x6a, 0x00, 0x00,
0x12, 0x6a, 0x00, 0x00,
0xff, 0x6a, 0xd6, 0x09,
0xff, 0x6a, 0xdc, 0x09,
- 0x00, 0x65, 0x38, 0x59,
+ 0x00, 0x65, 0x42, 0x59,
0xf7, 0x01, 0x02, 0x08,
0xff, 0x4e, 0xc8, 0x08,
0xbf, 0x60, 0xc0, 0x08,
- 0x60, 0x0b, 0x7c, 0x68,
+ 0x60, 0x0b, 0x86, 0x68,
0x40, 0x00, 0x0e, 0x68,
0x08, 0x1f, 0x3e, 0x10,
- 0x60, 0x0b, 0x7c, 0x68,
+ 0x60, 0x0b, 0x86, 0x68,
0x40, 0x00, 0x0e, 0x68,
0x08, 0x1f, 0x3e, 0x10,
- 0xff, 0x3e, 0x3e, 0x60,
- 0x40, 0xfa, 0x10, 0x78,
+ 0xff, 0x3e, 0x4a, 0x60,
+ 0x40, 0xfa, 0x12, 0x78,
0xff, 0xf6, 0xd4, 0x08,
0x01, 0x4e, 0x9c, 0x18,
0x40, 0x60, 0xc0, 0x00,
- 0x00, 0x4d, 0x10, 0x70,
+ 0x00, 0x4d, 0x12, 0x70,
0x01, 0x4e, 0x9c, 0x18,
0xbf, 0x60, 0xc0, 0x08,
- 0x00, 0x6a, 0x72, 0x5c,
+ 0x00, 0x6a, 0x92, 0x5c,
0xff, 0x4e, 0xc8, 0x18,
- 0x02, 0x6a, 0x88, 0x5b,
+ 0x02, 0x6a, 0xa8, 0x5b,
0xff, 0x52, 0x20, 0x09,
0x0d, 0x6a, 0x6a, 0x00,
- 0x00, 0x52, 0xfe, 0x5b,
+ 0x00, 0x52, 0x1e, 0x5c,
+ 0x03, 0xb0, 0x52, 0x31,
+ 0xff, 0xb0, 0x52, 0x09,
+ 0xff, 0xb1, 0x54, 0x09,
+ 0xff, 0xb2, 0x56, 0x09,
+ 0xff, 0xa3, 0x50, 0x09,
0xff, 0x3e, 0x74, 0x09,
0xff, 0x90, 0x7c, 0x08,
0xff, 0x3e, 0x20, 0x09,
- 0x00, 0x65, 0x44, 0x58,
+ 0x00, 0x65, 0x50, 0x58,
0x00, 0x65, 0x0e, 0x40,
0xf7, 0x1f, 0xca, 0x08,
0x08, 0xa1, 0xc8, 0x08,
0x0f, 0x05, 0x0a, 0x08,
0x00, 0x05, 0x0a, 0x00,
0x5a, 0x6a, 0x00, 0x04,
- 0x12, 0x65, 0xc8, 0x00,
- 0x00, 0x01, 0x02, 0x00,
+ 0x12, 0x65, 0x02, 0x00,
0x31, 0x6a, 0xca, 0x00,
- 0x80, 0x37, 0x64, 0x68,
+ 0x80, 0x37, 0x6e, 0x68,
0xff, 0x65, 0xca, 0x18,
0xff, 0x37, 0xdc, 0x08,
0xff, 0x6e, 0xc8, 0x08,
- 0x00, 0x6c, 0x6c, 0x78,
+ 0x00, 0x6c, 0x76, 0x78,
0x20, 0x01, 0x02, 0x00,
0x4c, 0x37, 0xc8, 0x28,
- 0x08, 0x1f, 0x74, 0x78,
+ 0x08, 0x1f, 0x7e, 0x78,
0x08, 0x37, 0x6e, 0x00,
0x08, 0x64, 0xc8, 0x00,
0x70, 0x64, 0xca, 0x18,
0xff, 0x6c, 0x0a, 0x08,
0x20, 0x64, 0xca, 0x18,
0xff, 0x6c, 0x08, 0x0c,
- 0x40, 0x0b, 0x04, 0x69,
- 0x80, 0x0b, 0xf6, 0x78,
+ 0x40, 0x0b, 0x0e, 0x69,
+ 0x80, 0x0b, 0x00, 0x79,
0xa4, 0x6a, 0x06, 0x00,
0x40, 0x6a, 0x16, 0x00,
- 0x10, 0x03, 0xf2, 0x78,
+ 0x10, 0x03, 0xfc, 0x78,
0xff, 0x50, 0xc8, 0x08,
0x88, 0x6a, 0xcc, 0x00,
- 0x49, 0x6a, 0xee, 0x5b,
+ 0x49, 0x6a, 0x0e, 0x5c,
0x01, 0x6a, 0x26, 0x01,
0xff, 0x6a, 0xca, 0x08,
0x08, 0x01, 0x02, 0x00,
- 0x02, 0x0b, 0x92, 0x78,
+ 0x02, 0x0b, 0x9c, 0x78,
0xf7, 0x01, 0x02, 0x08,
0xff, 0x06, 0xcc, 0x08,
0xff, 0x66, 0x32, 0x09,
0x01, 0x65, 0xca, 0x18,
- 0x80, 0x66, 0xa0, 0x78,
+ 0x80, 0x66, 0xaa, 0x78,
0xff, 0x66, 0xa2, 0x08,
- 0x10, 0x03, 0x90, 0x68,
+ 0x10, 0x03, 0x9a, 0x68,
0xfc, 0x65, 0xc8, 0x18,
- 0x00, 0x65, 0xa8, 0x48,
+ 0x00, 0x65, 0xb2, 0x48,
0xff, 0x6a, 0x32, 0x01,
0x01, 0x64, 0x18, 0x19,
0xff, 0x6a, 0x1a, 0x09,
0xff, 0x6a, 0x1c, 0x09,
0x84, 0x6a, 0x06, 0x00,
0x08, 0x01, 0x02, 0x00,
- 0x02, 0x0b, 0xb2, 0x78,
+ 0x02, 0x0b, 0xbc, 0x78,
0xff, 0x06, 0xc8, 0x08,
0xff, 0x64, 0x32, 0x09,
0xff, 0x6a, 0xca, 0x08,
0x0b, 0x65, 0xca, 0x18,
0xff, 0x65, 0xc8, 0x08,
0x00, 0x8c, 0x18, 0x19,
- 0x02, 0x0b, 0xce, 0x78,
- 0x01, 0x65, 0xd4, 0x60,
+ 0x02, 0x0b, 0xd8, 0x78,
+ 0x01, 0x65, 0xde, 0x60,
0xf7, 0x01, 0x02, 0x08,
0xff, 0x06, 0x32, 0x09,
0xff, 0x65, 0xca, 0x18,
- 0xff, 0x65, 0xce, 0x68,
+ 0xff, 0x65, 0xd8, 0x68,
0x0a, 0x93, 0x26, 0x01,
- 0x00, 0x65, 0x64, 0x5c,
- 0x40, 0x51, 0xe6, 0x78,
+ 0x00, 0x65, 0x84, 0x5c,
+ 0x40, 0x51, 0xf0, 0x78,
0xe4, 0x6a, 0x06, 0x00,
0x08, 0x01, 0x02, 0x00,
- 0x04, 0x6a, 0x18, 0x5b,
+ 0x04, 0x6a, 0x40, 0x5b,
0x01, 0x50, 0xa0, 0x18,
- 0x00, 0x50, 0xec, 0xe0,
+ 0x00, 0x50, 0xf6, 0xe0,
0xff, 0x6a, 0xa0, 0x08,
0xff, 0x6a, 0x3a, 0x01,
0x02, 0x6a, 0x22, 0x01,
- 0x40, 0x51, 0xf2, 0x68,
+ 0x40, 0x51, 0xfc, 0x68,
0xff, 0x6a, 0x06, 0x08,
0x00, 0x65, 0x0e, 0x40,
0x20, 0x6a, 0x16, 0x00,
0xf0, 0x19, 0x6e, 0x08,
0x08, 0x6a, 0x18, 0x00,
0x08, 0x11, 0x22, 0x00,
- 0x08, 0x6a, 0x5a, 0x58,
+ 0x08, 0x6a, 0x66, 0x58,
0x08, 0x6a, 0x68, 0x00,
- 0x00, 0x65, 0x18, 0x41,
+ 0x00, 0x65, 0x22, 0x41,
0x12, 0x6a, 0x00, 0x00,
0x40, 0x6a, 0x16, 0x00,
0xff, 0x3e, 0x20, 0x09,
0xff, 0xa1, 0x6e, 0x08,
0x08, 0x6a, 0x18, 0x00,
0x08, 0x11, 0x22, 0x00,
- 0x08, 0x6a, 0x5a, 0x58,
+ 0x08, 0x6a, 0x66, 0x58,
0x80, 0x6a, 0x68, 0x00,
0x80, 0x36, 0x6c, 0x00,
- 0x00, 0x65, 0xd2, 0x5b,
+ 0x00, 0x65, 0xf2, 0x5b,
0xff, 0x3d, 0xc8, 0x08,
- 0xbf, 0x64, 0x48, 0x79,
- 0x80, 0x64, 0xf0, 0x71,
- 0xa0, 0x64, 0x0e, 0x72,
- 0xc0, 0x64, 0x08, 0x72,
- 0xe0, 0x64, 0x52, 0x72,
+ 0xbf, 0x64, 0x58, 0x79,
+ 0x80, 0x64, 0x0e, 0x72,
+ 0xa0, 0x64, 0x3a, 0x72,
+ 0xc0, 0x64, 0x32, 0x72,
+ 0xe0, 0x64, 0x7a, 0x72,
0x01, 0x6a, 0x22, 0x01,
- 0x00, 0x65, 0x18, 0x41,
+ 0x00, 0x65, 0x22, 0x41,
0xf7, 0x11, 0x22, 0x08,
- 0x00, 0x65, 0x38, 0x59,
+ 0x00, 0x65, 0x42, 0x59,
0xff, 0x06, 0xd4, 0x08,
0xf7, 0x01, 0x02, 0x08,
- 0x09, 0x0c, 0x32, 0x79,
+ 0x09, 0x0c, 0x3c, 0x79,
0x08, 0x0c, 0x0e, 0x68,
0x01, 0x6a, 0x22, 0x01,
0xff, 0x6a, 0x26, 0x09,
+ 0x02, 0x6a, 0x08, 0x30,
0xff, 0x6a, 0x08, 0x08,
0xdf, 0x01, 0x02, 0x08,
0x01, 0x6a, 0x7a, 0x00,
- 0x03, 0x36, 0x6c, 0x0c,
+ 0xff, 0x6a, 0x6c, 0x0c,
+ 0x03, 0xa9, 0x18, 0x31,
+ 0x03, 0xa9, 0x10, 0x30,
0x08, 0x6a, 0xcc, 0x00,
- 0xa9, 0x6a, 0xe8, 0x5b,
- 0x00, 0x65, 0x66, 0x41,
+ 0xa9, 0x6a, 0x08, 0x5c,
+ 0x00, 0x65, 0x78, 0x41,
0xa8, 0x6a, 0x6a, 0x00,
0x79, 0x6a, 0x6a, 0x00,
- 0x40, 0x3d, 0x50, 0x69,
+ 0x40, 0x3d, 0x60, 0x69,
0x04, 0x35, 0x6a, 0x00,
- 0x00, 0x65, 0x3a, 0x5b,
+ 0x00, 0x65, 0x62, 0x5b,
0x80, 0x6a, 0xd4, 0x01,
- 0x10, 0x36, 0x42, 0x69,
+ 0x10, 0x36, 0x4e, 0x69,
0x10, 0x36, 0x6c, 0x00,
0x07, 0xac, 0x10, 0x31,
+ 0x03, 0x8c, 0x10, 0x30,
+ 0x05, 0xa3, 0x70, 0x30,
0x88, 0x6a, 0xcc, 0x00,
- 0xac, 0x6a, 0xe0, 0x5b,
- 0x00, 0x65, 0xda, 0x5b,
- 0xff, 0xa3, 0x70, 0x08,
- 0x39, 0x6a, 0xcc, 0x00,
- 0xa4, 0x6a, 0xe6, 0x5b,
- 0xff, 0x38, 0x74, 0x69,
+ 0xac, 0x6a, 0x00, 0x5c,
+ 0x00, 0x65, 0xfa, 0x5b,
+ 0x38, 0x6a, 0xcc, 0x00,
+ 0xa3, 0x6a, 0x04, 0x5c,
+ 0xff, 0x38, 0x88, 0x69,
0x80, 0x02, 0x04, 0x00,
0xe7, 0x35, 0x6a, 0x08,
0x03, 0x69, 0x18, 0x31,
+ 0x03, 0x69, 0x10, 0x30,
0xff, 0x6a, 0x10, 0x00,
0xff, 0x6a, 0x12, 0x00,
0xff, 0x6a, 0x14, 0x00,
- 0x01, 0x38, 0x7a, 0x61,
- 0x02, 0xfc, 0xf8, 0x01,
+ 0x01, 0x38, 0x8c, 0x61,
0xbf, 0x35, 0x6a, 0x08,
0xff, 0x69, 0xca, 0x08,
0xff, 0x35, 0x26, 0x09,
- 0x04, 0x0b, 0x7e, 0x69,
- 0x04, 0x0b, 0x8a, 0x69,
- 0x10, 0x0c, 0x80, 0x79,
- 0x04, 0x0b, 0x88, 0x69,
+ 0x04, 0x0b, 0x90, 0x69,
+ 0x04, 0x0b, 0x9c, 0x69,
+ 0x10, 0x0c, 0x92, 0x79,
+ 0x04, 0x0b, 0x9a, 0x69,
0xff, 0x6a, 0xca, 0x08,
- 0x00, 0x35, 0x22, 0x5b,
- 0x80, 0x02, 0xd6, 0x69,
- 0xff, 0x65, 0xc8, 0x79,
+ 0x00, 0x35, 0x4a, 0x5b,
+ 0x80, 0x02, 0xf0, 0x69,
+ 0xff, 0x65, 0xe0, 0x79,
0xff, 0x38, 0x70, 0x18,
- 0xff, 0x38, 0xc8, 0x79,
- 0x80, 0xea, 0xaa, 0x61,
+ 0xff, 0x38, 0xe0, 0x79,
+ 0x80, 0xea, 0xbc, 0x61,
0xef, 0x38, 0xc8, 0x18,
0x80, 0x6a, 0xc8, 0x00,
- 0x00, 0x65, 0x9c, 0x49,
+ 0x00, 0x65, 0xae, 0x49,
0x33, 0x38, 0xc8, 0x28,
0xff, 0x64, 0xd0, 0x09,
0x04, 0x39, 0xc0, 0x31,
0x09, 0x6a, 0xd6, 0x01,
- 0x80, 0xeb, 0xa2, 0x79,
+ 0x80, 0xeb, 0xb4, 0x79,
0xf7, 0xeb, 0xd6, 0x09,
- 0x08, 0xeb, 0xa6, 0x69,
+ 0x08, 0xeb, 0xb8, 0x69,
0x01, 0x6a, 0xd6, 0x01,
0x08, 0xe9, 0x10, 0x31,
+ 0x03, 0x8c, 0x10, 0x30,
0x88, 0x6a, 0xcc, 0x00,
- 0x39, 0x6a, 0xe6, 0x5b,
+ 0x39, 0x6a, 0x06, 0x5c,
0x08, 0x6a, 0x18, 0x01,
0xff, 0x6a, 0x1a, 0x09,
0xff, 0x6a, 0x1c, 0x09,
0x0d, 0x93, 0x26, 0x01,
- 0x00, 0x65, 0x64, 0x5c,
- 0x88, 0x6a, 0x54, 0x5c,
- 0x00, 0x65, 0xda, 0x5b,
+ 0x00, 0x65, 0x84, 0x5c,
+ 0x88, 0x6a, 0x74, 0x5c,
+ 0x00, 0x65, 0xfa, 0x5b,
0xff, 0x6a, 0xc8, 0x08,
0x08, 0x39, 0x72, 0x18,
0x00, 0x3a, 0x74, 0x20,
- 0x10, 0x0c, 0x66, 0x79,
- 0x80, 0x93, 0x26, 0x01,
- 0x00, 0x65, 0xe0, 0x59,
+ 0x01, 0x0c, 0xd8, 0x79,
+ 0x10, 0x0c, 0x78, 0x79,
+ 0xff, 0x35, 0x26, 0x09,
+ 0x04, 0x0b, 0xde, 0x69,
+ 0x00, 0x65, 0xf8, 0x59,
+ 0x03, 0x08, 0x52, 0x31,
+ 0xff, 0x38, 0x50, 0x09,
0xff, 0x08, 0x52, 0x09,
0xff, 0x09, 0x54, 0x09,
0xff, 0x0a, 0x56, 0x09,
0xff, 0x38, 0x50, 0x09,
- 0x12, 0x01, 0x02, 0x00,
- 0x00, 0x65, 0x18, 0x41,
- 0x00, 0x65, 0xe0, 0x59,
- 0x12, 0x01, 0x02, 0x00,
+ 0x00, 0x65, 0x22, 0x41,
+ 0x00, 0x65, 0xf8, 0x59,
0x7f, 0x02, 0x04, 0x08,
0xe1, 0x6a, 0x22, 0x01,
- 0x00, 0x65, 0x18, 0x41,
- 0x04, 0x93, 0xea, 0x69,
+ 0x00, 0x65, 0x22, 0x41,
+ 0x04, 0x93, 0x02, 0x6a,
0xdf, 0x93, 0x26, 0x09,
- 0x20, 0x93, 0xe4, 0x69,
+ 0x20, 0x93, 0xfc, 0x69,
0x02, 0x93, 0x26, 0x01,
- 0x01, 0x94, 0xe6, 0x79,
+ 0x01, 0x94, 0xfe, 0x79,
0xd7, 0x93, 0x26, 0x09,
- 0x08, 0x93, 0xec, 0x69,
+ 0x08, 0x93, 0x04, 0x6a,
+ 0x03, 0x08, 0x52, 0x31,
+ 0xff, 0x38, 0x50, 0x09,
+ 0x12, 0x01, 0x02, 0x00,
0xff, 0x6a, 0xd4, 0x0c,
- 0x00, 0x65, 0x3a, 0x5b,
- 0x02, 0xfc, 0xf8, 0x01,
+ 0x00, 0x65, 0x62, 0x5b,
0x05, 0xb4, 0x10, 0x31,
0x02, 0x6a, 0x1a, 0x31,
+ 0x03, 0x8c, 0x10, 0x30,
0x88, 0x6a, 0xcc, 0x00,
- 0xb4, 0x6a, 0xe4, 0x5b,
+ 0xb4, 0x6a, 0x04, 0x5c,
0xff, 0x6a, 0x1a, 0x09,
0xff, 0x6a, 0x1c, 0x09,
- 0x00, 0x65, 0xda, 0x5b,
- 0x3d, 0x6a, 0x22, 0x5b,
- 0xac, 0x6a, 0x22, 0x5b,
- 0x00, 0x65, 0x18, 0x41,
- 0x00, 0x65, 0x3a, 0x5b,
+ 0x00, 0x65, 0xfa, 0x5b,
+ 0x3d, 0x6a, 0x4a, 0x5b,
+ 0xac, 0x6a, 0x26, 0x01,
+ 0x04, 0x0b, 0x24, 0x6a,
+ 0x01, 0x0b, 0x2a, 0x6a,
+ 0x10, 0x0c, 0x26, 0x7a,
+ 0xd7, 0x93, 0x26, 0x09,
+ 0x08, 0x93, 0x2c, 0x6a,
+ 0x12, 0x01, 0x02, 0x00,
+ 0x00, 0x65, 0x22, 0x41,
+ 0x00, 0x65, 0x62, 0x5b,
0xff, 0x06, 0x44, 0x09,
- 0x00, 0x65, 0x18, 0x41,
+ 0x00, 0x65, 0x22, 0x41,
+ 0x10, 0x3d, 0x06, 0x00,
0xff, 0x34, 0xca, 0x08,
- 0x80, 0x65, 0x32, 0x62,
+ 0x80, 0x65, 0x5e, 0x62,
0x0f, 0xa1, 0xca, 0x08,
0x07, 0xa1, 0xca, 0x08,
0x40, 0xa0, 0xc8, 0x08,
0x00, 0x65, 0xca, 0x00,
0x80, 0x65, 0xca, 0x00,
- 0x80, 0xa0, 0x22, 0x7a,
+ 0x80, 0xa0, 0x4e, 0x7a,
0xff, 0x65, 0x0c, 0x08,
- 0x00, 0x65, 0x34, 0x42,
- 0x20, 0xa0, 0x3a, 0x7a,
+ 0x00, 0x65, 0x60, 0x42,
+ 0x20, 0xa0, 0x66, 0x7a,
0xff, 0x65, 0x0c, 0x08,
- 0x00, 0x65, 0xd2, 0x5b,
- 0xa0, 0x3d, 0x46, 0x62,
+ 0x00, 0x65, 0xf2, 0x5b,
+ 0xa0, 0x3d, 0x6e, 0x62,
0x23, 0xa0, 0x0c, 0x08,
- 0x00, 0x65, 0xd2, 0x5b,
- 0xa0, 0x3d, 0x46, 0x62,
- 0x00, 0xb9, 0x3a, 0x42,
- 0xff, 0x65, 0x3a, 0x62,
+ 0x00, 0x65, 0xf2, 0x5b,
+ 0xa0, 0x3d, 0x6e, 0x62,
+ 0x00, 0xb9, 0x66, 0x42,
+ 0xff, 0x65, 0x66, 0x62,
0xa1, 0x6a, 0x22, 0x01,
0xff, 0x6a, 0xd4, 0x08,
- 0x10, 0x51, 0x46, 0x72,
+ 0x10, 0x51, 0x6e, 0x72,
0x40, 0x6a, 0x18, 0x00,
0xff, 0x65, 0x0c, 0x08,
- 0x00, 0x65, 0xd2, 0x5b,
- 0xa0, 0x3d, 0x46, 0x62,
- 0x10, 0x3d, 0x06, 0x00,
- 0x00, 0x65, 0x0e, 0x42,
+ 0x00, 0x65, 0xf2, 0x5b,
+ 0xa0, 0x3d, 0x38, 0x72,
0x40, 0x6a, 0x18, 0x00,
0xff, 0x34, 0xa6, 0x08,
- 0x80, 0x34, 0x4e, 0x62,
+ 0x80, 0x34, 0x76, 0x62,
0x7f, 0xa0, 0x40, 0x09,
0x08, 0x6a, 0x68, 0x00,
- 0x00, 0x65, 0x18, 0x41,
- 0x64, 0x6a, 0x12, 0x5b,
- 0x80, 0x64, 0xbe, 0x6a,
- 0x04, 0x64, 0xa4, 0x72,
- 0x02, 0x64, 0xaa, 0x72,
- 0x00, 0x6a, 0x6c, 0x72,
- 0x03, 0x64, 0xba, 0x72,
- 0x01, 0x64, 0xa0, 0x72,
- 0x07, 0x64, 0x00, 0x73,
- 0x08, 0x64, 0x68, 0x72,
+ 0x00, 0x65, 0x22, 0x41,
+ 0x64, 0x6a, 0x3a, 0x5b,
+ 0x80, 0x64, 0xea, 0x6a,
+ 0x04, 0x64, 0xcc, 0x72,
+ 0x02, 0x64, 0xd2, 0x72,
+ 0x00, 0x6a, 0x94, 0x72,
+ 0x03, 0x64, 0xe6, 0x72,
+ 0x01, 0x64, 0xc8, 0x72,
+ 0x07, 0x64, 0x28, 0x73,
+ 0x08, 0x64, 0x90, 0x72,
0x11, 0x6a, 0x22, 0x01,
- 0x07, 0x6a, 0x04, 0x5b,
+ 0x07, 0x6a, 0x2c, 0x5b,
0xff, 0x06, 0xd4, 0x08,
- 0x00, 0x65, 0x18, 0x41,
- 0xff, 0xa8, 0x70, 0x6a,
- 0xff, 0xa2, 0x88, 0x7a,
+ 0x00, 0x65, 0x22, 0x41,
+ 0xff, 0xa8, 0x98, 0x6a,
+ 0xff, 0xa2, 0xb0, 0x7a,
0x01, 0x6a, 0x6a, 0x00,
- 0x00, 0xb9, 0xfe, 0x5b,
- 0xff, 0xa2, 0x88, 0x7a,
+ 0x00, 0xb9, 0x1e, 0x5c,
+ 0xff, 0xa2, 0xb0, 0x7a,
0x71, 0x6a, 0x22, 0x01,
0xff, 0x6a, 0xd4, 0x08,
- 0x40, 0x51, 0x88, 0x62,
+ 0x40, 0x51, 0xb0, 0x62,
0x0d, 0x6a, 0x6a, 0x00,
- 0x00, 0xb9, 0xfe, 0x5b,
+ 0x00, 0xb9, 0x1e, 0x5c,
0xff, 0x3e, 0x74, 0x09,
0xff, 0x90, 0x7c, 0x08,
- 0x00, 0x65, 0x44, 0x58,
- 0x00, 0x65, 0x2a, 0x41,
- 0x20, 0xa0, 0x90, 0x6a,
+ 0x00, 0x65, 0x50, 0x58,
+ 0x00, 0x65, 0x34, 0x41,
+ 0x20, 0xa0, 0xb8, 0x6a,
0xff, 0x37, 0xc8, 0x08,
- 0x00, 0x6a, 0xa8, 0x5b,
- 0xff, 0x6a, 0xbe, 0x5b,
+ 0x00, 0x6a, 0xc8, 0x5b,
+ 0xff, 0x6a, 0xde, 0x5b,
0xff, 0xf8, 0xc8, 0x08,
0xff, 0x4f, 0xc8, 0x08,
- 0x01, 0x6a, 0xa8, 0x5b,
- 0x00, 0xb9, 0xbe, 0x5b,
+ 0x01, 0x6a, 0xc8, 0x5b,
+ 0x00, 0xb9, 0xde, 0x5b,
0x01, 0x4f, 0x9e, 0x18,
0x02, 0x6a, 0x22, 0x01,
- 0x00, 0x65, 0x6c, 0x5c,
- 0x00, 0x65, 0x2a, 0x41,
+ 0x00, 0x65, 0x8c, 0x5c,
+ 0x00, 0x65, 0x34, 0x41,
0x41, 0x6a, 0x22, 0x01,
- 0x00, 0x65, 0x18, 0x41,
+ 0x00, 0x65, 0x22, 0x41,
0x04, 0xa0, 0x40, 0x01,
- 0x00, 0x65, 0x84, 0x5c,
- 0x00, 0x65, 0x2a, 0x41,
- 0x10, 0x36, 0x68, 0x7a,
- 0xff, 0x38, 0x46, 0x09,
- 0xa4, 0x6a, 0xcc, 0x00,
- 0x39, 0x6a, 0xe6, 0x5b,
+ 0x00, 0x65, 0xa4, 0x5c,
+ 0x00, 0x65, 0x34, 0x41,
+ 0x10, 0x36, 0x90, 0x7a,
+ 0x05, 0x38, 0x46, 0x31,
+ 0x04, 0x14, 0x58, 0x31,
+ 0x03, 0xa9, 0x60, 0x31,
+ 0xa3, 0x6a, 0xcc, 0x00,
+ 0x38, 0x6a, 0x04, 0x5c,
0xac, 0x6a, 0xcc, 0x00,
- 0x14, 0x6a, 0xe6, 0x5b,
- 0xa9, 0x6a, 0xe8, 0x5b,
- 0x00, 0x65, 0x68, 0x42,
+ 0x14, 0x6a, 0x06, 0x5c,
+ 0xa9, 0x6a, 0x08, 0x5c,
+ 0x00, 0x65, 0x90, 0x42,
0xef, 0x36, 0x6c, 0x08,
- 0x00, 0x65, 0x68, 0x42,
+ 0x00, 0x65, 0x90, 0x42,
0x0f, 0x64, 0xc8, 0x08,
0x07, 0x64, 0xc8, 0x08,
0x00, 0x37, 0x6e, 0x00,
- 0x00, 0x65, 0x78, 0x5b,
- 0xff, 0x51, 0xce, 0x72,
- 0x20, 0x36, 0xde, 0x7a,
- 0x00, 0x90, 0x5c, 0x5b,
- 0x00, 0x65, 0xe0, 0x42,
+ 0xff, 0x6a, 0xa4, 0x00,
+ 0x00, 0x65, 0x98, 0x5b,
+ 0xff, 0x51, 0xfc, 0x72,
+ 0x20, 0x36, 0x06, 0x7b,
+ 0x00, 0x90, 0x86, 0x5b,
+ 0x00, 0x65, 0x08, 0x43,
0xff, 0x06, 0xd4, 0x08,
- 0x00, 0x65, 0xd2, 0x5b,
- 0xe0, 0x3d, 0xfa, 0x62,
- 0x20, 0x12, 0xfa, 0x62,
- 0x51, 0x6a, 0x08, 0x5b,
- 0xff, 0x51, 0x20, 0x09,
- 0x20, 0xa0, 0xfa, 0x7a,
- 0x00, 0x90, 0x5c, 0x5b,
- 0x00, 0x65, 0x56, 0x5b,
+ 0x00, 0x65, 0xf2, 0x5b,
+ 0xe0, 0x3d, 0x22, 0x63,
+ 0x20, 0x12, 0x22, 0x63,
+ 0x51, 0x6a, 0x30, 0x5b,
+ 0x00, 0x65, 0x80, 0x5b,
0xff, 0x37, 0xc8, 0x08,
- 0x00, 0xa1, 0xf2, 0x62,
- 0x04, 0xa0, 0xf2, 0x7a,
+ 0x00, 0xa1, 0x1a, 0x63,
+ 0x04, 0xa0, 0x1a, 0x7b,
0xfb, 0xa0, 0x40, 0x09,
0x80, 0x36, 0x6c, 0x00,
- 0x80, 0xa0, 0x68, 0x7a,
+ 0x80, 0xa0, 0x90, 0x7a,
0x7f, 0xa0, 0x40, 0x09,
- 0xff, 0x6a, 0x04, 0x5b,
- 0x00, 0x65, 0x68, 0x42,
- 0x04, 0xa0, 0xf8, 0x7a,
- 0x00, 0x65, 0x84, 0x5c,
- 0x00, 0x65, 0xfa, 0x42,
- 0x00, 0x65, 0x6c, 0x5c,
+ 0xff, 0x6a, 0x2c, 0x5b,
+ 0x00, 0x65, 0x90, 0x42,
+ 0x04, 0xa0, 0x20, 0x7b,
+ 0x00, 0x65, 0xa4, 0x5c,
+ 0x00, 0x65, 0x22, 0x43,
+ 0x00, 0x65, 0x8c, 0x5c,
0x31, 0x6a, 0x22, 0x01,
- 0x0c, 0x6a, 0x04, 0x5b,
- 0x00, 0x65, 0x68, 0x42,
+ 0x0c, 0x6a, 0x2c, 0x5b,
+ 0x00, 0x65, 0x90, 0x42,
0x61, 0x6a, 0x22, 0x01,
- 0x00, 0x65, 0x68, 0x42,
+ 0x00, 0x65, 0x90, 0x42,
0x10, 0x3d, 0x06, 0x00,
0xff, 0x65, 0x68, 0x0c,
0xff, 0x06, 0xd4, 0x08,
- 0x01, 0x0c, 0x0a, 0x7b,
- 0x04, 0x0c, 0x0a, 0x6b,
+ 0x01, 0x0c, 0x32, 0x7b,
+ 0x04, 0x0c, 0x32, 0x6b,
0xe0, 0x03, 0x7a, 0x08,
- 0xe0, 0x3d, 0x1e, 0x63,
+ 0xe0, 0x3d, 0x46, 0x63,
0xff, 0x65, 0xcc, 0x08,
0xff, 0x12, 0xda, 0x0c,
0xff, 0x06, 0xd4, 0x0c,
0xff, 0x65, 0x0c, 0x08,
- 0x02, 0x0b, 0x1a, 0x7b,
+ 0x02, 0x0b, 0x42, 0x7b,
0xff, 0x6a, 0xd4, 0x0c,
0xd1, 0x6a, 0x22, 0x01,
- 0x00, 0x65, 0x18, 0x41,
+ 0x00, 0x65, 0x22, 0x41,
0xff, 0x65, 0x26, 0x09,
- 0x01, 0x0b, 0x32, 0x6b,
- 0x10, 0x0c, 0x24, 0x7b,
- 0x04, 0x0b, 0x2c, 0x6b,
+ 0x01, 0x0b, 0x5a, 0x6b,
+ 0x10, 0x0c, 0x4c, 0x7b,
+ 0x04, 0x0b, 0x54, 0x6b,
0xff, 0x6a, 0xca, 0x08,
- 0x04, 0x93, 0x30, 0x6b,
- 0x01, 0x94, 0x2e, 0x7b,
- 0x10, 0x94, 0x30, 0x6b,
+ 0x04, 0x93, 0x58, 0x6b,
+ 0x01, 0x94, 0x56, 0x7b,
+ 0x10, 0x94, 0x58, 0x6b,
0xc7, 0x93, 0x26, 0x09,
0xff, 0x99, 0xd4, 0x08,
- 0x08, 0x93, 0x34, 0x6b,
+ 0x38, 0x93, 0x5c, 0x6b,
0xff, 0x6a, 0xd4, 0x0c,
- 0x80, 0x36, 0x38, 0x6b,
+ 0x80, 0x36, 0x60, 0x6b,
0x21, 0x6a, 0x22, 0x05,
0xff, 0x65, 0x20, 0x09,
- 0xff, 0x51, 0x46, 0x63,
+ 0xff, 0x51, 0x6e, 0x63,
0xff, 0x37, 0xc8, 0x08,
- 0xa1, 0x6a, 0x50, 0x43,
+ 0xa1, 0x6a, 0x7a, 0x43,
0xff, 0x51, 0xc8, 0x08,
- 0xb9, 0x6a, 0x50, 0x43,
- 0xff, 0xba, 0x54, 0x73,
+ 0xb9, 0x6a, 0x7a, 0x43,
+ 0xff, 0x90, 0xa4, 0x08,
+ 0xff, 0xba, 0x7e, 0x73,
0xff, 0xba, 0x20, 0x09,
0xff, 0x65, 0xca, 0x18,
- 0x00, 0x6c, 0x4a, 0x63,
+ 0x00, 0x6c, 0x72, 0x63,
0xff, 0x90, 0xca, 0x0c,
0xff, 0x6a, 0xca, 0x04,
- 0x20, 0x36, 0x72, 0x7b,
- 0x00, 0x90, 0x3e, 0x5b,
- 0xff, 0x65, 0x72, 0x73,
- 0xff, 0xba, 0x66, 0x73,
- 0xff, 0xbb, 0xcc, 0x08,
- 0xff, 0xba, 0x20, 0x09,
- 0xff, 0x66, 0x76, 0x09,
- 0xff, 0x65, 0x20, 0x09,
- 0xff, 0xbb, 0x70, 0x73,
+ 0x20, 0x36, 0x92, 0x7b,
+ 0x00, 0x90, 0x66, 0x5b,
+ 0xff, 0x65, 0x92, 0x73,
+ 0xff, 0x52, 0x90, 0x73,
0xff, 0xba, 0xcc, 0x08,
- 0xff, 0xbb, 0x20, 0x09,
+ 0xff, 0x52, 0x20, 0x09,
0xff, 0x66, 0x74, 0x09,
0xff, 0x65, 0x20, 0x0d,
0xff, 0xba, 0x7e, 0x0c,
- 0x00, 0x6a, 0x72, 0x5c,
+ 0x00, 0x6a, 0x92, 0x5c,
0x0d, 0x6a, 0x6a, 0x00,
- 0x00, 0x51, 0xfe, 0x43,
- 0xff, 0x3f, 0xcc, 0x73,
+ 0x00, 0x51, 0x1e, 0x44,
+ 0xff, 0x3f, 0xec, 0x73,
0xff, 0x6a, 0xa2, 0x00,
- 0x00, 0x3f, 0x3e, 0x5b,
- 0xff, 0x65, 0xcc, 0x73,
+ 0x00, 0x3f, 0x66, 0x5b,
+ 0xff, 0x65, 0xec, 0x73,
0x20, 0x36, 0x6c, 0x00,
- 0x20, 0xa0, 0x86, 0x6b,
+ 0x20, 0xa0, 0xa6, 0x6b,
0xff, 0xb9, 0xa2, 0x0c,
0xff, 0x6a, 0xa2, 0x04,
0xff, 0x65, 0xa4, 0x08,
0xe0, 0x6a, 0xcc, 0x00,
- 0x45, 0x6a, 0xf2, 0x5b,
+ 0x45, 0x6a, 0x12, 0x5c,
0x01, 0x6a, 0xd0, 0x01,
0x09, 0x6a, 0xd6, 0x01,
- 0x80, 0xeb, 0x92, 0x7b,
+ 0x80, 0xeb, 0xb2, 0x7b,
0x01, 0x6a, 0xd6, 0x01,
0x01, 0xe9, 0xa4, 0x34,
0x88, 0x6a, 0xcc, 0x00,
- 0x45, 0x6a, 0xf2, 0x5b,
+ 0x45, 0x6a, 0x12, 0x5c,
0x01, 0x6a, 0x18, 0x01,
0xff, 0x6a, 0x1a, 0x09,
0xff, 0x6a, 0x1c, 0x09,
0x0d, 0x6a, 0x26, 0x01,
- 0x00, 0x65, 0x64, 0x5c,
+ 0x00, 0x65, 0x84, 0x5c,
0xff, 0x99, 0xa4, 0x0c,
0xff, 0x65, 0xa4, 0x08,
0xe0, 0x6a, 0xcc, 0x00,
- 0x45, 0x6a, 0xf2, 0x5b,
+ 0x45, 0x6a, 0x12, 0x5c,
0x01, 0x6a, 0xd0, 0x01,
0x01, 0x6a, 0xdc, 0x05,
0x88, 0x6a, 0xcc, 0x00,
- 0x45, 0x6a, 0xf2, 0x5b,
+ 0x45, 0x6a, 0x12, 0x5c,
0x01, 0x6a, 0x18, 0x01,
0xff, 0x6a, 0x1a, 0x09,
0xff, 0x6a, 0x1c, 0x09,
0x01, 0x6a, 0x26, 0x05,
0x01, 0x65, 0xd8, 0x31,
0x09, 0xee, 0xdc, 0x01,
- 0x80, 0xee, 0xc2, 0x7b,
+ 0x80, 0xee, 0xe2, 0x7b,
0xff, 0x6a, 0xdc, 0x0d,
0xff, 0x65, 0x32, 0x09,
0x0a, 0x93, 0x26, 0x01,
- 0x00, 0x65, 0x64, 0x44,
+ 0x00, 0x65, 0x84, 0x44,
0xff, 0x37, 0xc8, 0x08,
- 0x00, 0x6a, 0x88, 0x5b,
+ 0x00, 0x6a, 0xa8, 0x5b,
0xff, 0x52, 0xa2, 0x0c,
- 0x01, 0x0c, 0xd2, 0x7b,
- 0x04, 0x0c, 0xd2, 0x6b,
- 0xe0, 0x03, 0x7a, 0x08,
- 0xff, 0x3d, 0x06, 0x0c,
+ 0x01, 0x0c, 0xf2, 0x7b,
+ 0x04, 0x0c, 0xf2, 0x6b,
+ 0xe0, 0x03, 0x06, 0x08,
+ 0xe0, 0x03, 0x7a, 0x0c,
0xff, 0x8c, 0x10, 0x08,
0xff, 0x8d, 0x12, 0x08,
0xff, 0x8e, 0x14, 0x0c,
0x00, 0x6c, 0xda, 0x24,
0xff, 0x65, 0xc8, 0x08,
0xe0, 0x6a, 0xcc, 0x00,
- 0x41, 0x6a, 0xee, 0x5b,
+ 0x41, 0x6a, 0x0e, 0x5c,
0xff, 0x90, 0xe2, 0x09,
0x20, 0x6a, 0xd0, 0x01,
- 0x04, 0x35, 0x10, 0x7c,
+ 0x04, 0x35, 0x30, 0x7c,
0x1d, 0x6a, 0xdc, 0x01,
- 0xdc, 0xee, 0x0c, 0x64,
- 0x00, 0x65, 0x1c, 0x44,
+ 0xdc, 0xee, 0x2c, 0x64,
+ 0x00, 0x65, 0x3c, 0x44,
0x01, 0x6a, 0xdc, 0x01,
0x20, 0xa0, 0xd8, 0x31,
0x09, 0xee, 0xdc, 0x01,
- 0x80, 0xee, 0x16, 0x7c,
+ 0x80, 0xee, 0x36, 0x7c,
0x19, 0x6a, 0xdc, 0x01,
- 0xd8, 0xee, 0x1a, 0x64,
+ 0xd8, 0xee, 0x3a, 0x64,
0xff, 0x6a, 0xdc, 0x09,
- 0x18, 0xee, 0x1e, 0x6c,
+ 0x18, 0xee, 0x3e, 0x6c,
0xff, 0x6a, 0xd4, 0x0c,
0x88, 0x6a, 0xcc, 0x00,
- 0x41, 0x6a, 0xee, 0x5b,
+ 0x41, 0x6a, 0x0e, 0x5c,
0x20, 0x6a, 0x18, 0x01,
0xff, 0x6a, 0x1a, 0x09,
0xff, 0x6a, 0x1c, 0x09,
0xff, 0x35, 0x26, 0x09,
- 0x04, 0x35, 0x48, 0x6c,
+ 0x04, 0x35, 0x68, 0x6c,
0xa0, 0x6a, 0xca, 0x00,
0x20, 0x65, 0xc8, 0x18,
0xff, 0x6c, 0x32, 0x09,
0xff, 0x6c, 0x32, 0x09,
0xff, 0x6c, 0x32, 0x09,
0xff, 0x6c, 0x32, 0x09,
- 0x00, 0x65, 0x34, 0x64,
+ 0x00, 0x65, 0x54, 0x64,
0x0a, 0x93, 0x26, 0x01,
- 0x00, 0x65, 0x64, 0x5c,
- 0x04, 0x35, 0x38, 0x7b,
- 0xa0, 0x6a, 0x54, 0x5c,
- 0x00, 0x65, 0x56, 0x5c,
- 0x00, 0x65, 0x56, 0x5c,
- 0x00, 0x65, 0x56, 0x44,
+ 0x00, 0x65, 0x84, 0x5c,
+ 0x04, 0x35, 0x60, 0x7b,
+ 0xa0, 0x6a, 0x74, 0x5c,
+ 0x00, 0x65, 0x76, 0x5c,
+ 0x00, 0x65, 0x76, 0x5c,
+ 0x00, 0x65, 0x76, 0x44,
0xff, 0x65, 0xcc, 0x08,
0xff, 0x99, 0xda, 0x08,
0xff, 0x99, 0xda, 0x08,
0xff, 0x99, 0xda, 0x08,
0xff, 0x99, 0xda, 0x08,
0xff, 0x99, 0xda, 0x0c,
- 0x08, 0x94, 0x64, 0x7c,
+ 0x08, 0x94, 0x84, 0x7c,
0xf7, 0x93, 0x26, 0x09,
- 0x08, 0x93, 0x68, 0x6c,
+ 0x08, 0x93, 0x88, 0x6c,
0xff, 0x6a, 0xd4, 0x0c,
0xff, 0x40, 0x74, 0x09,
0xff, 0x90, 0x80, 0x08,
0xff, 0x6a, 0x72, 0x05,
- 0xff, 0x40, 0x80, 0x64,
- 0xff, 0x3f, 0x78, 0x64,
+ 0xff, 0x40, 0xa0, 0x64,
+ 0xff, 0x3f, 0x98, 0x64,
0xff, 0x6a, 0xca, 0x04,
0xff, 0x3f, 0x20, 0x09,
0x01, 0x6a, 0x6a, 0x00,
- 0x00, 0xb9, 0xfe, 0x5b,
- 0x00, 0x90, 0x5c, 0x43,
+ 0x00, 0xb9, 0x1e, 0x5c,
+ 0xff, 0xba, 0x7e, 0x0c,
0xff, 0x40, 0x20, 0x09,
0xff, 0xba, 0x80, 0x0c,
- 0xff, 0x6a, 0x76, 0x01,
0xff, 0x3f, 0x74, 0x09,
- 0xff, 0x90, 0x7e, 0x08,
- 0xff, 0xba, 0x38, 0x73,
- 0xff, 0xba, 0x20, 0x09,
- 0xff, 0x3f, 0x76, 0x09,
- 0xff, 0x3f, 0x20, 0x0d,
+ 0xff, 0x90, 0x7e, 0x0c,
};
+static int aic7xxx_patch13_func(struct aic7xxx_host *p);
+
+static int
+aic7xxx_patch13_func(struct aic7xxx_host *p)
+{
+ return ((p->chip & AHC_CHIPID_MASK) == AHC_AIC7895);
+}
+
static int aic7xxx_patch12_func(struct aic7xxx_host *p);
static int
aic7xxx_patch12_func(struct aic7xxx_host *p)
{
- return ((p->chip & AHC_CHIPID_MASK) == AHC_AIC7895);
+ return ((p->features & AHC_CMD_CHAN) == 0);
}
static int aic7xxx_patch11_func(struct aic7xxx_host *p);
skip_instr :10,
skip_patch :12;
} sequencer_patches[] = {
- { aic7xxx_patch1_func, 1, 1, 2 },
- { aic7xxx_patch0_func, 2, 1, 1 },
- { aic7xxx_patch2_func, 3, 2, 1 },
- { aic7xxx_patch3_func, 7, 1, 1 },
+ { aic7xxx_patch1_func, 2, 1, 2 },
+ { aic7xxx_patch0_func, 3, 1, 1 },
+ { aic7xxx_patch2_func, 4, 2, 1 },
{ aic7xxx_patch3_func, 8, 1, 1 },
- { aic7xxx_patch4_func, 11, 4, 1 },
- { aic7xxx_patch5_func, 16, 3, 2 },
- { aic7xxx_patch0_func, 19, 4, 1 },
- { aic7xxx_patch6_func, 23, 1, 1 },
- { aic7xxx_patch7_func, 26, 1, 1 },
- { aic7xxx_patch4_func, 34, 4, 1 },
- { aic7xxx_patch8_func, 38, 3, 2 },
- { aic7xxx_patch0_func, 41, 3, 1 },
- { aic7xxx_patch9_func, 47, 7, 1 },
- { aic7xxx_patch4_func, 55, 3, 1 },
- { aic7xxx_patch8_func, 58, 2, 1 },
- { aic7xxx_patch1_func, 63, 60, 1 },
- { aic7xxx_patch8_func, 164, 1, 2 },
- { aic7xxx_patch0_func, 165, 1, 1 },
- { aic7xxx_patch2_func, 169, 1, 1 },
- { aic7xxx_patch2_func, 172, 1, 2 },
- { aic7xxx_patch0_func, 173, 2, 1 },
- { aic7xxx_patch10_func, 175, 1, 1 },
- { aic7xxx_patch8_func, 182, 1, 2 },
- { aic7xxx_patch0_func, 183, 3, 1 },
- { aic7xxx_patch8_func, 187, 1, 2 },
- { aic7xxx_patch0_func, 188, 1, 1 },
- { aic7xxx_patch8_func, 189, 7, 2 },
- { aic7xxx_patch0_func, 196, 1, 1 },
- { aic7xxx_patch2_func, 201, 13, 2 },
- { aic7xxx_patch0_func, 214, 8, 1 },
- { aic7xxx_patch10_func, 222, 1, 1 },
- { aic7xxx_patch8_func, 227, 1, 1 },
- { aic7xxx_patch8_func, 228, 1, 1 },
- { aic7xxx_patch8_func, 233, 1, 1 },
- { aic7xxx_patch8_func, 235, 2, 1 },
- { aic7xxx_patch8_func, 240, 8, 1 },
- { aic7xxx_patch8_func, 249, 1, 1 },
- { aic7xxx_patch2_func, 250, 2, 2 },
- { aic7xxx_patch0_func, 252, 4, 1 },
- { aic7xxx_patch10_func, 256, 2, 2 },
- { aic7xxx_patch0_func, 258, 1, 1 },
- { aic7xxx_patch11_func, 265, 1, 2 },
- { aic7xxx_patch0_func, 266, 1, 1 },
- { aic7xxx_patch5_func, 328, 1, 2 },
- { aic7xxx_patch0_func, 329, 1, 1 },
- { aic7xxx_patch3_func, 332, 1, 1 },
- { aic7xxx_patch11_func, 351, 1, 2 },
- { aic7xxx_patch0_func, 352, 1, 1 },
- { aic7xxx_patch6_func, 356, 1, 1 },
- { aic7xxx_patch7_func, 364, 3, 2 },
- { aic7xxx_patch0_func, 367, 1, 1 },
- { aic7xxx_patch1_func, 396, 3, 1 },
- { aic7xxx_patch10_func, 410, 1, 1 },
- { aic7xxx_patch2_func, 453, 7, 2 },
- { aic7xxx_patch0_func, 460, 8, 1 },
- { aic7xxx_patch2_func, 469, 4, 2 },
- { aic7xxx_patch0_func, 473, 6, 1 },
- { aic7xxx_patch2_func, 479, 4, 2 },
- { aic7xxx_patch0_func, 483, 3, 1 },
- { aic7xxx_patch2_func, 512, 17, 4 },
- { aic7xxx_patch12_func, 520, 4, 2 },
- { aic7xxx_patch0_func, 524, 2, 1 },
- { aic7xxx_patch0_func, 529, 33, 1 },
- { aic7xxx_patch6_func, 566, 2, 1 },
- { aic7xxx_patch6_func, 569, 9, 1 },
+ { aic7xxx_patch3_func, 9, 1, 1 },
+ { aic7xxx_patch4_func, 12, 4, 1 },
+ { aic7xxx_patch5_func, 17, 3, 2 },
+ { aic7xxx_patch0_func, 20, 4, 1 },
+ { aic7xxx_patch6_func, 24, 1, 1 },
+ { aic7xxx_patch7_func, 27, 1, 1 },
+ { aic7xxx_patch2_func, 30, 1, 2 },
+ { aic7xxx_patch0_func, 31, 3, 1 },
+ { aic7xxx_patch4_func, 40, 4, 1 },
+ { aic7xxx_patch8_func, 44, 3, 2 },
+ { aic7xxx_patch0_func, 47, 3, 1 },
+ { aic7xxx_patch9_func, 52, 7, 1 },
+ { aic7xxx_patch4_func, 60, 3, 1 },
+ { aic7xxx_patch8_func, 63, 2, 1 },
+ { aic7xxx_patch1_func, 68, 60, 1 },
+ { aic7xxx_patch8_func, 162, 1, 2 },
+ { aic7xxx_patch0_func, 163, 2, 1 },
+ { aic7xxx_patch2_func, 167, 2, 3 },
+ { aic7xxx_patch8_func, 167, 1, 1 },
+ { aic7xxx_patch0_func, 169, 2, 1 },
+ { aic7xxx_patch8_func, 172, 1, 2 },
+ { aic7xxx_patch0_func, 173, 1, 1 },
+ { aic7xxx_patch2_func, 177, 1, 1 },
+ { aic7xxx_patch2_func, 180, 3, 2 },
+ { aic7xxx_patch0_func, 183, 5, 1 },
+ { aic7xxx_patch2_func, 191, 2, 3 },
+ { aic7xxx_patch8_func, 191, 1, 1 },
+ { aic7xxx_patch0_func, 193, 3, 1 },
+ { aic7xxx_patch10_func, 196, 2, 1 },
+ { aic7xxx_patch8_func, 198, 7, 2 },
+ { aic7xxx_patch0_func, 205, 1, 1 },
+ { aic7xxx_patch2_func, 210, 14, 3 },
+ { aic7xxx_patch10_func, 223, 1, 1 },
+ { aic7xxx_patch0_func, 224, 9, 1 },
+ { aic7xxx_patch8_func, 238, 2, 1 },
+ { aic7xxx_patch8_func, 240, 1, 1 },
+ { aic7xxx_patch10_func, 241, 6, 3 },
+ { aic7xxx_patch2_func, 241, 2, 2 },
+ { aic7xxx_patch0_func, 243, 4, 1 },
+ { aic7xxx_patch8_func, 248, 1, 1 },
+ { aic7xxx_patch8_func, 252, 11, 1 },
+ { aic7xxx_patch2_func, 264, 3, 3 },
+ { aic7xxx_patch10_func, 266, 1, 1 },
+ { aic7xxx_patch0_func, 267, 5, 1 },
+ { aic7xxx_patch10_func, 272, 1, 2 },
+ { aic7xxx_patch0_func, 273, 7, 1 },
+ { aic7xxx_patch11_func, 287, 1, 2 },
+ { aic7xxx_patch0_func, 288, 1, 1 },
+ { aic7xxx_patch5_func, 348, 1, 2 },
+ { aic7xxx_patch0_func, 349, 1, 1 },
+ { aic7xxx_patch3_func, 352, 1, 1 },
+ { aic7xxx_patch2_func, 362, 3, 2 },
+ { aic7xxx_patch0_func, 365, 5, 1 },
+ { aic7xxx_patch11_func, 373, 1, 2 },
+ { aic7xxx_patch0_func, 374, 1, 1 },
+ { aic7xxx_patch6_func, 379, 1, 1 },
+ { aic7xxx_patch1_func, 416, 3, 1 },
+ { aic7xxx_patch10_func, 421, 11, 1 },
+ { aic7xxx_patch2_func, 469, 7, 2 },
+ { aic7xxx_patch0_func, 476, 8, 1 },
+ { aic7xxx_patch2_func, 485, 4, 2 },
+ { aic7xxx_patch0_func, 489, 6, 1 },
+ { aic7xxx_patch2_func, 495, 4, 2 },
+ { aic7xxx_patch0_func, 499, 3, 1 },
+ { aic7xxx_patch12_func, 509, 10, 1 },
+ { aic7xxx_patch2_func, 528, 17, 4 },
+ { aic7xxx_patch13_func, 536, 4, 2 },
+ { aic7xxx_patch0_func, 540, 2, 1 },
+ { aic7xxx_patch0_func, 545, 33, 1 },
+ { aic7xxx_patch12_func, 578, 4, 1 },
+ { aic7xxx_patch6_func, 582, 2, 1 },
+ { aic7xxx_patch6_func, 585, 9, 1 },
};
#define SCSI_1 1
#define SCSI_1_CCS 2
#define SCSI_2 3
+#define SCSI_3 4
/*
* Every SCSI command starts with a one byte OP-code.
scd->type < MAX_SCSI_DEVICE_CODE ?
scsi_device_types[(int)scd->type] : "Unknown " );
y += sprintf(buffer + len + y, " ANSI"
- " SCSI revision: %02x", (scd->scsi_level < 3)?1:2);
+ " SCSI revision: %02x", (scd->scsi_level - 1)?scd->scsi_level - 1:1);
if (scd->scsi_level == 2)
y += sprintf(buffer + len + y, " CCS\n");
else
#include "constants.h"
#include "sd.h"
+#include <scsi/scsicam.h>
/*
* This source file contains the symbol table used by scsi loadable
* modules.
*/
-extern int scsicam_bios_param (Disk * disk,
- int dev, int *ip );
-
extern void print_command (unsigned char *command);
extern void print_sense(const char * devclass, Scsi_Cmnd * SCpnt);
EXPORT_SYMBOL(scsi_register);
EXPORT_SYMBOL(scsi_unregister);
EXPORT_SYMBOL(scsicam_bios_param);
+EXPORT_SYMBOL(scsi_partsize);
EXPORT_SYMBOL(scsi_allocate_device);
EXPORT_SYMBOL(scsi_do_cmd);
EXPORT_SYMBOL(scsi_command_size);
#include "scsi.h"
#include "hosts.h"
#include "sd.h"
+#include <scsi/scsicam.h>
-static int partsize(struct buffer_head *bh, unsigned long capacity,
- unsigned int *cyls, unsigned int *hds, unsigned int *secs);
static int setsize(unsigned long capacity,unsigned int *cyls,unsigned int *hds,
unsigned int *secs);
return -1;
/* try to infer mapping from partition table */
- ret_code = partsize (bh, (unsigned long) size, (unsigned int *) ip + 2,
+ ret_code = scsi_partsize (bh, (unsigned long) size, (unsigned int *) ip + 2,
(unsigned int *) ip + 0, (unsigned int *) ip + 1);
brelse (bh);
}
/*
- * Function : static int partsize(struct buffer_head *bh, unsigned long
+ * Function : static int scsi_partsize(struct buffer_head *bh, unsigned long
* capacity,unsigned int *cyls, unsigned int *hds, unsigned int *secs);
*
* Purpose : to determine the BIOS mapping used to create the partition
*
*/
-static int partsize(struct buffer_head *bh, unsigned long capacity,
+int scsi_partsize(struct buffer_head *bh, unsigned long capacity,
unsigned int *cyls, unsigned int *hds, unsigned int *secs) {
struct partition *p, *largest = NULL;
int i, largest_cyl;
-/* $Id: atyfb.c,v 1.106 1999/04/16 11:20:49 geert Exp $
+/* $Id: atyfb.c,v 1.107 1999/06/08 19:59:03 geert Exp $
* linux/drivers/video/atyfb.c -- Frame buffer device for ATI Mach64
*
* Copyright (C) 1997-1998 Geert Uytterhoeven
struct atyfb_par default_par;
struct atyfb_par current_par;
u32 total_vram;
+ u32 ref_clk_per;
u32 pll_per;
u32 mclk_per;
u16 chip_type;
static void init_engine(const struct atyfb_par *par, struct fb_info_aty *info);
static void aty_st_514(int offset, u8 val, const struct fb_info_aty *info);
static void aty_st_pll(int offset, u8 val, const struct fb_info_aty *info);
-#if defined(__sparc__) || defined(DEBUG)
static u8 aty_ld_pll(int offset, const struct fb_info_aty *info);
-#endif
static void aty_set_crtc(const struct fb_info_aty *info,
const struct crtc *crtc);
static int aty_var_to_crtc(const struct fb_info_aty *info,
const struct pll_gx *pll);
static int aty_var_to_pll_18818(u32 vclk_per, struct pll_gx *pll);
static int aty_var_to_pll_514(u32 vclk_per, struct pll_gx *pll);
-static int aty_pll_gx_to_var(const struct pll_gx *pll, u32 *vclk_per);
+static int aty_pll_gx_to_var(const struct pll_gx *pll, u32 *vclk_per,
+ const struct fb_info_aty *info);
static void aty_set_pll_ct(const struct fb_info_aty *info,
const struct pll_ct *pll);
static int aty_dsp_gt(const struct fb_info_aty *info, u8 mclk_fb_div,
u8 bpp, struct pll_ct *pll);
static int aty_var_to_pll_ct(const struct fb_info_aty *info, u32 vclk_per,
u8 bpp, struct pll_ct *pll);
-static int aty_pll_ct_to_var(const struct pll_ct *pll, u32 *vclk_per);
+static int aty_pll_ct_to_var(const struct pll_ct *pll, u32 *vclk_per,
+ const struct fb_info_aty *info);
static void atyfb_set_par(const struct atyfb_par *par,
struct fb_info_aty *info);
static int atyfb_decode_var(const struct fb_var_screeninfo *var,
static int default_pll __initdata = 0;
static int default_mclk __initdata = 0;
-static const u32 ref_clk_per = 1000000000000ULL/14318180;
-
#if defined(CONFIG_PPC)
static int default_vmode __initdata = VMODE_NVRAM;
static int default_cmode __initdata = CMODE_NVRAM;
aty_st_8(CLOCK_CNTL + 1, (offset << 2) & ~PLL_WR_EN, info);
}
-#if defined(__sparc__) || defined(DEBUG)
static u8 aty_ld_pll(int offset, const struct fb_info_aty *info)
{
u8 res;
eieio();
return res;
}
-#endif
#if defined(CONFIG_PPC)
/* FIXME: ATI18818?? */
-static int aty_pll_gx_to_var(const struct pll_gx *pll, u32 *vclk_per)
+static int aty_pll_gx_to_var(const struct pll_gx *pll, u32 *vclk_per,
+ const struct fb_info_aty *info)
{
u8 df, vco_div_count, ref_div_count;
vco_div_count = pll->m & 0x3f;
ref_div_count = pll->n;
- *vclk_per = ((ref_clk_per*ref_div_count)<<(3-df))/(vco_div_count+65);
+ *vclk_per = ((info->ref_clk_per*ref_div_count)<<(3-df))/(vco_div_count+65);
return 0;
}
pll->pll_vclk_cntl = 0x03; /* VCLK = PLL_VCLK/VCLKx_POST */
- pll_ref_div = info->pll_per*2*255/ref_clk_per;
+ pll_ref_div = info->pll_per*2*255/info->ref_clk_per;
/* FIXME: use the VTB/GTB /3 post divider if it's better suited */
- q = ref_clk_per*pll_ref_div*4/info->mclk_per; /* actually 8*q */
+ q = info->ref_clk_per*pll_ref_div*4/info->mclk_per; /* actually 8*q */
if (q < 16*8 || q > 255*8)
FAIL("mclk out of range");
else if (q < 32*8)
mclk_fb_div = q*mclk_post_div/8;
/* FIXME: use the VTB/GTB /{3,6,12} post dividers if they're better suited */
- q = ref_clk_per*pll_ref_div*4/vclk_per; /* actually 8*q */
+ q = info->ref_clk_per*pll_ref_div*4/vclk_per; /* actually 8*q */
if (q < 16*8 || q > 255*8)
FAIL("vclk out of range");
else if (q < 32*8)
return 0;
}
-static int aty_pll_ct_to_var(const struct pll_ct *pll, u32 *vclk_per)
+static int aty_pll_ct_to_var(const struct pll_ct *pll, u32 *vclk_per,
+ const struct fb_info_aty *info)
{
u8 pll_ref_div = pll->pll_ref_div;
u8 vclk_fb_div = pll->vclk_fb_div;
(vclk_post_div & 3)];
if (vpostdiv == 0)
return -EINVAL;
- *vclk_per = pll_ref_div*vpostdiv*ref_clk_per/vclk_fb_div/2;
+ *vclk_per = pll_ref_div*vpostdiv*info->ref_clk_per/vclk_fb_div/2;
return 0;
}
if ((err = aty_crtc_to_var(&par->crtc, var)))
return err;
if ((Gx == GX_CHIP_ID) || (Gx == CX_CHIP_ID))
- err = aty_pll_gx_to_var(&par->pll.gx, &var->pixclock);
+ err = aty_pll_gx_to_var(&par->pll.gx, &var->pixclock, info);
else
- err = aty_pll_ct_to_var(&par->pll.ct, &var->pixclock);
+ err = aty_pll_ct_to_var(&par->pll.ct, &var->pixclock, info);
if (err)
return err;
int j, k;
struct fb_var_screeninfo var;
struct display *disp;
- const char *chipname = NULL, *ramname = NULL;
+ const char *chipname = NULL, *ramname = NULL, *xtal;
int pll, mclk, gtb_memsize;
#if defined(CONFIG_PPC)
int sense;
#endif
+ u8 pll_ref_div;
info->aty_cmap_regs = (struct aty_cmap_regs *)(info->ati_regbase+0xc0);
chip_id = aty_ld_le32(CONFIG_CHIP_ID, info);
}
}
+ info->ref_clk_per = 1000000000000ULL/14318180;
+ xtal = "14.31818";
+ if (!(Gx == GX_CHIP_ID || Gx == CX_CHIP_ID || Gx == CT_CHIP_ID ||
+ Gx == ET_CHIP_ID ||
+ ((Gx == VT_CHIP_ID || Gx == GT_CHIP_ID) && !(Rev & 0x07))) &&
+ (pll_ref_div = aty_ld_pll(PLL_REF_DIV, info))) {
+ int diff1, diff2;
+ diff1 = 510*14/pll_ref_div-pll;
+ diff2 = 510*29/pll_ref_div-pll;
+ if (diff1 < 0)
+ diff1 = -diff1;
+ if (diff2 < 0)
+ diff2 = -diff2;
+ if (diff2 < diff1) {
+ info->ref_clk_per = 1000000000000ULL/29498928;
+ xtal = "29.498928";
+ }
+ }
+
i = aty_ld_le32(MEM_CNTL, info);
gtb_memsize = !(Gx == GX_CHIP_ID || Gx == CX_CHIP_ID || Gx == CT_CHIP_ID ||
Gx == ET_CHIP_ID ||
if (default_mclk)
mclk = default_mclk;
- printk("%d%c %s, %d MHz PLL, %d Mhz MCLK\n",
+ printk("%d%c %s, %s MHz XTAL, %d MHz PLL, %d Mhz MCLK\n",
info->total_vram == 0x80000 ? 512 : (info->total_vram >> 20),
- info->total_vram == 0x80000 ? 'K' : 'M', ramname, pll, mclk);
+ info->total_vram == 0x80000 ? 'K' : 'M', ramname, xtal, pll, mclk);
if (mclk < 44)
info->mem_refresh_rate = 0; /* 000 = 10 Mhz - 43 Mhz */
struct fb_info_iga *info;
unsigned long addr;
extern int con_is_present(void);
+ int iga2000 = 0;
/* Do not attach when we have a serial console. */
if (!con_is_present())
pdev = pci_find_device(PCI_VENDOR_ID_INTERG,
PCI_DEVICE_ID_INTERG_1682, 0);
- if(pdev == NULL)
- return;
+ if (pdev == NULL) {
+ pdev = pci_find_device(PCI_VENDOR_ID_INTERG,
+ 0x2000, 0);
+ if(pdev == NULL)
+ return;
+ iga2000 = 1;
+ }
info = kmalloc(sizeof(struct fb_info_iga), GFP_ATOMIC);
if (!info) {
memset(info, 0, sizeof(struct fb_info_iga));
info->frame_buffer = pdev->base_address[0];
- if (!info->frame_buffer)
+ if (!info->frame_buffer) {
+ kfree(info);
return;
+ }
pcibios_read_config_dword(0, pdev->devfn,
PCI_BASE_ADDRESS_0,
info->frame_buffer_phys = addr & PCI_BASE_ADDRESS_MEM_MASK;
#ifdef __sparc__
-
+
info->io_base_phys = info->frame_buffer_phys;
-
- /* Obtain virtual address and correct physical by PCIC shift */
- info->io_base = pcic_alloc_io(&info->io_base_phys);
+
+ /*
+ * The right test would be to look if there is a base I/O address.
+ * But it appears that IGA 1682 reuses _memory_ address as a base
+ * for I/O accesses.
+ */
+ if (iga2000) {
+ info->io_base = (int) sparc_alloc_io(info->frame_buffer_phys |
+ 0x00800000, NULL, 0x1000, "iga", 0, 0);
+ } else {
+ /* Obtain virtual address and correct physical by PCIC shift */
+ info->io_base = pcic_alloc_io(&info->io_base_phys);
+ }
if (!info->io_base) {
+ kfree(info);
return;
}
info->mmap_map = kmalloc(4 * sizeof(*info->mmap_map), GFP_ATOMIC);
if (!info->mmap_map) {
printk("igafb_init: can't alloc mmap_map\n");
+ /* XXX Here we left I/O allocated */
kfree(info);
return;
}
}
#endif
- if (!iga_init(info)) {
- if (info->mmap_map)
- kfree(info->mmap_map);
- kfree(info);
- }
+ if (!iga_init(info)) {
+ if (info->mmap_map)
+ kfree(info->mmap_map);
+ kfree(info);
+ }
#ifdef __sparc__
/*
fi
if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
tristate 'NFS server support' CONFIG_NFSD
- fi
- if [ "$CONFIG_NFSD" != "n" ]; then
- bool ' Emulate SUN NFS server' CONFIG_NFSD_SUN
+ if [ "$CONFIG_NFSD" != "n" ]; then
+ bool ' Emulate SUN NFS server' CONFIG_NFSD_SUN
+ fi
fi
if [ "$CONFIG_NFS_FS" = "y" -o "$CONFIG_NFSD" = "y" ]; then
define_bool CONFIG_SUNRPC y
if (N_MAGIC(ex) == OMAGIC) {
#if defined(__alpha__) || defined(__sparc__)
do_brk(N_TXTADDR(ex) & PAGE_MASK,
- ex.a_text+ex.a_data + PAGE_SIZE - 1)
+ ex.a_text+ex.a_data + PAGE_SIZE - 1);
read_exec(bprm->dentry, fd_offset, (char *) N_TXTADDR(ex),
ex.a_text+ex.a_data, 0);
#else
}
link = bh->b_data;
}
+ UPDATE_ATIME(inode);
base = lookup_dentry(link, base, follow);
if (bh)
brelse(bh);
i++;
if (copy_to_user(buffer, link, i))
i = -EFAULT;
- UPDATE_ATIME(inode);
if (bh)
brelse (bh);
return i;
base = lookup_dentry (IRIX32_EMUL,
dget (current->fs->root),
- (LOOKUP_FOLLOW | LOOKUP_DIRECTORY | LOOKUP_SLASHOK));
+ (LOOKUP_FOLLOW | LOOKUP_DIRECTORY));
if (IS_ERR (base)) return base;
-/* $Id: io.h,v 1.19 1999/05/14 07:26:09 davem Exp $ */
+/* $Id: io.h,v 1.20 1999/06/03 15:02:50 davem Exp $ */
#ifndef __SPARC_IO_H
#define __SPARC_IO_H
#define virt_to_phys(x) __pa((unsigned long)(x))
#define phys_to_virt(x) __va((unsigned long)(x))
+/*
+ * At the moment, we do not use CMOS_READ anywhere outside of rtc.c,
+ * so rtc_port is static in it. This should not change unless a new
+ * hardware pops up.
+ */
+
+#define RTC_PORT(x) (rtc_port + (x))
+#define RTC_ALWAYS_BCD 0
+
/* Nothing to do */
#define dma_cache_inv(_start,_size) do { } while (0)
base = lookup_dentry (emul,
dget (current->fs->root),
- (LOOKUP_FOLLOW | LOOKUP_DIRECTORY | LOOKUP_SLASHOK));
+ (LOOKUP_FOLLOW | LOOKUP_DIRECTORY));
if (IS_ERR (base)) return NULL;
-/* $Id: pcic.h,v 1.1 1998/09/22 05:54:39 jj Exp $
+/* $Id: pcic.h,v 1.2 1999/06/03 15:02:51 davem Exp $
* pcic.h: JavaEngine 1 specific PCI definitions.
*
* Copyright (C) 1998 V. Roganov and G. Raiko
#ifndef __SPARC_PCIC_H
#define __SPARC_PCIC_H
+#ifndef __ASSEMBLY__
+
#include <linux/types.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
unsigned long pcic_config_space_addr;
unsigned long pcic_config_space_data;
struct linux_pbm_info pbm;
+ struct pcic_ca2irq *pcic_imap;
+ int pcic_imdim;
};
extern unsigned long pcic_alloc_io(unsigned long* addr);
extern void pcic_probe(void);
extern void sun4m_pci_init_IRQ(void);
-/* Size of PCI Space */
+#endif
+
+/* Size of PCI I/O space which we relocate. */
#define PCI_SPACE_SIZE 0x1000000 /* 16 MB */
/* PCIC Register Set. */
#define PCI_SOFTWARE_INT_CLEAR 0x6a /* 16 bits */
#define PCI_SOFTWARE_INT_SET 0x6e /* 16 bits */
#define PCI_SYS_INT_PENDING 0x70 /* 32 bits */
+#define PCI_SYS_INT_PENDING_PIO 0x40000000
+#define PCI_SYS_INT_PENDING_DMA 0x20000000
+#define PCI_SYS_INT_PENDING_PCI 0x10000000
+#define PCI_SYS_INT_PENDING_APSR 0x08000000
#define PCI_SYS_INT_TARGET_MASK 0x74 /* 32 bits */
#define PCI_SYS_INT_TARGET_MASK_CLEAR 0x78 /* 32 bits */
#define PCI_SYS_INT_TARGET_MASK_SET 0x7c /* 32 bits */
#define PCI_SYS_INT_PENDING_CLEAR 0x83 /* 8 bits */
+#define PCI_SYS_INT_PENDING_CLEAR_ALL 0x80
+#define PCI_SYS_INT_PENDING_CLEAR_PIO 0x40
+#define PCI_SYS_INT_PENDING_CLEAR_DMA 0x20
+#define PCI_SYS_INT_PENDING_CLEAR_PCI 0x10
#define PCI_IOTLB_CONTROL 0x84 /* 8 bits */
#define PCI_INT_SELECT_LO 0x88 /* 16 bits */
#define PCI_ARBITRATION_SELECT 0x8a /* 16 bits */
base = lookup_dentry (emul,
dget (current->fs->root),
- (LOOKUP_FOLLOW | LOOKUP_DIRECTORY | LOOKUP_SLASHOK));
+ (LOOKUP_FOLLOW | LOOKUP_DIRECTORY));
if (IS_ERR (base)) return NULL;
char loaded;
};
-extern __inline__ int ip_check_mc(struct device *dev, u32 mc_addr)
-{
- struct in_device *in_dev = dev->ip_ptr;
- struct ip_mc_list *im;
-
- if (in_dev) {
- for (im=in_dev->mc_list; im; im=im->next)
- if (im->multiaddr == mc_addr)
- return 1;
- }
- return 0;
-}
-
+extern int ip_check_mc(struct device *dev, u32 mc_addr);
extern int igmp_rcv(struct sk_buff *, unsigned short);
extern int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr);
extern int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr);
extern void inet_proto_init(struct net_proto *pro);
extern char *in_ntoa(__u32 in);
+extern char *in_ntoa2(__u32 in, char *buf);
extern __u32 in_aton(const char *str);
#endif
extern void devinet_init(void);
extern struct in_device *inetdev_init(struct device *dev);
extern struct in_device *inetdev_by_index(int);
-extern u32 inet_select_addr(struct device *dev, u32 dst, int scope);
+extern u32 inet_select_addr(const struct device *dev, u32 dst, int scope);
extern struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, u32 prefix, u32 mask);
extern void inet_forward_change(void);
struct hh_cache *hh_next; /* Next entry */
atomic_t hh_refcnt; /* number of users */
unsigned short hh_type; /* protocol identifier, f.e ETH_P_IP */
+ int hh_len; /* length of header */
int (*hh_output)(struct sk_buff *skb);
rwlock_t hh_lock;
/* cached hardware header; allow for machine alignment needs. */
void *atalk_ptr; /* AppleTalk link */
void *ip_ptr; /* IPv4 specific data */
void *dn_ptr; /* DECnet specific data */
+ void *ip6_ptr; /* IPv6 specific data */
struct Qdisc *qdisc;
struct Qdisc *qdisc_sleeping;
/* hard_start_xmit synchronizer */
spinlock_t xmit_lock;
+ /* CPU id of the processor currently inside hard_start_xmit,
+ or -1 if nobody is in it.
+ */
+ int xmit_lock_owner;
+ /* device queue lock */
+ spinlock_t queue_lock;
+ atomic_t refcnt;
/* Pointers to interface service routines. */
int (*open)(struct device *dev);
#define PCI_VENDOR_ID_ADAPTEC 0x9004
#define PCI_DEVICE_ID_ADAPTEC_7810 0x1078
+#define PCI_DEVICE_ID_ADAPTEC_7821 0x2178
#define PCI_DEVICE_ID_ADAPTEC_7850 0x5078
#define PCI_DEVICE_ID_ADAPTEC_7855 0x5578
#define PCI_DEVICE_ID_ADAPTEC_5800 0x5800
+#define PCI_DEVICE_ID_ADAPTEC_3860 0x6038
#define PCI_DEVICE_ID_ADAPTEC_1480A 0x6075
#define PCI_DEVICE_ID_ADAPTEC_7860 0x6078
#define PCI_DEVICE_ID_ADAPTEC_7861 0x6178
#define PCI_DEVICE_ID_ADAPTEC_7882 0x8278
#define PCI_DEVICE_ID_ADAPTEC_7883 0x8378
#define PCI_DEVICE_ID_ADAPTEC_7884 0x8478
+#define PCI_DEVICE_ID_ADAPTEC_7885 0x8578
+#define PCI_DEVICE_ID_ADAPTEC_7886 0x8678
+#define PCI_DEVICE_ID_ADAPTEC_7887 0x8778
+#define PCI_DEVICE_ID_ADAPTEC_7888 0x8878
#define PCI_DEVICE_ID_ADAPTEC_1030 0x8b78
#define PCI_VENDOR_ID_ADAPTEC2 0x9005
#define PCI_DEVICE_ID_ADAPTEC2_2940U2 0x0010
-#define PCI_DEVICE_ID_ADAPTEC2_78902 0x0013
+#define PCI_DEVICE_ID_ADAPTEC2_2930U2 0x0011
+#define PCI_DEVICE_ID_ADAPTEC2_7890B 0x0013
#define PCI_DEVICE_ID_ADAPTEC2_7890 0x001f
#define PCI_DEVICE_ID_ADAPTEC2_3940U2 0x0050
#define PCI_DEVICE_ID_ADAPTEC2_3950U2D 0x0051
#define PCI_DEVICE_ID_ADAPTEC2_7896 0x005f
+#define PCI_DEVICE_ID_ADAPTEC2_7892A 0x0080
+#define PCI_DEVICE_ID_ADAPTEC2_7892B 0x0081
+#define PCI_DEVICE_ID_ADAPTEC2_7892D 0x0083
+#define PCI_DEVICE_ID_ADAPTEC2_7892P 0x008f
+#define PCI_DEVICE_ID_ADAPTEC2_7899A 0x00c0
+#define PCI_DEVICE_ID_ADAPTEC2_7899B 0x00c1
+#define PCI_DEVICE_ID_ADAPTEC2_7899D 0x00c3
+#define PCI_DEVICE_ID_ADAPTEC2_7899P 0x00cf
#define PCI_VENDOR_ID_ATRONICS 0x907f
#define PCI_DEVICE_ID_ATRONICS_2015 0x2015
__u32 pps; /* Current flow packet rate */
__u32 qlen;
__u32 backlog;
+#ifdef __KERNEL__
+ spinlock_t *lock;
+#endif
};
struct tc_estimator
#ifdef __KERNEL__
-extern atomic_t rtnl_rlockct;
-extern wait_queue_head_t rtnl_wait;
-
extern __inline__ int rtattr_strcmp(struct rtattr *rta, char *str)
{
int len = strlen(str) + 1;
#define RTA_PUT(skb, attrtype, attrlen, data) \
({ if (skb_tailroom(skb) < (int)RTA_SPACE(attrlen)) goto rtattr_failure; \
__rta_fill(skb, attrtype, attrlen, data); })
-
-extern unsigned long rtnl_wlockct;
-
-/* NOTE: these locks are not interrupt safe, are not SMP safe,
- * they are even not atomic. 8)8)8) ... and it is not a bug.
- * Really, if these locks will be programmed correctly,
- * all the addressing/routing machine would become SMP safe,
- * but is absolutely useless at the moment, because all the kernel
- * is not reenterable in any case. --ANK
- *
- * Well, atomic_* and set_bit provide the only thing here:
- * gcc is confused not to overoptimize them, that's all.
- * I remember as gcc splitted ++ operation, but cannot reproduce
- * it with gcc-2.7.*. --ANK
- *
- * One more note: rwlock facility should be written and put
- * to a kernel wide location: f.e. current implementation of semaphores
- * (especially, for x86) looks like a wonder. It would be good
- * to have something similar for rwlock. Recursive lock could be also
- * useful thing. --ANK
- */
-
-extern __inline__ int rtnl_shlock_nowait(void)
-{
- atomic_inc(&rtnl_rlockct);
- if (test_bit(0, &rtnl_wlockct)) {
- atomic_dec(&rtnl_rlockct);
- return -EAGAIN;
- }
- return 0;
-}
-
-extern __inline__ void rtnl_shlock(void)
-{
- while (rtnl_shlock_nowait())
- sleep_on(&rtnl_wait);
-}
-
-/* Check for possibility to PROMOTE shared lock to exclusive.
- Shared lock must be already grabbed with rtnl_shlock*().
- */
-
-extern __inline__ int rtnl_exlock_nowait(void)
-{
- if (atomic_read(&rtnl_rlockct) > 1)
- return -EAGAIN;
- if (test_and_set_bit(0, &rtnl_wlockct))
- return -EAGAIN;
- return 0;
-}
-
-extern __inline__ void rtnl_exlock(void)
-{
- while (rtnl_exlock_nowait())
- sleep_on(&rtnl_wait);
-}
-
-#if 0
-extern __inline__ void rtnl_shunlock(void)
-{
- atomic_dec(&rtnl_rlockct);
- if (atomic_read(&rtnl_rlockct) <= 1) {
- wake_up(&rtnl_wait);
- if (rtnl && rtnl->receive_queue.qlen)
- rtnl->data_ready(rtnl, 0);
- }
-}
-#else
-
-/* The problem: inline requires to include <net/sock.h> and, hence,
- almost all of net includes :-(
- */
-
-#define rtnl_shunlock() ({ \
- atomic_dec(&rtnl_rlockct); \
- if (atomic_read(&rtnl_rlockct) <= 1) { \
- wake_up(&rtnl_wait); \
- if (rtnl && rtnl->receive_queue.qlen) \
- rtnl->data_ready(rtnl, 0); \
- } \
-})
#endif
-/* Release exclusive lock. Note, that we do not wake up rtnetlink socket,
- * it will be done later after releasing shared lock.
- */
-
-extern __inline__ void rtnl_exunlock(void)
-{
- clear_bit(0, &rtnl_wlockct);
- wake_up(&rtnl_wait);
-}
+extern struct semaphore rtnl_sem;
-#else
+#define rtnl_exlock() do { } while(0)
+#define rtnl_exunlock() do { } while(0)
+#define rtnl_exlock_nowait() (0)
-extern __inline__ void rtnl_shlock(void)
-{
- while (atomic_read(&rtnl_rlockct))
- sleep_on(&rtnl_wait);
- atomic_inc(&rtnl_rlockct);
-}
-
-extern __inline__ void rtnl_shunlock(void)
-{
- if (atomic_dec_and_test(&rtnl_rlockct))
- wake_up(&rtnl_wait);
-}
-
-extern __inline__ void rtnl_exlock(void)
-{
-}
-
-extern __inline__ void rtnl_exunlock(void)
-{
-}
+#define rtnl_shlock() down(&rtnl_sem)
+#define rtnl_shlock_nowait() down_trylock(&rtnl_sem)
+#ifndef CONFIG_RTNETLINK
+#define rtnl_shunlock() up(&rtnl_sem)
+#else
+#define rtnl_shunlock() do { up(&rtnl_sem); \
+ if (rtnl && rtnl->receive_queue.qlen) \
+ rtnl->data_ready(rtnl, 0); \
+ } while(0)
#endif
extern void rtnl_lock(void);
extern void rtnl_unlock(void);
extern void rtnetlink_init(void);
+
+
#endif /* __KERNEL__ */
extern int ipv6_get_saddr(struct dst_entry *dst,
struct in6_addr *daddr,
struct in6_addr *saddr);
-extern struct inet6_ifaddr * ipv6_get_lladdr(struct device *dev);
+extern int ipv6_get_lladdr(struct device *dev, struct in6_addr *);
/*
* multicast prototypes (mcast.c)
int ifindex,
struct in6_addr *addr);
extern void ipv6_sock_mc_close(struct sock *sk);
+extern int inet6_mc_check(struct sock *sk, struct in6_addr *addr);
extern int ipv6_dev_mc_inc(struct device *dev,
struct in6_addr *addr);
if (dst->expires == 0 || (long)(dst->expires - expires) > 0)
dst->expires = expires;
}
+
+extern void dst_init(void);
+
#endif
#endif /* _NET_DST_H */
__u32 valid_lft;
__u32 prefered_lft;
unsigned long tstamp;
+ atomic_t refcnt;
__u8 probes;
__u8 flags;
struct inet6_ifaddr *addr_list;
struct ifmcaddr6 *mc_list;
+ rwlock_t lock;
__u32 if_flags;
struct neigh_parms *nd_parms;
skb->protocol = __constant_htons(ETH_P_IP);
if (hh) {
- read_lock_irq(&hh->hh_lock);
+ read_lock_bh(&hh->hh_lock);
memcpy(skb->data - 16, hh->hh_data, 16);
- read_unlock_irq(&hh->hh_lock);
- skb_push(skb, dev->hard_header_len);
+ read_unlock_bh(&hh->hh_lock);
+ skb_push(skb, hh->hh_len);
return hh->hh_output(skb);
} else if (dst->neighbour)
return dst->neighbour->output(skb);
__u8 flags;
__u8 nud_state;
__u8 type;
- __u8 probes;
+ atomic_t probes;
+ rwlock_t lock;
unsigned char ha[MAX_ADDR_LEN];
struct hh_cache *hh;
atomic_t refcnt;
struct timer_list proxy_timer;
struct sk_buff_head proxy_queue;
int entries;
- atomic_t lock;
+ rwlock_t lock;
unsigned long last_rand;
struct neigh_parms *parms_list;
struct neigh_statistics stats;
extern void neigh_table_init(struct neigh_table *tbl);
extern int neigh_table_clear(struct neigh_table *tbl);
-extern struct neighbour *__neigh_lookup(struct neigh_table *tbl,
- const void *pkey, struct device *dev,
- int creat);
+extern struct neighbour * neigh_lookup(struct neigh_table *tbl,
+ const void *pkey,
+ struct device *dev);
+extern struct neighbour * neigh_create(struct neigh_table *tbl,
+ const void *pkey,
+ struct device *dev);
extern void neigh_destroy(struct neighbour *neigh);
extern int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb);
extern int neigh_update(struct neighbour *neigh, u8 *lladdr, u8 new, int override, int arp);
neigh->confirmed = jiffies;
}
-extern __inline__ struct neighbour *
-neigh_lookup(struct neigh_table *tbl, const void *pkey, struct device *dev)
-{
- struct neighbour *neigh;
- start_bh_atomic();
- neigh = __neigh_lookup(tbl, pkey, dev, 0);
- end_bh_atomic();
- return neigh;
-}
-
extern __inline__ int neigh_is_connected(struct neighbour *neigh)
{
return neigh->nud_state&NUD_CONNECTED;
return 0;
}
-extern __inline__ void neigh_table_lock(struct neigh_table *tbl)
+extern __inline__ struct neighbour *
+__neigh_lookup(struct neigh_table *tbl, const void *pkey, struct device *dev, int creat)
{
- atomic_inc(&tbl->lock);
- synchronize_bh();
-}
+ struct neighbour *n = neigh_lookup(tbl, pkey, dev);
-extern __inline__ void neigh_table_unlock(struct neigh_table *tbl)
-{
- atomic_dec(&tbl->lock);
-}
+ if (n || !creat)
+ return n;
+ return neigh_create(tbl, pkey, dev);
+}
#endif
#endif
return -1;
}
-extern __inline__ unsigned long cls_set_class(unsigned long *clp, unsigned long cl)
-{
- cl = xchg(clp, cl);
- synchronize_bh();
- return cl;
-}
+
extern int register_tcf_proto_ops(struct tcf_proto_ops *ops);
extern int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
+
+
#endif
struct Qdisc_head
{
struct Qdisc_head *forw;
+ struct Qdisc_head *back;
};
extern struct Qdisc_head qdisc_head;
+extern spinlock_t qdisc_runqueue_lock;
+extern rwlock_t qdisc_tree_lock;
struct Qdisc
{
int refcnt;
};
+extern __inline__ void sch_tree_lock(struct Qdisc *q)
+{
+ write_lock(&qdisc_tree_lock);
+ spin_lock_bh(&q->dev->queue_lock);
+}
+
+extern __inline__ void sch_tree_unlock(struct Qdisc *q)
+{
+ spin_unlock_bh(&q->dev->queue_lock);
+ write_unlock(&qdisc_tree_lock);
+}
+
+extern __inline__ void tcf_tree_lock(struct tcf_proto *tp)
+{
+ write_lock(&qdisc_tree_lock);
+ spin_lock_bh(&tp->q->dev->queue_lock);
+}
+
+extern __inline__ void tcf_tree_unlock(struct tcf_proto *tp)
+{
+ spin_unlock_bh(&tp->q->dev->queue_lock);
+ write_unlock(&qdisc_tree_lock);
+}
+
+
+extern __inline__ unsigned long
+cls_set_class(struct tcf_proto *tp, unsigned long *clp, unsigned long cl)
+{
+ tcf_tree_lock(tp);
+ cl = xchg(clp, cl);
+ tcf_tree_unlock(tp);
+ return cl;
+}
+
+extern __inline__ unsigned long
+__cls_set_class(unsigned long *clp, unsigned long cl)
+{
+ return xchg(clp, cl);
+}
+
/*
Timer resolution MUST BE < 10% of min_schedulable_packet_size/bandwidth
u32 toks;
u32 ptoks;
psched_time_t t_c;
+ spinlock_t lock;
struct qdisc_rate_table *R_tab;
struct qdisc_rate_table *P_tab;
struct tc_stats stats;
};
+extern int qdisc_copy_stats(struct sk_buff *skb, struct tc_stats *st);
extern void tcf_police_destroy(struct tcf_police *p);
extern struct tcf_police * tcf_police_locate(struct rtattr *rta, struct rtattr *est);
extern int tcf_police_dump(struct sk_buff *skb, struct tcf_police *p);
int tc_filter_init(void);
int pktsched_init(void);
-void qdisc_run_queues(void);
-int qdisc_restart(struct device *dev);
+extern void qdisc_run_queues(void);
+extern int qdisc_restart(struct device *dev);
+
+extern spinlock_t qdisc_runqueue_lock;
+
+/* Is it on run list? Reliable only under qdisc_runqueue_lock. */
+
+extern __inline__ int qdisc_on_runqueue(struct Qdisc *q)
+{
+ return q->h.forw != NULL;
+}
+
+/* Is run list not empty? Reliable only under qdisc_runqueue_lock. */
+
+extern __inline__ int qdisc_pending(void)
+{
+ return qdisc_head.forw != &qdisc_head;
+}
+
+/* Add qdisc to tail of run list. Called with BH, disabled on this CPU */
+
+extern __inline__ void qdisc_run(struct Qdisc *q)
+{
+ spin_lock(&qdisc_runqueue_lock);
+ if (!qdisc_on_runqueue(q)) {
+ q->h.forw = &qdisc_head;
+ q->h.back = qdisc_head.back;
+ qdisc_head.back->forw = &q->h;
+ qdisc_head.back = &q->h;
+ }
+ spin_unlock(&qdisc_runqueue_lock);
+}
+
+/* If the device is not throttled, restart it and add to run list.
+ * BH must be disabled on this CPU.
+ */
extern __inline__ void qdisc_wakeup(struct device *dev)
{
if (!dev->tbusy) {
- struct Qdisc *q = dev->qdisc;
- if (qdisc_restart(dev) && q->h.forw == NULL) {
- q->h.forw = qdisc_head.forw;
- qdisc_head.forw = &q->h;
- }
+ spin_lock(&dev->queue_lock);
+ if (qdisc_restart(dev))
+ qdisc_run(dev->qdisc);
+ spin_unlock(&dev->queue_lock);
}
}
+/* Calculate maximal size of packet seen by hard_start_xmit
+ routine of this device.
+ */
extern __inline__ unsigned psched_mtu(struct device *dev)
{
unsigned mtu = dev->mtu;
#define RT_HASH_DIVISOR 256
-/*
- * Prevents LRU trashing, entries considered equivalent,
- * if the difference between last use times is less then this number.
- */
-#define RT_CACHE_BUBBLE_THRESHOLD (5*HZ)
-
-
#define RTO_ONLINK 0x01
#define RTO_TPROXY 0x80000000
};
extern struct ip_rt_acct ip_rt_acct[256];
+extern rwlock_t ip_rt_acct_lock;
extern void ip_rt_init(void);
extern void ip_rt_redirect(u32 old_gw, u32 dst, u32 new_gw,
#define SCSICAM_H
#include <linux/kdev_t.h>
extern int scsicam_bios_param (Disk *disk, kdev_t dev, int *ip);
+extern int scsi_partsize(struct buffer_head *bh, unsigned long capacity,
+ unsigned int *cyls, unsigned int *hds, unsigned int *secs);
#endif /* def SCSICAM_H */
{
struct device *dev;
- read_lock_bh(&dev_base_lock);
+ read_lock(&dev_base_lock);
for (dev = dev_base; dev != NULL; dev = dev->next) {
if (strcmp(dev->name, name) == 0)
goto out;
}
out:
- read_unlock_bh(&dev_base_lock);
+ read_unlock(&dev_base_lock);
return dev;
}
{
struct device *dev;
- read_lock_bh(&dev_base_lock);
+ read_lock(&dev_base_lock);
for (dev = dev_base; dev != NULL; dev = dev->next) {
if (dev->ifindex == ifindex)
goto out;
}
out:
- read_unlock_bh(&dev_base_lock);
+ read_unlock(&dev_base_lock);
return dev;
}
{
struct device *dev;
- read_lock_bh(&dev_base_lock);
+ read_lock(&dev_base_lock);
for (dev = dev_base; dev != NULL; dev = dev->next) {
if (dev->type == type &&
memcmp(dev->dev_addr, ha, dev->addr_len) == 0)
goto out;
}
out:
- read_unlock_bh(&dev_base_lock);
+ read_unlock(&dev_base_lock);
return dev;
}
}
return -ENFILE; /* Over 100 of the things .. bail out! */
}
-
+
struct device *dev_alloc(const char *name, int *err)
{
struct device *dev=kmalloc(sizeof(struct device)+16, GFP_KERNEL);
if (dev->flags&IFF_UP)
return 0;
- /* Setup the lock before we open the faucet. */
- spin_lock_init(&dev->xmit_lock);
-
/*
* Call device private open method
*/
if (dev) {
dev_do_clear_fastroute(dev);
} else {
- read_lock_bh(&dev_base_lock);
+ read_lock(&dev_base_lock);
for (dev = dev_base; dev; dev = dev->next)
dev_do_clear_fastroute(dev);
- read_unlock_bh(&dev_base_lock);
+ read_unlock(&dev_base_lock);
}
}
#endif
struct device *dev = skb->dev;
struct Qdisc *q;
-#ifdef CONFIG_NET_PROFILE
- start_bh_atomic();
- NET_PROFILE_ENTER(dev_queue_xmit);
-#endif
-
- spin_lock_bh(&dev->xmit_lock);
+ /* Grab device queue */
+ spin_lock_bh(&dev->queue_lock);
q = dev->qdisc;
if (q->enqueue) {
q->enqueue(skb, q);
- qdisc_wakeup(dev);
- spin_unlock_bh(&dev->xmit_lock);
-#ifdef CONFIG_NET_PROFILE
- NET_PROFILE_LEAVE(dev_queue_xmit);
- end_bh_atomic();
-#endif
+ /* If the device is not busy, kick it.
+ * Otherwise, or if the queue is not empty after the kick,
+ * add it to the run list.
+ */
+ if (dev->tbusy || qdisc_restart(dev))
+ qdisc_run(dev->qdisc);
+ spin_unlock_bh(&dev->queue_lock);
return 0;
}
+ spin_unlock_bh(&dev->queue_lock);
/* The device has no queue. Common case for software devices:
loopback, all the sorts of tunnels...
- Really, it is unlikely that bh protection is necessary here:
- virtual devices do not generate EOI events.
- However, it is possible, that they rely on bh protection
+ Really, it is unlikely that xmit_lock protection is necessary here.
+ (f.e. loopback and IP tunnels are clean ignoring statistics counters.)
+ However, it is possible, that they rely on protection
made by us here.
+
+ Check this and shoot the lock. It is not prone to deadlocks.
+ Either shoot the noqueue qdisc, it is even simpler 8)
*/
if (dev->flags&IFF_UP) {
if (netdev_nit)
dev_queue_xmit_nit(skb,dev);
- if (dev->hard_start_xmit(skb, dev) == 0) {
- spin_unlock_bh(&dev->xmit_lock);
-
-#ifdef CONFIG_NET_PROFILE
- NET_PROFILE_LEAVE(dev_queue_xmit);
- end_bh_atomic();
-#endif
- return 0;
+ local_bh_disable();
+ if (dev->xmit_lock_owner != smp_processor_id()) {
+ spin_lock(&dev->xmit_lock);
+ dev->xmit_lock_owner = smp_processor_id();
+ if (dev->hard_start_xmit(skb, dev) == 0) {
+ dev->xmit_lock_owner = -1;
+ spin_unlock_bh(&dev->xmit_lock);
+ return 0;
+ }
+ dev->xmit_lock_owner = -1;
+ spin_unlock_bh(&dev->xmit_lock);
+ if (net_ratelimit())
+ printk(KERN_DEBUG "Virtual device %s asks to queue packet!\n", dev->name);
+ } else {
+ /* Recursion is detected! It is possible, unfortunately */
+ local_bh_enable();
+ if (net_ratelimit())
+ printk(KERN_DEBUG "Dead loop on virtual device %s, fix it urgently!\n", dev->name);
}
- if (net_ratelimit())
- printk(KERN_DEBUG "Virtual device %s asks to queue packet!\n", dev->name);
}
- spin_unlock_bh(&dev->xmit_lock);
kfree_skb(skb);
-
-#ifdef CONFIG_NET_PROFILE
- NET_PROFILE_LEAVE(dev_queue_xmit);
- end_bh_atomic();
-#endif
-
return 0;
}
int netdev_dropping = 0;
int netdev_max_backlog = 300;
atomic_t netdev_rx_dropped;
-#ifdef CONFIG_CPU_IS_SLOW
-int net_cpu_congestion;
-#endif
#ifdef CONFIG_NET_HW_FLOWCONTROL
int netdev_throttle_events;
struct packet_type *pt_prev;
unsigned short type;
unsigned long start_time = jiffies;
-#ifdef CONFIG_CPU_IS_SLOW
- static unsigned long start_busy = 0;
- static unsigned long ave_busy = 0;
-
- if (start_busy == 0)
- start_busy = start_time;
- net_cpu_congestion = ave_busy>>8;
-#endif
NET_PROFILE_ENTER(net_bh);
/*
* latency on a transmit interrupt bh.
*/
- if (qdisc_head.forw != &qdisc_head)
+ if (qdisc_pending())
qdisc_run_queues();
-
+
/*
* Any data left to process. This may occur because a
* mark_bh() is done after we empty the queue including
*/
skb = skb_dequeue(&backlog);
-#ifdef CONFIG_CPU_IS_SLOW
- if (ave_busy > 128*16) {
- kfree_skb(skb);
- while ((skb = skb_dequeue(&backlog)) != NULL)
- kfree_skb(skb);
- break;
- }
-#endif
-
-
-#if 0
- NET_PROFILE_SKB_PASSED(skb, net_bh_skb);
-#endif
#ifdef CONFIG_NET_FASTROUTE
if (skb->pkt_type == PACKET_FASTROUTE) {
dev_queue_xmit(skb);
* One last output flush.
*/
- if (qdisc_head.forw != &qdisc_head)
+ if (qdisc_pending())
qdisc_run_queues();
-#ifdef CONFIG_CPU_IS_SLOW
- if (1) {
- unsigned long start_idle = jiffies;
- ave_busy += ((start_idle - start_busy)<<3) - (ave_busy>>4);
- start_busy = 0;
- }
-#endif
#ifdef CONFIG_NET_HW_FLOWCONTROL
if (netdev_dropping)
netdev_wakeup();
*/
/*
- * This call is useful, but I'd remove it too.
- *
- * The reason is purely aestetical, it is the only call
- * from SIOC* family using struct ifreq in reversed manner.
- * Besides that, it is pretty silly to put "drawing" facility
- * to kernel, it is useful only to print ifindices
- * in readable form, is not it? --ANK
- *
* We need this ioctl for efficient implementation of the
* if_indextoname() function required by the IPv6 API. Without
* it, we would have to search all the interfaces to find a
*/
total = 0;
- read_lock_bh(&dev_base_lock);
+ read_lock(&dev_base_lock);
for (dev = dev_base; dev != NULL; dev = dev->next) {
for (i=0; i<NPROTO; i++) {
if (gifconf_list[i]) {
}
}
}
- read_unlock_bh(&dev_base_lock);
+ read_unlock(&dev_base_lock);
if(pos != NULL) {
int err = copy_to_user(ifc.ifc_buf, pos, total);
len+=size;
- read_lock_bh(&dev_base_lock);
+ read_lock(&dev_base_lock);
for (dev = dev_base; dev != NULL; dev = dev->next) {
size = sprintf_stats(buffer+len, dev);
len+=size;
if(pos>offset+length)
break;
}
- read_unlock_bh(&dev_base_lock);
+ read_unlock(&dev_base_lock);
*start=buffer+(offset-begin); /* Start of wanted data */
len-=(offset-begin); /* Start slop */
pos+=size;
len+=size;
- read_lock_bh(&dev_base_lock);
+ read_lock(&dev_base_lock);
for(dev = dev_base; dev != NULL; dev = dev->next) {
size = sprintf_wireless_stats(buffer+len, dev);
len+=size;
if(pos > offset + length)
break;
}
- read_unlock_bh(&dev_base_lock);
+ read_unlock(&dev_base_lock);
*start = buffer + (offset - begin); /* Start of wanted data */
len -= (offset - begin); /* Start slop */
if (IW_IS_SET(cmd)) {
if (!suser())
return -EPERM;
- rtnl_lock();
}
+ rtnl_lock();
ret = dev_ifsioc(&ifr, cmd);
- if (IW_IS_SET(cmd))
- rtnl_unlock();
+ rtnl_unlock();
if (!ret && IW_IS_GET(cmd) &&
copy_to_user(arg, &ifr, sizeof(struct ifreq)))
return -EFAULT;
{
struct device *d, **dp;
+ spin_lock_init(&dev->queue_lock);
+ spin_lock_init(&dev->xmit_lock);
+ dev->xmit_lock_owner = -1;
+
if (dev_boot_phase) {
/* This is NOT bug, but I am not sure, that all the
devices, initialized before netdev module is started
printk(KERN_INFO "early initialization of device %s is deferred\n", dev->name);
/* Check for existence, and append to tail of chain */
- write_lock_bh(&dev_base_lock);
for (dp=&dev_base; (d=*dp) != NULL; dp=&d->next) {
if (d == dev || strcmp(d->name, dev->name) == 0) {
- write_unlock_bh(&dev_base_lock);
return -EEXIST;
}
}
dev->next = NULL;
+ write_lock_bh(&dev_base_lock);
*dp = dev;
write_unlock_bh(&dev_base_lock);
return 0;
if (dev->init && dev->init(dev) != 0)
return -EIO;
+ dev->ifindex = dev_new_index();
+ if (dev->iflink == -1)
+ dev->iflink = dev->ifindex;
+
/* Check for existence, and append to tail of chain */
- write_lock_bh(&dev_base_lock);
for (dp=&dev_base; (d=*dp) != NULL; dp=&d->next) {
if (d == dev || strcmp(d->name, dev->name) == 0) {
- write_unlock_bh(&dev_base_lock);
return -EEXIST;
}
}
dev->next = NULL;
dev_init_scheduler(dev);
+ write_lock_bh(&dev_base_lock);
*dp = dev;
write_unlock_bh(&dev_base_lock);
- dev->ifindex = -1;
- dev->ifindex = dev_new_index();
- if (dev->iflink == -1)
- dev->iflink = dev->ifindex;
-
/* Notify protocols, that a new device appeared. */
notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev);
{
struct device *d, **dp;
- if (dev_boot_phase == 0) {
- /* If device is running, close it.
- It is very bad idea, really we should
- complain loudly here, but random hackery
- in linux/drivers/net likes it.
- */
- if (dev->flags & IFF_UP)
- dev_close(dev);
+ /* If device is running, close it first. */
+ if (dev->flags & IFF_UP)
+ dev_close(dev);
+ /* And unlink it from device chain. */
+ for (dp = &dev_base; (d=*dp) != NULL; dp=&d->next) {
+ if (d == dev) {
+ write_lock_bh(&dev_base_lock);
+ *dp = d->next;
+ write_unlock_bh(&dev_base_lock);
+
+ /* Sorry. It is a known "feature". The race is clear.
+ Keep it until device reference counting
+ is complete.
+ */
+ synchronize_bh();
+ break;
+ }
+ }
+ if (d == NULL)
+ return -ENODEV;
+
+ /* This acts as a "synchronize_bh" for those who overslept
+ in skb_alloc/page fault etc. and missed that the device is off-line.
+ Again, it can be removed only once devices are refcounted.
+ */
+ dev_lock_wait();
+
+ if (dev_boot_phase == 0) {
#ifdef CONFIG_NET_FASTROUTE
dev_clear_fastroute(dev);
#endif
* Flush the multicast chain
*/
dev_mc_discard(dev);
-
- /* To avoid pointers looking to nowhere,
- we wait for end of critical section */
- dev_lock_wait();
}
- /* And unlink it from device chain. */
- write_lock_bh(&dev_base_lock);
- for (dp = &dev_base; (d=*dp) != NULL; dp=&d->next) {
- if (d == dev) {
- *dp = d->next;
- d->next = NULL;
- write_unlock_bh(&dev_base_lock);
-
- if (dev->destructor)
- dev->destructor(dev);
- return 0;
- }
- }
- write_unlock_bh(&dev_base_lock);
- return -ENODEV;
+ if (dev->destructor)
+ dev->destructor(dev);
+ return 0;
}
* If the call to dev->init fails, the dev is removed
* from the chain disconnecting the device until the
* next reboot.
+ *
+ * NB At boot phase networking is dead. No locking is required.
+ * But we still preserve dev_base_lock for sanity.
*/
dp = &dev_base;
while ((dev = *dp) != NULL) {
+ spin_lock_init(&dev->queue_lock);
+ spin_lock_init(&dev->xmit_lock);
+ dev->xmit_lock_owner = -1;
dev->iflink = -1;
if (dev->init && dev->init(dev)) {
/*
* It failed to come up. Unhook it.
*/
+ write_lock_bh(&dev_base_lock);
*dp = dev->next;
+ write_unlock_bh(&dev_base_lock);
} else {
dp = &dev->next;
dev->ifindex = dev_new_index();
dev_boot_phase = 0;
+ dst_init();
dev_mcast_init();
#ifdef CONFIG_IP_PNP
*
* Device mc lists are changed by bh at least if IPv6 is enabled,
* so that it must be bh protected.
+ *
+ * We protect all mc lists with global rw lock
+ * and block accesses to device mc filters with dev->xmit_lock.
*/
+static rwlock_t dev_mc_lock = RW_LOCK_UNLOCKED;
/*
* Update the multicast list into the physical NIC controller.
/* Don't do anything till we up the interface
[dev_open will call this function so the list will
stay sane] */
-
+
if(!(dev->flags&IFF_UP))
return;
if(dev->set_multicast_list==NULL)
return;
- start_bh_atomic();
+ read_lock_bh(&dev_mc_lock);
+ spin_lock(&dev->xmit_lock);
+ dev->xmit_lock_owner = smp_processor_id();
dev->set_multicast_list(dev);
- end_bh_atomic();
+ dev->xmit_lock_owner = -1;
+ spin_unlock(&dev->xmit_lock);
+ read_unlock_bh(&dev_mc_lock);
}
-
+
/*
* Delete a device level multicast
*/
int err = 0;
struct dev_mc_list *dmi, **dmip;
- start_bh_atomic();
+ write_lock_bh(&dev_mc_lock);
for (dmip=&dev->mc_list; (dmi=*dmip)!=NULL; dmip=&dmi->next) {
/*
* Find the entry we want to delete. The device could
* We have altered the list, so the card
* loaded filter is now wrong. Fix it
*/
- end_bh_atomic();
+ write_unlock_bh(&dev_mc_lock);
+
dev_mc_upload(dev);
return 0;
}
}
err = -ENOENT;
done:
- end_bh_atomic();
+ write_unlock_bh(&dev_mc_lock);
return err;
}
int err = 0;
struct dev_mc_list *dmi, *dmi1;
+ /* RED-PEN: does gfp_any() work now? It requires
+ true local_bh_disable rather than global.
+ */
dmi1 = (struct dev_mc_list *)kmalloc(sizeof(*dmi), gfp_any());
- start_bh_atomic();
+ write_lock_bh(&dev_mc_lock);
for(dmi=dev->mc_list; dmi!=NULL; dmi=dmi->next) {
if (memcmp(dmi->dmi_addr,addr,dmi->dmi_addrlen)==0 && dmi->dmi_addrlen==alen) {
if (glbl) {
}
}
- if ((dmi=dmi1)==NULL)
+ if ((dmi=dmi1)==NULL) {
+ write_unlock_bh(&dev_mc_lock);
return -ENOMEM;
+ }
memcpy(dmi->dmi_addr, addr, alen);
dmi->dmi_addrlen=alen;
dmi->next=dev->mc_list;
dmi->dmi_gusers=glbl ? 1 : 0;
dev->mc_list=dmi;
dev->mc_count++;
- end_bh_atomic();
+ write_unlock_bh(&dev_mc_lock);
dev_mc_upload(dev);
return 0;
done:
- end_bh_atomic();
+ write_unlock_bh(&dev_mc_lock);
if (dmi1)
kfree(dmi1);
return err;
void dev_mc_discard(struct device *dev)
{
- start_bh_atomic();
+ write_lock_bh(&dev_mc_lock);
while (dev->mc_list!=NULL) {
struct dev_mc_list *tmp=dev->mc_list;
dev->mc_list=tmp->next;
kfree_s(tmp,sizeof(*tmp));
}
dev->mc_count=0;
- end_bh_atomic();
+ write_unlock_bh(&dev_mc_lock);
}
#ifdef CONFIG_PROC_FS
int len=0;
struct device *dev;
- read_lock_bh(&dev_base_lock);
+ read_lock(&dev_base_lock);
for (dev = dev_base; dev; dev = dev->next) {
+ read_lock_bh(&dev_mc_lock);
for (m = dev->mc_list; m; m = m->next) {
int i;
len=0;
begin=pos;
}
- if (pos > offset+length)
+ if (pos > offset+length) {
+ read_unlock_bh(&dev_mc_lock);
goto done;
+ }
}
+ read_unlock_bh(&dev_mc_lock);
}
*eof = 1;
done:
- read_unlock_bh(&dev_base_lock);
+ read_unlock(&dev_base_lock);
*start=buffer+(offset-begin);
len-=(offset-begin);
if(len>length)
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
+#include <linux/init.h>
#include <net/dst.h>
static struct timer_list dst_gc_timer =
{ NULL, NULL, DST_GC_MIN, 0L, dst_run_gc };
-#if RT_CACHE_DEBUG >= 2
-atomic_t hh_count;
-#endif
static void dst_run_gc(unsigned long dummy)
{
int delayed = 0;
struct dst_entry * dst, **dstp;
- spin_lock(&dst_lock);
+ if (!spin_trylock(&dst_lock)) {
+ mod_timer(&dst_gc_timer, jiffies + HZ/10);
+ return;
+ }
del_timer(&dst_gc_timer);
dstp = &dst_garbage_list;
atomic_dec(&dst_total);
kfree(dst);
}
+
+static int dst_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
+{
+ struct device *dev = ptr;
+ struct dst_entry *dst;
+
+ switch (event) {
+ case NETDEV_UNREGISTER:
+ case NETDEV_DOWN:
+ spin_lock_bh(&dst_lock);
+ for (dst = dst_garbage_list; dst; dst = dst->next) {
+ if (dst->dev == dev) {
+ dst->input = dst_discard;
+ dst->output = dst_blackhole;
+ dst->dev = &loopback_dev;
+ }
+ }
+ spin_unlock_bh(&dst_lock);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+struct notifier_block dst_dev_notifier = {
+ dst_dev_event,
+ NULL,
+ 0
+};
+
+__initfunc(void dst_init(void))
+{
+ register_netdevice_notifier(&dst_dev_notifier);
+}
*
* Fixes:
* Vitaly E. Lavrov releasing NULL neighbor in neigh_add.
- * Horst von Brand Add #include <linux/string.h>
*/
#include <linux/config.h>
#include <linux/types.h>
-#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/socket.h>
#include <linux/sched.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
-/*
- NOTE. The most unpleasent question is serialization of
- accesses to resolved addresses. The problem is that addresses
- are modified by bh, but they are referenced from normal
- kernel thread. Before today no locking was made.
- My reasoning was that corrupted address token will be copied
- to packet with cosmologically small probability
- (it is even difficult to estimate such small number)
- and it is very silly to waste cycles in fast path to lock them.
-
- But now I changed my mind, but not because previous statement
- is wrong. Actually, neigh->ha MAY BE not opaque byte array,
- but reference to some private data. In this case even neglibible
- corruption probability becomes bug.
-
- - hh cache is protected by rwlock. It assumes that
- hh cache update procedure is short and fast, and that
- read_lock is cheaper than start_bh_atomic().
- - ha tokens, saved in neighbour entries, are protected
- by bh_atomic().
- - no protection is made in /proc reading. It is OK, because
- /proc is broken by design in any case, and
- corrupted output is normal behaviour there.
-
- --ANK (981025)
- */
-
#define NEIGH_DEBUG 1
#define NEIGH_PRINTK(x...) printk(x)
static int neigh_glbl_allocs;
static struct neigh_table *neigh_tables;
+#if defined(__i386__) && defined(__SMP__)
+#define ASSERT_WL(n) if ((int)((n)->lock.lock) >= 0) { printk("WL assertion failed at " __FILE__ "(%d):" __FUNCTION__ "\n", __LINE__); }
+#else
+#define ASSERT_WL(n) do { } while(0)
+#endif
+
+/*
+ Neighbour hash table buckets are protected with rwlock tbl->lock.
+
+ - All the scans/updates to hash buckets MUST be made under this lock.
+ - NOTHING clever should be made under this lock: no callbacks
+ to protocol backends, no attempts to send something to network.
+ It will result in deadlocks, if backend/driver wants to use neighbour
+ cache.
+ - If the entry requires some non-trivial actions, increase
+ its reference count and release table lock.
+
+ Neighbour entries are protected:
+ - with reference count.
+ - with rwlock neigh->lock
+
+ Reference count prevents destruction.
+
+ neigh->lock mainly serializes ll address data and its validity state.
+ However, the same lock is used to protect another entry fields:
+ - timer
+ - resolution queue
+
+ Again, nothing clever shall be done under neigh->lock;
+ the most complicated procedure we allow is dev->hard_header.
+ It is assumed that dev->hard_header is simplistic and does
+ not make callbacks to neighbour tables.
+
+ The last lock is neigh_tbl_lock. It is pure SMP lock, protecting
+ list of neighbour tables. This list is used only in process context,
+ so that this lock is useless with big kernel lock.
+ */
+
+static rwlock_t neigh_tbl_lock = RW_LOCK_UNLOCKED;
+
static int neigh_blackhole(struct sk_buff *skb)
{
kfree_skb(skb);
int shrunk = 0;
int i;
- if (atomic_read(&tbl->lock))
- return 0;
-
for (i=0; i<=NEIGH_HASHMASK; i++) {
struct neighbour *n, **np;
np = &tbl->hash_buckets[i];
+ write_lock_bh(&tbl->lock);
while ((n = *np) != NULL) {
/* Neighbour record may be discarded if:
- nobody refers to it.
It is not clear, what is better table overflow
or flooding.
*/
+ write_lock(&n->lock);
if (atomic_read(&n->refcnt) == 0 &&
!(n->nud_state&NUD_PERMANENT) &&
(n->nud_state != NUD_INCOMPLETE ||
n->tbl = NULL;
tbl->entries--;
shrunk = 1;
+ write_unlock(&n->lock);
neigh_destroy(n);
continue;
}
+ write_unlock(&n->lock);
np = &n->next;
}
+ write_unlock_bh(&tbl->lock);
}
tbl->last_flush = jiffies;
{
int i;
- if (atomic_read(&tbl->lock)) {
- NEIGH_PRINTK1("neigh_ifdown: impossible event 1763\n");
- return -EBUSY;
- }
+ write_lock_bh(&tbl->lock);
- start_bh_atomic();
for (i=0; i<=NEIGH_HASHMASK; i++) {
struct neighbour *n, **np;
continue;
}
*np = n->next;
+ write_lock(&n->lock);
n->tbl = NULL;
tbl->entries--;
if (atomic_read(&n->refcnt)) {
else
n->nud_state = NUD_NONE;
NEIGH_PRINTK2("neigh %p is stray.\n", n);
- } else
+ write_unlock(&n->lock);
+ } else {
+ write_unlock(&n->lock);
neigh_destroy(n);
+ }
}
}
del_timer(&tbl->proxy_timer);
skb_queue_purge(&tbl->proxy_queue);
pneigh_ifdown(tbl, dev);
- end_bh_atomic();
+ write_unlock_bh(&tbl->lock);
return 0;
}
-static struct neighbour *neigh_alloc(struct neigh_table *tbl, int creat)
+static struct neighbour *neigh_alloc(struct neigh_table *tbl)
{
struct neighbour *n;
unsigned long now = jiffies;
- if (tbl->entries > tbl->gc_thresh1) {
- if (creat < 0)
+ if (tbl->entries > tbl->gc_thresh3 ||
+ (tbl->entries > tbl->gc_thresh2 &&
+ now - tbl->last_flush > 5*HZ)) {
+ if (neigh_forced_gc(tbl) == 0 &&
+ tbl->entries > tbl->gc_thresh3)
return NULL;
- if (tbl->entries > tbl->gc_thresh3 ||
- (tbl->entries > tbl->gc_thresh2 &&
- now - tbl->last_flush > 5*HZ)) {
- if (neigh_forced_gc(tbl) == 0 &&
- tbl->entries > tbl->gc_thresh3)
- return NULL;
- }
}
n = kmalloc(tbl->entry_size, GFP_ATOMIC);
memset(n, 0, tbl->entry_size);
skb_queue_head_init(&n->arp_queue);
+ n->lock = RW_LOCK_UNLOCKED;
n->updated = n->used = now;
n->nud_state = NUD_NONE;
n->output = neigh_blackhole;
return n;
}
-
-struct neighbour * __neigh_lookup(struct neigh_table *tbl, const void *pkey,
- struct device *dev, int creat)
+struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
+ struct device *dev)
{
struct neighbour *n;
u32 hash_val;
hash_val ^= hash_val>>3;
hash_val = (hash_val^dev->ifindex)&NEIGH_HASHMASK;
+ read_lock_bh(&tbl->lock);
for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
if (dev == n->dev &&
memcmp(n->primary_key, pkey, key_len) == 0) {
atomic_inc(&n->refcnt);
- return n;
+ break;
}
}
- if (!creat)
- return NULL;
+ read_unlock_bh(&tbl->lock);
+ return n;
+}
+
+struct neighbour * neigh_create(struct neigh_table *tbl, const void *pkey,
+ struct device *dev)
+{
+ struct neighbour *n, *n1;
+ u32 hash_val;
+ int key_len = tbl->key_len;
- n = neigh_alloc(tbl, creat);
+ n = neigh_alloc(tbl);
if (n == NULL)
return NULL;
}
n->confirmed = jiffies - (n->parms->base_reachable_time<<1);
- atomic_set(&n->refcnt, 1);
+
+ hash_val = *(u32*)(pkey + key_len - 4);
+ hash_val ^= (hash_val>>16);
+ hash_val ^= hash_val>>8;
+ hash_val ^= hash_val>>3;
+ hash_val = (hash_val^dev->ifindex)&NEIGH_HASHMASK;
+
+ write_lock_bh(&tbl->lock);
+ for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
+ if (dev == n1->dev &&
+ memcmp(n1->primary_key, pkey, key_len) == 0) {
+ atomic_inc(&n1->refcnt);
+ write_unlock_bh(&tbl->lock);
+ neigh_destroy(n);
+ return n1;
+ }
+ }
+
tbl->entries++;
+ n->tbl = tbl;
+ atomic_set(&n->refcnt, 1);
n->next = tbl->hash_buckets[hash_val];
tbl->hash_buckets[hash_val] = n;
- n->tbl = tbl;
+ write_unlock_bh(&tbl->lock);
NEIGH_PRINTK2("neigh %p is created.\n", n);
return n;
}
while ((hh = neigh->hh) != NULL) {
neigh->hh = hh->hh_next;
hh->hh_next = NULL;
+ write_lock_bh(&hh->hh_lock);
hh->hh_output = neigh_blackhole;
+ write_unlock_bh(&hh->hh_lock);
if (atomic_dec_and_test(&hh->hh_refcnt))
kfree(hh);
}
/* Neighbour state is suspicious;
disable fast path.
+
+ Called with write_locked neigh.
*/
static void neigh_suspect(struct neighbour *neigh)
{
NEIGH_PRINTK2("neigh %p is suspecteded.\n", neigh);
+ ASSERT_WL(neigh);
+
neigh->output = neigh->ops->output;
for (hh = neigh->hh; hh; hh = hh->hh_next)
/* Neighbour state is OK;
enable fast path.
+
+ Called with write_locked neigh.
*/
static void neigh_connect(struct neighbour *neigh)
{
NEIGH_PRINTK2("neigh %p is connected.\n", neigh);
+ ASSERT_WL(neigh);
+
neigh->output = neigh->ops->connected_output;
for (hh = neigh->hh; hh; hh = hh->hh_next)
If a routine wants to know TRUE entry state, it calls
neigh_sync before checking state.
+
+ Called with write_locked neigh.
*/
static void neigh_sync(struct neighbour *n)
unsigned long now = jiffies;
u8 state = n->nud_state;
+ ASSERT_WL(n);
if (state&(NUD_NOARP|NUD_PERMANENT))
return;
if (state&NUD_REACHABLE) {
unsigned long now = jiffies;
int i;
- if (atomic_read(&tbl->lock)) {
- tbl->gc_timer.expires = now + 1*HZ;
- add_timer(&tbl->gc_timer);
- return;
- }
+
+ write_lock(&tbl->lock);
/*
* periodicly recompute ReachableTime from random function
np = &tbl->hash_buckets[i];
while ((n = *np) != NULL) {
- unsigned state = n->nud_state;
+ unsigned state;
- if (state&(NUD_PERMANENT|NUD_IN_TIMER))
+ write_lock(&n->lock);
+
+ state = n->nud_state;
+ if (state&(NUD_PERMANENT|NUD_IN_TIMER)) {
+ write_unlock(&n->lock);
goto next_elt;
+ }
if ((long)(n->used - n->confirmed) < 0)
n->used = n->confirmed;
n->tbl = NULL;
n->next = NULL;
tbl->entries--;
+ write_unlock(&n->lock);
neigh_destroy(n);
continue;
}
n->nud_state = NUD_STALE;
neigh_suspect(n);
}
+ write_unlock(&n->lock);
next_elt:
np = &n->next;
tbl->gc_timer.expires = now + tbl->gc_interval;
add_timer(&tbl->gc_timer);
+ write_unlock(&tbl->lock);
}
static __inline__ int neigh_max_probes(struct neighbour *n)
{
unsigned long now = jiffies;
struct neighbour *neigh = (struct neighbour*)arg;
- unsigned state = neigh->nud_state;
+ unsigned state;
+ int notify = 0;
+
+ write_lock(&neigh->lock);
+ atomic_inc(&neigh->refcnt);
+
+ state = neigh->nud_state;
if (!(state&NUD_IN_TIMER)) {
NEIGH_PRINTK1("neigh: timer & !nud_in_timer\n");
- return;
+ goto out;
}
if ((state&NUD_VALID) &&
neigh->nud_state = NUD_REACHABLE;
NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
neigh_connect(neigh);
- return;
+ goto out;
}
if (state == NUD_DELAY) {
NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
neigh->nud_state = NUD_PROBE;
- neigh->probes = 0;
+ atomic_set(&neigh->probes, 0);
}
- if (neigh->probes >= neigh_max_probes(neigh)) {
+ if (atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
struct sk_buff *skb;
neigh->nud_state = NUD_FAILED;
+ notify = 1;
neigh->tbl->stats.res_failed++;
NEIGH_PRINTK2("neigh %p is failed.\n", neigh);
So that, we try to be accurate and avoid dead loop. --ANK
*/
- while(neigh->nud_state==NUD_FAILED && (skb=__skb_dequeue(&neigh->arp_queue)) != NULL)
+ while(neigh->nud_state==NUD_FAILED && (skb=__skb_dequeue(&neigh->arp_queue)) != NULL) {
+ write_unlock(&neigh->lock);
neigh->ops->error_report(neigh, skb);
+ write_lock(&neigh->lock);
+ }
skb_queue_purge(&neigh->arp_queue);
- return;
+ goto out;
}
neigh->timer.expires = now + neigh->parms->retrans_time;
add_timer(&neigh->timer);
+ write_unlock(&neigh->lock);
neigh->ops->solicit(neigh, skb_peek(&neigh->arp_queue));
- neigh->probes++;
+ atomic_inc(&neigh->probes);
+ neigh_release(neigh);
+ return;
+
+out:
+ write_unlock(&neigh->lock);
+#ifdef CONFIG_ARPD
+ if (notify && neigh->parms->app_probes)
+ neigh_app_notify(neigh);
+#endif
+ neigh_release(neigh);
}
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
- start_bh_atomic();
+ write_lock_bh(&neigh->lock);
if (!(neigh->nud_state&(NUD_CONNECTED|NUD_DELAY|NUD_PROBE))) {
if (!(neigh->nud_state&(NUD_STALE|NUD_INCOMPLETE))) {
if (neigh->tbl == NULL) {
NEIGH_PRINTK2("neigh %p used after death.\n", neigh);
if (skb)
kfree_skb(skb);
- end_bh_atomic();
+ write_unlock_bh(&neigh->lock);
return 1;
}
if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
- neigh->probes = neigh->parms->ucast_probes;
+ atomic_set(&neigh->probes, neigh->parms->ucast_probes);
neigh->nud_state = NUD_INCOMPLETE;
neigh->timer.expires = jiffies + neigh->parms->retrans_time;
add_timer(&neigh->timer);
-
+ write_unlock_bh(&neigh->lock);
neigh->ops->solicit(neigh, skb);
- neigh->probes++;
+ atomic_inc(&neigh->probes);
+ write_lock_bh(&neigh->lock);
} else {
neigh->nud_state = NUD_FAILED;
+ write_unlock_bh(&neigh->lock);
+
if (skb)
kfree_skb(skb);
- end_bh_atomic();
return 1;
}
}
}
__skb_queue_head(&neigh->arp_queue, skb);
}
- end_bh_atomic();
+ write_unlock_bh(&neigh->lock);
return 1;
}
if (neigh->nud_state == NUD_STALE) {
add_timer(&neigh->timer);
}
}
- end_bh_atomic();
+ write_unlock_bh(&neigh->lock);
return 0;
}
if (update) {
for (hh=neigh->hh; hh; hh=hh->hh_next) {
- write_lock_irq(&hh->hh_lock);
+ write_lock_bh(&hh->hh_lock);
update(hh, neigh->dev, neigh->ha);
- write_unlock_irq(&hh->hh_lock);
+ write_unlock_bh(&hh->hh_lock);
}
}
}
-- new is new state.
-- override==1 allows to override existing lladdr, if it is different.
-- arp==0 means that the change is administrative.
+
+ Caller MUST hold reference count on the entry.
*/
int neigh_update(struct neighbour *neigh, u8 *lladdr, u8 new, int override, int arp)
{
- u8 old = neigh->nud_state;
+ u8 old;
+ int err;
+ int notify = 0;
struct device *dev = neigh->dev;
+ write_lock_bh(&neigh->lock);
+ old = neigh->nud_state;
+
+ err = -EPERM;
if (arp && (old&(NUD_NOARP|NUD_PERMANENT)))
- return -EPERM;
+ goto out;
if (!(new&NUD_VALID)) {
if (old&NUD_IN_TIMER)
if (old&NUD_CONNECTED)
neigh_suspect(neigh);
neigh->nud_state = new;
- return 0;
+ err = 0;
+ notify = old&NUD_VALID;
+ goto out;
}
/* Compare new lladdr with cached one */
if (memcmp(lladdr, neigh->ha, dev->addr_len) == 0)
lladdr = neigh->ha;
else if (!override)
- return -EPERM;
+ goto out;
}
} else {
/* No address is supplied; if we know something,
use it, otherwise discard the request.
*/
+ err = -EINVAL;
if (!(old&NUD_VALID))
- return -EINVAL;
+ goto out;
lladdr = neigh->ha;
}
/* If entry was valid and address is not changed,
do not change entry state, if new one is STALE.
*/
+ err = 0;
if (old&NUD_VALID) {
if (lladdr == neigh->ha)
if (new == old || (new == NUD_STALE && (old&NUD_CONNECTED)))
- return 0;
+ goto out;
}
if (old&NUD_IN_TIMER)
del_timer(&neigh->timer);
neigh_update_hhs(neigh);
neigh->confirmed = jiffies - (neigh->parms->base_reachable_time<<1);
#ifdef CONFIG_ARPD
- if (neigh->parms->app_probes)
- neigh_app_notify(neigh);
+ notify = 1;
#endif
}
if (new == old)
- return 0;
+ goto out;
if (new&NUD_CONNECTED)
neigh_connect(neigh);
else
while (neigh->nud_state&NUD_VALID &&
(skb=__skb_dequeue(&neigh->arp_queue)) != NULL) {
struct neighbour *n1 = neigh;
+ write_unlock_bh(&neigh->lock);
/* On shaper/eql skb->dst->neighbour != neigh :( */
if (skb->dst && skb->dst->neighbour)
n1 = skb->dst->neighbour;
n1->output(skb);
+ write_lock_bh(&neigh->lock);
}
skb_queue_purge(&neigh->arp_queue);
}
- return 0;
+out:
+ write_unlock_bh(&neigh->lock);
+#ifdef CONFIG_ARPD
+ if (notify && neigh->parms->app_probes)
+ neigh_app_notify(neigh);
+#endif
+ return err;
}
struct neighbour * neigh_event_ns(struct neigh_table *tbl,
int err;
struct device *dev = neigh->dev;
if (dev->hard_header_cache && dst->hh == NULL) {
- start_bh_atomic();
+ write_lock_bh(&neigh->lock);
if (dst->hh == NULL)
neigh_hh_init(neigh, dst, dst->ops->protocol);
err = dev->hard_header(skb, dev, ntohs(skb->protocol), neigh->ha, NULL, skb->len);
- end_bh_atomic();
+ write_unlock_bh(&neigh->lock);
} else {
- start_bh_atomic();
+ read_lock_bh(&neigh->lock);
err = dev->hard_header(skb, dev, ntohs(skb->protocol), neigh->ha, NULL, skb->len);
- end_bh_atomic();
+ read_unlock_bh(&neigh->lock);
}
if (err >= 0)
return neigh->ops->queue_xmit(skb);
__skb_pull(skb, skb->nh.raw - skb->data);
- start_bh_atomic();
+ read_lock_bh(&neigh->lock);
err = dev->hard_header(skb, dev, ntohs(skb->protocol), neigh->ha, NULL, skb->len);
- end_bh_atomic();
+ read_unlock_bh(&neigh->lock);
if (err >= 0)
return neigh->ops->queue_xmit(skb);
kfree_skb(skb);
return NULL;
}
}
+ write_lock_bh(&tbl->lock);
p->next = tbl->parms.next;
tbl->parms.next = p;
+ write_unlock_bh(&tbl->lock);
}
return p;
}
if (parms == NULL || parms == &tbl->parms)
return;
+ write_lock_bh(&tbl->lock);
for (p = &tbl->parms.next; *p; p = &(*p)->next) {
if (*p == parms) {
*p = parms->next;
- synchronize_bh();
+ write_unlock_bh(&tbl->lock);
#ifdef CONFIG_SYSCTL
neigh_sysctl_unregister(parms);
#endif
return;
}
}
+ write_unlock_bh(&tbl->lock);
NEIGH_PRINTK1("neigh_release_parms: not found\n");
}
tbl->parms.reachable_time = neigh_rand_reach_time(tbl->parms.base_reachable_time);
init_timer(&tbl->gc_timer);
+ tbl->lock = RW_LOCK_UNLOCKED;
tbl->gc_timer.data = (unsigned long)tbl;
tbl->gc_timer.function = neigh_periodic_timer;
tbl->gc_timer.expires = now + tbl->gc_interval + tbl->parms.reachable_time;
tbl->last_flush = now;
tbl->last_rand = now + tbl->parms.reachable_time*20;
+ write_lock(&neigh_tbl_lock);
tbl->next = neigh_tables;
neigh_tables = tbl;
+ write_unlock(&neigh_tbl_lock);
}
int neigh_table_clear(struct neigh_table *tbl)
{
struct neigh_table **tp;
- start_bh_atomic();
del_timer(&tbl->gc_timer);
del_timer(&tbl->proxy_timer);
skb_queue_purge(&tbl->proxy_queue);
neigh_ifdown(tbl, NULL);
- end_bh_atomic();
if (tbl->entries)
printk(KERN_CRIT "neighbour leakage\n");
+ write_lock(&neigh_tbl_lock);
for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
if (*tp == tbl) {
*tp = tbl->next;
- synchronize_bh();
break;
}
}
+ write_unlock(&neigh_tbl_lock);
#ifdef CONFIG_SYSCTL
neigh_sysctl_unregister(&tbl->parms);
#endif
return -ENODEV;
}
+ read_lock(&neigh_tbl_lock);
for (tbl=neigh_tables; tbl; tbl = tbl->next) {
int err = 0;
struct neighbour *n;
if (tbl->family != ndm->ndm_family)
continue;
+ read_unlock(&neigh_tbl_lock);
if (nda[NDA_DST-1] == NULL ||
nda[NDA_DST-1]->rta_len != RTA_LENGTH(tbl->key_len))
if (dev == NULL)
return -EINVAL;
- start_bh_atomic();
- n = __neigh_lookup(tbl, RTA_DATA(nda[NDA_DST-1]), dev, 0);
+ n = neigh_lookup(tbl, RTA_DATA(nda[NDA_DST-1]), dev);
if (n) {
err = neigh_update(n, NULL, NUD_FAILED, 1, 0);
neigh_release(n);
}
- end_bh_atomic();
return err;
}
+ read_unlock(&neigh_tbl_lock);
return -EADDRNOTAVAIL;
}
return -ENODEV;
}
+ read_lock(&neigh_tbl_lock);
for (tbl=neigh_tables; tbl; tbl = tbl->next) {
int err = 0;
struct neighbour *n;
if (tbl->family != ndm->ndm_family)
continue;
+ read_unlock(&neigh_tbl_lock);
+
if (nda[NDA_DST-1] == NULL ||
nda[NDA_DST-1]->rta_len != RTA_LENGTH(tbl->key_len))
return -EINVAL;
if (nda[NDA_LLADDR-1] != NULL &&
nda[NDA_LLADDR-1]->rta_len != RTA_LENGTH(dev->addr_len))
return -EINVAL;
- start_bh_atomic();
- n = __neigh_lookup(tbl, RTA_DATA(nda[NDA_DST-1]), dev, 0);
+ n = neigh_lookup(tbl, RTA_DATA(nda[NDA_DST-1]), dev);
if (n) {
if (nlh->nlmsg_flags&NLM_F_EXCL)
err = -EEXIST;
}
if (n)
neigh_release(n);
- end_bh_atomic();
return err;
}
+ read_unlock(&neigh_tbl_lock);
return -EADDRNOTAVAIL;
}
ndm->ndm_family = n->ops->family;
ndm->ndm_flags = n->flags;
ndm->ndm_type = n->type;
- ndm->ndm_state = n->nud_state;
ndm->ndm_ifindex = n->dev->ifindex;
RTA_PUT(skb, NDA_DST, n->tbl->key_len, n->primary_key);
+ read_lock_bh(&n->lock);
+ ndm->ndm_state = n->nud_state;
if (n->nud_state&NUD_VALID)
RTA_PUT(skb, NDA_LLADDR, n->dev->addr_len, n->ha);
ci.ndm_used = now - n->used;
ci.ndm_confirmed = now - n->confirmed;
ci.ndm_updated = now - n->updated;
ci.ndm_refcnt = atomic_read(&n->refcnt);
+ read_unlock_bh(&n->lock);
RTA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
nlh->nlmsg_len = skb->tail - b;
return skb->len;
if (h < s_h) continue;
if (h > s_h)
s_idx = 0;
- start_bh_atomic();
+ read_lock_bh(&tbl->lock);
for (n = tbl->hash_buckets[h], idx = 0; n;
n = n->next, idx++) {
if (idx < s_idx)
continue;
if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, RTM_NEWNEIGH) <= 0) {
- end_bh_atomic();
+ read_unlock_bh(&tbl->lock);
cb->args[1] = h;
cb->args[2] = idx;
return -1;
}
}
- end_bh_atomic();
+ read_unlock_bh(&tbl->lock);
}
cb->args[1] = h;
s_t = cb->args[0];
+ read_lock(&neigh_tbl_lock);
for (tbl=neigh_tables, t=0; tbl; tbl = tbl->next, t++) {
if (t < s_t) continue;
if (family && tbl->family != family)
if (neigh_dump_table(tbl, skb, cb) < 0)
break;
}
+ read_unlock(&neigh_tbl_lock);
cb->args[0] = t;
#include <net/sock.h>
#include <net/pkt_sched.h>
-atomic_t rtnl_rlockct;
-DECLARE_WAIT_QUEUE_HEAD(rtnl_wait);
+DECLARE_MUTEX(rtnl_sem);
-
-void rtnl_lock()
+void rtnl_lock(void)
{
rtnl_shlock();
rtnl_exlock();
}
-
-void rtnl_unlock()
+
+void rtnl_unlock(void)
{
rtnl_exunlock();
rtnl_shunlock();
}
+
+
int rtattr_parse(struct rtattr *tb[], int maxattr, struct rtattr *rta, int len)
{
memset(tb, 0, sizeof(struct rtattr*)*maxattr);
#ifdef CONFIG_RTNETLINK
struct sock *rtnl;
-unsigned long rtnl_wlockct;
-
struct rtnetlink_link * rtnetlink_links[NPROTO];
#define _S 1 /* superuser privileges required */
int s_idx = cb->args[0];
struct device *dev;
- read_lock_bh(&dev_base_lock);
+ read_lock(&dev_base_lock);
for (dev=dev_base, idx=0; dev; dev = dev->next, idx++) {
if (idx < s_idx)
continue;
if (rtnetlink_fill_ifinfo(skb, dev, RTM_NEWLINK, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq) <= 0)
break;
}
- read_unlock_bh(&dev_base_lock);
+ read_unlock(&dev_base_lock);
cb->args[0] = idx;
return skb->len;
continue;
if (idx > s_idx)
memset(&cb->args[0], 0, sizeof(cb->args));
- if (rtnetlink_links[idx][type].dumpit(skb, cb) == 0)
- continue;
- if (skb_tailroom(skb) < 256)
+ if (rtnetlink_links[idx][type].dumpit(skb, cb))
break;
}
cb->family = idx;
static int rtnetlink_done(struct netlink_callback *cb)
{
- if (cap_raised(NETLINK_CB(cb->skb).eff_cap, CAP_NET_ADMIN) && cb->nlh->nlmsg_flags&NLM_F_ATOMIC)
- rtnl_shunlock();
return 0;
}
if (link->dumpit == NULL)
goto err_inval;
- /* Super-user locks all the tables to get atomic snapshot */
- if (cap_raised(NETLINK_CB(skb).eff_cap, CAP_NET_ADMIN)
- && nlh->nlmsg_flags&NLM_F_ATOMIC)
- atomic_inc(&rtnl_rlockct);
if ((*errp = netlink_dump_start(rtnl, skb, nlh,
link->dumpit,
rtnetlink_done)) != 0) {
- if (cap_raised(NETLINK_CB(skb).eff_cap, CAP_NET_ADMIN) && nlh->nlmsg_flags&NLM_F_ATOMIC)
- atomic_dec(&rtnl_rlockct);
return -1;
}
rlen = NLMSG_ALIGN(nlh->nlmsg_len);
len += sprintf(buffer + len, "Addr Flags State Use Blksize Dev\n");
- neigh_table_lock(&dn_neigh_table);
for(i=0;i <= NEIGH_HASHMASK; i++) {
+ read_lock_bh(&dn_neigh_table.lock);
n = dn_neigh_table.hash_buckets[i];
for(; n != NULL; n = n->next) {
struct dn_neigh *dn = (struct dn_neigh *)n;
+ read_lock(&n->lock);
len += sprintf(buffer+len, "%-7s %s%s%s %02x %02d %07ld %-8s\n",
dn_addr2asc(dn_ntohs(dn_eth2dn(dn->addr)), buf),
(dn->flags&DN_NDFLAG_R1) ? "1" : "-",
atomic_read(&dn->n.refcnt),
dn->blksize,
(dn->n.dev) ? dn->n.dev->name : "?");
+ read_unlock(&n->lock);
pos = begin + len;
begin = pos;
}
- if (pos > offset + length)
- break;
+ if (pos > offset + length) {
+ read_unlock_bh(&dn_neigh_table.lock);
+ goto done;
+ }
}
+ read_unlock_bh(&dn_neigh_table.lock);
}
- neigh_table_unlock(&dn_neigh_table);
+
+done:
*start = buffer + (offset - begin);
len -= offset - begin;
eth->h_proto = type;
memcpy(eth->h_source, dev->dev_addr, dev->addr_len);
memcpy(eth->h_dest, neigh->ha, dev->addr_len);
+ hh->hh_len = ETH_HLEN;
return 0;
}
*
* PF_INET protocol family socket handler.
*
- * Version: $Id: af_inet.c,v 1.90 1999/05/29 04:30:38 davem Exp $
+ * Version: $Id: af_inet.c,v 1.91 1999/06/09 08:28:55 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
/* linux/net/inet/arp.c
*
- * Version: $Id: arp.c,v 1.77 1999/03/21 05:22:30 davem Exp $
+ * Version: $Id: arp.c,v 1.78 1999/06/09 10:10:36 davem Exp $
*
* Copyright (C) 1994 by Florian La Roche
*
#include <asm/system.h>
#include <asm/uaccess.h>
+#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+static char *ax2asc2(ax25_address *a, char *buf);
+#endif
+
+
/*
* Interface to generic neighbour cache.
*/
u8 *dst_ha = NULL;
struct device *dev = neigh->dev;
u32 target = *(u32*)neigh->primary_key;
- int probes = neigh->probes;
+ int probes = atomic_read(&neigh->probes);
if (skb && inet_addr_type(skb->nh.iph->saddr) == RTN_LOCAL)
saddr = skb->nh.iph->saddr;
if (!(neigh->nud_state&NUD_VALID))
printk(KERN_DEBUG "trying to ucast probe in NUD_INVALID\n");
dst_ha = neigh->ha;
+ read_lock_bh(&neigh->lock);
} else if ((probes -= neigh->parms->app_probes) < 0) {
#ifdef CONFIG_ARPD
neigh_app_ns(neigh);
arp_send(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr,
dst_ha, dev->dev_addr, NULL);
+ if (dst_ha)
+ read_unlock_bh(&neigh->lock);
}
/* OBSOLETE FUNCTIONS */
if (arp_set_predefined(inet_addr_type(paddr), haddr, paddr, dev))
return 0;
- start_bh_atomic();
n = __neigh_lookup(&arp_tbl, &paddr, dev, 1);
if (n) {
n->used = jiffies;
if (n->nud_state&NUD_VALID || neigh_event_send(n, skb) == 0) {
- memcpy(haddr, n->ha, dev->addr_len);
+ read_lock_bh(&n->lock);
+ memcpy(haddr, n->ha, dev->addr_len);
+ read_unlock_bh(&n->lock);
neigh_release(n);
- end_bh_atomic();
return 0;
}
+ neigh_release(n);
} else
kfree_skb(skb);
- neigh_release(n);
- end_bh_atomic();
return 1;
}
/* END OF OBSOLETE FUNCTIONS */
-/*
- * Note: requires bh_atomic locking.
- */
int arp_bind_neighbour(struct dst_entry *dst)
{
struct device *dev = dst->dev;
(addr_type == RTN_UNICAST && rt->u.dst.dev != dev &&
(IN_DEV_PROXY_ARP(in_dev) || pneigh_lookup(&arp_tbl, &tip, dev, 0)))) {
n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
- neigh_release(n);
+ if (n)
+ neigh_release(n);
if (skb->stamp.tv_sec == 0 ||
skb->pkt_type == PACKET_HOST ||
return -EINVAL;
err = -ENOBUFS;
- start_bh_atomic();
neigh = __neigh_lookup(&arp_tbl, &ip, dev, 1);
if (neigh) {
unsigned state = NUD_STALE;
r->arp_ha.sa_data : NULL, state, 1, 0);
neigh_release(neigh);
}
- end_bh_atomic();
return err;
}
struct neighbour *neigh;
int err = -ENXIO;
- start_bh_atomic();
- neigh = __neigh_lookup(&arp_tbl, &ip, dev, 0);
+ neigh = neigh_lookup(&arp_tbl, &ip, dev);
if (neigh) {
+ read_lock_bh(&neigh->lock);
memcpy(r->arp_ha.sa_data, neigh->ha, dev->addr_len);
+ r->arp_flags = arp_state_to_flags(neigh);
+ read_unlock_bh(&neigh->lock);
r->arp_ha.sa_family = dev->type;
strncpy(r->arp_dev, dev->name, sizeof(r->arp_dev));
- r->arp_flags = arp_state_to_flags(neigh);
neigh_release(neigh);
err = 0;
}
- end_bh_atomic();
return err;
}
return -EINVAL;
}
err = -ENXIO;
- start_bh_atomic();
- neigh = __neigh_lookup(&arp_tbl, &ip, dev, 0);
+ neigh = neigh_lookup(&arp_tbl, &ip, dev);
if (neigh) {
if (neigh->nud_state&~NUD_NOARP)
err = neigh_update(neigh, NULL, NUD_FAILED, 1, 0);
neigh_release(neigh);
}
- end_bh_atomic();
return err;
}
char hbuffer[HBUFFERLEN];
int i,j,k;
const char hexbuf[] = "0123456789ABCDEF";
+ char abuf[16];
size = sprintf(buffer,"IP address HW type Flags HW address Mask Device\n");
pos+=size;
len+=size;
- neigh_table_lock(&arp_tbl);
-
- for(i=0; i<=NEIGH_HASHMASK; i++) {
+ for(i=0; i<=NEIGH_HASHMASK; i++) {
struct neighbour *n;
+ read_lock_bh(&arp_tbl.lock);
for (n=arp_tbl.hash_buckets[i]; n; n=n->next) {
struct device *dev = n->dev;
int hatype = dev->type;
if (!(n->nud_state&~NUD_NOARP))
continue;
- /* I'd get great pleasure deleting
- this ugly code. Let's output it in hexadecimal format.
- "arp" utility will eventually repaired --ANK
- */
-#if 1 /* UGLY CODE */
+ read_lock(&n->lock);
+
/*
* Convert hardware address to XX:XX:XX:XX ... form.
*/
#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
if (hatype == ARPHRD_AX25 || hatype == ARPHRD_NETROM)
- strcpy(hbuffer,ax2asc((ax25_address *)n->ha));
+ ax2asc2((ax25_address *)n->ha, hbuffer);
else {
#endif
for (k=0,j=0;k<HBUFFERLEN-3 && j<dev->addr_len;j++) {
hbuffer[k++]=':';
}
hbuffer[--k]=0;
-
+
#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
}
#endif
-#else
- if ((neigh->nud_state&NUD_VALID) && dev->addr_len) {
- int j;
- for (j=0; j < dev->addr_len; j++)
- sprintf(hbuffer+2*j, "%02x", neigh->ha[j]);
- } else
- sprintf(hbuffer, "0");
-#endif
size = sprintf(buffer+len,
"%-17s0x%-10x0x%-10x%s",
- in_ntoa(*(u32*)n->primary_key),
+ in_ntoa2(*(u32*)n->primary_key, abuf),
hatype,
arp_state_to_flags(n),
hbuffer);
size += sprintf(buffer+len+size,
" %-17s %s\n",
"*", dev->name);
+ read_unlock(&n->lock);
len += size;
pos += size;
if (pos <= offset)
len=0;
- if (pos >= offset+length)
- goto done;
+ if (pos >= offset+length) {
+ read_unlock_bh(&arp_tbl.lock);
+ goto done;
+ }
}
+ read_unlock_bh(&arp_tbl.lock);
}
for (i=0; i<=PNEIGH_HASHMASK; i++) {
size = sprintf(buffer+len,
"%-17s0x%-10x0x%-10x%s",
- in_ntoa(*(u32*)n->key),
+ in_ntoa2(*(u32*)n->key, abuf),
hatype,
ATF_PUBL|ATF_PERM,
"00:00:00:00:00:00");
}
done:
- neigh_table_unlock(&arp_tbl);
*start = buffer+len-(pos-offset); /* Start of wanted data */
len = pos-offset; /* Start slop */
}
-#ifdef CONFIG_AX25_MODULE
+#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
/*
* ax25 -> ASCII conversion
*/
-char *ax2asc(ax25_address *a)
+char *ax2asc2(ax25_address *a, char *buf)
{
- static char buf[11];
char c, *s;
int n;
/*
* NET3 IP device support routines.
*
- * Version: $Id: devinet.c,v 1.30 1999/06/01 07:49:59 davem Exp $
+ * Version: $Id: devinet.c,v 1.32 1999/06/09 11:15:33 davem Exp $
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
return done;
}
-u32 inet_select_addr(struct device *dev, u32 dst, int scope)
+u32 inet_select_addr(const struct device *dev, u32 dst, int scope)
{
u32 addr = 0;
- struct in_device *in_dev = dev->ip_ptr;
+ const struct in_device *in_dev = dev->ip_ptr;
if (in_dev == NULL)
return 0;
in this case. It is importnat that lo is the first interface
in dev_base list.
*/
- read_lock_bh(&dev_base_lock);
+ read_lock(&dev_base_lock);
for (dev=dev_base; dev; dev=dev->next) {
if ((in_dev=dev->ip_ptr) == NULL)
continue;
for_primary_ifa(in_dev) {
if (ifa->ifa_scope <= scope) {
- read_unlock_bh(&dev_base_lock);
+ read_unlock(&dev_base_lock);
return ifa->ifa_local;
}
} endfor_ifa(in_dev);
}
- read_unlock_bh(&dev_base_lock);
+ read_unlock(&dev_base_lock);
return 0;
}
s_idx = cb->args[0];
s_ip_idx = ip_idx = cb->args[1];
- read_lock_bh(&dev_base_lock);
+ read_lock(&dev_base_lock);
for (dev=dev_base, idx=0; dev; dev = dev->next, idx++) {
if (idx < s_idx)
continue;
}
}
done:
- read_unlock_bh(&dev_base_lock);
+ read_unlock(&dev_base_lock);
cb->args[0] = idx;
cb->args[1] = ip_idx;
ipv4_devconf.accept_redirects = !on;
ipv4_devconf_dflt.forwarding = on;
- read_lock_bh(&dev_base_lock);
+ read_lock(&dev_base_lock);
for (dev = dev_base; dev; dev = dev->next) {
struct in_device *in_dev = dev->ip_ptr;
if (in_dev)
in_dev->cnf.forwarding = on;
}
- read_unlock_bh(&dev_base_lock);
+ read_unlock(&dev_base_lock);
rt_cache_flush(0);
*
* IPv4 Forwarding Information Base: FIB frontend.
*
- * Version: $Id: fib_frontend.c,v 1.15 1999/03/21 05:22:31 davem Exp $
+ * Version: $Id: fib_frontend.c,v 1.16 1999/06/09 10:10:42 davem Exp $
*
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
*
first = 0;
}
- /* rtnl_shlock(); -- it is pointless at the moment --ANK */
if (main_table && count > 0) {
int n = main_table->tb_get_info(main_table, ptr, first, count);
count -= n;
ptr += n*128;
}
- /* rtnl_shunlock(); */
len = ptr - *start;
if (len >= length)
return length;
*
* IPv4 FIB: lookup engine and maintenance routines.
*
- * Version: $Id: fib_hash.c,v 1.9 1999/05/27 00:38:05 davem Exp $
+ * Version: $Id: fib_hash.c,v 1.10 1999/06/09 10:10:45 davem Exp $
*
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
*
fz->fz_mask = inet_make_mask(z);
/* Find the first not empty zone with more specific mask */
- write_lock_bh(&fib_hash_lock);
for (i=z+1; i<=32; i++)
if (table->fn_zones[i])
break;
+ write_lock_bh(&fib_hash_lock);
if (i>32) {
/* No more specific masks, we are the first. */
fz->fz_next = table->fn_zone_list;
struct fn_zone *fz;
struct fn_hash *t = (struct fn_hash*)tb->tb_data;
- read_lock_bh(&fib_hash_lock);
+ read_lock(&fib_hash_lock);
for (fz = t->fn_zone_list; fz; fz = fz->fz_next) {
struct fib_node *f;
fn_key_t k = fz_key(key->dst, fz);
}
err = 1;
out:
- read_unlock_bh(&fib_hash_lock);
+ read_unlock(&fib_hash_lock);
return err;
}
last_resort = NULL;
order = -1;
- read_lock_bh(&fib_hash_lock);
+ read_lock(&fib_hash_lock);
for (f = fz->fz_hash[0]; f; f = f->fn_next) {
struct fib_info *next_fi = FIB_INFO(f);
res->fi = last_resort;
fn_hash_last_dflt = last_idx;
out:
- read_unlock_bh(&fib_hash_lock);
+ read_unlock(&fib_hash_lock);
}
#define FIB_SCAN(f, fp) \
fp = fz_chain_p(key, fz);
- write_lock_bh(&fib_hash_lock);
/*
* Scan list to find the first route with the same destination
*/
new_f->fn_next = f;
+ write_lock_bh(&fib_hash_lock);
*fp = new_f;
+ write_unlock_bh(&fib_hash_lock);
fz->fz_nent++;
if (del_fp) {
f = *del_fp;
/* Unlink replaced node */
+ write_lock_bh(&fib_hash_lock);
*del_fp = f->fn_next;
write_unlock_bh(&fib_hash_lock);
fn_free_node(f);
fz->fz_nent--;
} else {
- write_unlock_bh(&fib_hash_lock);
rt_cache_flush(-1);
}
rtmsg_fib(RTM_NEWROUTE, new_f, z, tb->tb_id, n, req);
return 0;
out:
- write_unlock_bh(&fib_hash_lock);
fib_release_info(fi);
return err;
}
fp = fz_chain_p(key, fz);
- write_lock_bh(&fib_hash_lock);
FIB_SCAN(f, fp) {
if (fn_key_eq(f->fn_key, key))
break;
if (fn_key_leq(key, f->fn_key)) {
- write_unlock_bh(&fib_hash_lock);
return -ESRCH;
}
}
struct fib_info * fi = FIB_INFO(f);
if (f->fn_state&FN_S_ZOMBIE) {
- write_unlock_bh(&fib_hash_lock);
return -ESRCH;
}
matched++;
rtmsg_fib(RTM_DELROUTE, f, z, tb->tb_id, n, req);
if (matched != 1) {
+ write_lock_bh(&fib_hash_lock);
*del_fp = f->fn_next;
write_unlock_bh(&fib_hash_lock);
fn_free_node(f);
fz->fz_nent--;
} else {
- write_unlock_bh(&fib_hash_lock);
f->fn_state |= FN_S_ZOMBIE;
if (f->fn_state&FN_S_ACCESSED) {
f->fn_state &= ~FN_S_ACCESSED;
return 0;
}
- write_unlock_bh(&fib_hash_lock);
return -ESRCH;
}
struct fib_info *fi = FIB_INFO(f);
if (fi && ((f->fn_state&FN_S_ZOMBIE) || (fi->fib_flags&RTNH_F_DEAD))) {
+ write_lock_bh(&fib_hash_lock);
*fp = f->fn_next;
+ write_unlock_bh(&fib_hash_lock);
fn_free_node(f);
found++;
int found = 0;
fib_hash_zombies = 0;
- write_lock_bh(&fib_hash_lock);
for (fz = table->fn_zone_list; fz; fz = fz->fz_next) {
int i;
int tmp = 0;
fz->fz_nent -= tmp;
found += tmp;
}
- write_unlock_bh(&fib_hash_lock);
return found;
}
int pos = 0;
int n = 0;
- read_lock_bh(&fib_hash_lock);
+ read_lock(&fib_hash_lock);
for (fz=table->fn_zone_list; fz; fz = fz->fz_next) {
int i;
struct fib_node *f;
}
}
out:
- read_unlock_bh(&fib_hash_lock);
+ read_unlock(&fib_hash_lock);
return n;
}
#endif
struct fn_hash *table = (struct fn_hash*)tb->tb_data;
s_m = cb->args[1];
- read_lock_bh(&fib_hash_lock);
+ read_lock(&fib_hash_lock);
for (fz = table->fn_zone_list, m=0; fz; fz = fz->fz_next, m++) {
if (m < s_m) continue;
if (m > s_m)
memset(&cb->args[2], 0, sizeof(cb->args) - 2*sizeof(cb->args[0]));
if (fn_hash_dump_zone(skb, cb, tb, fz) < 0) {
cb->args[1] = m;
- read_unlock_bh(&fib_hash_lock);
+ read_unlock(&fib_hash_lock);
return -1;
}
}
- read_unlock_bh(&fib_hash_lock);
+ read_unlock(&fib_hash_lock);
cb->args[1] = m;
return skb->len;
}
*
* IPv4 Forwarding Information Base: policy rules.
*
- * Version: $Id: fib_rules.c,v 1.10 1999/05/27 00:38:03 davem Exp $
+ * Version: $Id: fib_rules.c,v 1.11 1999/06/09 10:10:47 davem Exp $
*
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
*
struct fib_rule *r, **rp;
int err = -ESRCH;
- write_lock_bh(&fib_rules_lock);
for (rp=&fib_rules; (r=*rp) != NULL; rp=&r->r_next) {
if ((!rta[RTA_SRC-1] || memcmp(RTA_DATA(rta[RTA_SRC-1]), &r->r_src, 4) == 0) &&
rtm->rtm_src_len == r->r_src_len &&
if (r == &local_rule)
break;
+ write_lock_bh(&fib_rules_lock);
*rp = r->r_next;
+ write_unlock_bh(&fib_rules_lock);
if (r != &default_rule && r != &main_rule)
kfree(r);
err = 0;
break;
}
}
- write_unlock_bh(&fib_rules_lock);
return err;
}
memcpy(&new_r->r_tclassid, RTA_DATA(rta[RTA_FLOW-1]), 4);
#endif
- write_lock_bh(&fib_rules_lock);
rp = &fib_rules;
if (!new_r->r_preference) {
r = fib_rules;
}
new_r->r_next = r;
+ write_lock_bh(&fib_rules_lock);
*rp = new_r;
write_unlock_bh(&fib_rules_lock);
return 0;
{
struct fib_rule *r;
- write_lock_bh(&fib_rules_lock);
for (r=fib_rules; r; r=r->r_next) {
- if (r->r_ifindex == dev->ifindex)
+ if (r->r_ifindex == dev->ifindex) {
+ write_lock_bh(&fib_rules_lock);
r->r_ifindex = -1;
+ write_unlock_bh(&fib_rules_lock);
+ }
}
- write_unlock_bh(&fib_rules_lock);
}
static void fib_rules_attach(struct device *dev)
{
struct fib_rule *r;
- write_lock_bh(&fib_rules_lock);
for (r=fib_rules; r; r=r->r_next) {
- if (r->r_ifindex == -1 && strcmp(dev->name, r->r_ifname) == 0)
+ if (r->r_ifindex == -1 && strcmp(dev->name, r->r_ifname) == 0) {
+ write_lock_bh(&fib_rules_lock);
r->r_ifindex = dev->ifindex;
+ write_unlock_bh(&fib_rules_lock);
+ }
}
- write_unlock_bh(&fib_rules_lock);
}
int fib_lookup(const struct rt_key *key, struct fib_result *res)
u32 saddr = key->src;
FRprintk("Lookup: %08x <- %08x ", key->dst, key->src);
- read_lock_bh(&fib_rules_lock);
+ read_lock(&fib_rules_lock);
for (r = fib_rules; r; r=r->r_next) {
if (((saddr^r->r_src) & r->r_srcmask) ||
((daddr^r->r_dst) & r->r_dstmask) ||
policy = r;
break;
case RTN_UNREACHABLE:
- read_unlock_bh(&fib_rules_lock);
+ read_unlock(&fib_rules_lock);
return -ENETUNREACH;
default:
case RTN_BLACKHOLE:
- read_unlock_bh(&fib_rules_lock);
+ read_unlock(&fib_rules_lock);
return -EINVAL;
case RTN_PROHIBIT:
- read_unlock_bh(&fib_rules_lock);
+ read_unlock(&fib_rules_lock);
return -EACCES;
}
if (err == 0) {
FRprintk("ok\n");
res->r = policy;
- read_unlock_bh(&fib_rules_lock);
+ read_unlock(&fib_rules_lock);
return 0;
}
if (err < 0 && err != -EAGAIN) {
- read_unlock_bh(&fib_rules_lock);
+ read_unlock(&fib_rules_lock);
return err;
}
}
FRprintk("FAILURE\n");
- read_unlock_bh(&fib_rules_lock);
+ read_unlock(&fib_rules_lock);
return -ENETUNREACH;
}
int s_idx = cb->args[0];
struct fib_rule *r;
- read_lock_bh(&fib_rules_lock);
+ read_lock(&fib_rules_lock);
for (r=fib_rules, idx=0; r; r = r->r_next, idx++) {
if (idx < s_idx)
continue;
if (inet_fill_rule(skb, r, cb) < 0)
break;
}
- read_unlock_bh(&fib_rules_lock);
+ read_unlock(&fib_rules_lock);
cb->args[0] = idx;
return skb->len;
*
* Alan Cox, <alan@redhat.com>
*
- * Version: $Id: icmp.c,v 1.54 1999/05/30 01:16:22 davem Exp $
+ * Version: $Id: icmp.c,v 1.57 1999/06/09 10:10:50 davem Exp $
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
case ICMP_FRAG_NEEDED:
if (ipv4_config.no_pmtu_disc) {
if (net_ratelimit())
- printk(KERN_INFO "ICMP: %s: fragmentation needed and DF set.\n",
- in_ntoa(iph->daddr));
+ printk(KERN_INFO "ICMP: %d.%d.%d.%d: fragmentation needed and DF set.\n",
+ NIPQUAD(iph->daddr));
} else {
unsigned short new_mtu;
new_mtu = ip_rt_frag_needed(iph, ntohs(icmph->un.frag.mtu));
break;
case ICMP_SR_FAILED:
if (net_ratelimit())
- printk(KERN_INFO "ICMP: %s: Source Route Failed.\n", in_ntoa(iph->daddr));
+ printk(KERN_INFO "ICMP: %d.%d.%d.%d: Source Route Failed.\n", NIPQUAD(iph->daddr));
break;
default:
break;
if (inet_addr_type(iph->daddr) == RTN_BROADCAST)
{
if (net_ratelimit())
- printk(KERN_WARNING "%s sent an invalid ICMP error to a broadcast.\n",
- in_ntoa(skb->nh.iph->saddr));
+ printk(KERN_WARNING "%d.%d.%d.%d sent an invalid ICMP error to a broadcast.\n",
+ NIPQUAD(skb->nh.iph->saddr));
return;
}
}
* the older version didn't come out right using gcc 2.5.8, the newer one
* seems to fall out with gcc 2.6.2.
*
- * Version: $Id: igmp.c,v 1.31 1999/05/27 00:37:59 davem Exp $
+ * Version: $Id: igmp.c,v 1.32 1999/06/09 10:10:53 davem Exp $
*
* Authors:
* Alan Cox <Alan.Cox@linux.org>
#include <linux/mroute.h>
#endif
+/* Big mc list lock for all the devices */
+static rwlock_t ip_mc_lock = RW_LOCK_UNLOCKED;
+/* Big mc list semaphore for all the sockets.
+ We do not refer to this list in IP data paths or from BH,
+ so that semaphore is OK.
+ */
+DECLARE_MUTEX(ip_sk_mc_sem);
+
+
#define IP_MAX_MEMBERSHIPS 20
#ifdef CONFIG_IP_MULTICAST
struct in_device *in_dev = im->interface;
int err;
+ read_lock(&ip_mc_lock);
+
im->tm_running=0;
if (IGMP_V1_SEEN(in_dev))
igmp_start_timer(im, IGMP_Unsolicited_Report_Interval);
}
im->reporter = 1;
+ read_unlock(&ip_mc_lock);
}
static void igmp_heard_report(struct in_device *in_dev, u32 group)
if (LOCAL_MCAST(group))
return;
+ read_lock(&ip_mc_lock);
for (im=in_dev->mc_list; im!=NULL; im=im->next) {
if (im->multiaddr == group) {
igmp_stop_timer(im);
im->reporter = 0;
im->unsolicit_count = 0;
- return;
+ break;
}
}
+ read_unlock(&ip_mc_lock);
}
static void igmp_heard_query(struct in_device *in_dev, unsigned char max_resp_time,
* - Use the igmp->igmp_code field as the maximum
* delay possible
*/
+ read_lock(&ip_mc_lock);
for (im=in_dev->mc_list; im!=NULL; im=im->next) {
if (group && group != im->multiaddr)
continue;
igmp_stop_timer(im);
igmp_start_timer(im, max_delay);
}
+ read_unlock(&ip_mc_lock);
}
int igmp_rcv(struct sk_buff *skb, unsigned short len)
if (LOCAL_MCAST(im->multiaddr))
return;
- start_bh_atomic();
igmp_stop_timer(im);
- end_bh_atomic();
if (im->reporter && !IGMP_V1_SEEN(im->interface))
igmp_send_report(im->interface->dev, im->multiaddr, IGMP_HOST_LEAVE_MESSAGE);
if (LOCAL_MCAST(im->multiaddr))
return;
- start_bh_atomic();
igmp_start_timer(im, IGMP_Initial_Report_Delay);
- end_bh_atomic();
#endif
}
im = (struct ip_mc_list *)kmalloc(sizeof(*im), GFP_KERNEL);
+ write_lock_bh(&ip_mc_lock);
for (i=in_dev->mc_list; i; i=i->next) {
if (i->multiaddr == addr) {
i->users++;
if (im)
kfree(im);
- return;
+ goto out;
}
}
if (!im)
- return;
+ goto out;
im->users=1;
im->interface=in_dev;
im->multiaddr=addr;
im->next=in_dev->mc_list;
in_dev->mc_list=im;
igmp_group_added(im);
+ write_unlock_bh(&ip_mc_lock);
if (in_dev->dev->flags & IFF_UP)
ip_rt_multicast_event(in_dev);
return;
+out:
+ write_unlock_bh(&ip_mc_lock);
+ return;
}
/*
int ip_mc_dec_group(struct in_device *in_dev, u32 addr)
{
+ int err = -ESRCH;
struct ip_mc_list *i, **ip;
+ write_lock_bh(&ip_mc_lock);
for (ip=&in_dev->mc_list; (i=*ip)!=NULL; ip=&i->next) {
if (i->multiaddr==addr) {
if (--i->users == 0) {
*ip = i->next;
- synchronize_bh();
-
igmp_group_dropped(i);
+
+ write_unlock_bh(&ip_mc_lock);
if (in_dev->dev->flags & IFF_UP)
ip_rt_multicast_event(in_dev);
kfree_s(i, sizeof(*i));
+ return 0;
}
- return 0;
+ err = 0;
+ break;
}
}
+ write_unlock_bh(&ip_mc_lock);
return -ESRCH;
}
{
struct ip_mc_list *i;
+ read_lock_bh(&ip_mc_lock);
for (i=in_dev->mc_list; i; i=i->next)
igmp_group_dropped(i);
+ read_unlock_bh(&ip_mc_lock);
ip_mc_dec_group(in_dev, IGMP_ALL_HOSTS);
}
ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS);
+ read_lock_bh(&ip_mc_lock);
for (i=in_dev->mc_list; i; i=i->next)
igmp_group_added(i);
+ read_unlock_bh(&ip_mc_lock);
}
/*
{
struct ip_mc_list *i;
+ write_lock_bh(&ip_mc_lock);
while ((i = in_dev->mc_list) != NULL) {
in_dev->mc_list = i->next;
igmp_group_dropped(i);
kfree_s(i, sizeof(*i));
}
+ write_unlock_bh(&ip_mc_lock);
}
static struct in_device * ip_mc_find_dev(struct ip_mreqn *imr)
iml = (struct ip_mc_socklist *)sock_kmalloc(sk, sizeof(*iml), GFP_KERNEL);
err = -EADDRINUSE;
+ down(&ip_sk_mc_sem);
for (i=sk->ip_mc_list; i; i=i->next) {
if (memcmp(&i->multi, imr, sizeof(*imr)) == 0) {
/* New style additions are reference counted */
i->count++;
err = 0;
}
- goto done;
+ goto done_unlock;
}
count++;
}
err = -ENOBUFS;
if (iml == NULL || count >= sysctl_igmp_max_memberships)
- goto done;
+ goto done_unlock;
memcpy(&iml->multi, imr, sizeof(*imr));
iml->next = sk->ip_mc_list;
iml->count = 1;
ip_mc_inc_group(in_dev, addr);
iml = NULL;
err = 0;
+
+done_unlock:
+ up(&ip_sk_mc_sem);
done:
rtnl_shunlock();
if (iml)
{
struct ip_mc_socklist *iml, **imlp;
+ down(&ip_sk_mc_sem);
for (imlp=&sk->ip_mc_list; (iml=*imlp)!=NULL; imlp=&iml->next) {
if (iml->multi.imr_multiaddr.s_addr==imr->imr_multiaddr.s_addr &&
iml->multi.imr_address.s_addr==imr->imr_address.s_addr &&
return 0;
*imlp = iml->next;
- synchronize_bh();
+ up(&ip_sk_mc_sem);
in_dev = inetdev_by_index(iml->multi.imr_ifindex);
if (in_dev)
return 0;
}
}
+ up(&ip_sk_mc_sem);
return -EADDRNOTAVAIL;
}
{
struct ip_mc_socklist *iml;
+ down(&ip_sk_mc_sem);
while ((iml=sk->ip_mc_list) != NULL) {
struct in_device *in_dev;
sk->ip_mc_list = iml->next;
+ up(&ip_sk_mc_sem);
+
if ((in_dev = inetdev_by_index(iml->multi.imr_ifindex)) != NULL)
ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
sock_kfree_s(sk, iml, sizeof(*iml));
+
+ down(&ip_sk_mc_sem);
}
+ up(&ip_sk_mc_sem);
+}
+
+int ip_check_mc(struct device *dev, u32 mc_addr)
+{
+ struct in_device *in_dev = dev->ip_ptr;
+ struct ip_mc_list *im;
+
+ if (in_dev) {
+ read_lock(&ip_mc_lock);
+ for (im=in_dev->mc_list; im; im=im->next) {
+ if (im->multiaddr == mc_addr) {
+ read_unlock(&ip_mc_lock);
+ return 1;
+ }
+ }
+ read_unlock(&ip_mc_lock);
+ }
+ return 0;
}
struct ip_mc_list *im;
int len=0;
struct device *dev;
-
+
len=sprintf(buffer,"Idx\tDevice : Count Querier\tGroup Users Timer\tReporter\n");
-
- read_lock_bh(&dev_base_lock);
+
+ read_lock(&dev_base_lock);
for(dev = dev_base; dev; dev = dev->next) {
struct in_device *in_dev = dev->ip_ptr;
char *querier = "NONE";
len+=sprintf(buffer+len,"%d\t%-10s: %5d %7s\n",
dev->ifindex, dev->name, dev->mc_count, querier);
+ read_lock(&ip_mc_lock);
for (im = in_dev->mc_list; im; im = im->next) {
len+=sprintf(buffer+len,
"\t\t\t\t%08lX %5d %d:%08lX\t\t%d\n",
len=0;
begin=pos;
}
- if(pos>offset+length)
+ if(pos>offset+length) {
+ read_unlock(&ip_mc_lock);
goto done;
+ }
}
+ read_unlock(&ip_mc_lock);
}
done:
- read_unlock_bh(&dev_base_lock);
+ read_unlock(&dev_base_lock);
*start=buffer+(offset-begin);
len-=(offset-begin);
*
* The Internet Protocol (IP) module.
*
- * Version: $Id: ip_input.c,v 1.39 1999/05/30 01:16:25 davem Exp $
+ * Version: $Id: ip_input.c,v 1.40 1999/06/09 10:10:55 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
if (skb->dst == NULL) {
if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))
goto drop;
-#ifdef CONFIG_CPU_IS_SLOW
- if (net_cpu_congestion > 10 && !(iph->tos&IPTOS_RELIABILITY) &&
- IPTOS_PREC(iph->tos) < IPTOS_PREC_INTERNETCONTROL) {
- goto drop;
- }
-#endif
}
#ifdef CONFIG_NET_CLS_ROUTE
if (skb->dst->tclassid) {
u32 idx = skb->dst->tclassid;
+ write_lock(&ip_rt_acct_lock);
ip_rt_acct[idx&0xFF].o_packets++;
ip_rt_acct[idx&0xFF].o_bytes+=skb->len;
ip_rt_acct[(idx>>16)&0xFF].i_packets++;
ip_rt_acct[(idx>>16)&0xFF].i_bytes+=skb->len;
+ write_unlock(&ip_rt_acct_lock);
}
#endif
* IP_MASQ_VDOLIVE - VDO Live masquerading module
*
*
- * Version: @(#)$Id: ip_masq_vdolive.c,v 1.4 1998/10/06 04:49:07 davem Exp $
+ * Version: @(#)$Id: ip_masq_vdolive.c,v 1.6 1999/06/09 08:29:03 davem Exp $
*
* Author: Nigel Metheringham <Nigel.Metheringham@ThePLAnet.net>
* PLAnet Online Ltd
*
* The options processing module for ip.c
*
- * Version: $Id: ip_options.c,v 1.16 1999/03/21 05:22:40 davem Exp $
+ * Version: $Id: ip_options.c,v 1.18 1999/06/09 08:29:06 davem Exp $
*
* Authors: A.N.Kuznetsov
*
/*
- * $Id: ipconfig.c,v 1.21 1999/05/27 00:38:01 davem Exp $
+ * $Id: ipconfig.c,v 1.22 1999/06/09 10:10:57 davem Exp $
*
* Automatic Configuration of IP -- use BOOTP or RARP or user-supplied
* information to configure own IP address and routes.
unsigned short oflags;
last = &ic_first_dev;
- read_lock_bh(&dev_base_lock);
+ read_lock(&dev_base_lock);
for (dev = dev_base; dev; dev = dev->next) {
if (user_dev_name[0] ? !strcmp(dev->name, user_dev_name) :
(!(dev->flags & IFF_LOOPBACK) &&
DBG(("IP-Config: Opened %s (able=%d)\n", dev->name, able));
}
}
- read_unlock_bh(&dev_base_lock);
+ read_unlock(&dev_base_lock);
*last = NULL;
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
- * Version: $Id: ipmr.c,v 1.40 1999/03/25 10:04:25 davem Exp $
+ * Version: $Id: ipmr.c,v 1.43 1999/06/09 10:10:59 davem Exp $
*
* Fixes:
* Michael Chastain : Incorrect size of copying.
* Brad Parker : Better behaviour on mrouted upcall
* overflow.
* Carlos Picoto : PIMv1 Support
+ * Pavlin Ivanov Radoslavov: PIMv2 Registers must checksum only PIM header
 * Relax this requirement to work with older peers.
*
*/
skb_trim(skb, nlh->nlmsg_len);
((struct nlmsgerr*)NLMSG_DATA(nlh))->error = -EMSGSIZE;
}
- err = netlink_unicast(rtnl, skb, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+ err = netlink_unicast(rtnl, skb, NETLINK_CB(skb).dst_pid, MSG_DONTWAIT);
} else
#endif
ip_mr_forward(skb, cache, 0);
pim->type != ((PIM_VERSION<<4)|(PIM_REGISTER)) ||
(pim->flags&PIM_NULL_REGISTER) ||
reg_dev == NULL ||
- ip_compute_csum((void *)pim, len)) {
+ (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
+ ip_compute_csum((void *)pim, len))) {
kfree_skb(skb);
return -EINVAL;
}
*
* ROUTE - implementation of the IP router.
*
- * Version: $Id: route.c,v 1.68 1999/05/27 00:37:54 davem Exp $
+ * Version: $Id: route.c,v 1.69 1999/06/09 10:11:02 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
#ifdef CONFIG_NET_CLS_ROUTE
struct ip_rt_acct ip_rt_acct[256];
+rwlock_t ip_rt_acct_lock = RW_LOCK_UNLOCKED;
#ifdef CONFIG_PROC_FS
static int ip_rt_acct_read(char *buffer, char **start, off_t offset,
*eof = 1;
}
if (length > 0) {
- start_bh_atomic();
+ read_lock_bh(&ip_rt_acct_lock);
memcpy(buffer, ((u8*)&ip_rt_acct)+offset, length);
- end_bh_atomic();
+ read_unlock_bh(&ip_rt_acct_lock);
return length;
}
return 0;
*
* Implementation of the Transmission Control Protocol(TCP).
*
- * Version: $Id: tcp_input.c,v 1.167 1999/05/29 22:37:54 davem Exp $
+ * Version: $Id: tcp_input.c,v 1.169 1999/06/09 08:29:13 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
*
* Implementation of the Transmission Control Protocol(TCP).
*
- * Version: $Id: tcp_ipv4.c,v 1.178 1999/05/30 01:16:27 davem Exp $
+ * Version: $Id: tcp_ipv4.c,v 1.180 1999/06/09 08:29:19 davem Exp $
*
* IPv4 specific functions
*
*
* The User Datagram Protocol (UDP).
*
- * Version: $Id: udp.c,v 1.67 1999/05/27 00:37:50 davem Exp $
+ * Version: $Id: udp.c,v 1.69 1999/06/09 11:15:31 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
return 0;
}
-/* Shared by v4/v6 tcp. */
+/* Shared by v4/v6 udp. */
unsigned short udp_good_socknum(void)
{
int result;
* Various kernel-resident INET utility functions; mainly
* for format conversion and debugging output.
*
- * Version: $Id: utils.c,v 1.6 1997/12/13 21:53:03 kuznet Exp $
+ * Version: $Id: utils.c,v 1.7 1999/06/09 10:11:05 davem Exp $
*
* Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
*
return(buff);
}
+char *in_ntoa2(__u32 in, char *buff)
+{
+ sprintf(buff, "%d.%d.%d.%d", NIPQUAD(in));
+ return buff;
+}
/*
* Convert an ASCII string to binary IP.
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
- * $Id: addrconf.c,v 1.49 1999/05/27 00:38:20 davem Exp $
+ * $Id: addrconf.c,v 1.50 1999/06/09 10:11:09 davem Exp $
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
1. The result of inet6_add_addr() is used only inside lock
or from bh_atomic context.
- 2. inet6_get_lladdr() is used only from bh protected context.
-
- 3. The result of ipv6_chk_addr() is not used outside of bh protected context.
+ 2. The result of ipv6_chk_addr() is not used outside of bh protected context.
*/
static __inline__ void addrconf_lock(void)
return err;
}
-struct inet6_ifaddr * ipv6_get_lladdr(struct device *dev)
+int ipv6_get_lladdr(struct device *dev, struct in6_addr *addr)
{
struct inet6_ifaddr *ifp = NULL;
struct inet6_dev *idev;
if ((idev = ipv6_get_idev(dev)) != NULL) {
addrconf_lock();
for (ifp=idev->addr_list; ifp; ifp=ifp->if_next) {
- if (ifp->scope == IFA_LINK)
- break;
+ if (ifp->scope == IFA_LINK) {
+ ipv6_addr_copy(addr, &ifp->addr);
+ addrconf_unlock();
+ return 0;
+ }
}
addrconf_unlock();
}
- return ifp;
+ return -EADDRNOTAVAIL;
}
/*
return;
}
- read_lock_bh(&dev_base_lock);
+ read_lock(&dev_base_lock);
for (dev = dev_base; dev != NULL; dev = dev->next) {
if (dev->ip_ptr && (dev->flags & IFF_UP)) {
struct in_device * in_dev = dev->ip_ptr;
flag |= IFA_HOST;
}
- read_unlock_bh(&dev_base_lock);
addrconf_lock();
ifp = ipv6_add_addr(idev, &addr, flag);
if (ifp) {
ipv6_ifa_notify(RTM_NEWADDR, ifp);
}
addrconf_unlock();
- read_lock_bh(&dev_base_lock);
}
}
}
- read_unlock_bh(&dev_base_lock);
+ read_unlock(&dev_base_lock);
}
static void init_loopback(struct device *dev)
struct device *dev;
/* This takes sense only during module load. */
- read_lock_bh(&dev_base_lock);
+ read_lock(&dev_base_lock);
for (dev = dev_base; dev; dev = dev->next) {
if (!(dev->flags&IFF_UP))
continue;
- read_unlock_bh(&dev_base_lock);
switch (dev->type) {
case ARPHRD_LOOPBACK:
init_loopback(dev);
default:
/* Ignore all other */
}
- read_lock_bh(&dev_base_lock);
}
- read_unlock_bh(&dev_base_lock);
+ read_unlock(&dev_base_lock);
#endif
#ifdef CONFIG_PROC_FS
*
* Adapted from linux/net/ipv4/af_inet.c
*
- * $Id: af_inet6.c,v 1.43 1999/04/22 10:07:39 davem Exp $
+ * $Id: af_inet6.c,v 1.44 1999/06/09 08:29:29 davem Exp $
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
- * $Id: ip6_fw.c,v 1.10 1998/08/26 12:04:57 davem Exp $
+ * $Id: ip6_fw.c,v 1.12 1999/06/09 08:29:32 davem Exp $
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
- * $Id: ip6_output.c,v 1.17 1999/04/22 10:07:42 davem Exp $
+ * $Id: ip6_output.c,v 1.20 1999/06/09 10:11:12 davem Exp $
*
* Based on linux/net/ipv4/ip_output.c
*
}
if (hh) {
-#ifdef __alpha__
- /* Alpha has disguisting memcpy. Help it. */
- u64 *aligned_hdr = (u64*)(skb->data - 16);
- u64 *aligned_hdr0 = hh->hh_data;
- read_lock_irq(&hh->hh_lock);
- aligned_hdr[0] = aligned_hdr0[0];
- aligned_hdr[1] = aligned_hdr0[1];
-#else
- read_lock_irq(&hh->hh_lock);
+ read_lock_bh(&hh->hh_lock);
memcpy(skb->data - 16, hh->hh_data, 16);
-#endif
- read_unlock_irq(&hh->hh_lock);
- skb_push(skb, dev->hard_header_len);
+ read_unlock_bh(&hh->hh_lock);
+ skb_push(skb, hh->hh_len);
return hh->hh_output(skb);
} else if (dst->neighbour)
return dst->neighbour->output(skb);
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
- * $Id: mcast.c,v 1.20 1999/05/27 00:38:23 davem Exp $
+ * $Id: mcast.c,v 1.23 1999/06/09 10:11:14 davem Exp $
*
* Based on linux/ipv4/igmp.c and linux/ipv4/ip_sockglue.c
*
#define MDBG(x)
#endif
+/* Big mc list lock for all the devices */
+static rwlock_t ipv6_mc_lock = RW_LOCK_UNLOCKED;
+/* Big mc list lock for all the sockets */
+static rwlock_t ipv6_sk_mc_lock = RW_LOCK_UNLOCKED;
+
static struct socket *igmp6_socket;
static void igmp6_join_group(struct ifmcaddr6 *ma);
return err;
}
+ write_lock_bh(&ipv6_sk_mc_lock);
mc_lst->next = np->ipv6_mc_list;
np->ipv6_mc_list = mc_lst;
+ write_unlock_bh(&ipv6_sk_mc_lock);
return 0;
}
struct ipv6_pinfo *np = &sk->net_pinfo.af_inet6;
struct ipv6_mc_socklist *mc_lst, **lnk;
+ write_lock_bh(&ipv6_sk_mc_lock);
for (lnk = &np->ipv6_mc_list; (mc_lst = *lnk) !=NULL ; lnk = &mc_lst->next) {
if (mc_lst->ifindex == ifindex &&
ipv6_addr_cmp(&mc_lst->addr, addr) == 0) {
struct device *dev;
*lnk = mc_lst->next;
- synchronize_bh();
+ write_unlock_bh(&ipv6_sk_mc_lock);
if ((dev = dev_get_by_index(ifindex)) != NULL)
ipv6_dev_mc_dec(dev, &mc_lst->addr);
return 0;
}
}
+ write_unlock_bh(&ipv6_sk_mc_lock);
return -ENOENT;
}
struct ipv6_pinfo *np = &sk->net_pinfo.af_inet6;
struct ipv6_mc_socklist *mc_lst;
+ write_lock_bh(&ipv6_sk_mc_lock);
while ((mc_lst = np->ipv6_mc_list) != NULL) {
- struct device *dev = dev_get_by_index(mc_lst->ifindex);
+ struct device *dev;
+
+ np->ipv6_mc_list = mc_lst->next;
+ write_unlock_bh(&ipv6_sk_mc_lock);
+ dev = dev_get_by_index(mc_lst->ifindex);
if (dev)
ipv6_dev_mc_dec(dev, &mc_lst->addr);
- np->ipv6_mc_list = mc_lst->next;
sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
+
+ write_lock_bh(&ipv6_sk_mc_lock);
}
+ write_unlock_bh(&ipv6_sk_mc_lock);
+}
+
+int inet6_mc_check(struct sock *sk, struct in6_addr *addr)
+{
+ struct ipv6_mc_socklist *mc;
+
+ read_lock(&ipv6_sk_mc_lock);
+ for (mc = sk->net_pinfo.af_inet6.ipv6_mc_list; mc; mc=mc->next) {
+ if (ipv6_addr_cmp(&mc->addr, addr) == 0) {
+ read_unlock(&ipv6_sk_mc_lock);
+ return 1;
+ }
+ }
+ read_unlock(&ipv6_sk_mc_lock);
+
+ return 0;
}
static int igmp6_group_added(struct ifmcaddr6 *mc)
hash = ipv6_addr_hash(addr);
+ write_lock_bh(&ipv6_mc_lock);
for (mc = inet6_mcast_lst[hash]; mc; mc = mc->next) {
if (ipv6_addr_cmp(&mc->mca_addr, addr) == 0 && mc->dev == dev) {
atomic_inc(&mc->mca_users);
+ write_unlock_bh(&ipv6_mc_lock);
return 0;
}
}
mc = kmalloc(sizeof(struct ifmcaddr6), GFP_ATOMIC);
- if (mc == NULL)
+ if (mc == NULL) {
+ write_unlock_bh(&ipv6_mc_lock);
return -ENOMEM;
+ }
memset(mc, 0, sizeof(struct ifmcaddr6));
mc->mca_timer.function = igmp6_timer_handler;
igmp6_group_added(mc);
+ write_unlock_bh(&ipv6_mc_lock);
+
return 0;
}
for (lnk = &idev->mc_list; (iter = *lnk) != NULL; lnk = &iter->if_next) {
if (iter == ma) {
*lnk = iter->if_next;
- synchronize_bh();
return;
}
}
hash = ipv6_addr_hash(addr);
+ write_lock_bh(&ipv6_mc_lock);
for (lnk = &inet6_mcast_lst[hash]; (ma=*lnk) != NULL; lnk = &ma->next) {
if (ipv6_addr_cmp(&ma->mca_addr, addr) == 0 && ma->dev == dev) {
if (atomic_dec_and_test(&ma->mca_users)) {
igmp6_group_dropped(ma);
*lnk = ma->next;
- synchronize_bh();
ipv6_mca_remove(dev, ma);
kfree(ma);
}
+ write_unlock_bh(&ipv6_mc_lock);
return 0;
}
}
+ write_unlock_bh(&ipv6_mc_lock);
return -ENOENT;
}
hash = ipv6_addr_hash(addr);
+ read_lock_bh(&ipv6_mc_lock);
for (mc = inet6_mcast_lst[hash]; mc; mc=mc->next) {
- if (mc->dev == dev && ipv6_addr_cmp(&mc->mca_addr, addr) == 0)
+ if (mc->dev == dev && ipv6_addr_cmp(&mc->mca_addr, addr) == 0) {
+ read_unlock_bh(&ipv6_mc_lock);
return 1;
+ }
}
+ read_unlock_bh(&ipv6_mc_lock);
return 0;
}
if (idev == NULL)
return 0;
+ read_lock(&ipv6_mc_lock);
for (ma = idev->mc_list; ma; ma=ma->if_next)
igmp6_group_queried(ma, resptime);
+ read_unlock(&ipv6_mc_lock);
} else {
int hash = ipv6_addr_hash(addrp);
+ read_lock(&ipv6_mc_lock);
for (ma = inet6_mcast_lst[hash]; ma; ma=ma->next) {
if (ma->dev == skb->dev &&
ipv6_addr_cmp(addrp, &ma->mca_addr) == 0) {
break;
}
}
+ read_unlock(&ipv6_mc_lock);
}
return 0;
hash = ipv6_addr_hash(addrp);
+ read_lock(&ipv6_mc_lock);
for (ma = inet6_mcast_lst[hash]; ma; ma=ma->next) {
if ((ma->dev == dev) && ipv6_addr_cmp(&ma->mca_addr, addrp) == 0) {
if (ma->mca_flags & MAF_TIMER_RUNNING) {
break;
}
}
+ read_unlock(&ipv6_mc_lock);
return 0;
}
struct sock *sk = igmp6_socket->sk;
struct sk_buff *skb;
struct icmp6hdr *hdr;
- struct inet6_ifaddr *ifp;
struct in6_addr *snd_addr;
struct in6_addr *addrp;
+ struct in6_addr addr_buf;
struct in6_addr all_routers;
int err, len, payload_len, full_len;
u8 ra[8] = { IPPROTO_ICMPV6, 0,
dev->hard_header(skb, dev, ETH_P_IPV6, ha, NULL, full_len);
}
- ifp = ipv6_get_lladdr(dev);
-
- if (ifp == NULL) {
+ if (ipv6_get_lladdr(dev, &addr_buf)) {
#if MCAST_DEBUG >= 1
printk(KERN_DEBUG "igmp6: %s no linklocal address\n",
dev->name);
return;
}
- ip6_nd_hdr(sk, skb, dev, &ifp->addr, snd_addr, NEXTHDR_HOP, payload_len);
+ ip6_nd_hdr(sk, skb, dev, &addr_buf, snd_addr, NEXTHDR_HOP, payload_len);
memcpy(skb_put(skb, sizeof(ra)), ra, sizeof(ra));
addrp = (struct in6_addr *) skb_put(skb, sizeof(struct in6_addr));
ipv6_addr_copy(addrp, addr);
- hdr->icmp6_cksum = csum_ipv6_magic(&ifp->addr, snd_addr, len,
+ hdr->icmp6_cksum = csum_ipv6_magic(&addr_buf, snd_addr, len,
IPPROTO_ICMPV6,
csum_partial((__u8 *) hdr, len, 0));
if ((addr_type & (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_LOOPBACK)))
return;
- start_bh_atomic();
igmp6_send(&ma->mca_addr, ma->dev, ICMPV6_MGM_REPORT);
delay = net_random() % IGMP6_UNSOLICITED_IVAL;
add_timer(&ma->mca_timer);
ma->mca_flags |= MAF_TIMER_RUNNING | MAF_LAST_REPORTER;
- end_bh_atomic();
}
static void igmp6_leave_group(struct ifmcaddr6 *ma)
if ((addr_type & IPV6_ADDR_LINKLOCAL))
return;
- start_bh_atomic();
if (ma->mca_flags & MAF_LAST_REPORTER)
igmp6_send(&ma->mca_addr, ma->dev, ICMPV6_MGM_REDUCTION);
if (ma->mca_flags & MAF_TIMER_RUNNING)
del_timer(&ma->mca_timer);
- end_bh_atomic();
}
void igmp6_timer_handler(unsigned long data)
{
struct ifmcaddr6 *ma = (struct ifmcaddr6 *) data;
+ read_lock(&ipv6_mc_lock);
ma->mca_flags |= MAF_LAST_REPORTER;
igmp6_send(&ma->mca_addr, ma->dev, ICMPV6_MGM_REPORT);
ma->mca_flags &= ~MAF_TIMER_RUNNING;
+ read_unlock(&ipv6_mc_lock);
}
/* Device going down */
/* Withdraw multicast list */
+ read_lock_bh(&ipv6_mc_lock);
for (i = idev->mc_list; i; i=i->if_next)
igmp6_group_dropped(i);
+ read_unlock_bh(&ipv6_mc_lock);
/* Delete all-nodes address. */
/* Install multicast list, except for all-nodes (already installed) */
+ read_lock(&ipv6_mc_lock);
for (i = idev->mc_list; i; i=i->if_next)
igmp6_group_added(i);
+ read_unlock(&ipv6_mc_lock);
}
/*
int hash;
struct ifmcaddr6 *i, **lnk;
+ write_lock_bh(&ipv6_mc_lock);
while ((i = idev->mc_list) != NULL) {
idev->mc_list = i->if_next;
for (lnk = &inet6_mcast_lst[hash]; *lnk; lnk = &(*lnk)->next) {
if (*lnk == i) {
*lnk = i->next;
- synchronize_bh();
break;
}
}
igmp6_group_dropped(i);
kfree(i);
}
+ write_unlock_bh(&ipv6_mc_lock);
}
#ifdef CONFIG_PROC_FS
int len=0;
struct device *dev;
- read_lock_bh(&dev_base_lock);
+ read_lock(&dev_base_lock);
for (dev = dev_base; dev; dev = dev->next) {
struct inet6_dev *idev;
if ((idev = ipv6_get_idev(dev)) == NULL)
continue;
+ read_lock_bh(&ipv6_mc_lock);
for (im = idev->mc_list; im; im = im->if_next) {
int i;
len=0;
begin=pos;
}
- if (pos > offset+length)
+ if (pos > offset+length) {
+ read_unlock_bh(&ipv6_mc_lock);
goto done;
+ }
}
+ read_unlock_bh(&ipv6_mc_lock);
}
*eof = 1;
done:
- read_unlock_bh(&dev_base_lock);
+ read_unlock(&dev_base_lock);
*start=buffer+(offset-begin);
len-=(offset-begin);
ndisc_mc_map(daddr, ha, dev, 1);
h_dest = ha;
} else if (neigh) {
- h_dest = neigh->ha;
+ read_lock_bh(&neigh->lock);
+ if (neigh->nud_state&NUD_VALID) {
+ memcpy(ha, neigh->ha, dev->addr_len);
+ h_dest = ha;
+ }
+ read_unlock_bh(&neigh->lock);
} else {
neigh = neigh_lookup(&nd_tbl, daddr, dev);
if (neigh) {
+ read_lock_bh(&neigh->lock);
if (neigh->nud_state&NUD_VALID) {
memcpy(ha, neigh->ha, dev->addr_len);
h_dest = ha;
}
+ read_unlock_bh(&neigh->lock);
neigh_release(neigh);
}
}
struct sock *sk = ndisc_socket->sk;
struct sk_buff *skb;
struct nd_msg *msg;
+ struct in6_addr addr_buf;
int len;
int err;
}
if (saddr == NULL) {
- struct inet6_ifaddr *ifa;
-
- /* use link local address */
- ifa = ipv6_get_lladdr(dev);
-
- if (ifa)
- saddr = &ifa->addr;
+ if (!ipv6_get_lladdr(dev, &addr_buf))
+ saddr = &addr_buf;
}
if (ndisc_build_ll_hdr(skb, dev, daddr, neigh, len) == 0) {
kfree_skb(skb);
}
+/* Called with locked neigh: either read or both */
+
static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb)
{
struct in6_addr *saddr = NULL;
struct in6_addr mcaddr;
struct device *dev = neigh->dev;
struct in6_addr *target = (struct in6_addr *)&neigh->primary_key;
- int probes = neigh->probes;
+ int probes = atomic_read(&neigh->probes);
if (skb && ipv6_chk_addr(&skb->nh.ipv6h->saddr, dev, 0))
saddr = &skb->nh.ipv6h->saddr;
struct sock *sk = ndisc_socket->sk;
int len = sizeof(struct icmp6hdr) + 2 * sizeof(struct in6_addr);
struct sk_buff *buff;
- struct inet6_ifaddr *ifp;
struct icmp6hdr *icmph;
+ struct in6_addr saddr_buf;
struct in6_addr *addrp;
struct device *dev;
struct rt6_info *rt;
rd_len &= ~0x7;
len += rd_len;
- ifp = ipv6_get_lladdr(dev);
-
- if (ifp == NULL) {
- ND_PRINTK1("redirect: no link_local addr for dev\n");
- return;
- }
+ if (ipv6_get_lladdr(dev, &saddr_buf)) {
+ ND_PRINTK1("redirect: no link_local addr for dev\n");
+ return;
+ }
buff = sock_alloc_send_skb(sk, MAX_HEADER + len + dev->hard_header_len + 15,
0, 0, &err);
return;
}
- ip6_nd_hdr(sk, buff, dev, &ifp->addr, &skb->nh.ipv6h->saddr,
+ ip6_nd_hdr(sk, buff, dev, &saddr_buf, &skb->nh.ipv6h->saddr,
IPPROTO_ICMPV6, len);
icmph = (struct icmp6hdr *) skb_put(buff, len);
memcpy(opt, skb->nh.ipv6h, rd_len - 8);
- icmph->icmp6_cksum = csum_ipv6_magic(&ifp->addr, &skb->nh.ipv6h->saddr,
+ icmph->icmp6_cksum = csum_ipv6_magic(&saddr_buf, &skb->nh.ipv6h->saddr,
len, IPPROTO_ICMPV6,
csum_partial((u8 *) icmph, len, 0));
ifp->idev->dev->name);
return 0;
}
- neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 0);
+ neigh = neigh_lookup(&nd_tbl, &msg->target, skb->dev);
if (neigh) {
if (neigh->flags & NTF_ROUTER) {
unsigned long now = jiffies;
int i;
- neigh_table_lock(&nd_tbl);
-
for (i = 0; i <= NEIGH_HASHMASK; i++) {
struct neighbour *neigh;
+ read_lock_bh(&nd_tbl.lock);
for (neigh = nd_tbl.hash_buckets[i]; neigh; neigh = neigh->next) {
int j;
size += 2;
}
+ read_lock(&neigh->lock);
size += sprintf(buffer+len+size,
" %02x %02x %02x %02x %08lx %08lx %08x %04x %04x %04x %8s ", i,
128,
} else {
size += sprintf(buffer+len+size, "000000000000");
}
+ read_unlock(&neigh->lock);
size += sprintf(buffer+len+size, "\n");
len += size;
pos += size;
if (pos <= offset)
len=0;
- if (pos >= offset+length)
+ if (pos >= offset+length) {
+ read_unlock_bh(&nd_tbl.lock);
goto done;
+ }
}
+ read_unlock_bh(&nd_tbl.lock);
}
done:
- neigh_table_unlock(&nd_tbl);
*start = buffer+len-(pos-offset); /* Start of wanted data */
len = pos-offset; /* Start slop */
*
* Adapted from linux/net/ipv4/raw.c
*
- * $Id: raw.c,v 1.25 1999/05/27 00:38:16 davem Exp $
+ * $Id: raw.c,v 1.26 1999/06/09 10:11:18 davem Exp $
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
SOCKHASH_UNLOCK_WRITE();
}
-static __inline__ int inet6_mc_check(struct sock *sk, struct in6_addr *addr)
-{
- struct ipv6_mc_socklist *mc;
-
- for (mc = sk->net_pinfo.af_inet6.ipv6_mc_list; mc; mc=mc->next) {
- if (ipv6_addr_cmp(&mc->addr, addr) == 0)
- return 1;
- }
-
- return 0;
-}
/* Grumble... icmp and ip_input want to get at this... */
struct sock *raw_v6_lookup(struct sock *sk, unsigned short num,
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
- * $Id: reassembly.c,v 1.11 1998/08/26 12:05:16 davem Exp $
+ * $Id: reassembly.c,v 1.13 1999/06/09 08:29:40 davem Exp $
*
* Based on: net/ipv4/ip_fragment.c
*
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
- * $Id: route.c,v 1.35 1999/03/21 05:22:57 davem Exp $
+ * $Id: route.c,v 1.36 1999/06/09 10:11:21 davem Exp $
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
return 0;
}
-static int fib6_dump_done(struct netlink_callback *cb)
+static void fib6_dump_end(struct netlink_callback *cb)
{
struct fib6_walker_t *w = (void*)cb->args[0];
cb->done = (void*)cb->args[1];
cb->args[1] = 0;
}
+}
+
+static int fib6_dump_done(struct netlink_callback *cb)
+{
+ fib6_dump_end(cb);
return cb->done(cb);
}
if (res <= 0 && skb->len == 0)
RT6_TRACE("%p>dump end\n", w);
#endif
+ res = res < 0 ? res : skb->len;
/* res < 0 is an error. (really, impossible)
res == 0 means that dump is complete, but skb still can contain data.
res > 0 dump is not complete, but frame is full.
*/
- return res < 0 ? res : skb->len;
+ /* Destroy walker, if dump of this table is complete. */
+ if (res <= 0)
+ fib6_dump_end(cb);
+ return res;
}
int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
- * $Id: tcp_ipv6.c,v 1.106 1999/05/27 01:12:44 davem Exp $
+ * $Id: tcp_ipv6.c,v 1.108 1999/06/09 08:29:43 davem Exp $
*
* Based on:
* linux/net/ipv4/tcp.c
*
* Based on linux/ipv4/udp.c
*
- * $Id: udp.c,v 1.41 1999/05/27 00:38:18 davem Exp $
+ * $Id: udp.c,v 1.42 1999/06/09 10:11:24 davem Exp $
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
return 0;
}
-static __inline__ int inet6_mc_check(struct sock *sk, struct in6_addr *addr)
-{
- struct ipv6_mc_socklist *mc;
-
- for (mc = sk->net_pinfo.af_inet6.ipv6_mc_list; mc; mc=mc->next) {
- if (ipv6_addr_cmp(&mc->addr, addr) == 0)
- return 1;
- }
-
- return 0;
-}
-
static struct sock *udp_v6_mcast_next(struct sock *sk,
u16 loc_port, struct in6_addr *loc_addr,
u16 rmt_port, struct in6_addr *rmt_addr,
{
struct device *dev, *first = NULL;
- read_lock_bh(&dev_base_lock);
+ read_lock(&dev_base_lock);
for (dev = dev_base; dev != NULL; dev = dev->next) {
if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM)
if (first == NULL || strncmp(dev->name, first->name, 3) < 0)
first = dev;
}
- read_unlock_bh(&dev_base_lock);
+ read_unlock(&dev_base_lock);
return first;
}
{
struct device *dev;
- read_lock_bh(&dev_base_lock);
+ read_lock(&dev_base_lock);
for (dev = dev_base; dev != NULL; dev = dev->next) {
if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM && ax25cmp(addr, (ax25_address *)dev->dev_addr) == 0)
goto out;
}
out:
- read_unlock_bh(&dev_base_lock);
+ read_unlock(&dev_base_lock);
return dev;
}
EXPORT_SYMBOL(neigh_table_init);
EXPORT_SYMBOL(neigh_table_clear);
-EXPORT_SYMBOL(__neigh_lookup);
EXPORT_SYMBOL(neigh_resolve_output);
EXPORT_SYMBOL(neigh_connected_output);
EXPORT_SYMBOL(neigh_update);
EXPORT_SYMBOL(dev_set_allmulti);
EXPORT_SYMBOL(dev_set_promiscuity);
EXPORT_SYMBOL(sklist_remove_socket);
-EXPORT_SYMBOL(rtnl_wait);
-EXPORT_SYMBOL(rtnl_rlockct);
+EXPORT_SYMBOL(rtnl_sem);
EXPORT_SYMBOL(rtnl_lock);
EXPORT_SYMBOL(rtnl_unlock);
*
* PACKET - implements raw packet sockets.
*
- * Version: $Id: af_packet.c,v 1.19 1999/03/21 05:23:03 davem Exp $
+ * Version: $Id: af_packet.c,v 1.20 1999/06/09 10:11:32 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
else
return(-ENOTCONN); /* SOCK_PACKET must be sent giving an address */
+ dev_lock_list();
+
/*
* Find the device first to size check it
*/
saddr->spkt_device[13] = 0;
dev = dev_get(saddr->spkt_device);
- if (dev == NULL)
- {
- return(-ENODEV);
- }
+ err = -ENODEV;
+ if (dev == NULL)
+ goto out_unlock;
/*
* You may not queue a frame bigger than the mtu. This is the lowest level
* raw protocol and you must do your own fragmentation at this level.
*/
- if(len>dev->mtu+dev->hard_header_len)
- return -EMSGSIZE;
+ err = -EMSGSIZE;
+ if(len>dev->mtu+dev->hard_header_len)
+ goto out_unlock;
- dev_lock_list();
err = -ENOBUFS;
skb = sock_wmalloc(sk, len+dev->hard_header_len+15, 0, GFP_KERNEL);
* Now send it
*/
- dev_unlock_list();
dev_queue_xmit(skb);
+ dev_unlock_list();
return(len);
out_free:
addr = saddr->sll_addr;
}
+ dev_lock_list();
dev = dev_get_by_index(ifindex);
+ err = -ENXIO;
if (dev == NULL)
- return -ENXIO;
+ goto out_unlock;
if (sock->type == SOCK_RAW)
reserve = dev->hard_header_len;
+ err = -EMSGSIZE;
if (len > dev->mtu+reserve)
- return -EMSGSIZE;
+ goto out_unlock;
- dev_lock_list();
skb = sock_alloc_send_skb(sk, len+dev->hard_header_len+15, 0,
msg->msg_flags & MSG_DONTWAIT, &err);
* Now send it
*/
- dev_unlock_list();
dev_queue_xmit(skb);
+ dev_unlock_list();
return(len);
out_free:
{
struct device *dev, *first = NULL;
- read_lock_bh(&dev_base_lock);
+ read_lock(&dev_base_lock);
for (dev = dev_base; dev != NULL; dev = dev->next) {
if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE)
if (first == NULL || strncmp(dev->name, first->name, 3) < 0)
first = dev;
}
- read_unlock_bh(&dev_base_lock);
+ read_unlock(&dev_base_lock);
return first;
}
{
struct device *dev;
- read_lock_bh(&dev_base_lock);
+ read_lock(&dev_base_lock);
for (dev = dev_base; dev != NULL; dev = dev->next) {
if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE && rosecmp(addr, (rose_address *)dev->dev_addr) == 0)
goto out;
}
out:
- read_unlock_bh(&dev_base_lock);
+ read_unlock(&dev_base_lock);
return dev;
}
static struct tcf_proto_ops *tcf_proto_base;
+/* Protects list of registered TC modules. It is pure SMP lock. */
+static rwlock_t cls_mod_lock = RW_LOCK_UNLOCKED;
/* Find classifier type by string name */
struct tcf_proto_ops * tcf_proto_lookup_ops(struct rtattr *kind)
{
- struct tcf_proto_ops *t;
+ struct tcf_proto_ops *t = NULL;
if (kind) {
+ read_lock(&cls_mod_lock);
for (t = tcf_proto_base; t; t = t->next) {
if (rtattr_strcmp(kind, t->kind) == 0)
- return t;
+ break;
}
+ read_unlock(&cls_mod_lock);
}
- return NULL;
+ return t;
}
/* Register(unregister) new classifier type */
{
struct tcf_proto_ops *t, **tp;
- for (tp = &tcf_proto_base; (t=*tp) != NULL; tp = &t->next)
- if (strcmp(ops->kind, t->kind) == 0)
+ write_lock(&cls_mod_lock);
+ for (tp = &tcf_proto_base; (t=*tp) != NULL; tp = &t->next) {
+ if (strcmp(ops->kind, t->kind) == 0) {
+ write_unlock(&cls_mod_lock);
return -EEXIST;
+ }
+ }
ops->next = NULL;
*tp = ops;
+ write_unlock(&cls_mod_lock);
return 0;
}
{
struct tcf_proto_ops *t, **tp;
+ write_lock(&cls_mod_lock);
for (tp = &tcf_proto_base; (t=*tp) != NULL; tp = &t->next)
if (t == ops)
break;
- if (!t)
+ if (!t) {
+ write_unlock(&cls_mod_lock);
return -ENOENT;
+ }
*tp = t->next;
+ write_unlock(&cls_mod_lock);
return 0;
}
kfree(tp);
goto errout;
}
+ write_lock(&qdisc_tree_lock);
+ spin_lock_bh(&dev->queue_lock);
tp->next = *back;
*back = tp;
+ spin_unlock_bh(&dev->queue_lock);
+ write_unlock(&qdisc_tree_lock);
} else if (tca[TCA_KIND-1] && rtattr_strcmp(tca[TCA_KIND-1], tp->ops->kind))
goto errout;
if (fh == 0) {
if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) {
+ write_lock(&qdisc_tree_lock);
+ spin_lock_bh(&dev->queue_lock);
*back = tp->next;
- synchronize_bh();
+ spin_unlock_bh(&dev->queue_lock);
+ write_unlock(&qdisc_tree_lock);
tp->ops->destroy(tp);
kfree(tp);
return skb->len;
if ((dev = dev_get_by_index(tcm->tcm_ifindex)) == NULL)
return skb->len;
+
+ read_lock(&qdisc_tree_lock);
if (!tcm->tcm_parent)
q = dev->qdisc_sleeping;
else
q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
- if (q == NULL)
+ if (q == NULL) {
+ read_unlock(&qdisc_tree_lock);
return skb->len;
+ }
if ((cops = q->ops->cl_ops) == NULL)
goto errout;
if (TC_H_MIN(tcm->tcm_parent)) {
if (cl)
cops->put(q, cl);
+ read_unlock(&qdisc_tree_lock);
return skb->len;
}
unsigned long cl;
head->ht[h] = f->next;
- if ((cl = cls_set_class(&f->res.class, 0)) != 0)
+ if ((cl = __cls_set_class(&f->res.class, 0)) != 0)
tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
#ifdef CONFIG_NET_CLS_POLICE
tcf_police_release(f->police);
if (*fp == f) {
unsigned long cl;
+ tcf_tree_lock(tp);
*fp = f->next;
- synchronize_bh();
+ tcf_tree_unlock(tp);
- if ((cl = cls_set_class(&f->res.class, 0)) != 0)
+ if ((cl = cls_set_class(tp, &f->res.class, 0)) != 0)
tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
#ifdef CONFIG_NET_CLS_POLICE
tcf_police_release(f->police);
f->res.classid = *(u32*)RTA_DATA(tb[TCA_FW_CLASSID-1]);
cl = tp->q->ops->cl_ops->bind_tcf(tp->q, base, f->res.classid);
- cl = cls_set_class(&f->res.class, cl);
+ cl = cls_set_class(tp, &f->res.class, cl);
if (cl)
tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
}
if (tb[TCA_FW_POLICE-1]) {
struct tcf_police *police = tcf_police_locate(tb[TCA_FW_POLICE-1], tca[TCA_RATE-1]);
+ tcf_tree_lock(tp);
police = xchg(&f->police, police);
- synchronize_bh();
+ tcf_tree_unlock(tp);
tcf_police_release(police);
}
return -ENOBUFS;
memset(head, 0, sizeof(*head));
+ tcf_tree_lock(tp);
tp->root = head;
- synchronize_bh();
+ tcf_tree_unlock(tp);
}
f = kmalloc(sizeof(struct fw_filter), GFP_KERNEL);
if (RTA_PAYLOAD(tb[TCA_FW_CLASSID-1]) != 4)
goto errout;
f->res.classid = *(u32*)RTA_DATA(tb[TCA_FW_CLASSID-1]);
- cls_set_class(&f->res.class, tp->q->ops->cl_ops->bind_tcf(tp->q, base, f->res.classid));
+ cls_set_class(tp, &f->res.class, tp->q->ops->cl_ops->bind_tcf(tp->q, base, f->res.classid));
}
#ifdef CONFIG_NET_CLS_POLICE
#endif
f->next = head->ht[fw_hash(handle)];
- wmb();
+ tcf_tree_lock(tp);
head->ht[fw_hash(handle)] = f;
+ tcf_tree_unlock(tp);
*arg = (unsigned long)f;
return 0;
rta->rta_len = skb->tail - b;
#ifdef CONFIG_NET_CLS_POLICE
if (f->police) {
- RTA_PUT(skb, TCA_STATS, sizeof(struct tc_stats), &f->police->stats);
+ if (qdisc_copy_stats(skb, &f->police->stats))
+ goto rtattr_failure;
}
#endif
return skb->len;
return id&0xF;
}
-static void route4_reset_fastmap(struct route4_head *head, u32 id)
+static void route4_reset_fastmap(struct device *dev, struct route4_head *head, u32 id)
{
- start_bh_atomic();
+ spin_lock_bh(&dev->queue_lock);
memset(head->fastmap, 0, sizeof(head->fastmap));
- end_bh_atomic();
+ spin_unlock_bh(&dev->queue_lock);
}
static void __inline__
unsigned long cl;
b->ht[h2] = f->next;
- if ((cl = cls_set_class(&f->res.class, 0)) != 0)
+ if ((cl = __cls_set_class(&f->res.class, 0)) != 0)
tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
#ifdef CONFIG_NET_CLS_POLICE
tcf_police_release(f->police);
if (*fp == f) {
unsigned long cl;
+ tcf_tree_lock(tp);
*fp = f->next;
- synchronize_bh();
+ tcf_tree_unlock(tp);
- route4_reset_fastmap(head, f->id);
+ route4_reset_fastmap(tp->q->dev, head, f->id);
- if ((cl = cls_set_class(&f->res.class, 0)) != 0)
+ if ((cl = cls_set_class(tp, &f->res.class, 0)) != 0)
tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
#ifdef CONFIG_NET_CLS_POLICE
return 0;
/* OK, session has no flows */
+ tcf_tree_lock(tp);
head->table[to_hash(h)] = NULL;
- synchronize_bh();
+ tcf_tree_unlock(tp);
kfree(b);
return 0;
unsigned long cl;
f->res.classid = *(u32*)RTA_DATA(tb[TCA_ROUTE4_CLASSID-1]);
- cl = cls_set_class(&f->res.class, tp->q->ops->cl_ops->bind_tcf(tp->q, base, f->res.classid));
+ cl = cls_set_class(tp, &f->res.class, tp->q->ops->cl_ops->bind_tcf(tp->q, base, f->res.classid));
if (cl)
tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
}
if (tb[TCA_ROUTE4_POLICE-1]) {
struct tcf_police *police = tcf_police_locate(tb[TCA_ROUTE4_POLICE-1], tca[TCA_RATE-1]);
+ tcf_tree_lock(tp);
police = xchg(&f->police, police);
- synchronize_bh();
+ tcf_tree_unlock(tp);
tcf_police_release(police);
}
return -ENOBUFS;
memset(head, 0, sizeof(struct route4_head));
+ tcf_tree_lock(tp);
tp->root = head;
- synchronize_bh();
+ tcf_tree_unlock(tp);
}
f = kmalloc(sizeof(struct route4_filter), GFP_KERNEL);
goto errout;
memset(b, 0, sizeof(*b));
+ tcf_tree_lock(tp);
head->table[h1] = b;
- synchronize_bh();
+ tcf_tree_unlock(tp);
}
f->bkt = b;
goto errout;
}
- cls_set_class(&f->res.class, tp->q->ops->cl_ops->bind_tcf(tp->q, base, f->res.classid));
+ cls_set_class(tp, &f->res.class, tp->q->ops->cl_ops->bind_tcf(tp->q, base, f->res.classid));
#ifdef CONFIG_NET_CLS_POLICE
if (tb[TCA_ROUTE4_POLICE-1])
f->police = tcf_police_locate(tb[TCA_ROUTE4_POLICE-1], tca[TCA_RATE-1]);
#endif
f->next = f1;
- wmb();
+ tcf_tree_lock(tp);
*ins_f = f;
+ tcf_tree_unlock(tp);
- route4_reset_fastmap(head, f->id);
+ route4_reset_fastmap(tp->q->dev, head, f->id);
*arg = (unsigned long)f;
return 0;
rta->rta_len = skb->tail - b;
#ifdef CONFIG_NET_CLS_POLICE
if (f->police) {
- RTA_PUT(skb, TCA_STATS, sizeof(struct tc_stats), &f->police->stats);
+ if (qdisc_copy_stats(skb, &f->police->stats))
+ goto rtattr_failure;
}
#endif
return skb->len;
unsigned long cl;
s->ht[h2] = f->next;
- if ((cl = cls_set_class(&f->res.class, 0)) != 0)
+ if ((cl = __cls_set_class(&f->res.class, 0)) != 0)
tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
#ifdef CONFIG_NET_CLS_POLICE
tcf_police_release(f->police);
unsigned long cl;
+ tcf_tree_lock(tp);
*fp = f->next;
- synchronize_bh();
+ tcf_tree_unlock(tp);
- if ((cl = cls_set_class(&f->res.class, 0)) != 0)
+ if ((cl = cls_set_class(tp, &f->res.class, 0)) != 0)
tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
#ifdef CONFIG_NET_CLS_POLICE
for (sp = &((struct rsvp_head*)tp->root)->ht[h&0xFF];
*sp; sp = &(*sp)->next) {
if (*sp == s) {
+ tcf_tree_lock(tp);
*sp = s->next;
- synchronize_bh();
+ tcf_tree_unlock(tp);
kfree(s);
return 0;
unsigned long cl;
f->res.classid = *(u32*)RTA_DATA(tb[TCA_RSVP_CLASSID-1]);
- cl = cls_set_class(&f->res.class, tp->q->ops->cl_ops->bind_tcf(tp->q, base, f->res.classid));
+ cl = cls_set_class(tp, &f->res.class, tp->q->ops->cl_ops->bind_tcf(tp->q, base, f->res.classid));
if (cl)
tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
}
if (tb[TCA_RSVP_POLICE-1]) {
struct tcf_police *police = tcf_police_locate(tb[TCA_RSVP_POLICE-1], tca[TCA_RATE-1]);
+ tcf_tree_lock(tp);
police = xchg(&f->police, police);
- synchronize_bh();
+ tcf_tree_unlock(tp);
tcf_police_release(police);
}
f->sess = s;
if (f->tunnelhdr == 0)
- cls_set_class(&f->res.class, tp->q->ops->cl_ops->bind_tcf(tp->q, base, f->res.classid));
+ cls_set_class(tp, &f->res.class, tp->q->ops->cl_ops->bind_tcf(tp->q, base, f->res.classid));
#ifdef CONFIG_NET_CLS_POLICE
if (tb[TCA_RSVP_POLICE-1])
f->police = tcf_police_locate(tb[TCA_RSVP_POLICE-1], tca[TCA_RATE-1]);
rta->rta_len = skb->tail - b;
#ifdef CONFIG_NET_CLS_POLICE
if (f->police) {
- RTA_PUT(skb, TCA_STATS, sizeof(struct tc_stats), &f->police->stats);
+ if (qdisc_copy_stats(skb, &f->police->stats))
+ goto rtattr_failure;
}
#endif
return skb->len;
{
unsigned long cl;
- if ((cl = cls_set_class(&n->res.class, 0)) != 0)
+ if ((cl = __cls_set_class(&n->res.class, 0)) != 0)
tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
#ifdef CONFIG_NET_CLS_POLICE
tcf_police_release(n->police);
if (ht) {
for (kp = &ht->ht[TC_U32_HASH(key->handle)]; *kp; kp = &(*kp)->next) {
if (*kp == key) {
+ tcf_tree_lock(tp);
*kp = key->next;
- synchronize_bh();
+ tcf_tree_unlock(tp);
u32_destroy_key(tp, key);
return 0;
for (h=0; h<=ht->divisor; h++) {
while ((n = ht->ht[h]) != NULL) {
ht->ht[h] = n->next;
- synchronize_bh();
u32_destroy_key(tp, n);
}
ht_down->refcnt++;
}
+ sch_tree_lock(q);
ht_down = xchg(&n->ht_down, ht_down);
- synchronize_bh();
+ sch_tree_unlock(q);
if (ht_down)
ht_down->refcnt--;
unsigned long cl;
n->res.classid = *(u32*)RTA_DATA(tb[TCA_U32_CLASSID-1]);
- cl = cls_set_class(&n->res.class, q->ops->cl_ops->bind_tcf(q, base, n->res.classid));
+ sch_tree_lock(q);
+ cl = __cls_set_class(&n->res.class, q->ops->cl_ops->bind_tcf(q, base, n->res.classid));
+ sch_tree_unlock(q);
if (cl)
q->ops->cl_ops->unbind_tcf(q, cl);
}
if (tb[TCA_U32_POLICE-1]) {
struct tcf_police *police = tcf_police_locate(tb[TCA_U32_POLICE-1], est);
+ sch_tree_lock(q);
police = xchg(&n->police, police);
- synchronize_bh();
+ sch_tree_unlock(q);
tcf_police_release(police);
}
rta->rta_len = skb->tail - b;
#ifdef CONFIG_NET_CLS_POLICE
if (TC_U32_KEY(n->handle) && n->police) {
- RTA_PUT(skb, TCA_STATS, sizeof(struct tc_stats), &n->police->stats);
+ if (qdisc_copy_stats(skb, &n->police->stats))
+ goto rtattr_failure;
}
#endif
return skb->len;
static struct qdisc_estimator_head elist[EST_MAX_INTERVAL+1];
+/* Estimator array lock */
+static rwlock_t est_lock = RW_LOCK_UNLOCKED;
+
static void est_timer(unsigned long arg)
{
int idx = (int)arg;
struct qdisc_estimator *e;
+ read_lock(&est_lock);
for (e = elist[idx].list; e; e = e->next) {
- u64 nbytes = e->stats->bytes;
- u32 npackets = e->stats->packets;
+ struct tc_stats *st = e->stats;
+ u64 nbytes;
+ u32 npackets;
u32 rate;
-
+
+ spin_lock(st->lock);
+ nbytes = st->bytes;
+ npackets = st->packets;
rate = (nbytes - e->last_bytes)<<(7 - idx);
e->last_bytes = nbytes;
e->avbps += ((long)rate - (long)e->avbps) >> e->ewma_log;
- e->stats->bps = (e->avbps+0xF)>>5;
+ st->bps = (e->avbps+0xF)>>5;
rate = (npackets - e->last_packets)<<(12 - idx);
e->last_packets = npackets;
e->avpps += ((long)rate - (long)e->avpps) >> e->ewma_log;
e->stats->pps = (e->avpps+0x1FF)>>10;
+ spin_unlock(st->lock);
}
- elist[idx].timer.expires = jiffies + ((HZ/4)<<idx);
- add_timer(&elist[idx].timer);
+ mod_timer(&elist[idx].timer, jiffies + ((HZ/4)<<idx));
+ read_unlock(&est_lock);
}
int qdisc_new_estimator(struct tc_stats *stats, struct rtattr *opt)
elist[est->interval].timer.function = est_timer;
add_timer(&elist[est->interval].timer);
}
+ write_lock_bh(&est_lock);
elist[est->interval].list = est;
+ write_unlock_bh(&est_lock);
return 0;
}
continue;
}
+ write_lock_bh(&est_lock);
*pest = est->next;
- synchronize_bh();
+ write_unlock_bh(&est_lock);
kfree(est);
killed++;
static u32 idx_gen;
static struct tcf_police *tcf_police_ht[16];
+/* Policer hash table lock */
+static rwlock_t police_lock = RW_LOCK_UNLOCKED;
+
+/* Each policer is serialized by its individual spinlock */
static __inline__ unsigned tcf_police_hash(u32 index)
{
{
struct tcf_police *p;
+ read_lock(&police_lock);
for (p = tcf_police_ht[tcf_police_hash(index)]; p; p = p->next) {
if (p->index == index)
- return p;
+ break;
}
- return NULL;
+ read_unlock(&police_lock);
+ return p;
}
static __inline__ u32 tcf_police_new_index(void)
for (p1p = &tcf_police_ht[h]; *p1p; p1p = &(*p1p)->next) {
if (*p1p == p) {
+ write_lock_bh(&police_lock);
*p1p = p->next;
+ write_unlock_bh(&police_lock);
#ifdef CONFIG_NET_ESTIMATOR
qdisc_kill_estimator(&p->stats);
#endif
memset(p, 0, sizeof(*p));
p->refcnt = 1;
+ spin_lock_init(&p->lock);
+ p->stats.lock = &p->lock;
if (parm->rate.rate) {
if ((p->R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE-1])) == NULL)
goto failure;
qdisc_new_estimator(&p->stats, est);
#endif
h = tcf_police_hash(p->index);
+ write_lock_bh(&police_lock);
p->next = tcf_police_ht[h];
tcf_police_ht[h] = p;
+ write_unlock_bh(&police_lock);
return p;
failure:
long toks;
long ptoks = 0;
+ spin_lock(&p->lock);
+
p->stats.bytes += skb->len;
p->stats.packets++;
#ifdef CONFIG_NET_ESTIMATOR
if (p->ewma_rate && p->stats.bps >= p->ewma_rate) {
p->stats.overlimits++;
+ spin_unlock(&p->lock);
return p->action;
}
#endif
if (skb->len <= p->mtu) {
- if (p->R_tab == NULL)
+ if (p->R_tab == NULL) {
+ spin_unlock(&p->lock);
return p->result;
+ }
PSCHED_GET_TIME(now);
p->t_c = now;
p->toks = toks;
p->ptoks = ptoks;
+ spin_unlock(&p->lock);
return p->result;
}
}
p->stats.overlimits++;
+ spin_unlock(&p->lock);
return p->action;
}
changes qdisc parameters.
*/
+/* Protects list of registered TC modules. It is pure SMP lock. */
+static rwlock_t qdisc_mod_lock = RW_LOCK_UNLOCKED;
+
+
/************************************************
* Queueing disciplines manipulation. *
************************************************/
{
struct Qdisc_ops *q, **qp;
- for (qp = &qdisc_base; (q=*qp)!=NULL; qp = &q->next)
- if (strcmp(qops->id, q->id) == 0)
+ write_lock(&qdisc_mod_lock);
+ for (qp = &qdisc_base; (q=*qp)!=NULL; qp = &q->next) {
+ if (strcmp(qops->id, q->id) == 0) {
+ write_unlock(&qdisc_mod_lock);
return -EEXIST;
+ }
+ }
if (qops->enqueue == NULL)
qops->enqueue = noop_qdisc_ops.enqueue;
qops->next = NULL;
*qp = qops;
+ write_unlock(&qdisc_mod_lock);
return 0;
}
int unregister_qdisc(struct Qdisc_ops *qops)
{
struct Qdisc_ops *q, **qp;
+ int err = -ENOENT;
+
+ write_lock(&qdisc_mod_lock);
for (qp = &qdisc_base; (q=*qp)!=NULL; qp = &q->next)
if (q == qops)
break;
- if (!q)
- return -ENOENT;
- *qp = q->next;
- q->next = NULL;
- return 0;
+ if (q) {
+ *qp = q->next;
+ q->next = NULL;
+ err = 0;
+ }
+ write_unlock(&qdisc_mod_lock);
+ return err;
}
/* We know handle. Find qdisc among all qdisc's attached to device
struct Qdisc_ops *qdisc_lookup_ops(struct rtattr *kind)
{
- struct Qdisc_ops *q;
+ struct Qdisc_ops *q = NULL;
if (kind) {
+ read_lock(&qdisc_mod_lock);
for (q = qdisc_base; q; q = q->next) {
if (rtattr_strcmp(kind, q->id) == 0)
- return q;
+ break;
}
+ read_unlock(&qdisc_mod_lock);
}
- return NULL;
+ return q;
}
static struct qdisc_rate_table *qdisc_rtab_list;
if (dev->flags & IFF_UP)
dev_deactivate(dev);
- start_bh_atomic();
+ write_lock(&qdisc_tree_lock);
+ spin_lock_bh(&dev->queue_lock);
oqdisc = dev->qdisc_sleeping;
/* Prune old scheduler */
qdisc = &noop_qdisc;
dev->qdisc_sleeping = qdisc;
dev->qdisc = &noop_qdisc;
- end_bh_atomic();
+ spin_unlock_bh(&dev->queue_lock);
+ write_unlock(&qdisc_tree_lock);
if (dev->flags & IFF_UP)
dev_activate(dev);
goto err_out;
/* Grrr... Resolve race condition with module unload */
-
+
err = -EINVAL;
if (ops != qdisc_lookup_ops(kind))
goto err_out;
sch->dequeue = ops->dequeue;
sch->dev = dev;
atomic_set(&sch->refcnt, 1);
+ sch->stats.lock = &dev->queue_lock;
if (handle == 0) {
handle = qdisc_alloc_handle(dev);
err = -ENOMEM;
sch->handle = handle;
if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS-1])) == 0) {
+ write_lock(&qdisc_tree_lock);
sch->next = dev->qdisc_list;
dev->qdisc_list = sch;
+ write_unlock(&qdisc_tree_lock);
#ifdef CONFIG_NET_ESTIMATOR
if (tca[TCA_RATE-1])
qdisc_new_estimator(&sch->stats, tca[TCA_RATE-1]);
return err;
if (q) {
qdisc_notify(skb, n, clid, q, NULL);
+ spin_lock_bh(&dev->queue_lock);
qdisc_destroy(q);
+ spin_unlock_bh(&dev->queue_lock);
}
} else {
qdisc_notify(skb, n, clid, NULL, q);
struct Qdisc *old_q = NULL;
err = qdisc_graft(dev, p, clid, q, &old_q);
if (err) {
- if (q)
+ if (q) {
+ spin_lock_bh(&dev->queue_lock);
qdisc_destroy(q);
+ spin_unlock_bh(&dev->queue_lock);
+ }
return err;
}
qdisc_notify(skb, n, clid, old_q, q);
- if (old_q)
+ if (old_q) {
+ spin_lock_bh(&dev->queue_lock);
qdisc_destroy(old_q);
+ spin_unlock_bh(&dev->queue_lock);
+ }
}
return 0;
}
+int qdisc_copy_stats(struct sk_buff *skb, struct tc_stats *st)
+{
+ spin_lock_bh(st->lock);
+ RTA_PUT(skb, TCA_STATS, (char*)&st->lock - (char*)st, st);
+ spin_unlock_bh(st->lock);
+ return 0;
+
+rtattr_failure:
+ spin_unlock_bh(st->lock);
+ return -1;
+}
+
+
static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
u32 pid, u32 seq, unsigned flags, int event)
{
if (q->ops->dump && q->ops->dump(q, skb) < 0)
goto rtattr_failure;
q->stats.qlen = q->q.qlen;
- RTA_PUT(skb, TCA_STATS, sizeof(q->stats), &q->stats);
+ if (qdisc_copy_stats(skb, &q->stats))
+ goto rtattr_failure;
nlh->nlmsg_len = skb->tail - b;
return skb->len;
s_idx = cb->args[0];
s_q_idx = q_idx = cb->args[1];
- read_lock_bh(&dev_base_lock);
+ read_lock(&dev_base_lock);
for (dev=dev_base, idx=0; dev; dev = dev->next, idx++) {
if (idx < s_idx)
continue;
if (idx > s_idx)
s_q_idx = 0;
+ read_lock(&qdisc_tree_lock);
for (q = dev->qdisc_list, q_idx = 0; q;
q = q->next, q_idx++) {
if (q_idx < s_q_idx)
continue;
if (tc_fill_qdisc(skb, q, 0, NETLINK_CB(cb->skb).pid,
- cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
+ cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0) {
+ read_unlock(&qdisc_tree_lock);
goto done;
+ }
}
+ read_unlock(&qdisc_tree_lock);
}
done:
- read_unlock_bh(&dev_base_lock);
+ read_unlock(&dev_base_lock);
cb->args[0] = idx;
cb->args[1] = q_idx;
s_t = cb->args[0];
+ read_lock(&qdisc_tree_lock);
for (q=dev->qdisc_list, t=0; q; q = q->next, t++) {
if (t < s_t) continue;
if (!q->ops->cl_ops) continue;
if (arg.w.stop)
break;
}
+ read_unlock(&qdisc_tree_lock);
cb->args[0] = t;
q->link.ewma_log = TC_CBQ_DEF_EWMA;
q->link.avpkt = q->link.allot/2;
q->link.minidle = -0x7FFFFFFF;
+ q->link.stats.lock = &sch->dev->queue_lock;
init_timer(&q->wd_timer);
q->wd_timer.data = (unsigned long)sch;
return 0;
}
+int cbq_copy_xstats(struct sk_buff *skb, struct tc_cbq_xstats *st)
+{
+ RTA_PUT(skb, TCA_STATS, sizeof(*st), st);
+ return 0;
+
+rtattr_failure:
+ return -1;
+}
+
+
static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
struct cbq_sched_data *q = (struct cbq_sched_data*)sch->data;
if (cbq_dump_attr(skb, &q->link) < 0)
goto rtattr_failure;
rta->rta_len = skb->tail - b;
+ spin_lock_bh(&sch->dev->queue_lock);
q->link.xstats.avgidle = q->link.avgidle;
- RTA_PUT(skb, TCA_XSTATS, sizeof(q->link.xstats), &q->link.xstats);
+ if (cbq_copy_xstats(skb, &q->link.xstats)) {
+ spin_unlock_bh(&sch->dev->queue_lock);
+ goto rtattr_failure;
+ }
+ spin_unlock_bh(&sch->dev->queue_lock);
return skb->len;
rtattr_failure:
goto rtattr_failure;
rta->rta_len = skb->tail - b;
cl->stats.qlen = cl->q->q.qlen;
- RTA_PUT(skb, TCA_STATS, sizeof(cl->stats), &cl->stats);
+ if (qdisc_copy_stats(skb, &cl->stats))
+ goto rtattr_failure;
+ spin_lock_bh(&sch->dev->queue_lock);
cl->xstats.avgidle = cl->avgidle;
cl->xstats.undertime = 0;
if (!PSCHED_IS_PASTPERFECT(cl->undertime))
cl->xstats.undertime = PSCHED_TDIFF(cl->undertime, q->now);
- RTA_PUT(skb, TCA_XSTATS, sizeof(cl->xstats), &cl->xstats);
+ q->link.xstats.avgidle = q->link.avgidle;
+ if (cbq_copy_xstats(skb, &cl->xstats)) {
+ spin_unlock_bh(&sch->dev->queue_lock);
+ goto rtattr_failure;
+ }
+ spin_unlock_bh(&sch->dev->queue_lock);
return skb->len;
new->reshape_fail = cbq_reshape_fail;
#endif
}
- if ((*old = xchg(&cl->q, new)) != NULL)
- qdisc_reset(*old);
+ sch_tree_lock(sch);
+ *old = cl->q;
+ cl->q = new;
+ qdisc_reset(*old);
+ sch_tree_unlock(sch);
return 0;
}
struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
struct cbq_class *cl = (struct cbq_class*)arg;
- start_bh_atomic();
if (--cl->refcnt == 0) {
#ifdef CONFIG_NET_CLS_POLICE
+ spin_lock_bh(&sch->dev->queue_lock);
if (q->rx_class == cl)
q->rx_class = NULL;
+ spin_unlock_bh(&sch->dev->queue_lock);
#endif
+
cbq_destroy_class(cl);
}
- end_bh_atomic();
- return;
}
static int
}
/* Change class parameters */
- start_bh_atomic();
+ sch_tree_lock(sch);
if (cl->next_alive != NULL)
cbq_deactivate_class(cl);
if (cl->q->q.qlen)
cbq_activate_class(cl);
- end_bh_atomic();
+ sch_tree_unlock(sch);
#ifdef CONFIG_NET_ESTIMATOR
if (tca[TCA_RATE-1]) {
cl->allot = parent->allot;
cl->quantum = cl->allot;
cl->weight = cl->R_tab->rate.rate;
+ cl->stats.lock = &sch->dev->queue_lock;
- start_bh_atomic();
+ sch_tree_lock(sch);
cbq_link_class(cl);
cl->borrow = cl->tparent;
if (cl->tparent != &q->link)
#endif
if (tb[TCA_CBQ_FOPT-1])
cbq_set_fopt(cl, RTA_DATA(tb[TCA_CBQ_FOPT-1]));
- end_bh_atomic();
+ sch_tree_unlock(sch);
#ifdef CONFIG_NET_ESTIMATOR
if (tca[TCA_RATE-1])
if (cl->filters || cl->children || cl == &q->link)
return -EBUSY;
- start_bh_atomic();
+ sch_tree_lock(sch);
if (cl->next_alive)
cbq_deactivate_class(cl);
cbq_sync_defmap(cl);
cbq_rmprio(q, cl);
+ sch_tree_unlock(sch);
if (--cl->refcnt == 0)
cbq_destroy_class(cl);
- end_bh_atomic();
-
return 0;
}
a = &q->flow[cl];
- start_bh_atomic();
+ spin_lock_bh(&sch->dev->queue_lock);
#if 0
a->rate_log = copt->rate_log;
#endif
if (tb[TCA_CSZ_RTAB-1])
memcpy(a->L_tab, RTA_DATA(tb[TCA_CSZ_RTAB-1]), 1024);
- end_bh_atomic();
+ spin_unlock_bh(&sch->dev->queue_lock);
return 0;
}
/* NI */
a = &q->flow[cl];
- start_bh_atomic();
+ spin_lock_bh(&sch->dev->queue_lock);
a->fprev->fnext = a->fnext;
a->fnext->fprev = a->fprev;
a->sprev->snext = a->snext;
a->snext->sprev = a->sprev;
a->start = a->finish = 0;
kfree(xchg(&q->flow[cl].L_tab, NULL));
- end_bh_atomic();
+ spin_unlock_bh(&sch->dev->queue_lock);
return 0;
}
/* Main transmission queue. */
-struct Qdisc_head qdisc_head = { &qdisc_head };
+struct Qdisc_head qdisc_head = { &qdisc_head, &qdisc_head };
+spinlock_t qdisc_runqueue_lock = SPIN_LOCK_UNLOCKED;
+
+/* Main qdisc structure lock.
+
+ However, modifications
+ to data, participating in scheduling must be additionally
+ protected with dev->queue_lock spinlock.
+
+ The idea is the following:
+ - enqueue, dequeue are serialized via top level device
+ spinlock dev->queue_lock.
+ - tree walking is protected by read_lock(qdisc_tree_lock)
+ and this lock is used only in process context.
+ - updates to tree are made only under rtnl semaphore,
+ hence this lock may be made without local bh disabling.
+
+ qdisc_tree_lock must be grabbed BEFORE dev->queue_lock!
+ */
+rwlock_t qdisc_tree_lock = RW_LOCK_UNLOCKED;
+
+/* Anti deadlock rules:
+
+ qdisc_runqueue_lock protects main transmission list qdisc_head.
+ Run list is accessed only under this spinlock.
+
+ dev->queue_lock serializes queue accesses for this device
+ AND dev->qdisc pointer itself.
+
+ dev->xmit_lock serializes accesses to device driver.
+
+ dev->queue_lock and dev->xmit_lock are mutually exclusive,
+ if one is grabbed, another must be free.
+
+ qdisc_runqueue_lock may be requested under dev->queue_lock,
+ but neither dev->queue_lock nor dev->xmit_lock may be requested
+ under qdisc_runqueue_lock.
+ */
+
/* Kick device.
Note, that this procedure can be called by a watchdog timer, so that
>0 - queue is not empty, but throttled.
<0 - queue is not empty. Device is throttled, if dev->tbusy != 0.
- NOTE: Called only from NET BH
+ NOTE: Called under dev->queue_lock with locally disabled BH.
*/
int qdisc_restart(struct device *dev)
struct sk_buff *skb;
if ((skb = q->dequeue(q)) != NULL) {
+ /* Dequeue packet and release queue */
+ spin_unlock(&dev->queue_lock);
+
if (netdev_nit)
dev_queue_xmit_nit(skb, dev);
- if (dev->hard_start_xmit(skb, dev) == 0) {
- q->tx_last = jiffies;
- return -1;
+ if (spin_trylock(&dev->xmit_lock)) {
+ /* Remember that the driver is grabbed by us. */
+ dev->xmit_lock_owner = smp_processor_id();
+ if (dev->hard_start_xmit(skb, dev) == 0) {
+ dev->xmit_lock_owner = -1;
+ spin_unlock(&dev->xmit_lock);
+
+ spin_lock(&dev->queue_lock);
+ dev->qdisc->tx_last = jiffies;
+ return -1;
+ }
+ /* Release the driver */
+ dev->xmit_lock_owner = -1;
+ spin_unlock(&dev->xmit_lock);
+ } else {
+ /* So, someone grabbed the driver. */
+
+ /* It may be transient configuration error,
+ when hard_start_xmit() recurses. We detect
+ it by checking xmit owner and drop the
+ packet when deadloop is detected.
+ */
+ if (dev->xmit_lock_owner == smp_processor_id()) {
+ kfree_skb(skb);
+ if (net_ratelimit())
+ printk(KERN_DEBUG "Dead loop on virtual %s, fix it urgently!\n", dev->name);
+ spin_lock(&dev->queue_lock);
+ return -1;
+ }
+
+ /* Otherwise, packet is requeued
+ and will be sent by the next net_bh run.
+ */
+ mark_bh(NET_BH);
}
/* Device kicked us out :(
This is possible in three cases:
+ 0. driver is locked
1. fastroute is enabled
2. device cannot determine busy state
before start of transmission (f.e. dialout)
3. device is buggy (ppp)
*/
+ spin_lock(&dev->queue_lock);
+ q = dev->qdisc;
q->ops->requeue(skb, q);
return -1;
}
- return q->q.qlen;
+ return dev->qdisc->q.qlen;
+}
+
+static __inline__ void
+qdisc_stop_run(struct Qdisc *q)
+{
+ q->h.forw->back = q->h.back;
+ q->h.back->forw = q->h.forw;
+ q->h.forw = NULL;
+}
+
+extern __inline__ void
+qdisc_continue_run(struct Qdisc *q)
+{
+ if (!qdisc_on_runqueue(q) && q->dev) {
+ q->h.forw = &qdisc_head;
+ q->h.back = qdisc_head.back;
+ qdisc_head.back->forw = &q->h;
+ qdisc_head.back = &q->h;
+ }
+}
+
+static __inline__ int
+qdisc_init_run(struct Qdisc_head *lh)
+{
+ if (qdisc_head.forw != &qdisc_head) {
+ *lh = qdisc_head;
+ lh->forw->back = lh;
+ lh->back->forw = lh;
+ qdisc_head.forw = &qdisc_head;
+ qdisc_head.back = &qdisc_head;
+ return 1;
+ }
+ return 0;
}
/* Scan transmission queue and kick devices.
I have no idea how to solve it using only "anonymous" Linux mark_bh().
To change queue from device interrupt? Ough... only not this...
+
+ This function is called only from net_bh.
*/
void qdisc_run_queues(void)
{
- struct Qdisc_head **hp, *h;
+ struct Qdisc_head lh, *h;
- hp = &qdisc_head.forw;
- while ((h = *hp) != &qdisc_head) {
- int res = -1;
+ spin_lock(&qdisc_runqueue_lock);
+ if (!qdisc_init_run(&lh))
+ goto out;
+
+ while ((h = lh.forw) != &lh) {
+ int res;
+ struct device *dev;
struct Qdisc *q = (struct Qdisc*)h;
- struct device *dev = q->dev;
-
- spin_lock_bh(&dev->xmit_lock);
- while (!dev->tbusy && (res = qdisc_restart(dev)) < 0)
- /* NOTHING */;
- spin_unlock_bh(&dev->xmit_lock);
-
- /* An explanation is necessary here.
- qdisc_restart called dev->hard_start_xmit,
- if device is virtual, it could trigger one more
- dev_queue_xmit and a new device could appear
- in the active chain. In this case we cannot unlink
- the empty queue, because we lost the back pointer.
- No problem, we will unlink it during the next round.
- */
- if (res == 0 && *hp == h) {
- *hp = h->forw;
- h->forw = NULL;
- continue;
+ qdisc_stop_run(q);
+
+ dev = q->dev;
+ spin_unlock(&qdisc_runqueue_lock);
+
+ res = -1;
+ if (spin_trylock(&dev->queue_lock)) {
+ while (!dev->tbusy && (res = qdisc_restart(dev)) < 0)
+ /* NOTHING */;
+ spin_unlock(&dev->queue_lock);
}
- hp = &h->forw;
+
+ spin_lock(&qdisc_runqueue_lock);
+ /* If qdisc is not empty add it to the tail of list */
+ if (res)
+ qdisc_continue_run(q);
}
+out:
+ spin_unlock(&qdisc_runqueue_lock);
}
-/* Periodic watchdoc timer to recover from hard/soft device bugs. */
+/* Periodic watchdog timer to recover from hard/soft device bugs. */
static void dev_do_watchdog(unsigned long dummy);
static struct timer_list dev_watchdog =
{ NULL, NULL, 0L, 0L, &dev_do_watchdog };
+/* This function is called only from timer */
+
static void dev_do_watchdog(unsigned long dummy)
{
- struct Qdisc_head *h;
+ struct Qdisc_head lh, *h;
+
+ if (!spin_trylock(&qdisc_runqueue_lock)) {
+ /* No hurry with watchdog. */
+ mod_timer(&dev_watchdog, jiffies + HZ/10);
+ return;
+ }
+
+ if (!qdisc_init_run(&lh))
+ goto out;
- for (h = qdisc_head.forw; h != &qdisc_head; h = h->forw) {
+ while ((h = lh.forw) != &lh) {
+ struct device *dev;
struct Qdisc *q = (struct Qdisc*)h;
- struct device *dev = q->dev;
- spin_lock_bh(&dev->xmit_lock);
- if (dev->tbusy && jiffies - q->tx_last > q->tx_timeo)
- qdisc_restart(dev);
- spin_unlock_bh(&dev->xmit_lock);
+ qdisc_stop_run(q);
+
+ dev = q->dev;
+ spin_unlock(&qdisc_runqueue_lock);
+
+ if (spin_trylock(&dev->queue_lock)) {
+ q = dev->qdisc;
+ if (dev->tbusy && jiffies - q->tx_last > q->tx_timeo)
+ qdisc_restart(dev);
+ spin_unlock(&dev->queue_lock);
+ }
+
+ spin_lock(&qdisc_runqueue_lock);
+
+ qdisc_continue_run(dev->qdisc);
}
- dev_watchdog.expires = jiffies + 5*HZ;
- add_timer(&dev_watchdog);
+
+out:
+ mod_timer(&dev_watchdog, jiffies + 5*HZ);
+ spin_unlock(&qdisc_runqueue_lock);
}
{
{ NULL },
NULL,
- NULL,
+ noop_dequeue,
TCQ_F_BUILTIN,
&noqueue_qdisc_ops,
};
sch->enqueue = ops->enqueue;
sch->dequeue = ops->dequeue;
sch->dev = dev;
+ sch->stats.lock = &dev->queue_lock;
atomic_set(&sch->refcnt, 1);
if (!ops->init || ops->init(sch, NULL) == 0)
return sch;
return NULL;
}
+/* Under dev->queue_lock and BH! */
+
void qdisc_reset(struct Qdisc *qdisc)
{
struct Qdisc_ops *ops = qdisc->ops;
- start_bh_atomic();
if (ops->reset)
ops->reset(qdisc);
- end_bh_atomic();
}
+/* Under dev->queue_lock and BH! */
+
void qdisc_destroy(struct Qdisc *qdisc)
{
struct Qdisc_ops *ops = qdisc->ops;
+ struct device *dev;
if (!atomic_dec_and_test(&qdisc->refcnt))
return;
+ dev = qdisc->dev;
+
#ifdef CONFIG_NET_SCHED
- if (qdisc->dev) {
+ if (dev) {
struct Qdisc *q, **qp;
- for (qp = &qdisc->dev->qdisc_list; (q=*qp) != NULL; qp = &q->next)
+ for (qp = &qdisc->dev->qdisc_list; (q=*qp) != NULL; qp = &q->next) {
if (q == qdisc) {
*qp = q->next;
- q->next = NULL;
break;
}
+ }
}
#ifdef CONFIG_NET_ESTIMATOR
qdisc_kill_estimator(&qdisc->stats);
#endif
#endif
- start_bh_atomic();
if (ops->reset)
ops->reset(qdisc);
if (ops->destroy)
ops->destroy(qdisc);
- end_bh_atomic();
if (!(qdisc->flags&TCQ_F_BUILTIN))
kfree(qdisc);
}
*/
if (dev->qdisc_sleeping == &noop_qdisc) {
+ struct Qdisc *qdisc;
if (dev->tx_queue_len) {
- struct Qdisc *qdisc;
qdisc = qdisc_create_dflt(dev, &pfifo_fast_ops);
if (qdisc == NULL) {
printk(KERN_INFO "%s: activation failed\n", dev->name);
return;
}
- dev->qdisc_sleeping = qdisc;
- } else
- dev->qdisc_sleeping = &noqueue_qdisc;
+ } else {
+ qdisc = &noqueue_qdisc;
+ }
+ write_lock(&qdisc_tree_lock);
+ dev->qdisc_sleeping = qdisc;
+ write_unlock(&qdisc_tree_lock);
}
- start_bh_atomic();
+ spin_lock_bh(&dev->queue_lock);
+ spin_lock(&qdisc_runqueue_lock);
if ((dev->qdisc = dev->qdisc_sleeping) != &noqueue_qdisc) {
dev->qdisc->tx_timeo = 5*HZ;
dev->qdisc->tx_last = jiffies - dev->qdisc->tx_timeo;
dev_watchdog.expires = jiffies + 5*HZ;
add_timer(&dev_watchdog);
}
- end_bh_atomic();
+ spin_unlock(&qdisc_runqueue_lock);
+ spin_unlock_bh(&dev->queue_lock);
}
void dev_deactivate(struct device *dev)
{
struct Qdisc *qdisc;
- start_bh_atomic();
-
- qdisc = xchg(&dev->qdisc, &noop_qdisc);
+ spin_lock_bh(&dev->queue_lock);
+ qdisc = dev->qdisc;
+ dev->qdisc = &noop_qdisc;
qdisc_reset(qdisc);
- if (qdisc->h.forw) {
- struct Qdisc_head **hp, *h;
-
- for (hp = &qdisc_head.forw; (h = *hp) != &qdisc_head; hp = &h->forw) {
- if (h == &qdisc->h) {
- *hp = h->forw;
- break;
- }
- }
- }
-
- end_bh_atomic();
+ spin_lock(&qdisc_runqueue_lock);
+ if (qdisc_on_runqueue(qdisc))
+ qdisc_stop_run(qdisc);
+ spin_unlock(&qdisc_runqueue_lock);
+ spin_unlock_bh(&dev->queue_lock);
}
void dev_init_scheduler(struct device *dev)
{
+ write_lock(&qdisc_tree_lock);
+ spin_lock_bh(&dev->queue_lock);
dev->qdisc = &noop_qdisc;
+ spin_unlock_bh(&dev->queue_lock);
dev->qdisc_sleeping = &noop_qdisc;
dev->qdisc_list = NULL;
+ write_unlock(&qdisc_tree_lock);
}
void dev_shutdown(struct device *dev)
{
struct Qdisc *qdisc;
- start_bh_atomic();
+ write_lock(&qdisc_tree_lock);
+ spin_lock_bh(&dev->queue_lock);
qdisc = dev->qdisc_sleeping;
dev->qdisc = &noop_qdisc;
dev->qdisc_sleeping = &noop_qdisc;
qdisc_destroy(qdisc);
BUG_TRAP(dev->qdisc_list == NULL);
dev->qdisc_list = NULL;
- end_bh_atomic();
+ spin_unlock_bh(&dev->queue_lock);
+ write_unlock(&qdisc_tree_lock);
}
-
return -EINVAL;
}
- start_bh_atomic();
+ sch_tree_lock(sch);
q->bands = qopt->bands;
memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
if (child != &noop_qdisc)
qdisc_destroy(child);
}
- end_bh_atomic();
+ sch_tree_unlock(sch);
for (i=0; i<=TC_PRIO_MAX; i++) {
int band = q->prio2band[i];
struct Qdisc *child;
child = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
if (child) {
+ sch_tree_lock(sch);
child = xchg(&q->queues[band], child);
- synchronize_bh();
if (child != &noop_qdisc)
qdisc_destroy(child);
+ sch_tree_unlock(sch);
}
}
}
if (new == NULL)
new = &noop_qdisc;
- *old = xchg(&q->queues[band], new);
+ sch_tree_lock(sch);
+ *old = q->queues[band];
+ q->queues[band] = new;
+ qdisc_reset(*old);
+ sch_tree_unlock(sch);
return 0;
}
if (opt->rta_len < RTA_LENGTH(sizeof(*ctl)))
return -EINVAL;
- start_bh_atomic();
+ sch_tree_lock(sch);
q->quantum = ctl->quantum ? : psched_mtu(sch->dev);
q->perturb_period = ctl->perturb_period*HZ;
q->perturb_timer.expires = jiffies + q->perturb_period;
add_timer(&q->perturb_timer);
}
- end_bh_atomic();
+ sch_tree_unlock(sch);
return 0;
}
if (rtab->data[max_size>>qopt->rate.cell_log] > qopt->buffer)
goto done;
- start_bh_atomic();
+ sch_tree_lock(sch);
q->limit = qopt->limit;
q->mtu = qopt->mtu;
q->max_size = max_size;
q->ptokens = q->mtu;
rtab = xchg(&q->R_tab, rtab);
ptab = xchg(&q->P_tab, ptab);
- end_bh_atomic();
+ sch_tree_unlock(sch);
err = 0;
done:
if (rtab)
if (skb == NULL) {
struct device *m = dat->m->dev.qdisc->dev;
if (m) {
- m->tbusy = 0;
dat->m->slaves = sch;
+ spin_lock(&m->queue_lock);
+ m->tbusy = 0;
qdisc_restart(m);
+ spin_unlock(&m->queue_lock);
}
}
sch->q.qlen = dat->q.qlen + dat->m->dev.qdisc->q.qlen;
master->slaves = NEXT_SLAVE(q);
if (q == master->slaves) {
master->slaves = NULL;
+ spin_lock_bh(&master->dev.queue_lock);
qdisc_reset(master->dev.qdisc);
+ spin_unlock_bh(&master->dev.queue_lock);
}
}
skb_queue_purge(&dat->q);
if (dev->hard_header_len > m->dev.hard_header_len)
return -EINVAL;
+ if (&m->dev == dev)
+ return -ELOOP;
+
q->m = m;
skb_queue_head_init(&q->q);
return -ENOBUFS;
}
if (neigh_event_send(n, skb_res) == 0) {
- if (dev->hard_header(skb, dev, ntohs(skb->protocol), n->ha, NULL, skb->len) < 0) {
+ int err;
+ read_lock(&n->lock);
+ err = dev->hard_header(skb, dev, ntohs(skb->protocol), n->ha, NULL, skb->len);
+ read_unlock(&n->lock);
+ if (err < 0) {
neigh_release(n);
return -EINVAL;
}
continue;
}
- if (q->h.forw == NULL) {
- q->h.forw = qdisc_head.forw;
- qdisc_head.forw = &q->h;
- }
+ if (!qdisc_on_runqueue(q))
+ qdisc_run(q);
switch (teql_resolve(skb, skb_res, slave)) {
case 0:
- if (slave->hard_start_xmit(skb, slave) == 0) {
- master->slaves = NEXT_SLAVE(q);
- dev->tbusy = 0;
- master->stats.tx_packets++;
- master->stats.tx_bytes += len;
+ if (spin_trylock(&slave->xmit_lock)) {
+ slave->xmit_lock_owner = smp_processor_id();
+ if (slave->hard_start_xmit(skb, slave) == 0) {
+ slave->xmit_lock_owner = -1;
+ spin_unlock(&slave->xmit_lock);
+ master->slaves = NEXT_SLAVE(q);
+ dev->tbusy = 0;
+ master->stats.tx_packets++;
+ master->stats.tx_bytes += len;
return 0;
+ }
+ slave->xmit_lock_owner = -1;
+ spin_unlock(&slave->xmit_lock);
}
if (dev->tbusy)
busy = 1;
/*
* Register any pre existing devices.
*/
- read_lock_bh(&dev_base_lock);
+ read_lock(&dev_base_lock);
for (dev = dev_base; dev != NULL; dev = dev->next) {
if ((dev->flags & IFF_UP) && (dev->type == ARPHRD_X25
#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
))
x25_link_device_up(dev);
}
- read_unlock_bh(&dev_base_lock);
+ read_unlock(&dev_base_lock);
return 0;
}