CONFIG_IP_NF_MATCH_LIMIT
limit matching allows you to control the rate at which a rule can be
matched: mainly useful in combination with the LOG target ("LOG
- target support", below).
+ target support", below) and to avoid some Denial of Service attacks.
If you want to compile it as a module, say M here and read
Documentation/modules.txt. If unsure, say `N'.
Change History
--------------
+Version 0.9.4.1 - April 27, 2000 - third public beta release
+
+* Replace several "magic numbers" with symbolic constants
+* Differentiate between board-specific info and chip-specific info
+ (allows for easier support of specific boards or chips)
+* Move some of the transmit side outside of the spinlock
+ by using atomic variables. Use spin_lock_irq instead of
+ spin_lock_irq{save,restore} in select places, for better performance.
+* New module option "media" for forcing media selection. Functions the
+ same as "options" in other drivers, and will soon be renamed
+ 'options' to be homogeneous.
+* New power management wake-up code
+* Slightly more verbose chip id messages in kernel log
+* Add/correct chip register constant list
+* New chipset wake up (open) logic
+* No longer locks CONFIGx updates
+* Do not set Interframe Gap (IFG) bits in TxConfig
+* Better Rx reset logic in case of Rx FIFO Overflow
+* For chips which support it, enable bit to automatically clear Rx
+ FIFO overflow
+* No longer enable and disable interrupts in interrupt handler
+ (technique borrowed from BSD driver, appears to have problems
+ with some chips)
+* H/W spinlock now protects ioctl
+* Chipset-dependent RxConfig settings
+
+
Version 0.9.3.3.2 - Feb 22, 2000 - second public beta release
* Begin integration of Daniel Kobras' MMIO flush patch (disabled for now)
* Reset NWay registers to sane defaults on rtl8139_open/hw_start
* Miscellaneous code cleanup
-Version 0.7.0 - Feb 7, 2000 - first public beta release
+Version 0.7.0 - Feb 7, 2000 - first public beta release
+* Initial public version, derived from Donald Becker's rtl8139.c v1.08r
[EOF]
VERSION = 2
PATCHLEVEL = 3
SUBLEVEL = 99
-EXTRAVERSION = -pre6
+EXTRAVERSION = -pre7
KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
extql $3,$7,$3
extqh $2,$7,$1
bis $3,$1,$1
- stq $1,0($6)
+ EXO( stq $1,0($6) )
addq $7,8,$7
subq $0,8,$0
addq $6,8,$6
486/Cx486 CONFIG_M486 \
586/K5/5x86/6x86/6x86MX CONFIG_M586 \
Pentium/TSC CONFIG_M586TSC \
- PPro CONFIG_M686 \
+ PPro/P-II/P-III CONFIG_M686 \
K6/II/III CONFIG_MK6 \
Athlon CONFIG_MK7" PPro
#
struct acpi_errata_info
{
- const char *oem;
- const char *oem_table;
- u32 oem_rev;
- unsigned long options;
+ const char *signature; // table signature (eg. "RSDT")
+ const char *oem; // OEM name
+ const char *oem_table; // OEM table identifier (optional)
+ u32 oem_rev; // OEM table revision (optional)
+ unsigned long options; // errata options
};
+/*
+ * We must identify systems that need ACPI_TRUST_TABLES solely from the
+ * RSDP ("RSD PTR "). All other options should be flagged from the
+ * RSDT ("RSDT") which can be better identified.
+ */
struct acpi_errata_info acpi_errata[] =
{
+ {"RSD PTR ", "AMI ", NULL, 0, ACPI_TRUST_TABLES | ACPI_COPY_TABLES},
{NULL, NULL, 0, 0},
};
}
}
+/*
+ * Match ACPI table and set options based on platform errata, if any
+ */
+static int __init acpi_find_errata(struct acpi_table *table)
+{
+ struct acpi_errata_info *info;
+ int size;
+
+ for (info = acpi_errata; info->signature && info->oem; info++) {
+ size = strlen(info->signature);
+ if (memcmp(&table->signature, info->signature, size))
+ continue;
+ if (strcmp(info->signature, "RSD PTR ")) {
+ // ordinary ACPI table
+ size = strlen(info->oem);
+ if (memcmp(table->oem, info->oem, size))
+ continue;
+ if (info->oem_table) {
+ size = strlen(info->oem_table);
+ if (memcmp(table->oem_table,
+ info->oem_table,
+ size))
+ continue;
+ }
+ if (info->oem_rev && table->oem_rev != info->oem_rev)
+ continue;
+ }
+ else {
+ // special handling for RSDP
+ size = strlen(info->oem);
+ if (memcmp(((struct acpi_rsdp*) table)->oem,
+ info->oem,
+ size))
+ continue;
+ }
+
+ printk(KERN_INFO
+ "ACPI: found platform errata 0x%08lx\n",
+ info->options);
+ acpi_opts |= info->options;
+ return 0;
+ }
+ return -1;
+}
+
/*
* Locate and map ACPI tables
*/
if (i >= ACPI_BIOS_ROM_END)
return -ENODEV;
+ // find any errata based on the RSDP
+ if (!acpi_find_errata((struct acpi_table*) rsdp)) {
+ if (acpi_opts & ACPI_DISABLED)
+ return -EINVAL;
+ else if (acpi_opts & ACPI_CHIPSET_ONLY)
+ return -ENODEV;
+ }
+
// fetch RSDT from RSDP
rsdt = acpi_map_table(rsdp->rsdt);
if (!rsdt) {
acpi_unmap_table(rsdt);
return -EINVAL;
}
+
+ // find any errata based on the RSDT
+ if (!acpi_find_errata(rsdt)) {
+ if (acpi_opts & ACPI_DISABLED)
+ return -EINVAL;
+ else if (acpi_opts & ACPI_CHIPSET_ONLY)
+ return -ENODEV;
+ }
+
// search RSDT for FACP
acpi_facp.table = NULL;
rsdt_entry = (u32 *) (rsdt + 1);
sgi_hpc_write1 ^= (HPC3_WRITE1_LC0OFF|HPC3_WRITE1_LC1OFF);
hpc3mregs->write1 = sgi_hpc_write1;
- del_timer(&blink_timer);
- blink_timer.expires = jiffies + data;
- add_timer(&blink_timer);
+ mod_timer(&blink_timer, jiffies+data);
}
static void debounce(unsigned long data)
sgi_hpc_write1 ^= (HPC3_WRITE1_LC0OFF|HPC3_WRITE1_LC1OFF);
hpc3mregs->write1 = sgi_hpc_write1;
- del_timer(&blink_timer);
- blink_timer.expires = jiffies + data;
- add_timer(&blink_timer);
+ mod_timer(&blink_timer, jiffies+data);
}
static void debounce(unsigned long data)
timer_active |= (1 << FLOPPY_TIMER); \
} while(0)
-#define START_TIMEOUT() \
- do { \
- del_timer( &timeout_timer ); \
- timeout_timer.expires = jiffies + FLOPPY_TIMEOUT; \
- add_timer( &timeout_timer ); \
+#define START_TIMEOUT() \
+ do { \
+ mod_timer(&timeout_timer, jiffies+FLOPPY_TIMEOUT); \
} while(0)
#define STOP_TIMEOUT() \
// read resources from PCI configuration space
u8 irq = pci_dev->irq;
- u32 * membase = bus_to_virt (pci_dev->resource[0].start);
- u32 iobase = pci_dev->resource[1].start;
+ u32 * membase = bus_to_virt (pci_resource_start (pci_dev, 0));
+ u32 iobase = pci_resource_start (pci_dev, 1);
void setup_dev (void) {
unsigned char pool;
void setup_pci_dev (void) {
unsigned char lat;
+ /* XXX check return value */
+ pci_enable_device (pci_dev);
+
// enable bus master accesses
pci_set_master (pci_dev);
if (pci_dev == NULL)
return NULL;
} while (count--);
+
+ if (pci_enable_device(pci_dev))
+ return NULL;
fore200e = fore200e_kmalloc(sizeof(struct fore200e), GFP_KERNEL);
if (fore200e == NULL)
fore200e->bus = bus;
fore200e->bus_dev = pci_dev;
fore200e->irq = pci_dev->irq;
- fore200e->phys_base = (pci_dev->resource[0].start & PCI_BASE_ADDRESS_MEM_MASK);
+ fore200e->phys_base = pci_resource_start (pci_dev, 0);
#if defined(__powerpc__)
fore200e->phys_base += KERNELBASE;
sprintf(fore200e->name, "%s-%d", bus->model_name, index - 1);
- pci_enable_device(pci_dev);
pci_set_master(pci_dev);
return fore200e;
PRINTD (DBG_FLOW, "hrz_probe");
- if (!pci_present())
- return 0;
-
devs = 0;
pci_dev = NULL;
while ((pci_dev = pci_find_device
hrz_dev * dev;
// adapter slot free, read resources from PCI configuration space
- u32 iobase = pci_dev->resource[0].start;
- u32 * membase = bus_to_virt (pci_dev->resource[1].start);
+ u32 iobase = pci_resource_start (pci_dev, 0);
+ u32 * membase = bus_to_virt (pci_resource_start (pci_dev, 1));
u8 irq = pci_dev->irq;
// check IO region
PRINTD (DBG_WARN, "IO range already in use");
continue;
}
-
+
+ if (pci_enable_device (pci_dev))
+ continue;
+
dev = kmalloc (sizeof(hrz_dev), GFP_KERNEL);
if (!dev) {
// perhaps we should be nice: deregister all adapters and abort?
IF_INIT(printk("ia detected at bus:%d dev: %d function:%d\n",
iadev->pci->bus->number, PCI_SLOT(iadev->pci->devfn),
PCI_FUNC(iadev->pci->devfn));)
+ if (pci_enable_device(iadev->pci)) break;
dev = atm_dev_register(DEV_LABEL, &ops, -1, NULL);
if (!dev) break;
IF_INIT(printk(DEV_LABEL "registered at (itf :%d)\n",
error = 0;
+ if (pci_enable_device(pcidev))
+ {
+ printk("nicstar%d: can't enable PCI device\n", i);
+ error = 2;
+ ns_init_card_error(card, error);
+ return error;
+ }
+
if ((card = kmalloc(sizeof(ns_dev), GFP_KERNEL)) == NULL)
{
printk("nicstar%d: can't allocate memory for device structure.\n", i);
while ((pci_dev = pci_find_device(PCI_VENDOR_ID_ZEITNET,type ?
PCI_DEVICE_ID_ZEITNET_1225 : PCI_DEVICE_ID_ZEITNET_1221,
pci_dev))) {
+ if (pci_enable_device(pci_dev)) break;
dev = atm_dev_register(DEV_LABEL,&ops,-1,NULL);
if (!dev) break;
zatm_dev->pci_dev = pci_dev;
if (dev->device > MAX_PCI_DEVICE_NUM || dev->device == 0)
continue;
+ if (pci_enable_device(dev))
+ return -EIO;
+
RamIO = ioremap(PCI_BASE_ADDRESS(dev), LEN_RAM_IO);
if (!RamIO) {
btv->id=dev->device;
btv->irq=dev->irq;
- btv->bt848_adr=dev->resource[0].start;
+ btv->bt848_adr=pci_resource_start(dev, 0);
+
+ if (pci_enable_device(dev))
+ return -EIO;
if (!request_mem_region(pci_resource_start(dev,0),
pci_resource_len(dev,0),
"bttv")) {
else
btv->i2c_command=(I2C_TIMING | BT848_I2C_SCL | BT848_I2C_SDA);
- btv->bt848_adr&=PCI_BASE_ADDRESS_MEM_MASK;
pci_read_config_byte(dev, PCI_CLASS_REVISION, &btv->revision);
printk(KERN_INFO "bttv%d: Brooktree Bt%d (rev %d) ",
bttv_num,btv->id, btv->revision);
spin_lock_init(&zr->lock);
- zr->zr36057_adr = zr->pci_dev->resource[0].start;
+ if (pci_enable_device(dev))
+ continue;
+
+ zr->zr36057_adr = pci_resource_start(zr->pci_dev, 0);
pci_read_config_byte(zr->pci_dev, PCI_CLASS_REVISION, &zr->revision);
if (zr->revision < 2) {
printk(KERN_INFO "%s: Zoran ZR36057 (rev %d) irq: %d, memory: 0x%08x.\n",
} else {
unsigned short ss_vendor_id, ss_id;
- pci_read_config_word(zr->pci_dev, PCI_SUBSYSTEM_VENDOR_ID, &ss_vendor_id);
- pci_read_config_word(zr->pci_dev, PCI_SUBSYSTEM_ID, &ss_id);
+ ss_vendor_id = zr->pci_dev->subsystem_vendor;
+ ss_id = zr->pci_dev->subsystem_device;
printk(KERN_INFO "%s: Zoran ZR36067 (rev %d) irq: %d, memory: 0x%08x\n",
zr->name, zr->revision, zr->pci_dev->irq, zr->zr36057_adr);
printk(KERN_INFO "%s: subsystem vendor=0x%04x id=0x%04x\n",
}
zr->zr36057_mem = ioremap(zr->zr36057_adr, 0x1000);
+ if (!zr->zr36057_mem) {
+ printk(KERN_ERR "%s: ioremap failed\n", zr->name);
+ /* XXX handle error */
+ }
/* set PCI latency timer */
pci_read_config_byte(zr->pci_dev, PCI_LATENCY_TIMER, &latency);
uclong Ze_addr0[NR_CARDS], Ze_addr2[NR_CARDS], ZeIndex = 0;
unsigned char Ze_irq[NR_CARDS];
- if(pci_present() == 0) { /* PCI bus not present */
- return(0);
- }
for (i = 0; i < NR_CARDS; i++) {
/* look for a Cyclades card by vendor and device id */
while((device_id = cy_pci_dev_id[dev_index]) != 0) {
if (device_id == 0)
break;
+ if (pci_enable_device(pdev))
+ continue;
+
/* read PCI configuration area */
cy_pci_irq = pdev->irq;
- cy_pci_addr0 = pdev->resource[0].start;
- cy_pci_addr1 = pdev->resource[1].start;
- cy_pci_addr2 = pdev->resource[2].start;
+ cy_pci_addr0 = pci_resource_start(pdev, 0);
+ cy_pci_addr1 = pci_resource_start(pdev, 1);
+ cy_pci_addr2 = pci_resource_start(pdev, 2);
pci_read_config_byte(pdev, PCI_REVISION_ID, &cyy_rev_id);
device_id &= ~PCI_DEVICE_ID_MASK;
static unsigned int dtlk_poll(struct file *file, poll_table * wait)
{
int mask = 0;
+ unsigned long expires;
+
TRACE_TEXT(" dtlk_poll");
/*
static long int j;
/* there are no exception conditions */
/* There won't be any interrupts, so we set a timer instead. */
- del_timer(&dtlk_timer);
- dtlk_timer.expires = jiffies + 3*HZ / 100;
- add_timer(&dtlk_timer);
+ expires = jiffies + 3*HZ / 100;
+ mod_timer(&dtlk_timer, expires);
return mask;
}
if (card >= BOARD_COUNT)
break;
+ if (pci_enable_device(dev))
+ break;
+
/* found a PCI ISI card! */
- ioaddr = dev->resource[3].start; /* i.e at offset 0x1c in the
- * PCI configuration register
- * space.
- */
- ioaddr &= PCI_BASE_ADDRESS_IO_MASK;
+ ioaddr = pci_resource_start (dev, 3); /* i.e at offset 0x1c in the
+ * PCI configuration register
+ * space.
+ */
pciirq = dev->irq;
printk(KERN_INFO "ISI PCI Card(Device ID 0x%x)\n", device_id[idx]);
/*
dev->bus->number, dev->devfn);
#endif
+ if (pci_enable_device(devp))
+ return(-EIO);
if ((brdp = stli_allocbrd()) == (stlibrd_t *) NULL)
return(-ENOMEM);
if ((brdp->brdnr = stli_getbrdnr()) < 0) {
brdp->brdtype = brdtype;
#if DEBUG
- printk("%s(%d): BAR[]=%x,%x,%x,%x\n", __FILE__, __LINE__,
- devp->resource[0].start, devp->resource[1].start,
- devp->resource[2].start, devp->resource[3].start);
+ printk("%s(%d): BAR[]=%lx,%lx,%lx,%lx\n", __FILE__, __LINE__,
+ pci_resource_start(devp, 0),
+ pci_resource_start(devp, 1),
+ pci_resource_start(devp, 2),
+ pci_resource_start(devp, 3));
#endif
/*
* We have all resources from the board, so lets setup the actual
* board structure now.
*/
- brdp->iobase = (devp->resource[3].start & PCI_BASE_ADDRESS_IO_MASK);
- brdp->memaddr = (devp->resource[2].start & PCI_BASE_ADDRESS_MEM_MASK);
+ brdp->iobase = pci_resource_start(devp, 3);
+ brdp->memaddr = pci_resource_start(devp, 2);
stli_brdinit(brdp);
return(0);
#endif
/* Find PCI boards here */
#ifdef CONFIG_PCI
- if (pci_present()) {
+ {
struct pci_dev *p = NULL;
n = sizeof(moxa_pcibrds) / sizeof(moxa_pciinfo);
i = 0;
while (i < n) {
while((p = pci_find_device(moxa_pcibrds[i].vendor_id, moxa_pcibrds[i].device_id, p))!=NULL)
{
+ if (pci_enable_device(p))
+ continue;
if (numBoards >= MAX_BOARDS) {
if (verbose)
printk("More than %d MOXA Intellio family boards found. Board is ignored.", MAX_BOARDS);
{
unsigned int val;
- board->baseAddr = p->resource[2].start;
+ board->baseAddr = pci_resource_start (p, 2);
board->boardType = board_type;
switch (board_type) {
case MOXA_BOARD_C218_ISA:
}
if (once)
msp->watch_stereo = 0;
- if (msp->watch_stereo) {
- del_timer(&msp->wake_stereo);
- msp->wake_stereo.expires = jiffies + 5*HZ;
- add_timer(&msp->wake_stereo);
- }
+ if (msp->watch_stereo)
+ mod_timer(&msp->wake_stereo, jiffies+5*HZ);
}
static int msp3400c_thread(void *data)
/* unmute */
msp3400c_setvolume(client, msp->left, msp->right);
- if (msp->watch_stereo) {
- del_timer(&msp->wake_stereo);
- msp->wake_stereo.expires = jiffies + 5*HZ;
- add_timer(&msp->wake_stereo);
- }
+ if (msp->watch_stereo)
+ mod_timer(&msp->wake_stereo, jiffies+5*HZ);
if (debug)
msp3400c_print_mode(msp);
msp3400c_settreble(client, msp->treble);
msp3400c_setvolume(client, msp->left, msp->right);
- if (msp->watch_stereo) {
- del_timer(&msp->wake_stereo);
- msp->wake_stereo.expires = jiffies + HZ;
- add_timer(&msp->wake_stereo);
- }
+ if (msp->watch_stereo)
+ mod_timer(&msp->wake_stereo, jiffies+HZ);
msp->active = 0;
}
static int active=0; /* number of concurrent open()s */
static struct semaphore reader_lock;
-/*
- * set_timer_callback:
- *
- * Utility to reset a timer to go off some time in the future.
- */
-
-static void set_timer_callback(struct timer_list *timer, int ticks)
-{
- del_timer(timer);
- timer->expires = jiffies+ticks;
- add_timer(timer);
-}
-
-
/**
* wake_readers:
*
transition_count=1;
recent_transition=1;
}
- set_timer_callback(&tap_timer, current_params.tap_interval);
+ mod_timer(&tap_timer, jiffies + current_params.tap_interval);
/* changes to transition_count can cause reported button to change */
button_pending = 1;
else
{
bounce=JUST_GONE_DOWN;
- set_timer_callback(&bounce_timer,
- current_params.bounce_interval);
+ mod_timer(&bounce_timer,
+ jiffies+current_params.bounce_interval);
/* start new stroke/tap */
debounced_down=new_down;
notify_pad_up_down();
{
/* don't trust it yet */
bounce=JUST_GONE_UP;
- set_timer_callback(&bounce_timer,
- current_params.bounce_interval);
+ mod_timer(&bounce_timer,
+ jiffies+current_params.bounce_interval);
}
}
}
#ifdef MODULE
printk(KERN_ERR "You must set an I/O address with io=0x???\n");
#endif
- return EINVAL;
+ return -EINVAL;
}
- if(video_register_device(&cadet_radio,VFL_TYPE_RADIO)==-1)
+ if (!request_region(io,2,"cadet"))
+ return -EBUSY;
+ if(video_register_device(&cadet_radio,VFL_TYPE_RADIO)==-1) {
+ release_region(io,2);
return -EINVAL;
-
- request_region(io,2,"cadet");
+ }
printk(KERN_INFO "ADS Cadet Radio Card at 0x%x\n",io);
return 0;
}
/*
* Activate timer
*/
- del_timer(&watchdog_ticktock);
- watchdog_ticktock.expires=jiffies + (soft_margin * HZ);
- add_timer(&watchdog_ticktock);
+ mod_timer(&watchdog_ticktock, jiffies+(soft_margin*HZ));
timer_alive=1;
return 0;
}
return 0;
}
-static void softdog_ping(void)
-{
- /*
- * Refresh the timer.
- */
-
- mod_timer(&watchdog_ticktock, jiffies + (soft_margin * HZ));
-}
-
static ssize_t softdog_write(struct file *file, const char *data, size_t len, loff_t *ppos)
{
/* Can't seek (pwrite) on this device */
/*
* Refresh the timer.
*/
- if(len)
- {
- softdog_ping();
+ if(len) {
+ mod_timer(&watchdog_ticktock, jiffies+(soft_margin*HZ));
return 1;
}
return 0;
case WDIOC_GETBOOTSTATUS:
return put_user(0,(int *)arg);
case WDIOC_KEEPALIVE:
- softdog_ping();
+ mod_timer(&watchdog_ticktock, jiffies+(soft_margin*HZ));
return 0;
}
}
pdev);
if (!pdev) break;
+ if (pci_enable_device(pdev)) {
+ i++;
+ continue;
+ }
+
sx_board[i].irq = pdev->irq;
- pci_read_config_dword(pdev, PCI_BASE_ADDRESS_2, &tint);
- /* Mask out the fact that it's IO-space */
- sx_board[i].base = tint & PCI_BASE_ADDRESS_IO_MASK;
+ sx_board[i].base = pci_resource_start (pdev, 2);
sx_board[i].flags |= SX_BOARD_IS_PCI;
if (!sx_probe(&sx_board[i]))
devp->bus->number, devp->devfn);
#endif
+ if (pci_enable_device(devp))
+ return(-EIO);
if ((brdp = stl_allocbrd()) == (stlbrd_t *) NULL)
return(-ENOMEM);
if ((brdp->brdnr = stl_getbrdnr()) < 0) {
init_waitqueue_head(&saa->debiq);
init_waitqueue_head(&saa->vidq);
spin_lock_init(&saa->lock);
+
+ if (pci_enable_device(dev))
+ return -EIO;
saa->id = dev->device;
saa->irq = dev->irq;
while ((pdev = pci_find_device (PCI_VENDOR_ID_SPECIALIX,
PCI_DEVICE_ID_SPECIALIX_SX_XIO_IO8,
pdev))) {
+ if (pci_enable_device(pdev))
+ continue;
#else
for (i=0;i< SX_NBOARDS;i++) {
if (pcibios_find_device (PCI_VENDOR_ID_SPECIALIX,
/* CF boards use base address 3.... */
if (IS_CF_BOARD (board))
- pci_read_config_dword(pdev, PCI_BASE_ADDRESS_3,
- &tint);
+ board->hw_base = pci_resource_start (pdev, 3);
else
- pci_read_config_dword(pdev, PCI_BASE_ADDRESS_2,
- &tint);
- board->hw_base = tint & PCI_BASE_ADDRESS_MEM_MASK;
+ board->hw_base = pci_resource_start (pdev, 2);
board->base2 =
board->base = (ulong) ioremap(board->hw_base, WINDOW_LEN (board));
+ if (!board->base) {
+ printk(KERN_ERR "ioremap failed\n");
+ /* XXX handle error */
+ }
+
/* Most of the stuff on the CF board is offset by
0x18000 .... */
if (IS_CF_BOARD (board)) board->base += 0x18000;
unsigned char revision;
int zoran_num=0;
- if (!pcibios_present())
- {
- printk(KERN_DEBUG "zoran: PCI-BIOS not present or not accessible!\n");
- return 0;
- }
-
while ((dev = pci_find_device(PCI_VENDOR_ID_ZORAN,PCI_DEVICE_ID_ZORAN_36120, dev)))
{
/* Ok, a ZR36120/ZR36125 found! */
ztv = &zorans[zoran_num];
ztv->dev = dev;
+ if (pci_enable_device(dev))
+ return -EIO;
+
pci_read_config_byte(dev, PCI_CLASS_REVISION, &revision);
printk(KERN_INFO "zoran: Zoran %x (rev %d) ",
dev->device, revision);
act2000_receive(card);
save_flags(flags);
cli();
- del_timer(&card->ptimer);
- card->ptimer.expires = jiffies + 3;
- add_timer(&card->ptimer);
+ mod_timer(&card->ptimer, jiffies+3);
restore_flags(flags);
}
while ((dev = pci_find_device(PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_T1, dev))) {
struct capicardparams param;
- param.port = dev->resource[ 1].start & PCI_BASE_ADDRESS_IO_MASK;
+ param.port = pci_resource_start (dev, 1);
param.irq = dev->irq;
- param.membase = dev->resource[ 0].start & PCI_BASE_ADDRESS_MEM_MASK;
+ param.membase = pci_resource_start (dev, 0);
+
+ pci_enable_device (dev); /* XXX check return */
printk(KERN_INFO
"%s: PCI BIOS reports AVM-T1-PCI at i/o %#x, irq %d, mem %#x\n",
goto Start_ISAC;
}
/* Watchdog */
- if (cs->hw.saphir.timer.function) {
- del_timer(&cs->hw.saphir.timer);
- cs->hw.saphir.timer.expires = jiffies + 1*HZ;
- add_timer(&cs->hw.saphir.timer);
- } else
+ if (cs->hw.saphir.timer.function)
+ mod_timer(&cs->hw.saphir.timer, jiffies+1*HZ);
+ else
printk(KERN_WARNING "saphir: Spurious timer!\n");
writereg(cs->hw.saphir.ale, cs->hw.saphir.hscx, HSCX_MASK, 0xFF);
writereg(cs->hw.saphir.ale, cs->hw.saphir.hscx, HSCX_MASK + 0x40, 0xFF);
{
/* 5 sec WatchDog, so read at least every 4 sec */
cs->readisac(cs, ISAC_RBCH);
- del_timer(&cs->hw.saphir.timer);
- cs->hw.saphir.timer.expires = jiffies + 1*HZ;
- add_timer(&cs->hw.saphir.timer);
+ mod_timer(&cs->hw.saphir.timer, jiffies+1*HZ);
}
void
/* schedule b-channel polling again */
save_flags(flags);
cli();
- del_timer(&card->rb_timer);
- card->rb_timer.expires = jiffies + ICN_TIMER_BCREAD;
- add_timer(&card->rb_timer);
+ mod_timer(&card->rb_timer, jiffies+ICN_TIMER_BCREAD);
card->flags |= ICN_FLAGS_RBTIMER;
restore_flags(flags);
} else
/* schedule again */
save_flags(flags);
cli();
- del_timer(&card->st_timer);
- card->st_timer.expires = jiffies + ICN_TIMER_DCREAD;
- add_timer(&card->st_timer);
+ mod_timer(&card->st_timer, jiffies+ICN_TIMER_DCREAD);
restore_flags(flags);
}
save_flags(flags);
cli();
- del_timer(&dev->timer);
- dev->timer.expires = jiffies + ISDN_TIMER_RES;
- add_timer(&dev->timer);
+ mod_timer(&dev->timer, jiffies+ISDN_TIMER_RES);
restore_flags(flags);
}
}
dev->tflags |= tf;
else
dev->tflags &= ~tf;
- if (dev->tflags) {
- if (!del_timer(&dev->timer)) /* del_timer is 1, when active */
- dev->timer.expires = jiffies + ISDN_TIMER_RES;
- add_timer(&dev->timer);
- }
+ if (dev->tflags)
+ mod_timer(&dev->timer, jiffies+ISDN_TIMER_RES);
restore_flags(flags);
}
else {
pr_debug("%s: No signature yet, waiting another %d jiffies.\n",
adapter[card]->devicename, CHECKRESET_TIME);
- del_timer(&adapter[card]->reset_timer);
- adapter[card]->reset_timer.expires = jiffies + CHECKRESET_TIME;
- add_timer(&adapter[card]->reset_timer);
+ mod_timer(&adapter[card]->reset_timer, jiffies+CHECKRESET_TIME);
}
restore_flags(flags);
/* Reinitialize the timer */
save_flags(flags);
cli();
- del_timer(&adapter[card]->stat_timer);
- adapter[card]->stat_timer.expires = jiffies + CHECKSTAT_TIME;
- add_timer(&adapter[card]->stat_timer);
+ mod_timer(&adapter[card]->stat_timer, jiffies+CHECKSTAT_TIME);
restore_flags(flags);
/* Send a new cePhyStatus message */
outw(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
} while ((status = inw(ioaddr + EL3_CMD)) & IntLatch);
/* The timer will reenable interrupts. */
- del_timer(&vp->timer);
- vp->timer.expires = RUN_AT(1);
- add_timer(&vp->timer);
+ mod_timer(&vp->timer, jiffies+1*HZ);
break;
}
/* Acknowledge the IRQ. */
outw(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
} while ((status = inw(ioaddr + EL3_CMD)) & IntLatch);
/* The timer will reenable interrupts. */
- del_timer(&vp->timer);
- vp->timer.expires = RUN_AT(1);
- add_timer(&vp->timer);
+ mod_timer(&vp->timer, jiffies+1*HZ);
break;
}
/* Acknowledge the IRQ. */
#include <linux/ioport.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/delay.h>
#include <asm/io.h>
-#define RTL8139_VERSION "0.9.4"
+#define RTL8139_VERSION "0.9.4.1"
#define RTL8139_MODULE_NAME "8139too"
#define RTL8139_DRIVER_NAME RTL8139_MODULE_NAME " Fast Ethernet driver " RTL8139_VERSION
#define PFX RTL8139_MODULE_NAME ": "
/* A few user-configurable values. */
+/* media options */
+static int media[] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 20;
/* Size of the in-memory receive ring. */
#define RX_BUF_LEN_IDX 2 /* 0==8K, 1==16K, 2==32K, 3==64K */
#define RX_BUF_LEN (8192 << RX_BUF_LEN_IDX)
+#define RX_BUF_PAD 16
+#define RX_BUF_TOT_LEN (RX_BUF_LEN + RX_BUF_PAD)
+
+/* Number of Tx descriptor registers. */
+#define NUM_TX_DESC 4
+
/* Size of the Tx bounce buffers -- must be at least (dev->mtu+14+4). */
#define TX_BUF_SIZE 1536
+#define TX_BUF_TOT_LEN (TX_BUF_SIZE * NUM_TX_DESC)
/* PCI Tuning Parameters
Threshold is bytes transferred to chip before transmission starts. */
#define TX_FIFO_THRESH 256 /* In bytes, rounded down to 32 byte units. */
-/* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024. */
+/* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024, 7==end of packet. */
#define RX_FIFO_THRESH 4 /* Rx buffer level before first PCI xfer. */
-#define RX_DMA_BURST 4 /* Maximum PCI burst, '4' is 256 bytes */
-#define TX_DMA_BURST 4 /* Calculate as 16<<val. */
+#define RX_DMA_BURST 4 /* Maximum PCI burst, '7' is unlimited */
+#define TX_DMA_BURST 4 /* Maximum PCI burst, '4' is 256 */
/* Operational parameters that usually are not changed. */
};
#define RTL_MIN_IO_SIZE 0x80
-#define RTL8139B_IO_SIZE 0xFF
+#define RTL8139B_IO_SIZE 256
#define RTL8139_CAPS HAS_CHIP_XCVR|HAS_LNK_CHNG
/*MPX5030,*/
DELTA8139,
ADDTRON8139,
-} chip_t;
+} board_t;
-/* indexed by chip_t, above */
+/* indexed by board_t, above */
static struct {
const char *name;
-} chip_info[] __devinitdata = {
+} board_info[] __devinitdata = {
{ "RealTek RTL8139 Fast Ethernet" },
{ "RealTek RTL8139B PCI/CardBus" },
{ "SMC1211TX EZCard 10/100 (RealTek RTL8139)" },
/* The rest of these values should never change. */
-#define NUM_TX_DESC 4 /* Number of Tx descriptor registers. */
/* Symbolic offsets to registers. */
enum RTL8139_registers {
Config0 = 0x51,
Config1 = 0x52,
FlashReg = 0x54,
- GPPinData = 0x58,
- GPPinDir = 0x59,
+ MediaStatus = 0x58,
+ Config3 = 0x59,
Config4 = 0x5A, /* absent on RTL-8139A */
HltClk = 0x5B,
MultiIntr = 0x5C,
CSCR = 0x74, /* Chip Status and Configuration Register. */
PARA78 = 0x78,
PARA7c = 0x7c, /* Magic transceiver parameter register. */
+ Config5 = 0xD8, /* absent on RTL-8139A */
+};
+
+enum ClearBitMasks {
+ MultiIntrClear = 0xF000,
+ ChipCmdClear = 0xE2,
+ Config1Clear = (1<<7)|(1<<6)|(1<<3)|(1<<2)|(1<<1),
};
enum ChipCmdBits {
};
enum RxConfigBits {
+ /* Early Rx threshold, none or X/16 */
+ RxCfgEarlyRxNone = 0,
+ RxCfgEarlyRxShift = 24,
+
+ /* rx fifo threshold */
+ RxCfgFIFOShift = 13,
+ RxCfgFIFONone = (7 << RxCfgFIFOShift),
+
+ /* Max DMA burst */
+ RxCfgDMAShift = 8,
+ RxCfgDMAUnlimited = (7 << RxCfgDMAShift),
+
+ /* rx ring buffer length */
RxCfgRcv8K = 0,
RxCfgRcv16K = (1 << 11),
RxCfgRcv32K = (1 << 12),
dma_addr_t mapping;
};
+typedef enum {
+ CH_8139 = 0,
+ CH_8139A,
+ CH_8139B,
+} chip_t;
+
+/* directly indexed by chip_t, above */
+const static struct {
+ const char *name;
+ u32 RxConfigMask; /* should clear the bits supported by this chip */
+} rtl_chip_info[] = {
+ { "RTL-8139",
+ 0xf0fe0040, /* XXX copied from RTL8139A, verify */
+ },
+
+ { "RTL-8139A",
+ 0xf0fe0040,
+ },
+
+ { "RTL-8139B(L)",
+ 0xf0fc0040
+ },
+};
+
-#define PRIV_ALIGN 15 /* Required alignment mask */
struct rtl8139_private {
- chip_t chip;
+ board_t board;
void *mmio_addr;
int drv_flags;
struct pci_dev *pci_dev;
struct timer_list timer; /* Media selection timer. */
unsigned char *rx_ring;
unsigned int cur_rx; /* Index into the Rx buffer of next Rx pkt. */
- unsigned int cur_tx, dirty_tx, tx_flag;
+ unsigned int tx_flag;
+ atomic_t cur_tx;
+ atomic_t dirty_tx;
/* The saved address of a sent-in-place packet/buffer, for skfree(). */
struct ring_info tx_info[NUM_TX_DESC];
unsigned char *tx_buf[NUM_TX_DESC]; /* Tx bounce buffers */
unsigned int media2:4; /* Secondary monitored media port. */
unsigned int medialock:1; /* Don't sense media type. */
unsigned int mediasense:1; /* Media sensing in progress. */
- int extended_regs; /* bool: supports regs > 0x80 ? */
spinlock_t lock;
+ chip_t chipset;
};
MODULE_AUTHOR ("Jeff Garzik <jgarzik@mandrakesoft.com>");
MODULE_PARM (multicast_filter_limit, "i");
MODULE_PARM (max_interrupt_work, "i");
MODULE_PARM (debug, "i");
+MODULE_PARM (media, "1-" __MODULE_STRING(8) "i");
static int read_eeprom (void *ioaddr, int location, int addr_len);
static int rtl8139_open (struct net_device *dev);
TxErr | TxOK | RxErr | RxOK;
static const unsigned int rtl8139_rx_config =
- (RX_FIFO_THRESH << 13) | (RxCfgRcv32K) |
- (RX_DMA_BURST << 8);
+ RxCfgEarlyRxNone | RxCfgFIFONone | RxCfgRcv32K | RxCfgDMAUnlimited;
-static int __devinit rtl8139_init_pci (struct pci_dev *pdev, void **ioaddr_out)
+static int __devinit rtl8139_init_board (struct pci_dev *pdev,
+ struct net_device **dev_out,
+ void **ioaddr_out)
{
void *ioaddr = NULL;
+ struct net_device *dev;
+ struct rtl8139_private *tp;
u8 tmp8;
- int rc;
+ int rc, i;
u32 pio_start, pio_end, pio_flags, pio_len;
unsigned long mmio_start, mmio_end, mmio_flags, mmio_len;
+ u32 tmp;
DPRINTK ("ENTER\n");
assert (ioaddr_out != NULL);
*ioaddr_out = NULL;
+ *dev_out = NULL;
+
+ /* dev zeroed in init_etherdev */
+ dev = init_etherdev (NULL, sizeof (*tp));
+ if (dev == NULL) {
+ printk (KERN_ERR PFX "unable to alloc new ethernet\n");
+ DPRINTK ("EXIT, returning -ENOMEM\n");
+ return -ENOMEM;
+ }
+ tp = dev->priv;
pio_start = pci_resource_start (pdev, 0);
pio_end = pci_resource_end (pdev, 0);
mmio_flags = pci_resource_flags (pdev, 1);
mmio_len = pci_resource_len (pdev, 1);
+ /* set this immediately, we need to know before
+ * we talk to the chip directly */
+ DPRINTK("PIO region size == 0x%02X\n", pio_len);
+ DPRINTK("MMIO region size == 0x%02X\n", mmio_len);
+ if (pio_len == RTL8139B_IO_SIZE)
+ tp->chipset = CH_8139B;
+
/* make sure PCI base addr 0 is PIO */
if (!(pio_flags & IORESOURCE_IO)) {
printk (KERN_ERR PFX "region #0 not a PIO resource, aborting\n");
}
/* make sure our PIO region in PCI space is available */
- if (!request_region (pio_start, pio_len, RTL8139_MODULE_NAME)) {
+ if (!request_region (pio_start, pio_len, dev->name)) {
printk (KERN_ERR PFX "no I/O resource available, aborting\n");
rc = -EBUSY;
goto err_out;
}
/* make sure our MMIO region in PCI space is available */
- if (!request_mem_region (mmio_start, mmio_len, RTL8139_MODULE_NAME)) {
+ if (!request_mem_region (mmio_start, mmio_len, dev->name)) {
printk (KERN_ERR PFX "no mem resource available, aborting\n");
rc = -EBUSY;
goto err_out_free_pio;
goto err_out_free_mmio;
}
+ /* Soft reset the chip. */
+ RTL_W8 (ChipCmd, (RTL_R8 (ChipCmd) & ChipCmdClear) | CmdReset);
+
+ /* Check that the chip has finished the reset. */
+ for (i = 1000; i > 0; i--)
+ if ((RTL_R8 (ChipCmd) & CmdReset) == 0)
+ break;
+ else
+ udelay (10);
+
/* Bring the chip out of low-power mode. */
- RTL_W8 (Config1, 0x00);
+ if (tp->chipset == CH_8139B) {
+ RTL_W8 (Config1, RTL_R8 (Config1) & ~(1<<4));
+ RTL_W8 (Config4, RTL_R8 (Config4) & ~(1<<2));
+ } else {
+ /* handle RTL8139A and RTL8139 cases */
+ /* XXX from becker driver. is this right?? */
+ RTL_W8 (Config1, 0);
+ }
+ /* sanity checks -- ensure PIO and MMIO registers agree */
+ assert (inb (pio_start+Config0) == readb (ioaddr+Config0));
+ assert (inb (pio_start+Config1) == readb (ioaddr+Config1));
+ assert (inb (pio_start+TxConfig) == readb (ioaddr+TxConfig));
+ assert (inb (pio_start+RxConfig) == readb (ioaddr+RxConfig));
+
/* make sure chip thinks PIO and MMIO are enabled */
tmp8 = RTL_R8 (Config1);
if ((tmp8 & Cfg1_PIO) == 0) {
goto err_out_iounmap;
}
- /* sanity checks -- ensure PIO and MMIO registers agree */
- assert (inb (pio_start+Config0) == RTL_R8 (Config0));
- assert (inb (pio_start+Config1) == RTL_R8 (Config1));
- assert (inb (pio_start+TxConfig) == RTL_R8 (TxConfig));
- assert (inb (pio_start+RxConfig) == RTL_R8 (RxConfig));
-
+ /* identify chip attached to board */
+ tmp = RTL_R32 (TxConfig);
+ if (((tmp >> 28) & 7) == 7) {
+ if (pio_len == RTL8139B_IO_SIZE)
+ tp->chipset = CH_8139B;
+ else
+ tp->chipset = CH_8139A;
+ } else {
+ tp->chipset = CH_8139;
+ }
+ DPRINTK ("chipset id (%d/%d/%d) == %d, '%s'\n",
+ CH_8139,
+ CH_8139A,
+ CH_8139B,
+ tp->chipset,
+ rtl_chip_info[tp->chipset].name);
+
DPRINTK ("EXIT, returning 0\n");
*ioaddr_out = ioaddr;
+ *dev_out = dev;
return 0;
err_out_iounmap:
err_out_free_pio:
release_region (pio_start, pio_len);
err_out:
+ unregister_netdev (dev);
+ kfree (dev);
DPRINTK ("EXIT, returning %d\n", rc);
return rc;
}
static int __devinit rtl8139_init_one (struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- struct net_device *dev;
+ struct net_device *dev = NULL;
struct rtl8139_private *tp;
- int i, addr_len, option = -1;
+ int i, addr_len, option;
void *ioaddr = NULL;
+ static int board_idx = -1;
+ u8 tmp;
#ifndef RTL8139_NDEBUG
static int printed_version = 0;
assert (pdev != NULL);
assert (ent != NULL);
-#ifndef RTL8139_NDEBUG
+ board_idx++;
+
if (!printed_version) {
printk (KERN_INFO RTL8139_DRIVER_NAME " loaded\n");
printed_version = 1;
}
-#endif /* RTL8139_NDEBUG */
- i = rtl8139_init_pci (pdev, &ioaddr);
+ i = rtl8139_init_board (pdev, &dev, &ioaddr);
if (i < 0) {
DPRINTK ("EXIT, returning %d\n", i);
return i;
}
+ tp = dev->priv;
+
assert (ioaddr != NULL);
-
- /* dev zeroed in init_etherdev */
- dev = init_etherdev (NULL, sizeof (*tp) + PRIV_ALIGN);
- if (dev == NULL) {
- iounmap (ioaddr);
- printk (KERN_ERR PFX "unable to alloc new ethernet\n");
- DPRINTK ("EXIT, returning -ENOMEM\n");
- return -ENOMEM;
- }
+ assert (dev != NULL);
+ assert (tp != NULL);
addr_len = read_eeprom (ioaddr, 0, 8) == 0x8129 ? 8 : 6;
for (i = 0; i < 3; i++)
dev->irq = pdev->irq;
dev->base_addr = pci_resource_start (pdev, 1);
- /* dev->priv/tp zeroed in init_etherdev */
- dev->priv = tp = (void *)
- (((long)dev->priv + PRIV_ALIGN) & ~PRIV_ALIGN);
+ /* dev->priv/tp zeroed and aligned in init_etherdev */
+ tp = dev->priv;
+ /* note: tp->chipset set in rtl8139_init_board */
tp->drv_flags = PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
PCI_COMMAND_MASTER | RTL8139_CAPS;
tp->pci_dev = pdev;
- tp->chip = ent->driver_data;
+ tp->board = ent->driver_data;
tp->mmio_addr = ioaddr;
- tp->extended_regs =
- (pci_resource_len (pdev, 0) == RTL8139B_IO_SIZE) ? 1 : 0;
tp->lock = SPIN_LOCK_UNLOCKED;
PCI_SET_DRIVER_DATA (pdev, dev);
tp->phys[0] = 32;
- printk (KERN_INFO "%s: %s at 0x%lx, IRQ %d,%s "
+ printk (KERN_INFO "%s: '%s' board found at 0x%lx, IRQ %d\n",
+ dev->name, board_info[ent->driver_data].name,
+ dev->base_addr, dev->irq);
+
+ printk (KERN_INFO "%s: Chip is '%s'\n",
+ dev->name,
+ rtl_chip_info[tp->chipset].name);
+
+ printk (KERN_INFO "%s: MAC address "
"%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x.\n",
- dev->name, chip_info[ent->driver_data].name,
- dev->base_addr, dev->irq,
- tp->extended_regs ? " 8139B regs," : "",
+ dev->name,
dev->dev_addr[0], dev->dev_addr[1],
dev->dev_addr[2], dev->dev_addr[3],
dev->dev_addr[4], dev->dev_addr[5]);
/* Put the chip into low-power mode. */
- RTL_W8 (Cfg9346, Cfg9346_Unlock);
- RTL_W8 (Config1, 0x03); /* Enable PM & PCI VPD */
- RTL_W8 (HltClk, 'H'); /* 'R' would leave the clock running. */
+ RTL_W8_F (Cfg9346, Cfg9346_Unlock);
+
+ tmp = RTL_R8 (Config1) & Config1Clear;
+ tmp |= (tp->chipset == CH_8139B) ? 3 : 1; /* Enable PM/VPD */
+ RTL_W8_F (Config1, tmp);
+
+ RTL_W8_F (HltClk, 'H'); /* 'R' would leave the clock running. */
/* The lower four bits are the media type. */
+ option = (board_idx > 7) ? 0 : media[board_idx];
if (option > 0) {
tp->full_duplex = (option & 0x200) ? 1 : 0;
tp->default_port = option & 15;
#ifndef RTL8139_NDEBUG
/* poison memory before freeing */
- memset (dev, 0xC0,
+ memset (dev, 0xBC,
sizeof (struct net_device) +
- sizeof (struct rtl8139_private) +
- PRIV_ALIGN);
+ sizeof (struct rtl8139_private));
#endif /* RTL8139_NDEBUG */
kfree (dev);
return -EBUSY;
}
- tp->tx_bufs = pci_alloc_consistent(tp->pci_dev, TX_BUF_SIZE * NUM_TX_DESC,
+ tp->tx_bufs = pci_alloc_consistent(tp->pci_dev, TX_BUF_TOT_LEN,
&tp->tx_bufs_dma);
- tp->rx_ring = pci_alloc_consistent(tp->pci_dev, RX_BUF_LEN + 16,
+ tp->rx_ring = pci_alloc_consistent(tp->pci_dev, RX_BUF_TOT_LEN,
&tp->rx_ring_dma);
if (tp->tx_bufs == NULL || tp->rx_ring == NULL) {
free_irq(dev->irq, dev);
if (tp->tx_bufs)
- pci_free_consistent(tp->pci_dev, TX_BUF_SIZE * NUM_TX_DESC,
+ pci_free_consistent(tp->pci_dev, TX_BUF_TOT_LEN,
tp->tx_bufs, tp->tx_bufs_dma);
if (tp->rx_ring)
- pci_free_consistent(tp->pci_dev, RX_BUF_LEN + 16,
+ pci_free_consistent(tp->pci_dev, RX_BUF_TOT_LEN,
tp->rx_ring, tp->rx_ring_dma);
DPRINTK ("EXIT, returning -ENOMEM\n");
}
- rtl8139_init_ring (dev);
tp->full_duplex = tp->duplex_lock;
tp->tx_flag = (TX_FIFO_THRESH << 11) & 0x003f0000;
+ rtl8139_init_ring (dev);
rtl8139_hw_start (dev);
DPRINTK ("%s: rtl8139_open() ioaddr %#lx IRQ %d"
" GP Pins %2.2x %s-duplex.\n",
dev->name, pci_resource_start (tp->pci_dev, 1),
- dev->irq, RTL_R8 (GPPinData),
+ dev->irq, RTL_R8 (MediaStatus),
tp->full_duplex ? "full" : "half");
/* Set the timer to switch to check for link beat and perhaps switch
{
struct rtl8139_private *tp = (struct rtl8139_private *) dev->priv;
void *ioaddr = tp->mmio_addr;
- int i;
- unsigned long flags;
+ u32 i;
+ u8 tmp;
DPRINTK ("ENTER\n");
- spin_lock_irqsave (&tp->lock, flags);
-
/* Soft reset the chip. */
- RTL_W8 (ChipCmd, CmdReset);
+ RTL_W8 (ChipCmd, (RTL_R8 (ChipCmd) & ChipCmdClear) | CmdReset);
+ udelay (100);
/* Check that the chip has finished the reset. */
for (i = 1000; i > 0; i--)
RTL_W32_F (MAC0 + 4, cpu_to_le32 (*(u32 *) (dev->dev_addr + 4)));
/* unlock Config[01234] and BMCR register writes */
- RTL_W8 (Cfg9346, Cfg9346_Unlock);
+ RTL_W8_F (Cfg9346, Cfg9346_Unlock);
+ udelay (100);
tp->cur_rx = 0;
+ /* init Rx ring buffer DMA address */
+ RTL_W32_F (RxBuf, tp->rx_ring_dma);
+
+ /* init Tx buffer DMA addresses */
+ for (i = 0; i < NUM_TX_DESC; i++)
+ RTL_W32_F (TxAddr0 + (i * 4), tp->tx_bufs_dma + (tp->tx_buf[i] - tp->tx_bufs));
+
/* Must enable Tx/Rx before setting transfer thresholds! */
- RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb);
- RTL_W32 (RxConfig, rtl8139_rx_config);
- /* Check this value: the documentation contradicts ifself. Is the
- IFG correct with bit 28:27 zero, or with |0x03000000 ? */
- RTL_W32 (TxConfig, (TX_DMA_BURST << 8) | 0x00000000);
-
- /* Reset N-Way to chipset defaults */
- RTL_W16 (BasicModeCtrl, (1<<15)|(1<<12)|(1<<9));
- for (i = 1000; i > 0; i--)
- if ((RTL_R8 (BasicModeCtrl) & (1<<15)) == 0)
- break;
+ RTL_W8_F (ChipCmd, (RTL_R8 (ChipCmd) & ChipCmdClear) |
+ CmdRxEnb | CmdTxEnb);
+
+ i = rtl8139_rx_config |
+ (RTL_R32 (RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
+ RTL_W32_F (RxConfig, i);
+
+	/* Check this value: the documentation for IFG contradicts itself. */
+ RTL_W32 (TxConfig, (TX_DMA_BURST << 8));
+
+ /* if link status not ok... */
+ if ((RTL_R16 (BasicModeStatus) & (1<<2)) == 0) {
+ printk (KERN_INFO "%s: no link, starting NWay\n", dev->name);
+
+ /* Reset N-Way to chipset defaults */
+ RTL_W16 (BasicModeCtrl, RTL_R16 (BasicModeCtrl) | (1<<15));
+ for (i = 1000; i > 0; i--)
+ if ((RTL_R8 (BasicModeCtrl) & (1<<15)) == 0)
+ break;
- /* Set N-Way to sane defaults */
- RTL_W16 (FIFOTMS, 0x0000);
- RTL_W16 (NWayAdvert, (1<<13)|(1<<8)|(1<<7)|(1<<6)|(1<<5)|0x1);
- RTL_W16 (BasicModeCtrl, (1<<13)|(1<<12)|(1<<9)|(1<<8));
+ /* Set N-Way to sane defaults */
+ RTL_W16_F (FIFOTMS, RTL_R16 (FIFOTMS) & ~(1<<7));
+ RTL_W16_F (NWayAdvert, RTL_R16 (NWayAdvert) |
+ (1<<13)|(1<<8)|(1<<7)|(1<<6)|(1<<5)|(1<<0));
+ RTL_W16_F (BasicModeCtrl, RTL_R16 (BasicModeCtrl) |
+ (1<<13)|(1<<12)|(1<<9)|(1<<8));
+ RTL_W8_F (MediaStatus, RTL_R8 (MediaStatus) | (1<<7) | (1<<6));
- /* check_duplex() here. */
- RTL_W8 (Config1, tp->full_duplex ? 0x60 : 0x20);
+ /* check_duplex() here. */
+ /* XXX writing Config1 here is flat out wrong */
+ /* RTL_W8 (Config1, tp->full_duplex ? 0x60 : 0x20); */
+ }
- /* lock Config[01234] and BMCR register writes */
- RTL_W8 (Cfg9346, Cfg9346_Lock);
+ tmp = RTL_R8 (Config1) & Config1Clear;
+ tmp |= (tp->chipset == CH_8139B) ? 3 : 1; /* Enable PM/VPD */
+ RTL_W8_F (Config1, tmp);
- RTL_W32 (RxBuf, tp->rx_ring_dma);
+ if (tp->chipset == CH_8139B) {
+ tmp = RTL_R8 (Config4) & ~(1<<2);
+ /* chip will clear Rx FIFO overflow automatically */
+ tmp |= (1<<7);
+ RTL_W8 (Config4, tmp);
+ }
+
+ /* disable magic packet scanning, which is enabled
+ * when PM is enabled above (Config1) */
+ RTL_W8 (Config3, RTL_R8 (Config3) & ~(1<<5));
- /* Start the chip's Tx and Rx process. */
- RTL_W32 (RxMissed, 0);
+ RTL_W32_F (RxMissed, 0);
- /* release lock cuz set_rx_mode wants it */
- spin_unlock_irqrestore (&tp->lock, flags);
rtl8139_set_rx_mode (dev);
- spin_lock_irqsave (&tp->lock, flags);
- RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb);
+ /* no early-rx interrupts */
+ RTL_W16 (MultiIntr, RTL_R16 (MultiIntr) & MultiIntrClear);
/* Enable all known interrupts by setting the interrupt mask. */
- RTL_W16 (IntrMask, rtl8139_intr_mask);
+ RTL_W16_F (IntrMask, rtl8139_intr_mask);
- if (netif_queue_stopped (dev))
- netif_start_queue (dev);
+ netif_start_queue (dev);
- spin_unlock_irqrestore (&tp->lock, flags);
+ DPRINTK ("EXIT\n");
+}
+
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void rtl8139_init_ring (struct net_device *dev)
+{
+ struct rtl8139_private *tp = (struct rtl8139_private *) dev->priv;
+ int i;
+
+ DPRINTK ("ENTER\n");
+
+ tp->cur_rx = 0;
+ atomic_set (&tp->cur_tx, 0);
+ atomic_set (&tp->dirty_tx, 0);
+
+ for (i = 0; i < NUM_TX_DESC; i++) {
+ tp->tx_info[i].skb = NULL;
+ tp->tx_info[i].mapping = 0;
+ tp->tx_buf[i] = &tp->tx_bufs[i * TX_BUF_SIZE];
+ }
DPRINTK ("EXIT\n");
}
void *ioaddr = tp->mmio_addr;
int next_tick = 60 * HZ;
int mii_reg5;
- unsigned long flags;
- DPRINTK ("ENTER\n");
-
- spin_lock_irqsave (&tp->lock, flags);
+ spin_lock_irq (&tp->lock);
mii_reg5 = mdio_read (dev, tp->phys[0], 5);
tp->phys[0], mii_reg5);
RTL_W8 (Cfg9346, Cfg9346_Unlock);
RTL_W8 (Config1, tp->full_duplex ? 0x60 : 0x20);
- RTL_W8 (Cfg9346, Cfg9346_Lock);
}
}
dev->name, RTL_R8 (Config0),
RTL_R8 (Config1));
- spin_unlock_irqrestore (&tp->lock, flags);
+ spin_unlock_irq (&tp->lock);
tp->timer.expires = jiffies + next_tick;
add_timer (&tp->timer);
-
- DPRINTK ("EXIT\n");
}
{
struct rtl8139_private *tp = (struct rtl8139_private *) dev->priv;
void *ioaddr = tp->mmio_addr;
- int mii_reg, i;
- unsigned long flags;
-
- DPRINTK ("ENTER\n");
-
- spin_lock_irqsave (&tp->lock, flags);
+ int i;
DPRINTK ("%s: Transmit timeout, status %2.2x %4.4x "
"media %2.2x.\n", dev->name,
RTL_R8 (ChipCmd),
RTL_R16 (IntrStatus),
- RTL_R8 (GPPinData));
+ RTL_R8 (MediaStatus));
+
+ spin_lock_irq (&tp->lock);
/* Disable interrupts by clearing the interrupt mask. */
RTL_W16 (IntrMask, 0x0000);
+
+ spin_unlock_irq (&tp->lock);
+
/* Emit info to figure out what went wrong. */
printk (KERN_DEBUG
"%s: Tx queue start entry %d dirty entry %d.\n",
- dev->name, tp->cur_tx, tp->dirty_tx);
+ dev->name, atomic_read (&tp->cur_tx),
+ atomic_read (&tp->dirty_tx));
for (i = 0; i < NUM_TX_DESC; i++)
printk (KERN_DEBUG "%s: Tx descriptor %d is %8.8x.%s\n",
dev->name, i, RTL_R32 (TxStatus0 + (i * 4)),
i ==
- tp->dirty_tx % NUM_TX_DESC ? " (queue head)" : "");
- printk (KERN_DEBUG "%s: MII #%d registers are:", dev->name,
- tp->phys[0]);
- for (mii_reg = 0; mii_reg < 8; mii_reg++)
- printk (" %4.4x", mdio_read (dev, tp->phys[0], mii_reg));
- printk (".\n");
+ atomic_read (&tp->dirty_tx) % NUM_TX_DESC ? " (queue head)" : "");
+
+ spin_lock_irq (&tp->lock);
/* Stop a shared interrupt from scavenging while we are. */
- tp->dirty_tx = tp->cur_tx = 0;
+ atomic_set (&tp->cur_tx, 0);
+ atomic_set (&tp->dirty_tx, 0);
/* Dump the unsent Tx packets. */
for (i = 0; i < NUM_TX_DESC; i++) {
rp->mapping = 0;
}
}
-
- spin_unlock_irqrestore (&tp->lock, flags);
+
+ spin_unlock_irq (&tp->lock);
rtl8139_hw_start (dev);
-
- DPRINTK ("EXIT\n");
}
-/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
-static void rtl8139_init_ring (struct net_device *dev)
-{
- struct rtl8139_private *tp = (struct rtl8139_private *) dev->priv;
- int i;
-
- DPRINTK ("ENTER\n");
-
- tp->cur_rx = 0;
- tp->dirty_tx = tp->cur_tx = 0;
-
- for (i = 0; i < NUM_TX_DESC; i++) {
- tp->tx_info[i].skb = NULL;
- tp->tx_info[i].mapping = 0;
- tp->tx_buf[i] = &tp->tx_bufs[i * TX_BUF_SIZE];
- }
-
- DPRINTK ("EXIT\n");
-}
-
static int rtl8139_start_xmit (struct sk_buff *skb, struct net_device *dev)
{
struct rtl8139_private *tp = (struct rtl8139_private *) dev->priv;
void *ioaddr = tp->mmio_addr;
int entry;
- unsigned long flags;
-
- DPRINTK ("ENTER\n");
-
- spin_lock_irqsave (&tp->lock, flags);
/* Calculate the next Tx descriptor entry. */
- entry = tp->cur_tx % NUM_TX_DESC;
+ entry = atomic_read (&tp->cur_tx) % NUM_TX_DESC;
tp->tx_info[entry].skb = skb;
- if ((long) skb->data & 3) { /* Must use alignment buffer. */
- tp->tx_info[entry].mapping = 0;
- memcpy (tp->tx_buf[entry], skb->data, skb->len);
+ tp->tx_info[entry].mapping = 0;
+ memcpy (tp->tx_buf[entry], skb->data, skb->len);
- assert (tp->tx_bufs_dma > 0);
- RTL_W32 (TxAddr0 + entry * 4, tp->tx_bufs_dma + (tp->tx_buf[entry] - tp->tx_bufs));
- } else {
- tp->tx_info[entry].mapping =
- pci_map_single(tp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
+ spin_lock_irq (&tp->lock);
- assert (tp->tx_info[entry].mapping > 0);
- RTL_W32 (TxAddr0 + entry * 4, tp->tx_info[entry].mapping);
- }
-
/* Note: the chip doesn't have auto-pad! */
- RTL_W32 (TxStatus0 + entry * 4,
+ RTL_W32 (TxStatus0 + (entry * sizeof(u32)),
tp->tx_flag | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
+ spin_unlock_irq (&tp->lock);
+
dev->trans_start = jiffies;
- if (++tp->cur_tx - tp->dirty_tx >= NUM_TX_DESC)
+ atomic_inc (&tp->cur_tx);
+ if ((atomic_read (&tp->cur_tx) - atomic_read (&tp->dirty_tx)) >= NUM_TX_DESC)
netif_stop_queue (dev);
- spin_unlock_irqrestore (&tp->lock, flags);
-
DPRINTK ("%s: Queued Tx packet at %p size %u to slot %d.\n",
dev->name, skb->data, skb->len, entry);
- DPRINTK ("EXIT\n");
return 0;
}
assert (tp != NULL);
assert (ioaddr != NULL);
- dirty_tx = tp->dirty_tx;
+ /* drop lock held in rtl8139_interrupt */
+ spin_unlock (&tp->lock);
+
+ dirty_tx = atomic_read (&tp->dirty_tx);
- while (tp->cur_tx - dirty_tx > 0) {
+ while ((atomic_read (&tp->cur_tx) - dirty_tx) > 0) {
int entry = dirty_tx % NUM_TX_DESC;
- int txstatus = RTL_R32 (TxStatus0 + (entry * 4));
+ int txstatus;
+ spin_lock (&tp->lock);
+ txstatus = RTL_R32 (TxStatus0 + (entry * 4));
+ spin_unlock (&tp->lock);
+
if (!(txstatus & (TxStatOK | TxUnderrun | TxAborted)))
break; /* It still hasn't been Txed */
tp->stats.tx_errors++;
if (txstatus & TxAborted) {
tp->stats.tx_aborted_errors++;
- RTL_W32 (TxConfig, (TX_DMA_BURST << 8) | 0x03000001);
+ spin_lock (&tp->lock);
+ RTL_W32 (TxConfig, (TX_DMA_BURST << 8));
+ spin_unlock (&tp->lock);
}
if (txstatus & TxCarrierLost)
tp->stats.tx_carrier_errors++;
tp->stats.tx_packets++;
}
- if (tp->tx_info[entry].mapping != 0) {
- pci_unmap_single (tp->pci_dev,
- tp->tx_info[entry].mapping,
- tp->tx_info[entry].skb->len,
- PCI_DMA_TODEVICE);
- tp->tx_info[entry].mapping = 0;
- }
/* Free the original skb. */
dev_kfree_skb_irq (tp->tx_info[entry].skb);
tp->tx_info[entry].skb = NULL;
dirty_tx++;
- if (tp->cur_tx - dirty_tx < NUM_TX_DESC)
+ if (netif_queue_stopped (dev) &&
+ (atomic_read (&tp->cur_tx) - dirty_tx < NUM_TX_DESC))
netif_wake_queue (dev);
}
#ifndef RTL8139_NDEBUG
- if (tp->cur_tx - dirty_tx > NUM_TX_DESC) {
+ if (atomic_read (&tp->cur_tx) - dirty_tx > NUM_TX_DESC) {
printk (KERN_ERR
"%s: Out-of-sync dirty pointer, %d vs. %d.\n",
- dev->name, dirty_tx, tp->cur_tx);
+ dev->name, dirty_tx, atomic_read (&tp->cur_tx));
dirty_tx += NUM_TX_DESC;
}
#endif /* RTL8139_NDEBUG */
- tp->dirty_tx = dirty_tx;
+ atomic_set (&tp->dirty_tx, dirty_tx);
+
+	/* obtain lock needed for rtl8139_interrupt */
+ spin_lock (&tp->lock);
}
/* The data sheet doesn't describe the Rx ring at all, so I'm guessing at the
field alignments and semantics. */
-static inline void rtl8139_rx_interrupt (struct net_device *dev,
- struct rtl8139_private *tp,
- void *ioaddr)
+static void rtl8139_rx_interrupt (struct net_device *dev,
+ struct rtl8139_private *tp,
+ void *ioaddr)
{
unsigned char *rx_ring;
u16 cur_rx;
if (rx_status &
(RxBadSymbol | RxRunt | RxTooLong | RxCRCErr |
RxBadAlign)) {
+ u8 tmp8;
+ int tmp_work = 1000;
+
DPRINTK ("%s: Ethernet frame had errors,"
" status %8.8x.\n", dev->name,
rx_status);
tp->stats.rx_crc_errors++;
/* Reset the receiver, based on RealTek recommendation. (Bug?) */
tp->cur_rx = 0;
- RTL_W8 (ChipCmd, CmdTxEnb);
+
+ /* disable receive */
+ tmp8 = RTL_R8 (ChipCmd) & ChipCmdClear;
+ RTL_W8_F (ChipCmd, tmp8 | CmdTxEnb);
+
/* A.C.: Reset the multicast list. */
rtl8139_set_rx_mode (dev);
- RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb);
+
+ while (--tmp_work > 0) {
+ tmp8 = RTL_R8 (ChipCmd) & ChipCmdClear;
+ if ((tmp8 & CmdRxEnb) && (tmp8 & CmdTxEnb))
+ break;
+ RTL_W8_F (ChipCmd, tmp8 | CmdRxEnb | CmdTxEnb);
+ }
+
+ if (tmp_work <= 0)
+ printk (KERN_WARNING PFX "tx/rx enable wait too long\n");
} else {
/* Malloc up new buffer, compatible with net-2e. */
/* Omit the four octet CRC from the length. */
}
skb->dev = dev;
skb_reserve (skb, 2); /* 16 byte align the IP fields. */
+
if (ring_offset + rx_size + 4 > RX_BUF_LEN) {
int semi_count =
RX_BUF_LEN - ring_offset - 4;
memcpy (skb_put (skb, semi_count),
&rx_ring[ring_offset + 4],
semi_count);
- memcpy (skb_put
- (skb, rx_size - semi_count),
+ memcpy (skb_put (skb, rx_size - semi_count),
rx_ring, rx_size - semi_count);
#ifdef RTL8139_DEBUG
{
int i;
- printk (KERN_DEBUG
- "%s: Frame wrap @%d",
+ printk (KERN_DEBUG "%s: Frame wrap @%d",
dev->name, semi_count);
for (i = 0; i < 16; i++)
- printk (" %2.2x",
- rx_ring[i]);
- printk (".\n");
+ printk (" %2.2x", rx_ring[i]);
+ printk ("\n");
memset (rx_ring, 0xcc, 16);
}
#endif /* RTL8139_DEBUG */
} else {
eth_copy_and_sum (skb,
- &rx_ring[ring_offset +
- 4], rx_size, 0);
+ &rx_ring[ring_offset + 4],
+ rx_size, 0);
skb_put (skb, rx_size);
}
skb->protocol = eth_type_trans (skb, dev);
}
-static inline int rtl8139_weird_interrupt (struct net_device *dev,
- struct rtl8139_private *tp,
- void *ioaddr,
- int status, int link_changed)
+static int rtl8139_weird_interrupt (struct net_device *dev,
+ struct rtl8139_private *tp,
+ void *ioaddr,
+ int status, int link_changed)
{
DPRINTK ("%s: Abnormal interrupt, status %8.8x.\n",
dev->name, status);
tp->full_duplex = duplex;
RTL_W8 (Cfg9346, Cfg9346_Unlock);
RTL_W8 (Config1, tp->full_duplex ? 0x60 : 0x20);
- RTL_W8 (Cfg9346, Cfg9346_Lock);
}
status &= ~RxUnderrun;
}
spin_lock (&tp->lock);
- /* disable interrupt generation while handling this interrupt */
- RTL_W16 (IntrMask, 0x0000);
-
do {
status = RTL_R16 (IntrStatus);
CPU speed, lower CPU speed --> more errors).
After clearing the RxOverflow bit the transfer of the
packet was repeated and all data are error free transfered */
- RTL_W16 (IntrStatus, (status & RxFIFOOver) ? (status | RxOverflow) : status);
+ RTL_W16_F (IntrStatus, (status & RxFIFOOver) ? (status | RxOverflow) : status);
DPRINTK ("%s: interrupt status=%#4.4x new intstat=%#4.4x.\n",
dev->name, status,
RTL_W16 (IntrStatus, 0xffff);
}
- /* Enable all known interrupts by setting the interrupt mask. */
- RTL_W16 (IntrMask, rtl8139_intr_mask);
-
spin_unlock (&tp->lock);
DPRINTK ("%s: exiting interrupt, intr_status=%#4.4x.\n",
dev->name, RTL_R16 (IntrStatus));
-
}
RTL_W16 (IntrMask, 0x0000);
/* Stop the chip's Tx and Rx DMA processes. */
- RTL_W8 (ChipCmd, 0x00);
+ RTL_W8 (ChipCmd, (RTL_R8 (ChipCmd) & ChipCmdClear));
/* Update the error counts. */
tp->stats.rx_missed_errors += RTL_R32 (RxMissed);
tp->tx_info[i].mapping = 0;
}
- pci_free_consistent(tp->pci_dev, RX_BUF_LEN + 16,
+ pci_free_consistent(tp->pci_dev, RX_BUF_TOT_LEN,
tp->rx_ring, tp->rx_ring_dma);
- pci_free_consistent(tp->pci_dev, TX_BUF_SIZE * NUM_TX_DESC,
+ pci_free_consistent(tp->pci_dev, TX_BUF_TOT_LEN,
tp->tx_bufs, tp->tx_bufs_dma);
tp->rx_ring = NULL;
tp->tx_bufs = NULL;
void *ioaddr = tp->mmio_addr;
u32 mc_filter[2]; /* Multicast hash filter */
int i, rx_mode;
+ u32 tmp;
unsigned long flags=0;
DPRINTK ("ENTER\n");
struct dev_mc_list *mclist;
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- for (i = 0, mclist = dev->mc_list;
- mclist && i < dev->mc_count;
- i++, mclist =
- mclist->next) set_bit (ether_crc (ETH_ALEN,
- mclist->
- dmi_addr) >> 26,
- mc_filter);
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next)
+ set_bit (ether_crc (ETH_ALEN, mclist->dmi_addr) >> 26,
+ mc_filter);
}
/* if called from irq handler, lock already acquired */
spin_lock_irqsave (&tp->lock, flags);
/* We can safely update without stopping the chip. */
- RTL_W32 (RxConfig, rtl8139_rx_config | rx_mode);
+ tmp = rtl8139_rx_config | rx_mode |
+ (RTL_R32 (RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
+ RTL_W32_F (RxConfig, tmp);
RTL_W32_F (MAR0 + 0, mc_filter[0]);
RTL_W32_F (MAR0 + 4, mc_filter[1]);
/* Disable interrupts, stop Tx and Rx. */
RTL_W16 (IntrMask, 0x0000);
- RTL_W8 (ChipCmd, 0x00);
+ RTL_W8 (ChipCmd, (RTL_R8 (ChipCmd) & ChipCmdClear));
/* Update the error counts. */
tp->stats.rx_missed_errors += RTL_R32 (RxMissed);
if (memcmp(eth_addr, "\x00\x00\x49", 3) != 0)
return -ENODEV;
- request_region(ioaddr, I596_TOTAL_SIZE, "i596");
+ if (!request_region(ioaddr, I596_TOTAL_SIZE, "i596"))
+ return -ENODEV;
dev->base_addr = ioaddr;
dev->irq = 10;
// printk(KERN_ERR "aironet4X00 mem addrs not available for maping \n");
// continue;
// }
- request_region(pci_ioaddr, AIRONET4X00_IO_SIZE, "aironet4x00 ioaddr");
+ if (!request_region(pci_ioaddr, AIRONET4X00_IO_SIZE, "aironet4x00 ioaddr"))
+ continue;
// request_region(pci_cisaddr, AIRONET4X00_CIS_SIZE, "aironet4x00 cis");
// request_region(pci_memaddr, AIRONET4X00_MEM_SIZE, "aironet4x00 mem");
#endif
}
-#endif
\ No newline at end of file
+#endif
for (i = 0; i < 6; i++)
write_reg_byte(ioaddr, PAR0 + i, dev->dev_addr[i]);
#ifdef TIMED_CHECKER
- del_timer(&atp_timer);
- atp_timer.expires = jiffies + TIMED_CHECKER;
- add_timer(&atp_timer);
+ mod_timer(&atp_timer, jiffies+TIMED_CHECKER);
#endif
}
for (i = 0; i < 6; i++)
write_reg_byte(ioaddr, PAR0 + i, atp_timed_dev->dev_addr[i]);
spin_unlock(&lp->lock);
- del_timer(&atp_timer);
- atp_timer.expires = jiffies + TIMED_CHECKER;
- add_timer(&atp_timer);
+ mod_timer(&atp_timer, jiffies+TIMED_CHECKER);
}
#endif
/* Enable Master/IO access, Disable memory access */
- pci_enable_device (net_dev);
+ pci_enable_device (net_dev); /* XXX check return val */
pci_set_master(net_dev);
/* Set Latency Timer 80h */
acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK;
}
- if (pci_enable_device(pdev)) {
- printk(KERN_ERR "eepro100: Could not enable PCI device\n");
+ if (pci_enable_device(pdev))
goto err_out_free_mmio_region;
- }
pci_set_master(pdev);
if (!request_region(ioaddr, EPIC_TOTAL_SIZE, EPIC100_MODULE_NAME))
return -EBUSY;
- pci_enable_device (pdev);
+ i = pci_enable_device (pdev);
+ if (i) {
+ release_region(ioaddr, EPIC_TOTAL_SIZE);
+ return i;
+ }
/* EPIC-specific code: Soft-reset the chip ere setting as master. */
outl(0x0001, ioaddr + GENCTL);
}
i = pci_enable_device (pdev);
- if (i) {
- printk (KERN_ERR "ne2k-pci: cannot enable device\n");
+ if (i)
return i;
- }
if (request_region (ioaddr, NE_IO_EXTENT, "ne2k-pci") == NULL) {
printk (KERN_ERR "ne2k-pci: I/O resource 0x%x @ 0x%lx busy\n",
{
int rc;
- MOD_INC_USE_COUNT;
lock_8390_module();
rc = pci_module_init (&ne2k_driver);
if (rc <= 0)
unlock_8390_module();
- MOD_DEC_USE_COUNT;
-
return rc;
}
printk(KERN_INFO "tulip_attach(%s)\n", pdev->slot_name);
- pci_enable_device (pdev);
+ if (pci_enable_device (pdev))
+ return -ENODEV;
pci_set_master (pdev);
dev = tulip_probe1(pdev, NULL,
pci_resource_start (pdev, 0), pdev->irq,
}
/* setup various bits in PCI command register */
- pci_enable_device (pci_dev);
+ if (pci_enable_device (pci_dev))
+ return -ENODEV;
pci_set_master(pci_dev);
/* do the real low level jobs */
/* starfire.c: Linux device driver for the Adaptec Starfire network adapter. */
/*
- Written 1998-1999 by Donald Becker.
-
- This software may be used and distributed according to the terms
- of the GNU Public License (GPL), incorporated herein by reference.
-
- The author may be reached as becker@usra.edu, or
- Donald Becker
- 312 Severn Ave. #W302
+ Written 1998-2000 by Donald Becker.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
Annapolis MD 21403
Support and updates available at
- http://cesdis.gsfc.nasa.gov/linux/drivers/starfire.html
+ http://www.scyld.com/network/starfire.html
+
+ Linux kernel-specific changes:
LK1.1.1 (jgarzik):
- Use PCI driver interface
- Fix MOD_xxx races
- softnet fixups
+ LK1.1.2 (jgarzik):
+ - Merge Becker version 0.15
*/
-static const char *versionA =
-"starfire.c:v0.12+LK1.1.1 3/19/2000 Written by Donald Becker and others\n",
-*versionB =" Undates and info at http://www.beowulf.org/linux/drivers.html\n";
-
-/* A few user-configurable values. These may be modified when a driver
- module is loaded.*/
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
/* Used for tuning interrupt latency vs. overhead. */
static int interrupt_mitigation = 0x0;
#define PFX "starfire: "
-
-#if !defined(__OPTIMIZE__) || !defined(__KERNEL__)
+#if !defined(__OPTIMIZE__)
#warning You must compile this file with the correct options!
#warning See the last lines of the source file.
#error You must compile this driver with "-O".
#endif
+/* Include files, designed to support most kernel versions 2.0.0 and later. */
+#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/sched.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <asm/bitops.h>
#include <asm/io.h>
-/* Kernel compatibility defines, some common to David Hind's PCMCIA package.
- This is only in the support-all-kernels source code. */
-
-#define RUN_AT(x) (jiffies + (x))
+/* These identify the driver base version and may not be removed. */
+static const char version1[] __devinitdata =
+"starfire.c:v0.15+LK1.1.2 4/28/2000 Written by Donald Becker <becker@scyld.com>\n";
+static const char version2[] __devinitdata =
+" Undates and info at http://www.scyld.com/network/starfire.html\n";
-MODULE_AUTHOR("Donald Becker <becker@cesdis.gsfc.nasa.gov>");
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Adaptec Starfire Ethernet driver");
MODULE_PARM(max_interrupt_work, "i");
MODULE_PARM(mtu, "i");
I. Board Compatibility
-State the chips and boards this driver is known to work with.
-Note any similar chips or boards that will not work.
-
-This driver skeleton demonstrates the driver for an idealized
-descriptor-based bus-master PCI chip.
+This driver is for the Adaptec 6915 "Starfire" 64 bit PCI Ethernet adapter.
II. Board-specific settings
-No jumpers exist on most PCI boards, so this section is usually empty.
-
III. Driver operation
IIIa. Ring buffers
*/
-\f
-/* This table drives the PCI probe routines. It's mostly boilerplate in all
- PCI drivers, and will likely be provided by some future kernel.
-*/
-enum pci_flags_bit {
- PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
- PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
-};
+enum chip_capability_flags {CanHaveMII=1, };
+
+#define MEM_ADDR_SZ 0x80000 /* And maps in 0.5MB(!). */
#if 0
#define ADDR_64BITS 1 /* This chip uses 64 bit addresses. */
#endif
-#define MEM_ADDR_SZ 0x80000 /* And maps in 0.5MB(!). */
+#define HAS_IP_COPYSUM 1
enum chipset {
CH_6915 = 0,
/* A chip capabilities table, matching the CH_xxx entries in xxx_pci_tbl[] above. */
-enum chip_capability_flags {CanHaveMII=1, };
static struct chip_info {
- char *chip_name;
+ const char *name;
int io_size;
- int flags;
-} netdrv_tbl[] = {
- { "Adaptec Starfire 6915", 128, CanHaveMII },
+ int drv_flags;
+} netdrv_tbl[] __devinitdata = {
+ { "Adaptec Starfire 6915", MEM_ADDR_SZ, CanHaveMII },
};
dma_addr_t tx_done_q_dma;
struct net_device_stats stats;
struct timer_list timer; /* Media monitoring timer. */
+ int chip_id, drv_flags;
+ struct pci_dev *pci_dev;
/* Frequently used values: keep some adjacent for cache effect. */
- int chip_id;
- struct pci_dev *pdev;
unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
unsigned int cur_tx, dirty_tx;
unsigned int rx_buf_sz; /* Based on MTU+slack. */
int mii_cnt; /* MII device addresses. */
u16 advertising; /* NWay media advertisement */
unsigned char phys[2]; /* MII device addresses. */
- u32 pad[4]; /* Used for 32-byte alignment */
};
static int mdio_read(struct net_device *dev, int phy_id, int location);
static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int netdev_close(struct net_device *dev);
+\f
static int __devinit starfire_init_one (struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct netdev_private *np;
- int i, irq, option, chip_id = ent->driver_data;
+ int i, irq, option, chip_idx = ent->driver_data;
struct net_device *dev;
- static int card_idx = 0;
+ static int card_idx = -1;
static int printed_version = 0;
long ioaddr;
- int io_size = netdrv_tbl[chip_id].io_size;
+ int io_size = netdrv_tbl[chip_idx].io_size;
+
+ card_idx++;
+ option = card_idx < MAX_UNITS ? options[card_idx] : 0;
+
+ if (!printed_version++)
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
ioaddr = pci_resource_start (pdev, 0);
if (!ioaddr || ((pci_resource_flags (pdev, 0) & IORESOURCE_MEM) == 0)) {
- printk (KERN_ERR PFX "no PCI MEM resources, aborting\n");
+ printk (KERN_ERR PFX "card %d: no PCI MEM resources, aborting\n", card_idx);
return -ENODEV;
}
dev = init_etherdev(NULL, sizeof(*np));
if (!dev) {
- printk (KERN_ERR PFX "cannot alloc etherdev, aborting\n");
+ printk (KERN_ERR PFX "card %d: cannot alloc etherdev, aborting\n", card_idx);
return -ENOMEM;
}
irq = pdev->irq;
-
+
if (request_mem_region (ioaddr, io_size, dev->name) == NULL) {
- printk (KERN_ERR PFX "resource 0x%x @ 0x%lx busy, aborting\n",
- io_size, ioaddr);
+ printk (KERN_ERR PFX "card %d: resource 0x%x @ 0x%lx busy, aborting\n",
+ card_idx, io_size, ioaddr);
goto err_out_free_netdev;
}
if (pci_enable_device (pdev)) {
- printk (KERN_ERR PFX "cannot enable PCI device, aborting\n");
+ printk (KERN_ERR PFX "card %d: cannot enable PCI device, aborting\n", card_idx);
goto err_out_free_res;
}
ioaddr = (long) ioremap (ioaddr, io_size);
if (!ioaddr) {
- printk (KERN_ERR PFX "cannot remap 0x%x @ 0x%lx, aborting\n",
- io_size, ioaddr);
+ printk (KERN_ERR PFX "card %d: cannot remap 0x%x @ 0x%lx, aborting\n",
+ card_idx, io_size, ioaddr);
goto err_out_free_res;
}
pci_set_master (pdev);
- option = card_idx < MAX_UNITS ? options[card_idx] : 0;
- card_idx++;
-
- if (!printed_version) {
- printk(KERN_INFO "%s" KERN_INFO "%s", versionA, versionB);
- printed_version = 1;
- }
-
- printk (KERN_INFO "%s: %s at 0x%lx, ",
- dev->name, netdrv_tbl[chip_id].chip_name, ioaddr);
+ printk(KERN_INFO "%s: %s at 0x%lx, ",
+ dev->name, netdrv_tbl[chip_idx].name, ioaddr);
/* Serial EEPROM reads are hidden by the hardware. */
for (i = 0; i < 6; i++)
dev->base_addr = ioaddr;
dev->irq = irq;
+ pdev->driver_data = dev;
+
/* private struct aligned and zeroed by init_etherdev */
np = dev->priv;
- np->pdev = pdev;
- np->chip_id = chip_id;
+ np->pci_dev = pdev;
+ np->chip_id = chip_idx;
- pdev->driver_data = dev;
+ /* save useful data, netdrv_tbl is __devinitdata and might be dropped */
+ np->drv_flags = netdrv_tbl[chip_idx].drv_flags;
if (dev->mem_start)
option = dev->mem_start;
if (mtu)
dev->mtu = mtu;
- if (netdrv_tbl[np->chip_id].flags & CanHaveMII) {
+ if (np->drv_flags & CanHaveMII) {
int phy, phy_idx = 0;
for (phy = 0; phy < 32 && phy_idx < 4; phy++) {
int mii_status = mdio_read(dev, phy, 1);
{
long mdio_addr = dev->base_addr + MIICtrl + (phy_id<<7) + (location<<2);
int result, boguscnt=1000;
- /* ??? Must add a busy-wait here. */
+ /* ??? Should we add a busy-wait here? */
do
result = readl(mdio_addr);
while ((result & 0xC0000000) != 0x80000000 && --boguscnt >= 0);
long ioaddr = dev->base_addr;
int i;
+ /* Do we ever need to reset the chip??? */
+
MOD_INC_USE_COUNT;
- /* Do we need to reset the chip??? */
if (request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev)) {
MOD_DEC_USE_COUNT;
- return -EBUSY;
+ return -EAGAIN;
}
/* Disable the Rx and Tx, and reset the chip. */
dev->name, dev->irq);
/* Allocate the various queues, failing gracefully. */
if (np->tx_done_q == 0)
- np->tx_done_q = pci_alloc_consistent(np->pdev, PAGE_SIZE, &np->tx_done_q_dma);
+ np->tx_done_q = pci_alloc_consistent(np->pci_dev, PAGE_SIZE, &np->tx_done_q_dma);
if (np->rx_done_q == 0)
- np->rx_done_q = pci_alloc_consistent(np->pdev, PAGE_SIZE, &np->rx_done_q_dma);
+ np->rx_done_q = pci_alloc_consistent(np->pci_dev, PAGE_SIZE, &np->rx_done_q_dma);
if (np->tx_ring == 0)
- np->tx_ring = pci_alloc_consistent(np->pdev, PAGE_SIZE, &np->tx_ring_dma);
+ np->tx_ring = pci_alloc_consistent(np->pci_dev, PAGE_SIZE, &np->tx_ring_dma);
if (np->rx_ring == 0)
- np->rx_ring = pci_alloc_consistent(np->pdev, PAGE_SIZE, &np->rx_ring_dma);
+ np->rx_ring = pci_alloc_consistent(np->pci_dev, PAGE_SIZE, &np->rx_ring_dma);
if (np->tx_done_q == 0 || np->rx_done_q == 0
|| np->rx_ring == 0 || np->tx_ring == 0) {
if (np->tx_done_q)
- pci_free_consistent(np->pdev, PAGE_SIZE,
+ pci_free_consistent(np->pci_dev, PAGE_SIZE,
np->tx_done_q, np->tx_done_q_dma);
if (np->rx_done_q)
- pci_free_consistent(np->pdev, PAGE_SIZE,
+ pci_free_consistent(np->pci_dev, PAGE_SIZE,
np->rx_done_q, np->rx_done_q_dma);
if (np->tx_ring)
- pci_free_consistent(np->pdev, PAGE_SIZE,
+ pci_free_consistent(np->pci_dev, PAGE_SIZE,
np->tx_ring, np->tx_ring_dma);
if (np->rx_ring)
- pci_free_consistent(np->pdev, PAGE_SIZE,
+ pci_free_consistent(np->pci_dev, PAGE_SIZE,
np->rx_ring, np->rx_ring_dma);
MOD_DEC_USE_COUNT;
return -ENOMEM;
if (dev->if_port == 0)
dev->if_port = np->default_port;
+ netif_start_queue(dev);
+
if (debug > 1)
printk(KERN_DEBUG "%s: Setting the Rx and Tx modes.\n", dev->name);
set_rx_mode(dev);
/* Enable the Rx and Tx units. */
writel(0x000F, ioaddr + GenCtrl);
- netif_start_queue(dev);
-
if (debug > 2)
printk(KERN_DEBUG "%s: Done netdev_open().\n",
dev->name);
/* Set the timer to check for link beat. */
init_timer(&np->timer);
- np->timer.expires = RUN_AT(3*HZ);
+ np->timer.expires = jiffies + 3*HZ;
np->timer.data = (unsigned long)dev;
np->timer.function = &netdev_timer; /* timer handler */
add_timer(&np->timer);
struct netdev_private *np = (struct netdev_private *)dev->priv;
long ioaddr = dev->base_addr;
int mii_reg5 = mdio_read(dev, np->phys[0], 5);
+ int negotiated = mii_reg5 & np->advertising;
int duplex, new_tx_mode ;
new_tx_mode = 0x0C04 | (np->tx_flowctrl ? 0x0800:0) | (np->rx_flowctrl ? 0x0400:0);
if (np->duplex_lock)
duplex = 1;
else
- duplex = (mii_reg5 & 0x0100) || (mii_reg5 & 0x01C0) == 0x0040;
+ duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
if (duplex)
new_tx_mode |= 2;
if (np->full_duplex != duplex) {
np->full_duplex = duplex;
if (debug)
- printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
- " partner capability of %4.4x.\n", dev->name,
- duplex ? "full" : "half", np->phys[0], mii_reg5);
+ printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d"
+ " negotiated capability %4.4x.\n", dev->name,
+ duplex ? "full" : "half", np->phys[0], negotiated);
}
if (new_tx_mode != np->tx_mode) {
np->tx_mode = new_tx_mode;
}
#endif
- np->timer.expires = RUN_AT(next_tick);
+ np->timer.expires = jiffies + next_tick;
add_timer(&np->timer);
}
-
static void tx_timeout(struct net_device *dev)
{
struct netdev_private *np = (struct netdev_private *)dev->priv;
/* Trigger an immediate transmit demand. */
/* XXX todo */
+ dev->trans_start = jiffies;
np->stats.tx_errors++;
}
np->rx_info[i].skb = skb;
if (skb == NULL)
break;
- np->rx_info[i].mapping = pci_map_single(np->pdev, skb->tail, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->tail, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
skb->dev = dev; /* Mark as being used by this device. */
/* Grrr, we cannot offset to correctly align the IP header. */
np->rx_ring[i].rxaddr = cpu_to_le32(np->rx_info[i].mapping | RxDescValid);
}
- writew(i-1, dev->base_addr + RxDescQIdx);
+ writew(i - 1, dev->base_addr + RxDescQIdx);
np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
/* Clear the remainder of the Rx buffer ring. */
np->tx_info[entry].skb = skb;
np->tx_info[entry].mapping =
- pci_map_single(np->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
+ pci_map_single(np->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
np->tx_ring[entry].addr = cpu_to_le32(np->tx_info[entry].mapping);
- /* Add |TxDescIntr to generate Tx-done interrupts. */
+ /* Add "| TxDescIntr" to generate Tx-done interrupts. */
np->tx_ring[entry].status = cpu_to_le32(skb->len | TxDescID);
if (debug > 5) {
printk(KERN_DEBUG "%s: Tx #%d slot %d %8.8x %8.8x.\n",
{
struct net_device *dev = (struct net_device *)dev_instance;
struct netdev_private *np;
- long ioaddr, boguscnt = max_interrupt_work;
+ long ioaddr;
+ int boguscnt = max_interrupt_work;
#ifndef final_version /* Can never occur. */
if (dev == NULL) {
netdev_rx(dev);
/* Scavenge the skbuff list based on the Tx-done queue.
- There are redundant checks here that may be cleaned up when
- after the driver has proven reliable. */
+ There are redundant checks here that may be cleaned up
+ after the driver has proven to be reliable. */
{
int consumer = readl(ioaddr + TxConsumerIdx);
int tx_status;
(np->tx_done+1) & (DONE_Q_SIZE-1),
le32_to_cpu(np->tx_done_q[(np->tx_done+1)&(DONE_Q_SIZE-1)].status));
#endif
- while ((tx_status = le32_to_cpu(np->tx_done_q[np->tx_done].status)) != 0) {
+ while ((tx_status = le32_to_cpu(np->tx_done_q[np->tx_done].status))
+ != 0) {
if (debug > 4)
printk(KERN_DEBUG "%s: Tx completion entry %d is %8.8x.\n",
dev->name, np->tx_done, tx_status);
entry >>= 3;
skb = np->tx_info[entry].skb;
- pci_unmap_single(np->pdev,
- np->tx_info[entry].mapping,
- skb->len, PCI_DMA_TODEVICE);
+ pci_unmap_single(np->pci_dev,
+ np->tx_info[entry].mapping,
+ skb->len, PCI_DMA_TODEVICE);
/* Scavenge the descriptor. */
dev_kfree_skb_irq(skb);
printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
dev->name, readl(ioaddr + IntrStatus));
- return;
+#ifndef final_version
+	/* Code that should never be run!  Remove after testing. */
+ {
+ static int stopit = 10;
+ if (!netif_running(dev) && --stopit < 0) {
+ printk(KERN_ERR "%s: Emergency stop, looping startup interrupt.\n",
+ dev->name);
+ free_irq(irq, dev);
+ }
+ }
+#endif
}
-/* This routine is logically part of the interrupt handler, but seperated
+/* This routine is logically part of the interrupt handler, but separated
for clarity and better register allocation. */
static int netdev_rx(struct net_device *dev)
{
struct netdev_private *np = (struct netdev_private *)dev->priv;
int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
u32 desc_status;
+
if (np->rx_done_q == 0) {
printk(KERN_ERR "%s: rx_done_q is NULL! rx_done is %d. %p.\n",
dev->name, np->rx_done, np->tx_done_q);
np->rx_done, desc_status);
if (--boguscnt < 0)
break;
- if (! (desc_status & RxOK)) {
+ if ( ! (desc_status & RxOK)) {
/* There was a error. */
if (debug > 2)
printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
skb->dev = dev;
skb_reserve(skb, 2); /* 16 byte align the IP header */
- pci_dma_sync_single(np->pdev,
- np->rx_info[entry].mapping,
- pkt_len, PCI_DMA_FROMDEVICE);
+ pci_dma_sync_single(np->pci_dev,
+ np->rx_info[entry].mapping,
+ pkt_len, PCI_DMA_FROMDEVICE);
#if HAS_IP_COPYSUM /* Call copy + cksum if available. */
eth_copy_and_sum(skb, np->rx_info[entry].skb->tail, pkt_len, 0);
skb_put(skb, pkt_len);
} else {
char *temp;
- pci_unmap_single(np->pdev, np->rx_info[entry].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ pci_unmap_single(np->pci_dev, np->rx_info[entry].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
skb = np->rx_info[entry].skb;
temp = skb_put(skb, pkt_len);
np->rx_info[entry].skb = NULL;
np->rx_info[entry].mapping = 0;
+#ifndef final_version /* Remove after testing. */
+ if (le32_to_cpu(np->rx_ring[entry].rxaddr & ~3) != ((unsigned long) temp))
+ printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
+ "do not match in netdev_rx: %d vs. %p / %p.\n",
+ dev->name,
+ le32_to_cpu(np->rx_ring[entry].rxaddr),
+ skb->head, temp);
+#endif
}
#ifndef final_version /* Remove after testing. */
/* You will want this info for the initial debug. */
if (skb == NULL)
break; /* Better luck next round. */
np->rx_info[entry].mapping =
- pci_map_single(np->pdev, skb->tail, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ pci_map_single(np->pci_dev, skb->tail, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
skb->dev = dev; /* Mark as being used by this device. */
np->rx_ring[entry].rxaddr =
cpu_to_le32(np->rx_info[entry].mapping | RxDescValid);
np->stats.rx_fifo_errors++;
}
-static struct enet_statistics *get_stats(struct net_device *dev)
+static struct net_device_stats *get_stats(struct net_device *dev)
{
long ioaddr = dev->base_addr;
struct netdev_private *np = (struct netdev_private *)dev->priv;
if (debug > 2) {
printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
np->tx_ring_dma);
- for (i = 0; i < 8 /* TX_RING_SIZE */; i++)
+ for (i = 0; i < 8 /* TX_RING_SIZE is huge! */; i++)
printk(KERN_DEBUG " #%d desc. %8.8x %8.8x -> %8.8x.\n",
i, le32_to_cpu(np->tx_ring[i].status),
le32_to_cpu(np->tx_ring[i].addr),
for (i = 0; i < RX_RING_SIZE; i++) {
np->rx_ring[i].rxaddr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
if (np->rx_info[i].skb != NULL) {
- pci_unmap_single(np->pdev, np->rx_info[i].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ pci_unmap_single(np->pci_dev, np->rx_info[i].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
dev_kfree_skb(np->rx_info[i].skb);
}
np->rx_info[i].skb = NULL;
for (i = 0; i < TX_RING_SIZE; i++) {
struct sk_buff *skb = np->tx_info[i].skb;
if (skb != NULL) {
- pci_unmap_single(np->pdev,
- np->tx_info[i].mapping,
- skb->len, PCI_DMA_TODEVICE);
+ pci_unmap_single(np->pci_dev,
+ np->tx_info[i].mapping,
+ skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb(skb);
}
np->tx_info[i].skb = NULL;
struct net_device *dev = pdev->driver_data;
struct netdev_private *np;
- if (!dev) {
- printk (KERN_WARNING "bug: removing starfire pci dev without driver\n");
- return;
- }
+ if (!dev)
+ BUG();
np = dev->priv;
iounmap((char *)dev->base_addr);
if (np->tx_done_q)
- pci_free_consistent(np->pdev, PAGE_SIZE,
+ pci_free_consistent(np->pci_dev, PAGE_SIZE,
np->tx_done_q, np->tx_done_q_dma);
if (np->rx_done_q)
- pci_free_consistent(np->pdev, PAGE_SIZE,
+ pci_free_consistent(np->pci_dev, PAGE_SIZE,
np->rx_done_q, np->rx_done_q_dma);
if (np->tx_ring)
- pci_free_consistent(np->pdev, PAGE_SIZE,
+ pci_free_consistent(np->pci_dev, PAGE_SIZE,
np->tx_ring, np->tx_ring_dma);
if (np->rx_ring)
- pci_free_consistent(np->pdev, PAGE_SIZE,
+ pci_free_consistent(np->pci_dev, PAGE_SIZE,
np->rx_ring, np->rx_ring_dma);
kfree(dev);
static int __init starfire_init (void)
{
- int rc;
-
- MOD_INC_USE_COUNT;
-
- rc = pci_module_init (&starfire_driver);
-
- MOD_DEC_USE_COUNT;
-
- return rc;
+ return pci_module_init (&starfire_driver);
}
/*
* Local variables:
- * compile-command: "gcc -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c starfire.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
- * simple-compile-command: "gcc -DMODULE -D__KERNEL__ -O6 -c starfire.c"
+ * compile-command: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c starfire.c"
+ * simple-compile-command: "gcc -DMODULE -O6 -c starfire.c"
* c-indent-level: 4
* c-basic-offset: 4
* tab-width: 4
if (versionprinted++ == 0)
printk("%s", version);
- pci_enable_device(pdev);
+ if (pci_enable_device(pdev))
+ continue;
/* Remove I/O space marker in bit 0. */
pci_irq_line = pdev->irq;
- pci_ioaddr = pdev->resource[0].start ;
+ pci_ioaddr = pci_resource_start (pdev, 0);
- if(check_region(pci_ioaddr, ABYSS_IO_EXTENT))
+ if(!request_region(pci_ioaddr, ABYSS_IO_EXTENT, "abyss"))
continue;
/* At this point we have found a valid card. */
dev = init_trdev(NULL, 0);
+ if (!dev) {
+ release_region(pci_ioaddr, ABYSS_IO_EXTENT);
+ continue;
+ }
- request_region(pci_ioaddr, ABYSS_IO_EXTENT, "abyss");
if(request_irq(pdev->irq, tms380tr_interrupt, SA_SHIRQ,
"abyss", dev)) {
release_region(pci_ioaddr, ABYSS_IO_EXTENT) ;
+ /* XXX free trdev */
continue; /*return (-ENODEV);*/ /* continue; ?? */
}
}
*/
- pci_ioaddr &= ~3 ;
dev->base_addr = pci_ioaddr;
dev->irq = pci_irq_line;
dev->dma = 0;
if (versionprinted++ == 0)
printk("%s", version);
- pci_enable_device(pdev);
+ if (pci_enable_device(pdev))
+ continue;
/* Remove I/O space marker in bit 0. */
pci_irq_line = pdev->irq;
- pci_ioaddr = pdev->resource[0].start ;
+ pci_ioaddr = pci_resource_start (pdev, 0);
if(check_region(pci_ioaddr, TMS_PCI_IO_EXTENT))
continue;
/* At this point we have found a valid card. */
dev = init_trdev(NULL, 0);
+ if (!dev) {
+ continue; /*return (-ENOMEM);*/ /* continue; ?? */
+ }
- request_region(pci_ioaddr, TMS_PCI_IO_EXTENT, cardinfo->name);
+ request_region(pci_ioaddr, TMS_PCI_IO_EXTENT, cardinfo->name); /* XXX check return */
if(request_irq(pdev->irq, tms380tr_interrupt, SA_SHIRQ,
cardinfo->name, dev)) {
release_region(pci_ioaddr, TMS_PCI_IO_EXTENT);
+ /* XXX free trdev */
continue; /*return (-ENODEV);*/ /* continue; ?? */
}
netif_device_detach (dev);
tulip_down (dev);
}
+ pci_set_power_state(pdev, 3);
}
{
struct net_device *dev = pdev->driver_data;
+ pci_enable_device(pdev);
if (dev && !netif_device_present (dev)) {
tulip_up (dev);
netif_device_attach (dev);
ioaddr = pci_resource_start (pdev, pci_flags & PCI_ADDR0 ? 0 : 1);
- if (pci_enable_device (pdev)) {
- printk (KERN_ERR "unable to init PCI device (card #%d)\n",
- card_idx);
+ if (pci_enable_device (pdev))
goto err_out_free_dma;
- }
if (pci_flags & PCI_USES_MASTER)
pci_set_master (pdev);
static int __init via_rhine_init (void)
{
- int rc;
-
- MOD_INC_USE_COUNT;
-
- rc = pci_module_init (&via_rhine_driver);
-
- MOD_DEC_USE_COUNT;
-
- return rc;
+ return pci_module_init (&via_rhine_driver);
}
{
x25_channel_t *chan = dev->priv;
- if (chan->svc) {
- del_timer(&chan->timer);
- chan->timer.expires = jiffies + chan->idle_tmout * HZ;
- add_timer(&chan->timer);
- }
+ if (chan->svc)
+ mod_timer(&chan->timer, jiffies+chan->idle_tmout*HZ);
}
#ifdef CYCLOMX_X25_DEBUG
static void x25_dump_config(TX25Config *conf)
goto err_out_free_pio_region;
}
+ /* XXX check enable_device for failure */
pci_enable_device (pdev);
pci_set_master (pdev);
#else
real_ioaddr = ioaddr = pci_resource_start (pdev, 1);
ioaddr = (long) ioremap(ioaddr, YELLOWFIN_SIZE);
+ /* XXX check for failure */
#endif
irq = pdev->irq;
#include <linux/init.h>
#include <linux/malloc.h>
#include <linux/ioport.h>
+#include <linux/pm.h>
#include <asm/page.h>
#include <asm/dma.h> /* isa_dma_bridge_buggy */
return b;
}
+#ifdef CONFIG_PM
+
+/*
+ * PCI Power management..
+ *
+ * This needs to be done centralized, so that we power manage PCI
+ * devices in the right order: we should not shut down PCI bridges
+ * before we've shut down the devices behind them, and we should
+ * not wake up devices before we've woken up the bridge to the
+ * device.. Eh?
+ *
+ * We do not touch devices that don't have a driver that exports
+ * a suspend/resume function. That is just too dangerous. If the default
+ * PCI suspend/resume functions work for a device, the driver can
+ * easily implement them (ie just have a suspend function that calls
+ * the pci_set_power_state() function).
+ */
+static int pci_pm_suspend_device(struct pci_dev *dev)
+{
+ if (dev) {
+ struct pci_driver *driver = dev->driver;
+ if (driver && driver->suspend)
+ driver->suspend(dev);
+ }
+ return 0;
+}
+
+static int pci_pm_resume_device(struct pci_dev *dev)
+{
+ if (dev) {
+ struct pci_driver *driver = dev->driver;
+ if (driver && driver->resume)
+ driver->resume(dev);
+ }
+ return 0;
+}
+
+static int pci_pm_suspend_bus(struct pci_bus *bus)
+{
+ struct list_head *list;
+
+ /* Walk the bus children list */
+ list_for_each(list, &bus->children)
+ pci_pm_suspend_bus(pci_bus_b(list));
+
+ /* Walk the device children list */
+ list_for_each(list, &bus->devices)
+ pci_pm_suspend_device(pci_dev_b(list));
+
+ /* Suspend the bus controller.. */
+ pci_pm_suspend_device(bus->self);
+ return 0;
+}
+
+static int pci_pm_resume_bus(struct pci_bus *bus)
+{
+ struct list_head *list;
+
+ pci_pm_resume_device(bus->self);
+
+ /* Walk the device children list */
+ list_for_each(list, &bus->devices)
+ pci_pm_resume_device(pci_dev_b(list));
+
+ /* And then walk the bus children */
+ list_for_each(list, &bus->children)
+ pci_pm_resume_bus(pci_bus_b(list));
+ return 0;
+}
+
+static int pci_pm_suspend(void)
+{
+ struct list_head *list;
+
+ list_for_each(list, &pci_root_buses)
+ pci_pm_suspend_bus(pci_bus_b(list));
+ return 0;
+}
+
+static int pci_pm_resume(void)
+{
+ struct list_head *list;
+
+ list_for_each(list, &pci_root_buses)
+ pci_pm_resume_bus(pci_bus_b(list));
+ return 0;
+}
+
+static int pci_pm_callback(struct pm_dev *dev, pm_request_t rqst, void *data)
+{
+ switch (rqst) {
+ case PM_SUSPEND:
+ return pci_pm_suspend();
+ case PM_RESUME:
+ return pci_pm_resume();
+ }
+ return 0;
+}
+#endif
+
void __init pci_init(void)
{
struct pci_dev *dev;
pci_for_each_dev(dev) {
pci_fixup_device(PCI_FIXUP_FINAL, dev);
}
+
+#ifdef CONFIG_PM
+ pm_register(PM_PCI_DEV, 0, pci_pm_callback);
+#endif
}
static int __init pci_setup(char *str)
((struct NCR5380_hostdata *) instance->hostdata)->next_timer = tmp;
*prev = instance;
- del_timer(&usleep_timer);
- usleep_timer.expires = ((struct NCR5380_hostdata *) expires_first->hostdata)->time_expires;
- add_timer(&usleep_timer);
+ mod_timer(&usleep_timer, ((struct NCR5380_hostdata *) expires_first->hostdata)->time_expires);
restore_flags(flags);
return 0;
}
{
{ "Adaptec AHA-1520 BIOS", 0x102e, 21 },
/* Adaptec 152x */
- { "Adaptec AHA-1520B", 0x000b, 19 },
+ { "Adaptec AHA-1520B", 0x000b, 17 },
/* Adaptec 152x rev B */
+ { "Adaptec AHA-1520B", 0x0026, 17 },
+ /* Iomega Jaz Jet ISA (AIC6370Q) */
{ "Adaptec ASW-B626 BIOS", 0x1029, 21 },
/* on-board controller */
{ "Adaptec BIOS: ASW-B626", 0x000f, 22 },
if ( !(p->dev_timer_active & (0x01 << MAX_TARGETS)) ||
time_after_eq(p->dev_timer.expires, p->dev_expires[i]) )
{
- del_timer(&p->dev_timer);
- p->dev_timer.expires = p->dev_expires[i];
- add_timer(&p->dev_timer);
+ mod_timer(&p->dev_timer, p->dev_expires[i]);
p->dev_timer_active |= (0x01 << MAX_TARGETS);
}
}
}
else if ( time_after_eq(p->dev_timer.expires,
p->dev_expires[tindex]) )
- {
- del_timer(&p->dev_timer);
- p->dev_timer.expires = p->dev_expires[tindex];
- add_timer(&p->dev_timer);
- }
+ mod_timer(&p->dev_timer, p->dev_expires[tindex]);
}
#ifdef AIC7XXX_VERBOSE_DEBUGGING
if( (aic7xxx_verbose & VERBOSE_MINOR_ERROR) ||
if ( !(p->dev_timer_active & (0x01 << MAX_TARGETS)) ||
time_after_eq(p->dev_timer.expires, p->dev_expires[p->scsi_id]) )
{
- del_timer(&p->dev_timer);
- p->dev_timer.expires = p->dev_expires[p->scsi_id];
- add_timer(&p->dev_timer);
+ mod_timer(&p->dev_timer, p->dev_expires[p->scsi_id]);
p->dev_timer_active |= (0x01 << MAX_TARGETS);
}
aic7xxx_reset_channel(p, cmd->channel, TRUE);
printk(KERN_ERR "es1370: io ports %#lx-%#lx in use\n", s->io, s->io+ES1370_EXTENT-1);
goto err_region;
}
+ if (pci_enable_device(pcidev))
+ goto err_irq;
if (request_irq(s->irq, es1370_interrupt, SA_SHIRQ, "es1370", s)) {
printk(KERN_ERR "es1370: irq %u in use\n", s->irq);
goto err_irq;
}
- pci_enable_device(pcidev);
/* initialize codec registers */
/* note: setting CTRL_SERR_DIS is reported to break
* mic bias setting (by Kim.Berts@fisub.mail.abb.com) */
printk(KERN_ERR PFX "io ports %#lx-%#lx in use\n", s->io, s->io+ES1371_EXTENT-1);
goto err_region;
}
+ if (pci_enable_device(pcidev))
+ goto err_irq;
if (request_irq(s->irq, es1371_interrupt, SA_SHIRQ, "es1371", s)) {
printk(KERN_ERR PFX "irq %u in use\n", s->irq);
goto err_irq;
}
- pci_enable_device(pcidev);
printk(KERN_INFO PFX "found es1371 rev %d at io %#lx irq %u\n"
KERN_INFO PFX "features: joystick 0x%x\n", s->rev, s->io, s->irq, joystick[devindex]);
/* register devices */
printk(KERN_ERR "solo1: io ports in use\n");
goto err_region4;
}
+ if (pci_enable_device(pcidev))
+ goto err_irq;
if (request_irq(s->irq, solo1_interrupt, SA_SHIRQ, "ESS Solo1", s)) {
printk(KERN_ERR "solo1: irq %u in use\n", s->irq);
goto err_irq;
}
- pci_enable_device(pcidev);
printk(KERN_DEBUG "solo1: ddma base address: 0x%lx\n", s->ddmabase);
printk(KERN_INFO "solo1: joystick port at %#lx\n", s->gpbase+1);
/* register devices */
return -ENODEV;
}
+ if (pci_enable_device(pci_dev))
+ return -EIO;
if ((card = kmalloc(sizeof(struct i810_card), GFP_KERNEL)) == NULL) {
printk(KERN_ERR "i810_audio: out of memory\n");
return -ENOMEM;
}
memset(card, 0, sizeof(*card));
- card->iobase = pci_dev->resource[1].start;
- card->ac97base = pci_dev->resource[0].start;
+ card->iobase = pci_resource_start (pci_dev, 1);
+ card->ac97base = pci_resource_start (pci_dev, 0);
card->pci_dev = pci_dev;
card->pci_id = pci_id->device;
card->irq = pci_dev->irq;
devs = card;
pci_set_master(pci_dev);
- pci_enable_device(pci_dev);
printk(KERN_INFO "i810: %s found at IO 0x%04lx and 0x%04lx, IRQ %d\n",
card_names[pci_id->driver_data], card->iobase, card->ac97base,
printk(KERN_ERR "sv: io ports %#lx-%#lx in use\n", s->iosynth, s->iosynth+SV_EXTENT_SYNTH-1);
goto err_region1;
}
- pci_enable_device(pcidev);
+ if (pci_enable_device(pcidev))
+ goto err_irq;
/* initialize codec registers */
outb(0x80, s->ioenh + SV_CODEC_CONTROL); /* assert reset */
udelay(50);
}
pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &revision);
- iobase = pci_dev->resource[0].start;
+ iobase = pci_resource_start (pci_dev, 0);
if (check_region(iobase, 256)) {
printk(KERN_ERR "trident: can't allocate I/O space at 0x%4.4lx\n",
iobase);
return -ENODEV;
}
+ if (pci_enable_device(pci_dev))
+ return -ENODEV;
+
if ((card = kmalloc(sizeof(struct trident_card), GFP_KERNEL)) == NULL) {
printk(KERN_ERR "trident: out of memory\n");
return -ENOMEM;
devs = card;
pci_set_master(pci_dev);
- pci_enable_device(pci_dev);
printk(KERN_INFO "trident: %s found at IO 0x%04lx, IRQ %d\n",
card_names[pci_id->driver_data], card->iobase, card->irq);
db = &as->usbin.dma;
} else
return -EINVAL;
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,3,22)
+
if (vma->vm_pgoff != 0)
return -EINVAL;
-#endif
+
return dmabuf_mmap(db, vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot);
}
*
* See Documentation/usb/usb-serial.txt for more information on using this driver
*
+ * (04/27/2000) Ryan VanderBijl
+ * Put calls to *_paranoia_checks into one function.
+ *
* (04/23/2000) gkh
* Fixed bug that Randy Dunlap found for Generic devices with no bulk out ports.
* Moved when the startup code printed out the devices that are supported.
static struct usb_serial *serial_table[SERIAL_TTY_MINORS] = {NULL, };
+static inline struct usb_serial* get_usb_serial (struct usb_serial_port *port, const char *function)
+{
+ /* if no port was specified, or it fails a paranoia check */
+ if (!port ||
+ port_paranoia_check (port, function) ||
+ serial_paranoia_check (port->serial, function)) {
+ /* then say that we dont have a valid usb_serial thing, which will
+ * end up genrating -ENODEV return values */
+ return NULL;
+ }
+
+ return port->serial;
+}
+
static struct usb_serial *get_serial_by_minor (int minor)
{
for (i = *minor+1; (i < (*minor + num_ports)) && (i < SERIAL_TTY_MINORS); ++i)
serial_table[i] = serial;
return serial;
- }
+ }
return NULL;
}
static void serial_close(struct tty_struct *tty, struct file * filp)
{
struct usb_serial_port *port = (struct usb_serial_port *) tty->driver_data;
- struct usb_serial *serial;
+ struct usb_serial *serial = get_usb_serial (port, "serial_close");
- dbg("serial_close");
-
- if (port_paranoia_check (port, "serial_close")) {
- return;
- }
-
- serial = port->serial;
- if (serial_paranoia_check (serial, "serial_close")) {
+ if (!serial) {
return;
}
static int serial_write (struct tty_struct * tty, int from_user, const unsigned char *buf, int count)
{
struct usb_serial_port *port = (struct usb_serial_port *) tty->driver_data;
- struct usb_serial *serial;
-
- dbg("serial_write");
-
- if (port_paranoia_check (port, "serial_write")) {
- return -ENODEV;
- }
+ struct usb_serial *serial = get_usb_serial (port, "serial_write");
- serial = port->serial;
- if (serial_paranoia_check (serial, "serial_write")) {
+ if (!serial) {
return -ENODEV;
}
static int serial_write_room (struct tty_struct *tty)
{
struct usb_serial_port *port = (struct usb_serial_port *) tty->driver_data;
- struct usb_serial *serial;
-
- dbg("serial_write_room");
-
- if (port_paranoia_check (port, "serial_write")) {
- return -ENODEV;
- }
-
- serial = port->serial;
- if (serial_paranoia_check (serial, "serial_write")) {
+ struct usb_serial *serial = get_usb_serial (port, "serial_write_room");
+
+ if (!serial) {
return -ENODEV;
}
-
+
dbg("serial_write_room port %d", port->number);
if (!port->active) {
dbg ("port not open");
return -EINVAL;
}
-
+
/* pass on to the driver specific version of this function if it is available */
if (serial->type->write_room) {
return (serial->type->write_room(port));
static int serial_chars_in_buffer (struct tty_struct *tty)
{
struct usb_serial_port *port = (struct usb_serial_port *) tty->driver_data;
- struct usb_serial *serial;
-
- dbg("serial_chars_in_buffer");
-
- if (port_paranoia_check (port, "serial_chars_in_buffer")) {
- return -ENODEV;
- }
-
- serial = port->serial;
- if (serial_paranoia_check (serial, "serial_chars_in_buffer")) {
+ struct usb_serial *serial = get_usb_serial (port, "serial_chars_in_buffer");
+
+ if (!serial) {
return -ENODEV;
}
-
+
if (!port->active) {
dbg ("port not open");
return -EINVAL;
}
-
+
/* pass on to the driver specific version of this function if it is available */
if (serial->type->chars_in_buffer) {
return (serial->type->chars_in_buffer(port));
static void serial_throttle (struct tty_struct * tty)
{
struct usb_serial_port *port = (struct usb_serial_port *) tty->driver_data;
- struct usb_serial *serial;
-
- dbg("serial_throttle");
-
- if (port_paranoia_check (port, "serial_throttle")) {
- return;
- }
-
- serial = port->serial;
- if (serial_paranoia_check (serial, "serial_throttle")) {
+ struct usb_serial *serial = get_usb_serial (port, "serial_throttle");
+
+ if (!serial) {
return;
}
-
+
dbg("serial_throttle port %d", port->number);
-
+
if (!port->active) {
dbg ("port not open");
return;
static void serial_unthrottle (struct tty_struct * tty)
{
struct usb_serial_port *port = (struct usb_serial_port *) tty->driver_data;
- struct usb_serial *serial;
-
- dbg("serial_unthrottle");
-
- if (port_paranoia_check (port, "serial_unthrottle")) {
- return;
- }
-
- serial = port->serial;
- if (serial_paranoia_check (serial, "serial_unthrottle")) {
+ struct usb_serial *serial = get_usb_serial (port, "serial_unthrottle");
+
+ if (!serial) {
return;
}
-
+
dbg("serial_unthrottle port %d", port->number);
-
+
if (!port->active) {
dbg ("port not open");
return;
static int serial_ioctl (struct tty_struct *tty, struct file * file, unsigned int cmd, unsigned long arg)
{
struct usb_serial_port *port = (struct usb_serial_port *) tty->driver_data;
- struct usb_serial *serial;
-
- dbg("serial_ioctl");
-
- if (port_paranoia_check (port, "serial_ioctl")) {
- return -ENODEV;
- }
+ struct usb_serial *serial = get_usb_serial (port, "serial_ioctl");
- serial = port->serial;
- if (serial_paranoia_check (serial, "serial_ioctl")) {
+ if (!serial) {
return -ENODEV;
}
-
+
dbg("serial_ioctl port %d", port->number);
-
+
if (!port->active) {
dbg ("port not open");
return -ENODEV;
static void serial_set_termios (struct tty_struct *tty, struct termios * old)
{
struct usb_serial_port *port = (struct usb_serial_port *) tty->driver_data;
- struct usb_serial *serial;
-
- dbg("serial_set_termios");
-
- if (port_paranoia_check (port, "serial_set_termios")) {
- return;
- }
+ struct usb_serial *serial = get_usb_serial (port, "serial_set_termios");
- serial = port->serial;
- if (serial_paranoia_check (serial, "serial_set_termios")) {
+ if (!serial) {
return;
}
static void serial_break (struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = (struct usb_serial_port *) tty->driver_data;
- struct usb_serial *serial;
-
- dbg("serial_break");
-
- if (port_paranoia_check (port, "serial_break")) {
- return;
- }
+ struct usb_serial *serial = get_usb_serial (port, "serial_break");
- serial = port->serial;
- if (serial_paranoia_check (serial, "serial_break")) {
+ if (!serial) {
return;
}
static void generic_read_bulk_callback (struct urb *urb)
{
struct usb_serial_port *port = (struct usb_serial_port *)urb->context;
- struct usb_serial *serial;
- struct tty_struct *tty;
- unsigned char *data = urb->transfer_buffer;
+ struct usb_serial *serial = get_usb_serial (port, "generic_read_bulk_callback");
+ struct tty_struct *tty;
+ unsigned char *data = urb->transfer_buffer;
int i;
- dbg("generic_read_bulk_callback");
-
- if (port_paranoia_check (port, "generic_read_bulk_callback")) {
+ if (!serial) {
return;
}
- serial = port->serial;
- if (serial_paranoia_check (serial, "generic_read_bulk_callback")) {
- return;
- }
-
if (urb->status) {
dbg("nonzero read bulk status received: %d", urb->status);
return;
static void generic_write_bulk_callback (struct urb *urb)
{
struct usb_serial_port *port = (struct usb_serial_port *)urb->context;
- struct usb_serial *serial;
- struct tty_struct *tty;
-
- dbg("generic_write_bulk_callback");
+ struct usb_serial *serial = get_usb_serial (port, "generic_write_bulk_callback");
+ struct tty_struct *tty;
- if (port_paranoia_check (port, "generic_write_bulk_callback")) {
+ if (!serial) {
return;
}
- serial = port->serial;
- if (serial_paranoia_check (serial, "generic_write_bulk_callback")) {
- return;
- }
-
if (urb->status) {
dbg("nonzero write bulk status received: %d", urb->status);
return;
*
* See Documentation/usb/usb-serial.txt for more information on using this driver
*
+ * (04/27/2000) Ryan VanderBijl
+ * Fixed memory leak in visor_close
+ *
* (03/26/2000) gkh
* Split driver up into device specific pieces.
*
/* send a shutdown message to the device */
usb_control_msg (serial->dev, usb_rcvctrlpipe(serial->dev, 0), VISOR_CLOSE_NOTIFICATION,
0xc2, 0x0000, 0x0000, transfer_buffer, 0x12, 300);
+ kfree (transfer_buffer);
}
/* shutdown our bulk reads and writes */
{
urb_priv_t * urb_priv = urb->hcpriv;
int i;
- void * wait;
if (!urb_priv) return;
- wait = urb_priv->wait;
-
for (i = 0; i < urb_priv->length; i++) {
if (urb_priv->td [i]) {
OHCI_FREE (urb_priv->td [i]);
}
kfree (urb->hcpriv);
urb->hcpriv = NULL;
-
- if (wait) {
- add_wait_queue (&op_wakeup, wait);
- wake_up (&op_wakeup);
- }
+
+ wake_up (&op_wakeup);
}
/*-------------------------------------------------------------------------*/
urb_priv->td_cnt = 0;
urb_priv->state = 0;
urb_priv->ed = ed;
- urb_priv->wait = NULL;
/* allocate the TDs */
for (i = 0; i < size; i++) {
if (urb->status == USB_ST_URB_PENDING) { /* URB active? */
urb_priv_t * urb_priv = urb->hcpriv;
urb_priv->state = URB_DEL;
+
/* we want to delete the TDs of an URB from an ed
* request the deletion, it will be handled at the next USB-frame */
- urb_priv->wait = &wait;
spin_lock_irqsave (&usb_ed_lock, flags);
ep_rm_ed (urb->dev, urb_priv->ed);
urb_priv->ed->state |= ED_URB_DEL;
spin_unlock_irqrestore (&usb_ed_lock, flags);
+ add_wait_queue (&op_wakeup, &wait);
current->state = TASK_UNINTERRUPTIBLE;
- if(schedule_timeout (HZ / 10)) /* wait until all TDs are deleted */
- remove_wait_queue (&op_wakeup, &wait);
- else
+ if (!schedule_timeout (HZ / 10)) /* wait until all TDs are deleted */
err("unlink URB timeout!");
+ remove_wait_queue (&op_wakeup, &wait);
} else
urb_rm_priv (urb);
usb_dec_dev_use (urb->dev);
spin_unlock_irqrestore (&usb_ed_lock, flags);
if (cnt > 0) {
- dev->wait = &wait;
+ add_wait_queue (&op_wakeup, &wait);
current->state = TASK_UNINTERRUPTIBLE;
schedule_timeout (HZ / 10);
remove_wait_queue (&op_wakeup, &wait);
ed->hwINFO = cpu_to_le32 (OHCI_ED_SKIP);
ed->state = ED_NEW;
/* if all eds are removed wake up sohci_free_dev */
- if ((! --dev->ed_cnt) && dev->wait) {
- add_wait_queue (&op_wakeup, dev->wait);
+ if (!--dev->ed_cnt)
wake_up (&op_wakeup);
- }
}
else {
ed->state &= ~ED_URB_DEL;
{
unsigned long mem_base;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0)
mem_base = dev->resource[0].start;
if (pci_enable_device(dev) < 0)
return -ENODEV;
-#else
- u16 cmd;
-
- mem_base = dev->base_address[0];
- if (mem_base & PCI_BASE_ADDRESS_SPACE_IO) return -ENODEV;
- mem_base &= PCI_BASE_ADDRESS_MEM_MASK;
-
- /* Some Mac firmware will switch memory response off */
- pci_read_config_word(dev, PCI_COMMAND, &cmd);
- pci_write_config_word(dev, PCI_COMMAND, cmd | PCI_COMMAND_MEMORY);
-#endif
pci_set_master (dev);
mem_base = (unsigned long) ioremap_nocache (mem_base, 4096);
if (ohci) {
switch (rqst) {
case PM_SUSPEND:
- dbg("USB-Bus suspend: %p", ohci->regs);
- if (ohci->bus->root_hub)
- usb_disconnect (&ohci->bus->root_hub);
- hc_reset (ohci);
+ dbg("USB-Bus suspend: %p", ohci);
+ writel (ohci->hc_control = 0xFF, &ohci->regs->control);
+ wait_ms (10);
break;
case PM_RESUME:
- dbg("USB-Bus resume: %p", ohci->regs);
- if ((temp = hc_reset (ohci)) < 0 || (temp = hc_start (ohci)) < 0)
- err ("can't restart controller, %d", temp);
+ dbg("USB-Bus resume: %p", ohci);
+ writel (ohci->hc_control = 0x7F, &ohci->regs->control);
+ wait_ms (20);
+ writel (ohci->hc_control = 0xBF, &ohci->regs->control);
break;
}
}
#include <linux/interrupt.h> /* for in_interrupt() */
#include <linux/init.h>
#include <linux/version.h>
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,3,44)
#include <linux/pm.h>
-#endif
#include <asm/uaccess.h>
#include <asm/io.h>
return 0;
}
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,3,44)
_static int handle_pm_event (struct pm_dev *dev, pm_request_t rqst, void *data)
{
uhci_t *s = (uhci_t*) dev->data;
}
return 0;
}
-#endif
_static int __init alloc_uhci (struct pci_dev *dev, int irq, unsigned int io_addr, unsigned int io_size)
{
//chain new uhci device into global list
devs = s;
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,3,44)
+
pmdev = pm_register(PM_PCI_DEV, PM_PCI_ID(dev), handle_pm_event);
if (pmdev)
pmdev->data = s;
-#endif
+
return 0;
}
/* Search for the IO base address.. */
for (i = 0; i < 6; i++) {
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,3,8)
+
unsigned int io_addr = dev->resource[i].start;
unsigned int io_size =
dev->resource[i].end - dev->resource[i].start + 1;
if (!(dev->resource[i].flags & 1))
continue;
-#else
- unsigned int io_addr = dev->base_address[i];
- unsigned int io_size = 0x14;
- if (!(io_addr & 1))
- continue;
- io_addr &= ~1;
-#endif
/* Is it already in use? */
if (check_region (io_addr, io_size))
if (type != 0)
continue;
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,3,8)
+
if (pci_enable_device (dev) < 0)
continue;
-#endif
+
if(!dev->irq)
{
err("Found UHCI device with no IRQ assigned. Check BIOS settings!");
void cleanup_module (void)
{
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,3,44)
pm_unregister_all (handle_pm_event);
-#endif
uhci_cleanup ();
}
* generic VGA port read/write
*/
-extern inline unsigned char vga_io_r (unsigned short port)
+static inline unsigned char vga_io_r (unsigned short port)
{
return inb (port);
}
-extern inline void vga_io_w (unsigned short port, unsigned char val)
+static inline void vga_io_w (unsigned short port, unsigned char val)
{
outb (val, port);
}
-extern inline void vga_io_w_fast (unsigned short port, unsigned char reg,
+static inline void vga_io_w_fast (unsigned short port, unsigned char reg,
unsigned char val)
{
outw (VGA_OUT16VAL (val, reg), port);
}
-extern inline unsigned char vga_mm_r (caddr_t regbase, unsigned short port)
+static inline unsigned char vga_mm_r (caddr_t regbase, unsigned short port)
{
return readb (regbase + port);
}
-extern inline void vga_mm_w (caddr_t regbase, unsigned short port, unsigned char val)
+static inline void vga_mm_w (caddr_t regbase, unsigned short port, unsigned char val)
{
writeb (val, regbase + port);
}
-extern inline void vga_mm_w_fast (caddr_t regbase, unsigned short port,
+static inline void vga_mm_w_fast (caddr_t regbase, unsigned short port,
unsigned char reg, unsigned char val)
{
writew (VGA_OUT16VAL (val, reg), regbase + port);
}
-extern inline unsigned char vga_r (caddr_t regbase, unsigned short port)
+static inline unsigned char vga_r (caddr_t regbase, unsigned short port)
{
if (regbase)
return vga_mm_r (regbase, port);
return vga_io_r (port);
}
-extern inline void vga_w (caddr_t regbase, unsigned short port, unsigned char val)
+static inline void vga_w (caddr_t regbase, unsigned short port, unsigned char val)
{
if (regbase)
vga_mm_w (regbase, port, val);
}
-extern inline void vga_w_fast (caddr_t regbase, unsigned short port,
+static inline void vga_w_fast (caddr_t regbase, unsigned short port,
unsigned char reg, unsigned char val)
{
if (regbase)
* VGA CRTC register read/write
*/
-extern inline unsigned char vga_rcrt (caddr_t regbase, unsigned char reg)
+static inline unsigned char vga_rcrt (caddr_t regbase, unsigned char reg)
{
vga_w (regbase, VGA_CRT_IC, reg);
return vga_r (regbase, VGA_CRT_DC);
}
-extern inline void vga_wcrt (caddr_t regbase, unsigned char reg, unsigned char val)
+static inline void vga_wcrt (caddr_t regbase, unsigned char reg, unsigned char val)
{
#ifdef VGA_OUTW_WRITE
vga_w_fast (regbase, VGA_CRT_IC, reg, val);
#endif /* VGA_OUTW_WRITE */
}
-extern inline unsigned char vga_io_rcrt (unsigned char reg)
+static inline unsigned char vga_io_rcrt (unsigned char reg)
{
vga_io_w (VGA_CRT_IC, reg);
return vga_io_r (VGA_CRT_DC);
}
-extern inline void vga_io_wcrt (unsigned char reg, unsigned char val)
+static inline void vga_io_wcrt (unsigned char reg, unsigned char val)
{
#ifdef VGA_OUTW_WRITE
vga_io_w_fast (VGA_CRT_IC, reg, val);
#endif /* VGA_OUTW_WRITE */
}
-extern inline unsigned char vga_mm_rcrt (caddr_t regbase, unsigned char reg)
+static inline unsigned char vga_mm_rcrt (caddr_t regbase, unsigned char reg)
{
vga_mm_w (regbase, VGA_CRT_IC, reg);
return vga_mm_r (regbase, VGA_CRT_DC);
}
-extern inline void vga_mm_wcrt (caddr_t regbase, unsigned char reg, unsigned char val)
+static inline void vga_mm_wcrt (caddr_t regbase, unsigned char reg, unsigned char val)
{
#ifdef VGA_OUTW_WRITE
vga_mm_w_fast (regbase, VGA_CRT_IC, reg, val);
* VGA sequencer register read/write
*/
-extern inline unsigned char vga_rseq (caddr_t regbase, unsigned char reg)
+static inline unsigned char vga_rseq (caddr_t regbase, unsigned char reg)
{
vga_w (regbase, VGA_SEQ_I, reg);
return vga_r (regbase, VGA_SEQ_D);
}
-extern inline void vga_wseq (caddr_t regbase, unsigned char reg, unsigned char val)
+static inline void vga_wseq (caddr_t regbase, unsigned char reg, unsigned char val)
{
#ifdef VGA_OUTW_WRITE
vga_w_fast (regbase, VGA_SEQ_I, reg, val);
#endif /* VGA_OUTW_WRITE */
}
-extern inline unsigned char vga_io_rseq (unsigned char reg)
+static inline unsigned char vga_io_rseq (unsigned char reg)
{
vga_io_w (VGA_SEQ_I, reg);
return vga_io_r (VGA_SEQ_D);
}
-extern inline void vga_io_wseq (unsigned char reg, unsigned char val)
+static inline void vga_io_wseq (unsigned char reg, unsigned char val)
{
#ifdef VGA_OUTW_WRITE
vga_io_w_fast (VGA_SEQ_I, reg, val);
#endif /* VGA_OUTW_WRITE */
}
-extern inline unsigned char vga_mm_rseq (caddr_t regbase, unsigned char reg)
+static inline unsigned char vga_mm_rseq (caddr_t regbase, unsigned char reg)
{
vga_mm_w (regbase, VGA_SEQ_I, reg);
return vga_mm_r (regbase, VGA_SEQ_D);
}
-extern inline void vga_mm_wseq (caddr_t regbase, unsigned char reg, unsigned char val)
+static inline void vga_mm_wseq (caddr_t regbase, unsigned char reg, unsigned char val)
{
#ifdef VGA_OUTW_WRITE
vga_mm_w_fast (regbase, VGA_SEQ_I, reg, val);
* VGA graphics controller register read/write
*/
-extern inline unsigned char vga_rgfx (caddr_t regbase, unsigned char reg)
+static inline unsigned char vga_rgfx (caddr_t regbase, unsigned char reg)
{
vga_w (regbase, VGA_GFX_I, reg);
return vga_r (regbase, VGA_GFX_D);
}
-extern inline void vga_wgfx (caddr_t regbase, unsigned char reg, unsigned char val)
+static inline void vga_wgfx (caddr_t regbase, unsigned char reg, unsigned char val)
{
#ifdef VGA_OUTW_WRITE
vga_w_fast (regbase, VGA_GFX_I, reg, val);
#endif /* VGA_OUTW_WRITE */
}
-extern inline unsigned char vga_io_rgfx (unsigned char reg)
+static inline unsigned char vga_io_rgfx (unsigned char reg)
{
vga_io_w (VGA_GFX_I, reg);
return vga_io_r (VGA_GFX_D);
}
-extern inline void vga_io_wgfx (unsigned char reg, unsigned char val)
+static inline void vga_io_wgfx (unsigned char reg, unsigned char val)
{
#ifdef VGA_OUTW_WRITE
vga_io_w_fast (VGA_GFX_I, reg, val);
#endif /* VGA_OUTW_WRITE */
}
-extern inline unsigned char vga_mm_rgfx (caddr_t regbase, unsigned char reg)
+static inline unsigned char vga_mm_rgfx (caddr_t regbase, unsigned char reg)
{
vga_mm_w (regbase, VGA_GFX_I, reg);
return vga_mm_r (regbase, VGA_GFX_D);
}
-extern inline void vga_mm_wgfx (caddr_t regbase, unsigned char reg, unsigned char val)
+static inline void vga_mm_wgfx (caddr_t regbase, unsigned char reg, unsigned char val)
{
#ifdef VGA_OUTW_WRITE
vga_mm_w_fast (regbase, VGA_GFX_I, reg, val);
* VGA attribute controller register read/write
*/
-extern inline unsigned char vga_rattr (caddr_t regbase, unsigned char reg)
+static inline unsigned char vga_rattr (caddr_t regbase, unsigned char reg)
{
vga_w (regbase, VGA_ATT_IW, reg);
return vga_r (regbase, VGA_ATT_R);
}
-extern inline void vga_wattr (caddr_t regbase, unsigned char reg, unsigned char val)
+static inline void vga_wattr (caddr_t regbase, unsigned char reg, unsigned char val)
{
vga_w (regbase, VGA_ATT_IW, reg);
vga_w (regbase, VGA_ATT_W, val);
}
-extern inline unsigned char vga_io_rattr (unsigned char reg)
+static inline unsigned char vga_io_rattr (unsigned char reg)
{
vga_io_w (VGA_ATT_IW, reg);
return vga_io_r (VGA_ATT_R);
}
-extern inline void vga_io_wattr (unsigned char reg, unsigned char val)
+static inline void vga_io_wattr (unsigned char reg, unsigned char val)
{
vga_io_w (VGA_ATT_IW, reg);
vga_io_w (VGA_ATT_W, val);
}
-extern inline unsigned char vga_mm_rattr (caddr_t regbase, unsigned char reg)
+static inline unsigned char vga_mm_rattr (caddr_t regbase, unsigned char reg)
{
vga_mm_w (regbase, VGA_ATT_IW, reg);
return vga_mm_r (regbase, VGA_ATT_R);
}
-extern inline void vga_mm_wattr (caddr_t regbase, unsigned char reg, unsigned char val)
+static inline void vga_mm_wattr (caddr_t regbase, unsigned char reg, unsigned char val)
{
vga_mm_w (regbase, VGA_ATT_IW, reg);
vga_mm_w (regbase, VGA_ATT_W, val);
continue;
}
- if ( !is_root_busy(dentry->d_mounts) ) {
+ if ( may_umount(dentry->d_mounts->d_sb) == 0 ) {
DPRINTK(("autofs: signaling expire on %s\n", ent->name));
return ent; /* Expirable! */
}
- DPRINTK(("autofs: didn't expire due to is_root_busy: %s\n", ent->name));
+ DPRINTK(("autofs: didn't expire due to may_umount: %s\n", ent->name));
}
return NULL; /* No expirable entries */
}
/* Decrement count for unused children */
count += (dentry->d_count - 1);
- /* Mountpoints don't count (either mountee or mounter) */
- if (d_mountpoint(dentry) ||
- dentry != dentry->d_covers) {
+ /* Mountpoints don't count */
+ if (d_mountpoint(dentry)) {
DPRINTK(("is_tree_busy: mountpoint dentry=%p covers=%p mounts=%p\n",
dentry, dentry->d_covers, dentry->d_mounts));
adj++;
}
+ /* ... and roots - these count double ... */
+ if (dentry != dentry->d_covers) {
+ DPRINTK(("is_tree_busy: mountpoint dentry=%p covers=%p mounts=%p\n",
+ dentry, dentry->d_covers, dentry->d_mounts));
+ adj+=2;
+ }
+
/* Ignore autofs's extra reference */
if (is_autofs4_dentry(dentry)) {
DPRINTK(("is_tree_busy: autofs\n"));
}
}
+/*
+ * Search for at least 1 mount point in the dentry's subdirs.
+ * We descend to the next level whenever the d_subdirs
+ * list is non-empty and continue searching.
+ */
+
/**
- * is_root_busy - check if a root dentry could be freed
- * @root: Dentry to work down from
- *
- * Check whether a root dentry would be in use if all of its
- * child dentries were freed. This allows a non-destructive
- * test for unmounting a device.
+ * have_submounts - check for mounts over a dentry
+ * @parent: dentry to check.
*
- * Return non zero if the root is still busy.
+ * Return true if the parent or its subdirectories contain
+ * a mount point
*/
-int is_root_busy(struct dentry *root)
+int have_submounts(struct dentry *parent)
{
- struct dentry *this_parent = root;
+ struct dentry *this_parent = parent;
struct list_head *next;
- int count = root->d_count;
-
- check_lock();
+ if (d_mountpoint(parent))
+ return 1;
repeat:
next = this_parent->d_subdirs.next;
resume:
struct list_head *tmp = next;
struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
next = tmp->next;
- /* Decrement count for unused children */
- count += (dentry->d_count - 1);
+ /* Have we found a mount point ? */
+ if (d_mountpoint(dentry))
+ return 1;
if (!list_empty(&dentry->d_subdirs)) {
this_parent = dentry;
goto repeat;
}
- /* root is busy if any leaf is busy */
- if (dentry->d_count)
- return 1;
}
/*
* All done at this level ... ascend and resume the search.
*/
- if (this_parent != root) {
+ if (this_parent != parent) {
next = this_parent->d_child.next;
this_parent = this_parent->d_parent;
goto resume;
}
- return (count > 1); /* remaining users? */
+ return 0; /* No mount points found in tree */
}
-/*
- * Search for at least 1 mount point in the dentry's subdirs.
- * We descend to the next level whenever the d_subdirs
- * list is non-empty and continue searching.
- */
-
-/**
- * have_submounts - check for mounts over a dentry
- * @parent: dentry to check.
- *
- * Return true if the parent or its subdirectories contain
- * a mount point
- */
-
-int have_submounts(struct dentry *parent)
+int d_active_refs(struct dentry *root)
{
- struct dentry *this_parent = parent;
+ struct dentry *this_parent = root;
struct list_head *next;
+ int count = root->d_count;
- if (d_mountpoint(parent))
- return 1;
repeat:
next = this_parent->d_subdirs.next;
resume:
struct list_head *tmp = next;
struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
next = tmp->next;
- /* Have we found a mount point ? */
- if (d_mountpoint(dentry))
- return 1;
+ /* Decrement count for unused children */
+ count += (dentry->d_count - 1);
if (!list_empty(&dentry->d_subdirs)) {
this_parent = dentry;
goto repeat;
/*
* All done at this level ... ascend and resume the search.
*/
- if (this_parent != parent) {
+ if (this_parent != root) {
next = this_parent->d_child.next;
this_parent = this_parent->d_parent;
goto resume;
}
- return 0; /* No mount points found in tree */
+ return count;
}
/*
dentry->d_mounts = dentry;
dentry->d_covers = dentry;
+ INIT_LIST_HEAD(&dentry->d_vfsmnt);
INIT_LIST_HEAD(&dentry->d_hash);
INIT_LIST_HEAD(&dentry->d_lru);
INIT_LIST_HEAD(&dentry->d_subdirs);
{
char * end = buffer+buflen;
char * retval;
+ int namelen;
*--end = '\0';
buflen--;
for (;;) {
struct dentry * parent;
- int namelen;
- if (dentry == root)
+ if (dentry == root && vfsmnt == rootmnt)
break;
- dentry = dentry->d_covers;
+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
+ /* Global root? */
+ if (vfsmnt->mnt_parent == vfsmnt)
+ goto global_root;
+ dentry = vfsmnt->mnt_mountpoint;
+ vfsmnt = vfsmnt->mnt_parent;
+ continue;
+ }
parent = dentry->d_parent;
- if (dentry == parent)
- break;
namelen = dentry->d_name.len;
buflen -= namelen + 1;
if (buflen < 0)
dentry = parent;
}
return retval;
+global_root:
+ namelen = dentry->d_name.len;
+ buflen -= namelen;
+ if (buflen >= 0) {
+ end -= namelen;
+ memcpy(end, dentry->d_name.name, namelen);
+ }
+ return end;
}
/*
/* [Feb 1997 T. Schoebel-Theuer] Complete rewrite of the pathname
* lookup logic.
*/
+/* [Feb-Apr 2000, AV] Rewrite to the new namespace architecture.
+ */
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <asm/namei.h>
-/* This can be removed after the beta phase. */
-#define CACHE_SUPERVISE /* debug the correctness of dcache entries */
-#undef DEBUG /* some other debugging */
-
-
#define ACC_MODE(x) ("\000\004\002\006"[(x)&O_ACCMODE])
/* [Feb-1997 T. Schoebel-Theuer]
* [10-Sep-98 Alan Modra] Another symlink change.
*/
+/* [Feb-Apr 2000 AV] Complete rewrite. Rules for symlinks:
+ * inside the path - always follow.
+ * in the last component in creation/removal/renaming - never follow.
+ * if LOOKUP_FOLLOW passed - follow.
+ * if the pathname has trailing slashes - follow.
+ * otherwise - don't follow.
+ * (applied in that order).
+ */
+
/* In order to reduce some races, while at the same time doing additional
* checking and hopefully speeding things up, we copy filenames to the
* kernel data space before using them..
return -ELOOP;
}
-static inline int follow_down(struct dentry ** dentry, struct vfsmount **mnt)
+static inline int follow_up(struct vfsmount **mnt, struct dentry **base)
{
- struct dentry * parent = dget((*dentry)->d_mounts);
- dput(*dentry);
- *dentry = parent;
+ struct vfsmount *parent=(*mnt)->mnt_parent;
+ struct dentry *dentry;
+ if (parent == *mnt)
+ return 0;
+ dentry=dget((*mnt)->mnt_mountpoint);
+ mntget(parent);
+ mntput(*mnt);
+ *mnt = parent;
+ dput(*base);
+ *base = dentry;
return 1;
}
+static inline int __follow_down(struct vfsmount **mnt, struct dentry **dentry)
+{
+ struct list_head *p = (*dentry)->d_vfsmnt.next;
+ while (p != &(*dentry)->d_vfsmnt) {
+ struct vfsmount *tmp;
+ tmp = list_entry(p, struct vfsmount, mnt_clash);
+ if (tmp->mnt_parent == *mnt) {
+ *mnt = mntget(tmp);
+ mntput(tmp->mnt_parent);
+ /* tmp holds the mountpoint, so... */
+ dput(*dentry);
+ *dentry = dget(tmp->mnt_root);
+ return 1;
+ }
+ p = p->next;
+ }
+ return 0;
+}
+
+int follow_down(struct vfsmount **mnt, struct dentry **dentry)
+{
+ return __follow_down(mnt,dentry);
+}
+
/*
* Name resolution.
*
case 2:
if (this.name[1] != '.')
break;
- if (nd->dentry != current->fs->root) {
- dentry = dget(nd->dentry->d_covers->d_parent);
- dput(nd->dentry);
- nd->dentry = dentry;
- inode = dentry->d_inode;
+ while (1) {
+ if (nd->dentry == current->fs->root &&
+ nd->mnt == current->fs->rootmnt)
+ break;
+ if (nd->dentry != nd->mnt->mnt_root) {
+ dentry = dget(nd->dentry->d_parent);
+ dput(nd->dentry);
+ nd->dentry = dentry;
+ break;
+ }
+ if (!follow_up(&nd->mnt, &nd->dentry))
+ break;
}
+ inode = nd->dentry->d_inode;
/* fallthrough */
case 1:
continue;
break;
}
/* Check mountpoints.. */
- while (d_mountpoint(dentry) && follow_down(&dentry, &nd->mnt))
+ while (d_mountpoint(dentry) && __follow_down(&nd->mnt, &dentry))
;
err = -ENOENT;
case 2:
if (this.name[1] != '.')
break;
- if (nd->dentry != current->fs->root) {
- dentry = dget(nd->dentry->d_covers->d_parent);
- dput(nd->dentry);
- nd->dentry = dentry;
- inode = dentry->d_inode;
+ while (1) {
+ if (nd->dentry == current->fs->root &&
+ nd->mnt == current->fs->rootmnt)
+ break;
+ if (nd->dentry != nd->mnt->mnt_root) {
+ dentry = dget(nd->dentry->d_parent);
+ dput(nd->dentry);
+ nd->dentry = dentry;
+ break;
+ }
+ if (!follow_up(&nd->mnt, &nd->dentry))
+ break;
}
+ inode = nd->dentry->d_inode;
/* fallthrough */
case 1:
goto return_base;
if (IS_ERR(dentry))
break;
}
- while (d_mountpoint(dentry) && follow_down(&dentry, &nd->mnt))
+ while (d_mountpoint(dentry) && __follow_down(&nd->mnt, &dentry))
;
inode = dentry->d_inode;
if ((lookup_flags & LOOKUP_FOLLOW)
{
struct svc_export *exp;
struct dentry *dparent;
- struct nameidata nd;
+ struct dentry *dentry;
int err;
dprintk("nfsd: nfsd_lookup(fh %s, %s)\n", SVCFH_fmt(fhp), name);
err = nfserr_acces;
/* Lookup the name, but don't follow links */
- if (strcmp(name, "..")==0) {
+ if (strcmp(name, ".")==0) {
+ dentry = dget(dparent);
+ } else if (strcmp(name, "..")==0) {
/* checking mountpoint crossing is very different when stepping up */
if (dparent == exp->ex_dentry) {
if (!EX_CROSSMNT(exp))
- nd.dentry = dget(dparent); /* .. == . just like at / */
+ dentry = dget(dparent); /* .. == . just like at / */
else
{
struct svc_export *exp2 = NULL;
struct dentry *dp;
- nd.dentry = dparent->d_covers->d_parent;
- for (dp=nd.dentry;
- exp2 == NULL && dp->d_covers->d_parent != dp;
- dp=dp->d_covers->d_parent)
+ dentry = dparent->d_covers->d_parent;
+ for (dp=dentry;
+ exp2 == NULL && dp->d_parent != dp;
+ dp=dp->d_parent)
exp2 = exp_get(exp->ex_client, dp->d_inode->i_dev, dp->d_inode->i_ino);
- if (exp2==NULL || nd.dentry->d_sb != exp2->ex_dentry->d_sb) {
- nd.dentry = dget(dparent);
+ if (exp2==NULL || dentry->d_sb != exp2->ex_dentry->d_sb) {
+ dentry = dget(dparent);
} else {
- dget(nd.dentry);
+ dget(dentry);
exp = exp2;
}
}
} else
- nd.dentry = dget(dparent->d_parent);
+ dentry = dget(dparent->d_parent);
} else {
- nd.mnt = NULL;
- nd.dentry = dget(dparent);
- nd.flags = 0;
- err = walk_name(name, &nd);
+ dentry = lookup_one(name, dparent);
+ err = PTR_ERR(dentry);
if (err)
goto out_nfserr;
/*
* check if we have crossed a mount point ...
*/
- if (nd.dentry->d_sb != dparent->d_sb) {
+ if (d_mountpoint(dentry)) {
struct svc_export *exp2 = NULL;
+ struct dentry *mounts = dget(dentry->d_mounts);
exp2 = exp_get(rqstp->rq_client,
- nd.dentry->d_inode->i_dev,
- nd.dentry->d_inode->i_ino);
- if (exp2 && EX_CROSSMNT(exp2))
+ mounts->d_inode->i_dev,
+ mounts->d_inode->i_ino);
+ if (exp2 && EX_CROSSMNT(exp2)) {
/* successfully crossed mount point */
exp = exp2;
- else if (nd.dentry->d_covers->d_sb == dparent->d_sb) {
- /* stay in the original filesystem */
- struct dentry *tdentry = dget(nd.dentry->d_covers);
- dput(nd.dentry);
- nd.dentry = tdentry;
- } else {
- /* This cannot possibly happen */
- printk("nfsd_lookup: %s/%s impossible mount point!\n", dparent->d_name.name, nd.dentry->d_name.name);
- dput(nd.dentry);
- err = nfserr_acces;
- goto out;
-
+ dput(dentry);
+ dentry = mounts;
}
}
}
* Note: we compose the file handle now, but as the
* dentry may be negative, it may need to be updated.
*/
- err = fh_compose(resfh, exp, nd.dentry);
- if (!err && !nd.dentry->d_inode)
+ err = fh_compose(resfh, exp, dentry);
+ if (!err && !dentry->d_inode)
err = nfserr_noent;
out:
return err;
static struct dentry *proc_root_lookup(struct inode * dir, struct dentry * dentry)
{
- struct task_struct *p;
-
if (dir->i_ino == PROC_ROOT_INO) { /* check for safety... */
- extern unsigned long total_forks;
- static int last_timestamp = 0;
-
- /*
- * this one can be a serious 'ps' performance problem if
- * there are many threads running - thus we do 'lazy'
- * link-recalculation - we change it only if the number
- * of threads has increased.
- */
- if (total_forks != last_timestamp) {
- int nlink = proc_root.nlink;
-
- read_lock(&tasklist_lock);
- last_timestamp = total_forks;
- for_each_task(p)
- nlink++;
- read_unlock(&tasklist_lock);
- /*
- * subtract the # of idle threads which
- * do not show up in /proc:
- */
- dir->i_nlink = nlink - smp_num_cpus;
- }
+ int nlink = proc_root.nlink;
+
+ nlink += nr_threads;
+
+ dir->i_nlink = nlink;
}
if (!proc_lookup(dir, dentry))
static LIST_HEAD(vfsmntlist);
static struct vfsmount *add_vfsmnt(struct super_block *sb,
- const char *dev_name, const char *dir_name)
+ struct dentry *mountpoint,
+ struct dentry *root,
+ struct vfsmount *parent,
+ const char *dev_name,
+ const char *dir_name)
{
struct vfsmount *mnt;
char *name;
goto out;
memset(mnt, 0, sizeof(struct vfsmount));
+ atomic_set(&mnt->mnt_count,1);
mnt->mnt_sb = sb;
mnt->mnt_dev = sb->s_dev;
+ mnt->mnt_mountpoint = dget(mountpoint);
+ mnt->mnt_root = dget(root);
+ mnt->mnt_parent = parent ? mntget(parent) : mnt;
/* N.B. Is it really OK to have a vfsmount without names? */
if (dev_name) {
}
}
+ list_add(&mnt->mnt_instances, &sb->s_mounts);
+ list_add(&mnt->mnt_clash, &mountpoint->d_vfsmnt);
list_add(&mnt->mnt_list, vfsmntlist.prev);
+ mountpoint->d_mounts = root;
+ root->d_covers = mountpoint;
out:
return mnt;
}
-void remove_vfsmnt(kdev_t dev)
+static void move_vfsmnt(struct vfsmount *mnt,
+ struct dentry *mountpoint,
+ struct vfsmount *parent,
+ const char *dev_name,
+ const char *dir_name)
{
- struct list_head *p, *next;
+ struct dentry *old_mountpoint = mnt->mnt_mountpoint;
+ struct vfsmount *old_parent = mnt->mnt_parent;
+ char *new_devname = NULL, *new_dirname = NULL;
- for (p = vfsmntlist.next; p != &vfsmntlist; p = next) {
- struct vfsmount *mnt = list_entry(p, struct vfsmount, mnt_list);
+ if (dev_name) {
+ new_devname = (char *) kmalloc(strlen(dev_name)+1, GFP_KERNEL);
+ if (new_devname)
+ strcpy(new_devname, dev_name);
+ }
+ if (dir_name) {
+ new_dirname = (char *) kmalloc(strlen(dir_name)+1, GFP_KERNEL);
+ if (new_dirname)
+ strcpy(new_dirname, dir_name);
+ }
- next = p->next;
- if (mnt->mnt_dev != dev)
- continue;
- list_del(&mnt->mnt_list);
- kfree(mnt->mnt_devname);
+ /* flip names */
+ if (new_dirname) {
kfree(mnt->mnt_dirname);
- kfree(mnt);
+ mnt->mnt_dirname = new_dirname;
}
+ if (new_devname) {
+ kfree(mnt->mnt_devname);
+ mnt->mnt_devname = new_devname;
+ }
+
+ /* flip the linkage */
+ mnt->mnt_mountpoint = dget(mountpoint);
+ mnt->mnt_parent = parent ? mntget(parent) : mnt;
+ list_del(&mnt->mnt_clash);
+ list_add(&mnt->mnt_clash, &mountpoint->d_vfsmnt);
+
+ /* put the old stuff */
+ old_mountpoint->d_mounts = old_mountpoint;
+ mountpoint->d_mounts = mnt->mnt_sb->s_root;
+ mnt->mnt_sb->s_root->d_covers = mountpoint;
+ dput(old_mountpoint);
+ if (old_parent != mnt)
+ mntput(old_parent);
+}
+
+static void remove_vfsmnt(struct vfsmount *mnt)
+{
+ struct dentry * root = mnt->mnt_sb->s_root;
+ struct dentry * covered = mnt->mnt_mountpoint;
+ /* First of all, remove it from all lists */
+ list_del(&mnt->mnt_instances);
+ list_del(&mnt->mnt_clash);
+ list_del(&mnt->mnt_list);
+ /* Now we can work safely */
+ if (mnt->mnt_parent != mnt)
+ mntput(mnt->mnt_parent);
+
+ root->d_covers = root;
+ covered->d_mounts = covered;
+
+ dput(mnt->mnt_mountpoint);
+ dput(mnt->mnt_root);
+ kfree(mnt->mnt_devname);
+ kfree(mnt->mnt_dirname);
+ kfree(mnt);
}
static struct proc_fs_info {
for (p = vfsmntlist.next; p!=&vfsmntlist && len < PAGE_SIZE - 160;
p = p->next) {
struct vfsmount *tmp = list_entry(p, struct vfsmount, mnt_list);
- if (!tmp->mnt_sb || !tmp->mnt_sb->s_root)
- continue;
- path = d_path(tmp->mnt_sb->s_root, tmp, buffer, PAGE_SIZE);
+ path = d_path(tmp->mnt_root, tmp, buffer, PAGE_SIZE);
if (!path)
continue;
len += sprintf( buf + len, "%s %s %s %s",
list_add (&s->s_list, super_blocks.prev);
init_waitqueue_head(&s->s_wait);
INIT_LIST_HEAD(&s->s_files);
+ INIT_LIST_HEAD(&s->s_mounts);
}
return s;
}
{
struct block_device *bdev;
kdev_t dev;
+ dput(sb->s_root);
+ sb->s_root = NULL;
lock_super(sb);
if (sb->s_op) {
if (sb->s_op->write_super && sb->s_dirt)
return 0;
}
-static int d_umount(struct super_block * sb)
+/*
+ * Doesn't take quota and stuff into account. IOW, in some cases it will
+ * give false negatives. The main reason why it's here is that we need
+ * a non-destructive way to look for easily umountable filesystems.
+ */
+ /* MOUNT_REWRITE: it should take vfsmount, not superblock */
+int may_umount(struct super_block *sb)
{
- struct dentry * root = sb->s_root;
- struct dentry * covered = root->d_covers;
+ struct dentry * root;
+ int count;
- if (root->d_count != 1)
- return -EBUSY;
+ root = sb->s_root;
- if (root->d_inode->i_state)
+ count = d_active_refs(root);
+ if (root->d_covers == root)
+ count--;
+ if (count != 2)
return -EBUSY;
- sb->s_root = NULL;
-
- if (covered != root) {
- root->d_covers = root;
- covered->d_mounts = covered;
- dput(covered);
- }
- dput(root);
return 0;
}
-static void d_mount(struct dentry *covered, struct dentry *dentry)
+static int do_umount(struct vfsmount *mnt, int umount_root, int flags)
{
- if (covered->d_mounts != covered) {
- printk("VFS: mount - already mounted\n");
- return;
+ struct super_block * sb = mnt->mnt_sb;
+ int count;
+
+ if (mnt == current->fs->rootmnt && !umount_root) {
+ int retval = 0;
+ /*
+ * Special case for "unmounting" root ...
+ * we just try to remount it readonly.
+ */
+ mntput(mnt);
+ if (!(sb->s_flags & MS_RDONLY))
+ retval = do_remount_sb(sb, MS_RDONLY, 0);
+ return retval;
}
- covered->d_mounts = dentry;
- dentry->d_covers = covered;
-}
-static struct block_device *do_umount(kdev_t dev, int unmount_root, int flags)
-{
- struct super_block * sb;
- struct block_device *bdev;
- int retval;
-
- retval = -ENOENT;
- sb = get_super(dev);
- if (!sb || !sb->s_root)
- goto out;
+ if (atomic_read(&mnt->mnt_count) > 2) {
+ mntput(mnt);
+ return -EBUSY;
+ }
+
+ if (mnt->mnt_instances.next != mnt->mnt_instances.prev) {
+ mntput(mnt);
+ remove_vfsmnt(mnt);
+ return 0;
+ }
/*
* Before checking whether the filesystem is still busy,
* are no quotas running any more. Just turn them on again.
*/
DQUOT_OFF(sb);
- acct_auto_close(dev);
+ acct_auto_close(sb->s_dev);
/*
* If we may have to abort operations to get out of this
* must return, and the like. Thats for the mount program to worry
* about for the moment.
*/
-
+
if( (flags&MNT_FORCE) && sb->s_op->umount_begin)
sb->s_op->umount_begin(sb);
* clean.
*/
shrink_dcache_sb(sb);
- fsync_dev(dev);
+ fsync_dev(sb->s_dev);
- if (sb == current->fs->root->d_sb && !unmount_root) {
- /*
- * Special case for "unmounting" root ...
- * we just try to remount it readonly.
- */
- retval = 0;
- if (!(sb->s_flags & MS_RDONLY))
- retval = do_remount_sb(sb, MS_RDONLY, 0);
- return ERR_PTR(retval);
- }
+ /* Something might grab it again - redo checks */
- retval = d_umount(sb);
- if (retval)
- goto out;
- remove_vfsmnt(dev);
- bdev = kill_super(sb, unmount_root);
-
- return bdev;
-
-out:
- return ERR_PTR(retval);
-}
-
-static int umount_dev(kdev_t dev, int flags)
-{
- int retval;
- struct block_device *bdev;
+ if (atomic_read(&mnt->mnt_count) > 2) {
+ mntput(mnt);
+ return -EBUSY;
+ }
+
+ /*
+ * OK, at that point we have only one instance. We should have
+ * one active reference from ->s_root, one active reference
+ * from ->mnt_root (which may be different) and possibly one
+ * active reference from ->mnt_mountpoint (if mnt->mnt_parent == mnt).
+ * Anything above that means that tree is busy.
+ */
- retval = -ENXIO;
- if (MAJOR(dev) >= MAX_BLKDEV)
- goto out;
+ count = d_active_refs(sb->s_root);
+ if (mnt->mnt_parent == mnt)
+ count--;
+ if (count != 2)
+ return -EBUSY;
- fsync_dev(dev);
+ if (sb->s_root->d_inode->i_state)
+ return -EBUSY;
- down(&mount_sem);
+ /* OK, that's the point of no return */
+ mntput(mnt);
+ remove_vfsmnt(mnt);
- bdev = do_umount(dev, 0, flags);
- if (IS_ERR(bdev))
- retval = PTR_ERR(bdev);
- else
- retval = 0;
- up(&mount_sem);
-out:
- return retval;
+ kill_super(sb, umount_root);
+ return 0;
}
/*
asmlinkage long sys_umount(char * name, int flags)
{
- struct dentry * dentry;
+ struct nameidata nd;
+ char *kname;
int retval;
+ struct super_block *sb;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
lock_kernel();
- dentry = namei(name);
- retval = PTR_ERR(dentry);
- if (!IS_ERR(dentry)) {
- struct inode * inode = dentry->d_inode;
- kdev_t dev = inode->i_rdev;
-
- retval = 0;
- if (S_ISBLK(inode->i_mode)) {
- if (IS_NODEV(inode))
- retval = -EACCES;
- } else {
- struct super_block *sb = inode->i_sb;
- retval = -EINVAL;
- if (sb && inode == sb->s_root->d_inode) {
- dev = sb->s_dev;
- retval = 0;
- }
- }
- dput(dentry);
-
- if (!retval)
- retval = umount_dev(dev, flags);
- }
+ kname = getname(name);
+ retval = PTR_ERR(kname);
+ if (IS_ERR(kname))
+ goto out;
+ retval = 0;
+ if (walk_init(kname, LOOKUP_POSITIVE|LOOKUP_FOLLOW, &nd))
+ retval = walk_name(kname, &nd);
+ putname(kname);
+ if (retval)
+ goto out;
+ sb = nd.dentry->d_inode->i_sb;
+ retval = -EINVAL;
+ if (nd.dentry!=nd.mnt->mnt_root)
+ goto dput_and_out;
+ dput(nd.dentry);
+ /* puts nd.mnt */
+ down(&mount_sem);
+ retval = do_umount(nd.mnt, 0, flags);
+ up(&mount_sem);
+ goto out;
+dput_and_out:
+ dput(nd.dentry);
+ mntput(nd.mnt);
+out:
unlock_kernel();
return retval;
}
return sys_umount(name,0);
}
-/*
- * Check whether we can mount the specified device.
- */
-int fs_may_mount(kdev_t dev)
-{
- struct super_block * sb = get_super(dev);
- int busy;
-
- busy = sb && sb->s_root &&
- (sb->s_root->d_count != 1 || sb->s_root->d_covers != sb->s_root);
- return !busy;
-}
-
/*
* change filesystem flags. dir should be a physical root of filesystem.
* If you've mounted a non-root directory somewhere and want to do remount
unsigned long new_flags, void *data_page)
{
struct file_system_type * fstype;
- struct dentry * dir_d;
+ struct nameidata nd;
struct vfsmount *mnt;
struct super_block *sb;
- int retval;
+ int retval = 0;
unsigned long flags = 0;
/* Basic sanity checks */
return -ENODEV;
/* ... and mountpoint. Do the lookup first to force automounting. */
- dir_d = lookup_dentry(dir_name, LOOKUP_FOLLOW|LOOKUP_POSITIVE);
- retval = PTR_ERR(dir_d);
- if (IS_ERR(dir_d))
+ if (walk_init(dir_name, LOOKUP_FOLLOW|LOOKUP_POSITIVE|LOOKUP_DIRECTORY, &nd))
+ retval = walk_name(dir_name, &nd);
+ if (retval)
goto fs_out;
/* get superblock, locks mount_sem on success */
goto dput_out;
retval = -ENOENT;
- if (d_unhashed(dir_d))
+ if (d_unhashed(nd.dentry))
goto fail;
- retval = -ENOTDIR;
- if (!S_ISDIR(dir_d->d_inode->i_mode))
- goto fail;
-
- retval = -EBUSY;
- if (dir_d->d_covers != dir_d)
- goto fail;
-
- /*
- * We may have slept while reading the super block,
- * so we check afterwards whether it's safe to mount.
- */
- retval = -EBUSY;
- if (!fs_may_mount(sb->s_dev))
- goto fail;
+ /* Something was mounted here while we slept */
+ while(d_mountpoint(nd.dentry) && follow_down(&nd.mnt, &nd.dentry))
+ ;
retval = -ENOMEM;
- mnt = add_vfsmnt(sb, dev_name, dir_name);
+ mnt = add_vfsmnt(sb, nd.dentry, sb->s_root, nd.mnt, dev_name, dir_name);
if (!mnt)
goto fail;
- d_mount(dget(dir_d), sb->s_root);
-
retval = 0;
unlock_out:
up(&mount_sem);
dput_out:
- dput(dir_d);
+ dput(nd.dentry);
+ mntput(nd.mnt);
fs_out:
put_filesystem(fstype);
return retval;
fail:
- dput(sb->s_root);
- sb->s_root = NULL;
- kill_super(sb, 0);
+ if (list_empty(&sb->s_mounts))
+ kill_super(sb, 0);
goto unlock_out;
}
{
struct file_system_type * fs_type;
struct super_block * sb;
- struct vfsmount *vfsmnt = NULL;
+ struct vfsmount *vfsmnt;
struct block_device *bdev = NULL;
mode_t mode;
int retval;
}
check_disk_change(ROOT_DEV);
+ sb = get_super(ROOT_DEV);
+ if (sb) {
+ fs_type = sb->s_type;
+ goto mount_it;
+ }
spin_lock(&file_systems_lock);
for (fs_type = file_systems ; fs_type ; fs_type = fs_type->next) {
if (!try_inc_mod_count(fs_type->owner))
continue;
spin_unlock(&file_systems_lock);
- sb = get_super(ROOT_DEV);
- if (sb) {
- /* Shouldn't we fail here? Oh, well... */
- sb->s_bdev = bdev;
- goto mount_it;
- }
sb = read_super(ROOT_DEV,bdev,fs_type,root_mountflags,NULL,1);
if (sb)
goto mount_it;
kdevname(ROOT_DEV));
mount_it:
- set_fs_root(current->fs, vfsmnt, sb->s_root);
- set_fs_pwd(current->fs, vfsmnt, sb->s_root);
printk ("VFS: Mounted root (%s filesystem)%s.\n",
fs_type->name,
(sb->s_flags & MS_RDONLY) ? " readonly" : "");
path + 5 + path_start, 0,
NULL, NULL);
memcpy (path + path_start, "/dev/", 5);
- vfsmnt = add_vfsmnt (sb, path + path_start,
- "/");
+ vfsmnt = add_vfsmnt (sb, sb->s_root, sb->s_root, NULL,
+ path + path_start, "/");
}
- else vfsmnt = add_vfsmnt (sb, "/dev/root", "/");
+ else
+ vfsmnt = add_vfsmnt (sb, sb->s_root, sb->s_root, NULL,
+ "/dev/root", "/");
if (vfsmnt) {
+ set_fs_root(current->fs, vfsmnt, sb->s_root);
+ set_fs_pwd(current->fs, vfsmnt, sb->s_root);
if (bdev)
bdput(bdev); /* sb holds a reference */
return;
{
struct dentry *root = current->fs->root;
struct vfsmount *root_mnt = current->fs->rootmnt;
- struct dentry *d_new_root, *d_put_old, *covered;
- struct dentry *root_dev_root, *new_root_dev_root;
- struct dentry *walk, *next;
- struct vfsmount *new_root_mnt = NULL;
+ struct vfsmount *tmp;
+ struct nameidata new_nd, old_nd;
+ char *name;
int error;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
lock_kernel();
- d_new_root = namei(new_root);
- if (IS_ERR(d_new_root)) {
- error = PTR_ERR(d_new_root);
+
+ name = getname(new_root);
+ error = PTR_ERR(name);
+ if (IS_ERR(name))
goto out0;
- }
- d_put_old = namei(put_old);
- if (IS_ERR(d_put_old)) {
- error = PTR_ERR(d_put_old);
+ error = 0;
+ if (walk_init(name, LOOKUP_POSITIVE|LOOKUP_FOLLOW|LOOKUP_DIRECTORY, &new_nd))
+ error = walk_name(name, &new_nd);
+ putname(name);
+ if (error)
+ goto out0;
+
+ name = getname(put_old);
+ error = PTR_ERR(name);
+ if (IS_ERR(name))
+ goto out0;
+ error = 0;
+ if (walk_init(name, LOOKUP_POSITIVE|LOOKUP_FOLLOW|LOOKUP_DIRECTORY, &old_nd))
+ error = walk_name(name, &old_nd);
+ putname(name);
+ if (error)
goto out1;
- }
+
down(&mount_sem);
- if (!d_new_root->d_inode || !d_put_old->d_inode) {
- error = -ENOENT;
+ error = -ENOENT;
+ if (d_unhashed(new_nd.dentry) || d_unhashed(old_nd.dentry))
goto out2;
- }
- if (!S_ISDIR(d_new_root->d_inode->i_mode) ||
- !S_ISDIR(d_put_old->d_inode->i_mode)) {
- error = -ENOTDIR;
- goto out2;
- }
error = -EBUSY;
- if (d_new_root->d_sb == root->d_sb || d_put_old->d_sb == root->d_sb)
+ if (new_nd.mnt == root_mnt || old_nd.mnt == root_mnt)
goto out2; /* loop */
- if (d_put_old != d_put_old->d_covers)
- goto out2; /* mount point is busy */
error = -EINVAL;
- walk = d_put_old; /* make sure we can reach put_old from new_root */
- for (;;) {
- next = walk->d_covers->d_parent;
- if (next == walk)
+ tmp = old_nd.mnt; /* make sure we can reach put_old from new_root */
+ if (tmp != new_nd.mnt) {
+ for (;;) {
+ if (tmp->mnt_parent == tmp)
+ goto out2;
+ if (tmp->mnt_parent == new_nd.mnt)
+ break;
+ tmp = tmp->mnt_parent;
+ }
+ if (!is_subdir(tmp->mnt_root, new_nd.dentry))
goto out2;
- if (next == d_new_root)
- break;
- walk = next;
- }
+ } else if (!is_subdir(old_nd.dentry, new_nd.dentry))
+ goto out2;
- new_root_dev_root = d_new_root->d_sb->s_root;
- covered = new_root_dev_root->d_covers;
- new_root_dev_root->d_covers = new_root_dev_root;
- dput(covered);
- covered->d_mounts = covered;
+ error = -ENOMEM;
+ name = __getname();
+ if (!name)
+ goto out2;
- root_dev_root = root->d_sb->s_root;
- root_dev_root->d_covers = dget(d_put_old);
- d_put_old->d_mounts = root_dev_root;
- chroot_fs_refs(root,root_mnt,d_new_root,new_root_mnt);
+ move_vfsmnt(new_nd.mnt, new_nd.dentry, NULL, NULL, "/");
+ move_vfsmnt(root_mnt, old_nd.dentry, old_nd.mnt, NULL,
+ __d_path(old_nd.dentry, old_nd.mnt, new_nd.dentry,
+ new_nd.mnt, name, PAGE_SIZE));
+ putname(name);
+ chroot_fs_refs(root,root_mnt,new_nd.dentry,new_nd.mnt);
error = 0;
out2:
up(&mount_sem);
- dput(d_put_old);
+ dput(old_nd.dentry);
+ mntput(old_nd.mnt);
out1:
- dput(d_new_root);
+ dput(new_nd.dentry);
+ mntput(new_nd.mnt);
out0:
unlock_kernel();
return error;
int __init change_root(kdev_t new_root_dev,const char *put_old)
{
- kdev_t old_root_dev;
- struct vfsmount *vfsmnt;
- struct dentry *old_root,*old_pwd,*dir_d = NULL;
- int error;
+ kdev_t old_root_dev = ROOT_DEV;
+ struct vfsmount *old_rootmnt = mntget(current->fs->rootmnt);
+ struct nameidata devfs_nd, nd;
+ int error = 0;
- old_root = current->fs->root;
- old_pwd = current->fs->pwd;
- old_root_dev = ROOT_DEV;
- if (!fs_may_mount(new_root_dev)) {
- printk(KERN_CRIT "New root is busy. Staying in initrd.\n");
- return -EBUSY;
- }
/* First unmount devfs if mounted */
- dir_d = lookup_dentry ("/dev", LOOKUP_FOLLOW|LOOKUP_POSITIVE);
- if (!IS_ERR(dir_d)) {
- struct super_block *sb = dir_d->d_inode->i_sb;
-
- if (sb && (dir_d->d_inode == sb->s_root->d_inode) &&
- (sb->s_magic == DEVFS_SUPER_MAGIC)) {
- dput (dir_d);
- do_umount (sb->s_dev, 0, 0);
+ if (walk_init("/dev", LOOKUP_FOLLOW|LOOKUP_POSITIVE, &devfs_nd))
+ error = walk_name("/dev", &devfs_nd);
+ if (!error) {
+ struct super_block *sb = devfs_nd.dentry->d_inode->i_sb;
+
+ if (devfs_nd.mnt->mnt_sb->s_magic == DEVFS_SUPER_MAGIC &&
+ devfs_nd.dentry == devfs_nd.mnt->mnt_root) {
+ dput(devfs_nd.dentry);
+ down(&mount_sem);
+ /* puts devfs_nd.mnt */
+ do_umount(devfs_nd.mnt, 0, 0);
+ up(&mount_sem);
+ } else {
+ dput(devfs_nd.dentry);
+ mntput(devfs_nd.mnt);
}
- else dput (dir_d);
}
ROOT_DEV = new_root_dev;
mount_root();
- dput(old_root);
- dput(old_pwd);
#if 1
shrink_dcache();
printk("change_root: old root has d_count=%d\n", old_root->d_count);
/*
* Get the new mount directory
*/
- dir_d = lookup_dentry(put_old, LOOKUP_FOLLOW|LOOKUP_POSITIVE);
- if (IS_ERR(dir_d)) {
- error = PTR_ERR(dir_d);
- } else {
- error = 0;
- }
- if (!error && dir_d->d_covers != dir_d) {
- dput(dir_d);
- error = -EBUSY;
- }
- if (!error && !S_ISDIR(dir_d->d_inode->i_mode)) {
- dput(dir_d);
- error = -ENOTDIR;
- }
+ error = 0;
+ if (walk_init(put_old, LOOKUP_FOLLOW|LOOKUP_POSITIVE|LOOKUP_DIRECTORY, &nd))
+ error = walk_name(put_old, &nd);
if (error) {
- struct block_device *bdev;
+ int blivet;
printk(KERN_NOTICE "Trying to unmount old root ... ");
- bdev = do_umount(old_root_dev,1, 0);
- if (!IS_ERR(bdev)) {
+ blivet = do_umount(old_rootmnt, 1, 0);
+ if (!blivet) {
printk("okay\n");
return 0;
}
- printk(KERN_ERR "error %ld\n",PTR_ERR(bdev));
+	printk(KERN_ERR "error %d\n",blivet);
return error;
}
- remove_vfsmnt(old_root_dev);
- vfsmnt = add_vfsmnt(old_root->d_sb, "/dev/root.old", put_old);
- if (vfsmnt) {
- d_mount(dir_d,old_root);
- return 0;
- }
- printk(KERN_CRIT "Trouble: add_vfsmnt failed\n");
- return -ENOMEM;
+ move_vfsmnt(old_rootmnt, nd.dentry, nd.mnt, "/dev/root.old", put_old);
+ mntput(old_rootmnt);
+ dput(nd.dentry);
+ mntput(nd.mnt);
+ return 0;
}
#endif
*/
#define __HAVE_ARCH_STRCPY
-extern inline char * strcpy(char * dest,const char *src)
+static inline char * strcpy(char * dest,const char *src)
{
int d0, d1, d2;
__asm__ __volatile__(
}
#define __HAVE_ARCH_STRNCPY
-extern inline char * strncpy(char * dest,const char *src,size_t count)
+static inline char * strncpy(char * dest,const char *src,size_t count)
{
int d0, d1, d2, d3;
__asm__ __volatile__(
}
#define __HAVE_ARCH_STRCAT
-extern inline char * strcat(char * dest,const char * src)
+static inline char * strcat(char * dest,const char * src)
{
int d0, d1, d2, d3;
__asm__ __volatile__(
}
#define __HAVE_ARCH_STRNCAT
-extern inline char * strncat(char * dest,const char * src,size_t count)
+static inline char * strncat(char * dest,const char * src,size_t count)
{
int d0, d1, d2, d3;
__asm__ __volatile__(
}
#define __HAVE_ARCH_STRCMP
-extern inline int strcmp(const char * cs,const char * ct)
+static inline int strcmp(const char * cs,const char * ct)
{
int d0, d1;
register int __res;
}
#define __HAVE_ARCH_STRNCMP
-extern inline int strncmp(const char * cs,const char * ct,size_t count)
+static inline int strncmp(const char * cs,const char * ct,size_t count)
{
register int __res;
int d0, d1, d2;
}
#define __HAVE_ARCH_STRCHR
-extern inline char * strchr(const char * s, int c)
+static inline char * strchr(const char * s, int c)
{
int d0;
register char * __res;
}
#define __HAVE_ARCH_STRRCHR
-extern inline char * strrchr(const char * s, int c)
+static inline char * strrchr(const char * s, int c)
{
int d0, d1;
register char * __res;
}
#define __HAVE_ARCH_STRLEN
-extern inline size_t strlen(const char * s)
+static inline size_t strlen(const char * s)
{
int d0;
register int __res;
return __res;
}
-extern inline void * __memcpy(void * to, const void * from, size_t n)
+static inline void * __memcpy(void * to, const void * from, size_t n)
{
int d0, d1, d2;
__asm__ __volatile__(
* This looks horribly ugly, but the compiler can optimize it totally,
* as the count is constant.
*/
-extern inline void * __constant_memcpy(void * to, const void * from, size_t n)
+static inline void * __constant_memcpy(void * to, const void * from, size_t n)
{
switch (n) {
case 0:
* This CPU favours 3DNow strongly (eg AMD Athlon)
*/
-extern inline void * __constant_memcpy3d(void * to, const void * from, size_t len)
+static inline void * __constant_memcpy3d(void * to, const void * from, size_t len)
{
if(len<512 || in_interrupt())
return __constant_memcpy(to, from, len);
})
#define __HAVE_ARCH_MEMMOVE
-extern inline void * memmove(void * dest,const void * src, size_t n)
+static inline void * memmove(void * dest,const void * src, size_t n)
{
int d0, d1, d2;
if (dest<src)
#define memcmp __builtin_memcmp
#define __HAVE_ARCH_MEMCHR
-extern inline void * memchr(const void * cs,int c,size_t count)
+static inline void * memchr(const void * cs,int c,size_t count)
{
int d0;
register void * __res;
return __res;
}
-extern inline void * __memset_generic(void * s, char c,size_t count)
+static inline void * __memset_generic(void * s, char c,size_t count)
{
int d0, d1;
__asm__ __volatile__(
* things 32 bits at a time even when we don't know the size of the
* area at compile-time..
*/
-extern inline void * __constant_c_memset(void * s, unsigned long c, size_t count)
+static inline void * __constant_c_memset(void * s, unsigned long c, size_t count)
{
int d0, d1;
__asm__ __volatile__(
/* Added by Gertjan van Wingerde to make minix and sysv module work */
#define __HAVE_ARCH_STRNLEN
-extern inline size_t strnlen(const char * s, size_t count)
+static inline size_t strnlen(const char * s, size_t count)
{
int d0;
register int __res;
/* end of additional stuff */
#define __HAVE_ARCH_STRSTR
-extern inline char * strstr(const char * cs,const char * ct)
+static inline char * strstr(const char * cs,const char * ct)
{
int d0, d1;
register char * __res;
* This looks horribly ugly, but the compiler can optimize it totally,
* as we by now know that both pattern and count is constant..
*/
-extern inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count)
+static inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count)
{
switch (count) {
case 0:
* find the first occurrence of byte 'c', or 1 past the area if none
*/
#define __HAVE_ARCH_MEMSCAN
-extern inline void * memscan(void * addr, int c, size_t size)
+static inline void * memscan(void * addr, int c, size_t size)
{
if (!size)
return addr;
#include <asm/page.h>
#define __HAVE_ARCH_STRCPY
-extern inline char * strcpy(char * dest,const char *src)
+static inline char * strcpy(char * dest,const char *src)
{
char *xdest = dest;
}
#define __HAVE_ARCH_STRNCPY
-extern inline char * strncpy(char *dest, const char *src, size_t n)
+static inline char * strncpy(char *dest, const char *src, size_t n)
{
char *xdest = dest;
}
#define __HAVE_ARCH_STRCAT
-extern inline char * strcat(char * dest, const char * src)
+static inline char * strcat(char * dest, const char * src)
{
char *tmp = dest;
}
#define __HAVE_ARCH_STRNCAT
-extern inline char * strncat(char *dest, const char *src, size_t count)
+static inline char * strncat(char *dest, const char *src, size_t count)
{
char *tmp = dest;
}
#define __HAVE_ARCH_STRCHR
-extern inline char * strchr(const char * s, int c)
+static inline char * strchr(const char * s, int c)
{
const char ch = c;
}
#define __HAVE_ARCH_STRPBRK
-extern inline char * strpbrk(const char * cs,const char * ct)
+static inline char * strpbrk(const char * cs,const char * ct)
{
const char *sc1,*sc2;
}
#define __HAVE_ARCH_STRSPN
-extern inline size_t strspn(const char *s, const char *accept)
+static inline size_t strspn(const char *s, const char *accept)
{
const char *p;
const char *a;
}
#define __HAVE_ARCH_STRTOK
-extern inline char * strtok(char * s,const char * ct)
+static inline char * strtok(char * s,const char * ct)
{
char *sbegin, *send;
/* strstr !! */
#define __HAVE_ARCH_STRLEN
-extern inline size_t strlen(const char * s)
+static inline size_t strlen(const char * s)
{
const char *sc;
for (sc = s; *sc != '\0'; ++sc) ;
/* strnlen !! */
#define __HAVE_ARCH_STRCMP
-extern inline int strcmp(const char * cs,const char * ct)
+static inline int strcmp(const char * cs,const char * ct)
{
char __res;
}
#define __HAVE_ARCH_STRNCMP
-extern inline int strncmp(const char * cs,const char * ct,size_t count)
+static inline int strncmp(const char * cs,const char * ct,size_t count)
{
char __res;
* 680[46]0 doesn't really care due to their copy-back caches.
* 10/09/96 - Jes Sorensen
*/
-extern inline void * __memset_g(void * s, int c, size_t count)
+static inline void * __memset_g(void * s, int c, size_t count)
{
void *xs = s;
size_t temp;
* caveat is that the destination address must be 16-byte aligned.
* 01/09/96 - Jes Sorensen
*/
-extern inline void * __memset_page(void * s,int c,size_t count)
+static inline void * __memset_page(void * s,int c,size_t count)
{
unsigned long data, tmp;
void *xs, *sp;
* both source and destination must be 16-byte aligned, if not we fall
* back to the generic memcpy function. - Jes
*/
-extern inline void * __memcpy_page(void * to, const void * from, size_t count)
+static inline void * __memcpy_page(void * to, const void * from, size_t count)
{
unsigned long tmp;
void *xto = to;
memcpy((to),(from),(n)))
#define __HAVE_ARCH_MEMMOVE
-extern inline void * memmove(void * dest,const void * src, size_t n)
+static inline void * memmove(void * dest,const void * src, size_t n)
{
void *xdest = dest;
size_t temp;
#define __HAVE_ARCH_MEMCPY
-extern inline void *__constant_memcpy(void *to, const void *from, __kernel_size_t n)
+static inline void *__constant_memcpy(void *to, const void *from, __kernel_size_t n)
{
extern void __copy_1page(void *, const void *);
return to;
}
-extern inline void *__nonconstant_memcpy(void *to, const void *from, __kernel_size_t n)
+static inline void *__nonconstant_memcpy(void *to, const void *from, __kernel_size_t n)
{
__memcpy(to, from, n);
return to;
#define __HAVE_ARCH_MEMSET
-extern inline void *__constant_c_and_count_memset(void *s, char c, __kernel_size_t count)
+static inline void *__constant_c_and_count_memset(void *s, char c, __kernel_size_t count)
{
extern void bzero_1page(void *);
extern __kernel_size_t __bzero(void *, __kernel_size_t);
return s;
}
-extern inline void *__constant_c_memset(void *s, char c, __kernel_size_t count)
+static inline void *__constant_c_memset(void *s, char c, __kernel_size_t count)
{
extern __kernel_size_t __bzero(void *, __kernel_size_t);
return s;
}
-extern inline void *__nonconstant_memset(void *s, char c, __kernel_size_t count)
+static inline void *__nonconstant_memset(void *s, char c, __kernel_size_t count)
{
__memset(s, c, count);
return s;
extern int __strncmp(const char *, const char *, __kernel_size_t);
-extern inline int __constant_strncmp(const char *src, const char *dest, __kernel_size_t count)
+static inline int __constant_strncmp(const char *src, const char *dest, __kernel_size_t count)
{
register int retval;
switch(count) {
#define __HAVE_ARCH_MEMCPY
-extern inline void *__constant_memcpy(void *to, const void *from, __kernel_size_t n)
+static inline void *__constant_memcpy(void *to, const void *from, __kernel_size_t n)
{
if(n) {
if(n <= 32) {
return to;
}
-extern inline void *__nonconstant_memcpy(void *to, const void *from, __kernel_size_t n)
+static inline void *__nonconstant_memcpy(void *to, const void *from, __kernel_size_t n)
{
__memcpy(to, from, n);
return to;
#define __HAVE_ARCH_MEMSET
-extern inline void *__constant_memset(void *s, int c, __kernel_size_t count)
+static inline void *__constant_memset(void *s, int c, __kernel_size_t count)
{
extern __kernel_size_t __bzero(void *, __kernel_size_t);
extern int __strncmp(const char *, const char *, __kernel_size_t);
-extern inline int __constant_strncmp(const char *src, const char *dest, __kernel_size_t count)
+static inline int __constant_strncmp(const char *src, const char *dest, __kernel_size_t count)
{
register int retval;
switch(count) {
struct dentry * d_parent; /* parent directory */
struct dentry * d_mounts; /* mount information */
struct dentry * d_covers;
+ struct list_head d_vfsmnt;
struct list_head d_hash; /* lookup hash list */
struct list_head d_lru; /* d_count = 0 LRU list */
struct list_head d_child; /* child of parent list */
extern struct dentry * d_alloc_root(struct inode *);
/* test whether root is busy without destroying dcache */
-extern int is_root_busy(struct dentry *);
+extern int d_active_refs(struct dentry *);
/* test whether we have any submounts in a subdir tree */
extern int have_submounts(struct dentry *);
extern void dput(struct dentry *);
-/* MOUNT_REWRITE: replace with the check for d_vfsmnt */
static __inline__ int d_mountpoint(struct dentry *dentry)
{
- return dentry != dentry->d_mounts;
+ return !list_empty(&dentry->d_vfsmnt);
}
* Check whether the specified task has the fd open. Since the task
* may not have a files_struct, we must test for p->files != NULL.
*/
-extern inline struct file * fcheck_task(struct task_struct *p, unsigned int fd)
+static inline struct file * fcheck_task(struct task_struct *p, unsigned int fd)
{
struct file * file = NULL;
/*
* Check whether the specified fd has an open file.
*/
-extern inline struct file * fcheck(unsigned int fd)
+static inline struct file * fcheck(unsigned int fd)
{
struct file * file = NULL;
struct files_struct *files = current->files;
return file;
}
-extern inline struct file * frip(struct files_struct *files, unsigned int fd)
+static inline struct file * frip(struct files_struct *files, unsigned int fd)
{
struct file * file = NULL;
return file;
}
-extern inline struct file * fget(unsigned int fd)
+static inline struct file * fget(unsigned int fd)
{
struct file * file = NULL;
struct files_struct *files = current->files;
/*
* 23/12/1998 Marcin Dalecki <dalecki@cs.net.pl>:
*
- * Since those functions where calling other functions, it was compleatly
- * bogous to make them all "extern inline".
+ * Since those functions were calling other functions, it was completely
+ * bogus to make them all "static inline".
*
* The removal of this pseudo optimization saved me scandaleous:
*
* I suspect there are many other similar "optimizations" across the
* kernel...
*/
-extern inline void fput(struct file * file)
+static inline void fput(struct file * file)
{
if (atomic_dec_and_test(&file->f_count))
_fput(file);
* fput() the struct file we are about to overwrite in this case.
*/
-extern inline void fd_install(unsigned int fd, struct file * file)
+static inline void fd_install(unsigned int fd, struct file * file)
{
struct files_struct *files = current->files;
struct file * result;
struct sock_filter insns[0];
};
-extern __inline__ unsigned int sk_filter_len(struct sk_filter *fp)
+static inline unsigned int sk_filter_len(struct sk_filter *fp)
{
return fp->len*sizeof(struct sock_filter) + sizeof(*fp);
}
struct list_head s_files;
struct block_device *s_bdev;
+ struct list_head s_mounts; /* vfsmount(s) of this one */
struct quota_mount_options s_dquot; /* Diskquota specific options */
union {
extern int register_filesystem(struct file_system_type *);
extern int unregister_filesystem(struct file_system_type *);
+extern int may_umount(struct super_block *);
static inline int vfs_statfs(struct super_block *sb, struct statfs *buf)
{
return 0;
}
-extern inline int locks_verify_area(int read_write, struct inode *inode,
+static inline int locks_verify_area(int read_write, struct inode *inode,
struct file *filp, loff_t offset,
size_t count)
{
return 0;
}
-extern inline int locks_verify_truncate(struct inode *inode,
+static inline int locks_verify_truncate(struct inode *inode,
struct file *filp,
loff_t size)
{
extern struct file_operations rdwr_pipe_fops;
extern int fs_may_remount_ro(struct super_block *);
-extern int fs_may_mount(kdev_t);
extern int try_to_free_buffers(struct page *);
extern void refile_buffer(struct buffer_head * buf);
/*
* This is called by bh->b_end_io() handlers when I/O has completed.
*/
-extern inline void mark_buffer_uptodate(struct buffer_head * bh, int on)
+static inline void mark_buffer_uptodate(struct buffer_head * bh, int on)
{
if (on)
set_bit(BH_Uptodate, &bh->b_state);
#define atomic_set_buffer_clean(bh) test_and_clear_bit(BH_Dirty, &(bh)->b_state)
-extern inline void __mark_buffer_clean(struct buffer_head *bh)
+static inline void __mark_buffer_clean(struct buffer_head *bh)
{
refile_buffer(bh);
}
-extern inline void mark_buffer_clean(struct buffer_head * bh)
+static inline void mark_buffer_clean(struct buffer_head * bh)
{
if (atomic_set_buffer_clean(bh))
__mark_buffer_clean(bh);
#define atomic_set_buffer_protected(bh) test_and_set_bit(BH_Protected, &(bh)->b_state)
-extern inline void __mark_buffer_protected(struct buffer_head *bh)
+static inline void __mark_buffer_protected(struct buffer_head *bh)
{
refile_buffer(bh);
}
-extern inline void mark_buffer_protected(struct buffer_head * bh)
+static inline void mark_buffer_protected(struct buffer_head * bh)
{
if (!atomic_set_buffer_protected(bh))
__mark_buffer_protected(bh);
extern struct dentry * lookup_dentry(const char *, unsigned int);
extern int walk_init(const char *, unsigned, struct nameidata *);
extern int walk_name(const char *, struct nameidata *);
+extern int follow_down(struct vfsmount **, struct dentry **);
extern struct dentry * lookup_one(const char *, struct dentry *);
extern struct dentry * __namei(const char *, unsigned int);
extern void ll_rw_block(int, int, struct buffer_head * bh[]);
extern int is_read_only(kdev_t);
extern void __brelse(struct buffer_head *);
-extern inline void brelse(struct buffer_head *buf)
+static inline void brelse(struct buffer_head *buf)
{
if (buf)
__brelse(buf);
}
extern void __bforget(struct buffer_head *);
-extern inline void bforget(struct buffer_head *buf)
+static inline void bforget(struct buffer_head *buf)
{
if (buf)
__bforget(buf);
extern struct super_block *get_super(kdev_t);
struct super_block *get_empty_super(void);
-void remove_vfsmnt(kdev_t dev);
extern void put_super(kdev_t);
unsigned long generate_cluster(kdev_t, int b[], int);
unsigned long generate_cluster_swab32(kdev_t, int b[], int);
struct vfsmount *old_rootmnt = fs->rootmnt;
fs->rootmnt = mntget(mnt);
fs->root = dget(dentry);
- dput(old_root);
- mntput(old_rootmnt);
+ if (old_root) {
+ dput(old_root);
+ mntput(old_rootmnt);
+ }
}
/*
struct vfsmount *old_pwdmnt = fs->pwdmnt;
fs->pwdmnt = mntget(mnt);
fs->pwd = dget(dentry);
- dput(old_pwd);
- mntput(old_pwdmnt);
+ if (old_pwd) {
+ dput(old_pwd);
+ mntput(old_pwdmnt);
+ }
}
struct fs_struct *copy_fs_struct(struct fs_struct *old);
#include <linux/devfs_fs_kernel.h>
#define NBITS(x) ((((x)-1)/BITS_PER_LONG)+1)
-#define BIT(x) (1<<((x)%BITS_PER_LONG))
+#define BIT(x) (1UL<<((x)%BITS_PER_LONG))
#define LONG(x) ((x)/BITS_PER_LONG)
struct input_dev {
extern struct page * alloc_pages_node(int nid, int gfp_mask, unsigned long order);
#ifndef CONFIG_DISCONTIGMEM
-extern inline struct page * alloc_pages(int gfp_mask, unsigned long order)
+static inline struct page * alloc_pages(int gfp_mask, unsigned long order)
{
/* temporary check. */
if (contig_page_data.node_zonelists[gfp_mask].gfp_mask != (gfp_mask))
#define alloc_page(gfp_mask) \
alloc_pages(gfp_mask, 0)
-extern inline unsigned long __get_free_pages (int gfp_mask, unsigned long order)
+static inline unsigned long __get_free_pages (int gfp_mask, unsigned long order)
{
struct page * page;
#define __get_dma_pages(gfp_mask, order) \
__get_free_pages((gfp_mask) | GFP_DMA,(order))
-extern inline unsigned long get_zeroed_page(int gfp_mask)
+static inline unsigned long get_zeroed_page(int gfp_mask)
{
unsigned long page;
*/
extern void FASTCALL(__free_pages_ok(struct page * page, unsigned long order));
-extern inline void __free_pages(struct page *page, unsigned long order)
+static inline void __free_pages(struct page *page, unsigned long order)
{
if (!put_page_testzero(page))
return;
#define __free_page(page) __free_pages(page, 0)
-extern inline void free_pages(unsigned long addr, unsigned long order)
+static inline void free_pages(unsigned long addr, unsigned long order)
{
unsigned long map_nr;
unsigned long len, unsigned long prot,
unsigned long flag, unsigned long pgoff);
-extern inline unsigned long do_mmap(struct file *file, unsigned long addr,
+static inline unsigned long do_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot,
unsigned long flag, unsigned long offset)
{
*/
#ifndef _LINUX_MOUNT_H
#define _LINUX_MOUNT_H
+#ifdef __KERNEL__
struct vfsmount
{
+ struct dentry *mnt_mountpoint; /* dentry of mountpoint */
+ struct dentry *mnt_root; /* root of the mounted tree */
+ struct vfsmount *mnt_parent; /* fs we are mounted on */
+ struct list_head mnt_instances; /* other vfsmounts of the same fs */
+ struct list_head mnt_clash; /* those who are mounted on (other */
+ /* instances) of the same dentry */
+ struct super_block *mnt_sb; /* pointer to superblock */
+ atomic_t mnt_count;
+
kdev_t mnt_dev; /* Device this applies to */
char *mnt_devname; /* Name of device e.g. /dev/dsk/hda1 */
char *mnt_dirname; /* Name of directory mounted on */
- struct super_block *mnt_sb; /* pointer to superblock */
struct list_head mnt_list;
};
-/* MOUNT_REWRITE: fill these */
static inline struct vfsmount *mntget(struct vfsmount *mnt)
{
+ atomic_inc(&mnt->mnt_count);
return mnt;
}
static inline void mntput(struct vfsmount *mnt)
{
+ if (atomic_dec_and_test(&mnt->mnt_count))
+ BUG();
}
+#endif
#endif /* _LINUX_MOUNT_H */
IPS_EXPECTED = 0x01,
/* We've seen packets both ways: bit 1 set. Can be set, not unset. */
- IPS_SEEN_REPLY = 0x02
+ IPS_SEEN_REPLY = 0x02,
+
+ /* Packet seen leaving box: bit 2 set. Can be set, not unset. */
+ IPS_CONFIRMED = 0x04
};
struct ip_conntrack_expect
struct ip_conntrack_tuple_hash tuplehash[IP_CT_DIR_MAX];
/* Have we seen traffic both ways yet? (bitset) */
- unsigned int status;
+ volatile unsigned int status;
/* Timer function; drops refcnt when it goes off. */
struct timer_list timeout;
extern struct ip_conntrack_protocol *__find_proto(u_int8_t protocol);
extern struct list_head protocol_list;
-/* Returns TRUE if it dealt with ICMP, and filled in skb->nfct */
-int icmp_error_track(struct sk_buff *skb);
+/* Returns conntrack if it dealt with ICMP, and filled in skb->nfct */
+extern struct ip_conntrack *icmp_error_track(struct sk_buff *skb,
+ enum ip_conntrack_info *ctinfo);
extern int get_tuple(const struct iphdr *iph, size_t len,
struct ip_conntrack_tuple *tuple,
struct ip_conntrack_protocol *protocol);
ip_conntrack_find_get(const struct ip_conntrack_tuple *tuple,
const struct ip_conntrack *ignored_conntrack);
+/* Confirm a connection */
+void ip_conntrack_confirm(struct ip_conntrack *ct);
+
extern unsigned int ip_conntrack_htable_size;
extern struct list_head *ip_conntrack_hash;
extern struct list_head expect_list;
unsigned int matchinfosize,
unsigned int hook_mask);
+ /* Called when entry of this type deleted. */
+ void (*destroy)(void *matchinfo, unsigned int matchinfosize);
+
/* Set this to THIS_MODULE if you are a module, otherwise NULL */
struct module *me;
};
unsigned int targinfosize,
unsigned int hook_mask);
+ /* Called when entry of this type deleted. */
+ void (*destroy)(void *targinfo, unsigned int targinfosize);
+
/* Set this to THIS_MODULE if you are a module, otherwise NULL */
struct module *me;
};
#define pid_hashfn(x) ((((x) >> 8) ^ (x)) & (PIDHASH_SZ - 1))
-extern __inline__ void hash_pid(struct task_struct *p)
+static inline void hash_pid(struct task_struct *p)
{
struct task_struct **htable = &pidhash[pid_hashfn(p->pid)];
p->pidhash_pprev = htable;
}
-extern __inline__ void unhash_pid(struct task_struct *p)
+static inline void unhash_pid(struct task_struct *p)
{
if(p->pidhash_next)
p->pidhash_next->pidhash_pprev = p->pidhash_pprev;
*p->pidhash_pprev = p->pidhash_next;
}
-extern __inline__ struct task_struct *find_task_by_pid(int pid)
+static inline struct task_struct *find_task_by_pid(int pid)
{
struct task_struct *p, **htable = &pidhash[pid_hashfn(pid)];
extern int do_sigaction(int, const struct k_sigaction *, struct k_sigaction *);
extern int do_sigaltstack(const stack_t *, stack_t *, unsigned long);
-extern inline int signal_pending(struct task_struct *p)
+static inline int signal_pending(struct task_struct *p)
{
return (p->sigpending != 0);
}
* These will be removed, but in the mean time, when the SECURE_NOROOT
* flag is set, uids don't grant privilege.
*/
-extern inline int suser(void)
+static inline int suser(void)
{
if (!issecure(SECURE_NOROOT) && current->euid == 0) {
current->flags |= PF_SUPERPRIV;
return 0;
}
-extern inline int fsuser(void)
+static inline int fsuser(void)
{
if (!issecure(SECURE_NOROOT) && current->fsuid == 0) {
current->flags |= PF_SUPERPRIV;
* fsuser(). See include/linux/capability.h for defined capabilities.
*/
-extern inline int capable(int cap)
+static inline int capable(int cap)
{
#if 1 /* ok now */
if (cap_raised(current->cap_effective, cap))
extern int do_execve(char *, char **, char **, struct pt_regs *);
extern int do_fork(unsigned long, unsigned long, struct pt_regs *);
-extern inline void add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
+static inline void add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
{
unsigned long flags;
wq_write_unlock_irqrestore(&q->lock, flags);
}
-extern inline void add_wait_queue_exclusive(wait_queue_head_t *q,
+static inline void add_wait_queue_exclusive(wait_queue_head_t *q,
wait_queue_t * wait)
{
unsigned long flags;
wq_write_unlock_irqrestore(&q->lock, flags);
}
-extern inline void remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
+static inline void remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
{
unsigned long flags;
p->run_list.next = NULL;
}
-extern inline int task_on_runqueue(struct task_struct *p)
+static inline int task_on_runqueue(struct task_struct *p)
{
return (p->run_list.next != NULL);
}
-extern inline void unhash_process(struct task_struct *p)
+static inline void unhash_process(struct task_struct *p)
{
if (task_on_runqueue(p)) BUG();
write_lock_irq(&tasklist_lock);
#endif
extern char * ___strtok;
+extern char * strpbrk(const char *,const char *);
+extern char * strtok(char *,const char *);
+extern char * strsep(char **,const char *);
+extern __kernel_size_t strspn(const char *,const char *);
+
+
+/*
+ * Include machine specific inline routines
+ */
+#include <asm/string.h>
+
+#ifndef __HAVE_ARCH_STRCPY
extern char * strcpy(char *,const char *);
+#endif
+#ifndef __HAVE_ARCH_STRNCPY
extern char * strncpy(char *,const char *, __kernel_size_t);
+#endif
+#ifndef __HAVE_ARCH_STRCAT
extern char * strcat(char *, const char *);
+#endif
+#ifndef __HAVE_ARCH_STRNCAT
extern char * strncat(char *, const char *, __kernel_size_t);
+#endif
+#ifndef __HAVE_ARCH_STRCMP
+extern int strcmp(const char *,const char *);
+#endif
+#ifndef __HAVE_ARCH_STRNCMP
+extern int strncmp(const char *,const char *,__kernel_size_t);
+#endif
+#ifndef __HAVE_ARCH_STRNICMP
+extern int strnicmp(const char *, const char *, __kernel_size_t);
+#endif
+#ifndef __HAVE_ARCH_STRCHR
extern char * strchr(const char *,int);
+#endif
+#ifndef __HAVE_ARCH_STRRCHR
extern char * strrchr(const char *,int);
-extern char * strpbrk(const char *,const char *);
-extern char * strtok(char *,const char *);
-extern char * strsep(char **,const char *);
+#endif
+#ifndef __HAVE_ARCH_STRSTR
extern char * strstr(const char *,const char *);
+#endif
+#ifndef __HAVE_ARCH_STRLEN
extern __kernel_size_t strlen(const char *);
+#endif
+#ifndef __HAVE_ARCH_STRNLEN
extern __kernel_size_t strnlen(const char *,__kernel_size_t);
-extern __kernel_size_t strspn(const char *,const char *);
-extern int strcmp(const char *,const char *);
-extern int strncmp(const char *,const char *,__kernel_size_t);
-extern int strnicmp(const char *, const char *, __kernel_size_t);
+#endif
+#ifndef __HAVE_ARCH_MEMSET
extern void * memset(void *,int,__kernel_size_t);
+#endif
+#ifndef __HAVE_ARCH_MEMCPY
extern void * memcpy(void *,const void *,__kernel_size_t);
+#endif
+#ifndef __HAVE_ARCH_MEMMOVE
extern void * memmove(void *,const void *,__kernel_size_t);
+#endif
+#ifndef __HAVE_ARCH_MEMSCAN
extern void * memscan(void *,int,__kernel_size_t);
+#endif
+#ifndef __HAVE_ARCH_MEMCMP
extern int memcmp(const void *,const void *,__kernel_size_t);
+#endif
+#ifndef __HAVE_ARCH_MEMCHR
extern void * memchr(const void *,int,__kernel_size_t);
-
-/*
- * Include machine specific inline routines
- */
-#include <asm/string.h>
+#endif
#ifdef __cplusplus
}
#define USB_PID_OUT 0xe1
#define USB_PID_ACK 0xd2
#define USB_PID_DATA0 0xc3
-#define USB_PID_UNDEF_4 0xb4
+#define USB_PID_PING 0xb4 /* USB 2.0 */
#define USB_PID_SOF 0xa5
-#define USB_PID_UNDEF_6 0x96
-#define USB_PID_UNDEF_7 0x87
-#define USB_PID_UNDEF_8 0x78
+#define USB_PID_NYET 0x96 /* USB 2.0 */
+#define USB_PID_DATA2 0x87 /* USB 2.0 */
+#define USB_PID_SPLIT 0x78 /* USB 2.0 */
#define USB_PID_IN 0x69
#define USB_PID_NAK 0x5a
#define USB_PID_DATA1 0x4b
-#define USB_PID_PREAMBLE 0x3c
+#define USB_PID_PREAMBLE 0x3c /* Token mode */
+#define USB_PID_ERR 0x3c /* USB 2.0: handshake mode */
#define USB_PID_SETUP 0x2d
#define USB_PID_STALL 0x1e
-#define USB_PID_UNDEF_F 0x0f
+#define USB_PID_MDATA 0x0f /* USB 2.0 */
/*
* Standard requests
#define USB_MAJOR 180
-/* for 2.2-kernels */
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0)
-
-static __inline__ void list_add_tail(struct list_head *new, struct list_head *head)
-{
- __list_add(new, head->prev, head);
-}
-#define LIST_HEAD_INIT(name) { &(name), &(name) }
-
-typedef struct wait_queue wait_queue_t;
-
-typedef struct wait_queue *wait_queue_head_t;
-#define DECLARE_WAITQUEUE(wait, current) \
- struct wait_queue wait = { current, NULL }
-#define DECLARE_WAIT_QUEUE_HEAD(wait)\
- wait_queue_head_t wait
-
-#define init_waitqueue_head(x) *x=NULL
-#define init_MUTEX(x) *(x)=MUTEX
-#define DECLARE_MUTEX(name) struct semaphore name=MUTEX
-#define DECLARE_MUTEX_LOCKED(name) struct semaphore name=MUTEX_LOCKED
-
-
-#define __set_current_state(state_value) \
- do { current->state = state_value; } while (0)
-#ifdef CONFIG_SMP
-#define set_current_state(state_value) \
- set_mb(current->state, state_value)
-#else
-#define set_current_state(state_value) \
- __set_current_state(state_value)
-#endif
-
-#endif // 2.2.x
-
-
static __inline__ void wait_ms(unsigned int ms)
{
if(!in_interrupt()) {
EXPORT_SYMBOL(update_atime);
EXPORT_SYMBOL(get_super);
EXPORT_SYMBOL(get_empty_super);
-EXPORT_SYMBOL(remove_vfsmnt);
EXPORT_SYMBOL(getname);
EXPORT_SYMBOL(_fput);
EXPORT_SYMBOL(igrab);
EXPORT_SYMBOL(iget4);
EXPORT_SYMBOL(iput);
EXPORT_SYMBOL(__namei);
+EXPORT_SYMBOL(follow_down);
EXPORT_SYMBOL(lookup_dentry);
EXPORT_SYMBOL(walk_init);
EXPORT_SYMBOL(walk_name);
EXPORT_SYMBOL(posix_unblock_lock);
EXPORT_SYMBOL(locks_mandatory_area);
EXPORT_SYMBOL(dput);
-EXPORT_SYMBOL(is_root_busy);
EXPORT_SYMBOL(have_submounts);
EXPORT_SYMBOL(prune_dcache);
EXPORT_SYMBOL(shrink_dcache_sb);
/* filesystem registration */
EXPORT_SYMBOL(register_filesystem);
EXPORT_SYMBOL(unregister_filesystem);
+EXPORT_SYMBOL(may_umount);
/* executable format registration */
EXPORT_SYMBOL(register_binfmt);
if (!PageLocked(page))
PAGE_BUG(page);
- /* Initiate completion of any async operations */
- sync_page(page);
-
spin_lock(&pagecache_lock);
remove_page_from_inode_queue(page);
remove_page_from_hash_queue(page);
int shrink_mmap(int priority, int gfp_mask, zone_t *zone)
{
- int ret = 0, loop = 0, count;
+ int ret = 0, count;
LIST_HEAD(young);
LIST_HEAD(old);
LIST_HEAD(forget);
struct list_head * page_lru, * dispose;
struct page * page = NULL;
struct zone_struct * p_zone;
- int maxloop = 256 >> priority;
if (!zone)
BUG();
list_del(page_lru);
p_zone = page->zone;
- /*
- * These two tests are there to make sure we don't free too
- * many pages from the "wrong" zone. We free some anyway,
- * they are the least recently used pages in the system.
- * When we don't free them, leave them in &old.
- */
- dispose = &old;
- if (p_zone != zone && (loop > (maxloop / 4) ||
- p_zone->free_pages > p_zone->pages_high))
- goto dispose_continue;
+ /* This LRU list only contains a few pages from the system,
+ * so we must fail and let swap_out() refill the list if
+ * there aren't enough freeable pages on the list */
/* The page is in use, or was used very recently, put it in
* &young to make sure that we won't try to free it the next
* time */
dispose = &young;
-
if (test_and_clear_bit(PG_referenced, &page->flags))
goto dispose_continue;
- count--;
+ if (p_zone->free_pages > p_zone->pages_high)
+ goto dispose_continue;
+
if (!page->buffers && page_count(page) > 1)
goto dispose_continue;
- /* Page not used -> free it; if that fails -> &old */
+ count--;
+ /* Page not used -> free it or put it on the old list
+ * so it gets freed first the next time */
dispose = &old;
if (TryLockPage(page))
goto dispose_continue;
/* nr_lru_pages needs the spinlock */
nr_lru_pages--;
- loop++;
/* wrong zone? not looped too often? roll again... */
- if (page->zone != zone && loop < maxloop)
+ if (page->zone != zone && count)
goto again;
out:
goto allocate_ok;
/* If we're a memory hog, unmap some pages */
- if (current->hog && low_on_memory &&
- (gfp_mask & __GFP_WAIT))
- swap_out(4, gfp_mask);
+ if (current->hog && low_on_memory && (gfp_mask & __GFP_WAIT)) {
+ // swap_out(6, gfp_mask);
+ // shm_swap(6, gfp_mask, (zone_t *)(zone));
+ try_to_free_pages(gfp_mask, (zone_t *)(zone));
+ }
/*
* (If anyone calls gfp from interrupts nonatomically then it
if (!p->swappable || !mm || mm->rss <= 0)
continue;
/* small processes are swapped out less */
- while ((mm->swap_cnt << 2 * (i + 1) < max_cnt))
- i++;
+ while ((mm->swap_cnt << 2 * (i + 1) < max_cnt)
+ && i++ < 10)
mm->swap_cnt >>= i;
mm->swap_cnt += i; /* if swap_cnt reaches 0 */
/* we're big -> hog treatment */
{
int priority;
int count = SWAP_CLUSTER_MAX;
- int ret;
/* Always trim SLAB caches when memory gets low. */
kmem_cache_reap(gfp_mask);
priority = 6;
do {
- while ((ret = shrink_mmap(priority, gfp_mask, zone))) {
+ while (shrink_mmap(priority, gfp_mask, zone)) {
if (!--count)
goto done;
}
}
}
- /* Then, try to page stuff out..
- * We use swapcount here because this doesn't actually
- * free pages */
+ /* Then, try to page stuff out.. */
while (swap_out(priority, gfp_mask)) {
if (!--count)
goto done;
pgdat = pgdat_list;
while (pgdat) {
for (i = 0; i < MAX_NR_ZONES; i++) {
- zone = pgdat->node_zones + i;
+ int count = SWAP_CLUSTER_MAX;
+ zone = pgdat->node_zones + i;
+ do {
if (tsk->need_resched)
schedule();
if ((!zone->size) || (!zone->zone_wake_kswapd))
continue;
do_try_to_free_pages(GFP_KSWAPD, zone);
+ } while (zone->free_pages < zone->pages_low &&
+ --count);
}
pgdat = pgdat->node_next;
}
struct mfc_cache *c, **cp;
if (!spin_trylock(&mfc_unres_lock)) {
- mod_timer(&ipmr_expire_timer, jiffies + HZ/10);
+ mod_timer(&ipmr_expire_timer, jiffies+HZ/10);
return;
}
c->next = mfc_unres_queue;
mfc_unres_queue = c;
- if (!del_timer(&ipmr_expire_timer))
- ipmr_expire_timer.expires = c->mfc_un.unres.expires;
- add_timer(&ipmr_expire_timer);
+ mod_timer(&ipmr_expire_timer, c->mfc_un.unres.expires);
}
/*
return protocol->invert_tuple(inverse, orig);
}
+/* Unlink a conntrack from both hash chains and, if present, remove
+ its expectation from the global expect_list. Caller must hold
+ ip_conntrack_lock for writing (asserted below). */
+static void
+clean_from_lists(struct ip_conntrack *ct)
+{
+ MUST_BE_WRITE_LOCKED(&ip_conntrack_lock);
+ /* Remove from both hash lists */
+ LIST_DELETE(&ip_conntrack_hash
+ [hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple)],
+ &ct->tuplehash[IP_CT_DIR_ORIGINAL]);
+ LIST_DELETE(&ip_conntrack_hash
+ [hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple)],
+ &ct->tuplehash[IP_CT_DIR_REPLY]);
+ /* If our expected is in the list, take it out. */
+ if (ct->expected.expectant) {
+ IP_NF_ASSERT(list_inlist(&expect_list, &ct->expected));
+ IP_NF_ASSERT(ct->expected.expectant == ct);
+ LIST_DELETE(&expect_list, &ct->expected);
+ }
+}
+
static void
destroy_conntrack(struct nf_conntrack *nfct)
{
struct ip_conntrack *ct = (struct ip_conntrack *)nfct;
+ /* Unconfirmed connections haven't been cleaned up by the
+ timer: hence they cannot be simply deleted here. */
+ if (!(ct->status & IPS_CONFIRMED)) {
+ WRITE_LOCK(&ip_conntrack_lock);
+ /* Race check: they can't get a reference if no one has
+ one and we have the write lock. */
+ if (atomic_read(&ct->ct_general.use) == 0) {
+ clean_from_lists(ct);
+ WRITE_UNLOCK(&ip_conntrack_lock);
+ } else {
+ /* Either a last-minute confirmation (ie. ct
+ now has timer attached), or a last-minute
+ new skb has reference (still unconfirmed). */
+ WRITE_UNLOCK(&ip_conntrack_lock);
+ return;
+ }
+ }
+
IP_NF_ASSERT(atomic_read(&nfct->use) == 0);
IP_NF_ASSERT(!timer_pending(&ct->timeout));
struct ip_conntrack *ct = (void *)ul_conntrack;
WRITE_LOCK(&ip_conntrack_lock);
- /* Remove from both hash lists */
- LIST_DELETE(&ip_conntrack_hash
- [hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple)],
- &ct->tuplehash[IP_CT_DIR_ORIGINAL]);
- LIST_DELETE(&ip_conntrack_hash
- [hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple)],
- &ct->tuplehash[IP_CT_DIR_REPLY]);
- /* If our expected is in the list, take it out. */
- if (ct->expected.expectant) {
- IP_NF_ASSERT(list_inlist(&expect_list, &ct->expected));
- IP_NF_ASSERT(ct->expected.expectant == ct);
- LIST_DELETE(&expect_list, &ct->expected);
- }
+ clean_from_lists(ct);
WRITE_UNLOCK(&ip_conntrack_lock);
ip_conntrack_put(ct);
}
return h;
}
+/* Confirm a connection: set IPS_CONFIRMED, start the timeout timer
+ (whose relative expiry was stashed in timeout.expires when the
+ conntrack was created) and take an extra reference for the timer. */
+void
+ip_conntrack_confirm(struct ip_conntrack *ct)
+{
+ DEBUGP("Confirming conntrack %p\n", ct);
+ WRITE_LOCK(&ip_conntrack_lock);
+ /* Race check: someone else may have confirmed it already. */
+ if (!(ct->status & IPS_CONFIRMED)) {
+ IP_NF_ASSERT(!timer_pending(&ct->timeout));
+ ct->status |= IPS_CONFIRMED;
+ /* Timer relative to confirmation time, not original
+ setting time, otherwise we'd get timer wrap in
+ weird delay cases. */
+ ct->timeout.expires += jiffies;
+ add_timer(&ct->timeout);
+ atomic_inc(&ct->ct_general.use);
+ }
+ WRITE_UNLOCK(&ip_conntrack_lock);
+}
+
/* Returns true if a connection correspondings to the tuple (required
for NAT). */
int
return h != NULL;
}
-/* Returns TRUE if it dealt with ICMP, and filled in skb fields */
-int icmp_error_track(struct sk_buff *skb)
+/* Returns conntrack if it dealt with ICMP, and filled in skb fields.
+ NOTE(review): below, IP_NF_ASSERT(iph->protocol == IPPROTO_ICMP)
+ runs before iph is assigned from skb->nh.iph, so the assert reads
+ an uninitialized pointer; it should follow the assignment. */
+struct ip_conntrack *
+icmp_error_track(struct sk_buff *skb, enum ip_conntrack_info *ctinfo)
{
- const struct iphdr *iph = skb->nh.iph;
- struct icmphdr *hdr = (struct icmphdr *)((u_int32_t *)iph + iph->ihl);
+ const struct iphdr *iph;
+ struct icmphdr *hdr;
struct ip_conntrack_tuple innertuple, origtuple;
- struct iphdr *inner = (struct iphdr *)(hdr + 1);
- size_t datalen = skb->len - iph->ihl*4 - sizeof(*hdr);
+ struct iphdr *inner;
+ size_t datalen;
struct ip_conntrack_protocol *innerproto;
struct ip_conntrack_tuple_hash *h;
- enum ip_conntrack_info ctinfo;
- if (iph->protocol != IPPROTO_ICMP)
- return 0;
+ IP_NF_ASSERT(iph->protocol == IPPROTO_ICMP);
+
+ iph = skb->nh.iph;
+ hdr = (struct icmphdr *)((u_int32_t *)iph + iph->ihl);
+ inner = (struct iphdr *)(hdr + 1);
+ datalen = skb->len - iph->ihl*4 - sizeof(*hdr);
if (skb->len < iph->ihl * 4 + sizeof(struct icmphdr)) {
DEBUGP("icmp_error_track: too short\n");
- return 1;
+ return NULL;
}
if (hdr->type != ICMP_DEST_UNREACH
&& hdr->type != ICMP_TIME_EXCEEDED
&& hdr->type != ICMP_PARAMETERPROB
&& hdr->type != ICMP_REDIRECT)
- return 0;
+ return NULL;
/* Ignore it if the checksum's bogus. */
if (ip_compute_csum((unsigned char *)hdr, sizeof(*hdr) + datalen)) {
DEBUGP("icmp_error_track: bad csum\n");
- return 1;
+ return NULL;
}
innerproto = find_proto(inner->protocol);
DEBUGP("icmp_error: ! get_tuple p=%u (%u*4+%u dlen=%u)\n",
inner->protocol, inner->ihl, 8,
datalen);
- return 1;
+ return NULL;
}
/* Ordinarily, we'd expect the inverted tupleproto, but it's
been preserved inside the ICMP. */
if (!invert_tuple(&innertuple, &origtuple, innerproto)) {
DEBUGP("icmp_error_track: Can't invert tuple\n");
- return 1;
+ return NULL;
}
h = ip_conntrack_find_get(&innertuple, NULL);
if (!h) {
DEBUGP("icmp_error_track: no match\n");
- return 1;
+ return NULL;
+ }
+ if (!(h->ctrack->status & IPS_CONFIRMED)) {
+ DEBUGP("icmp_error_track: unconfirmed\n");
+ ip_conntrack_put(h->ctrack);
+ return NULL;
}
- ctinfo = IP_CT_RELATED;
+ *ctinfo = IP_CT_RELATED;
if (DIRECTION(h) == IP_CT_DIR_REPLY)
- ctinfo += IP_CT_IS_REPLY;
+ *ctinfo += IP_CT_IS_REPLY;
/* Update skb to refer to this connection */
- skb->nfct = &h->ctrack->infos[ctinfo];
- return 1;
+ skb->nfct = &h->ctrack->infos[*ctinfo];
+ return h->ctrack;
+}
+
+/* There's a small race here where we may free a just-replied to
+ connection. Too bad: we're in trouble anyway. */
+/* LIST_FIND predicate: true for confirmed connections that have
+ never seen a reply — the cheapest candidates for early_drop(). */
+static inline int unreplied(const struct ip_conntrack_tuple_hash *i)
+{
+ /* Unconfirmed connections are either really fresh or transitory
+ anyway */
+ if (!(i->ctrack->status & IPS_SEEN_REPLY)
+ && (i->ctrack->status & IPS_CONFIRMED))
+ return 1;
+ return 0;
+}
+
+/* Drop one unreplied connection from the given hash chain to make
+ room when the conntrack table is full. Returns 1 if a connection
+ was dropped, 0 otherwise. */
+static int early_drop(struct list_head *chain)
+{
+ /* Traverse backwards: gives us oldest, which is roughly LRU */
+ struct ip_conntrack_tuple_hash *h;
+ int dropped = 0;
+
+ READ_LOCK(&ip_conntrack_lock);
+ h = LIST_FIND(chain, unreplied, struct ip_conntrack_tuple_hash *);
+ if (h)
+ atomic_inc(&h->ctrack->ct_general.use);
+ READ_UNLOCK(&ip_conntrack_lock);
+
+ if (!h)
+ return dropped;
+
+ /* del_timer() success means we won the race against the timeout
+ handler, so it is up to us to kill the connection. */
+ if (del_timer(&h->ctrack->timeout)) {
+ death_by_timeout((unsigned long)h->ctrack);
+ dropped = 1;
+ }
+ ip_conntrack_put(h->ctrack);
+ return dropped;
+}
static inline int helper_cmp(const struct ip_conntrack_helper *i,
enum ip_conntrack_info ctinfo;
unsigned long extra_jiffies;
int i;
+ static unsigned int drop_next = 0;
- if (!invert_tuple(&repl_tuple, tuple, protocol)) {
- DEBUGP("Can't invert tuple.\n");
- return 1;
- }
+ hash = hash_conntrack(tuple);
- if(ip_conntrack_max &&
- (atomic_read(&ip_conntrack_count) >= ip_conntrack_max)) {
+ if (ip_conntrack_max &&
+ atomic_read(&ip_conntrack_count) >= ip_conntrack_max) {
if (net_ratelimit())
- printk(KERN_WARNING "ip_conntrack: maximum limit of %d entries exceeded\n", ip_conntrack_max);
+ printk(KERN_WARNING "ip_conntrack: maximum limit of"
+ " %d entries exceeded\n", ip_conntrack_max);
+
+ /* Try dropping from a random chain, or else from the
+ chain we are about to insert into (in case they're
+ trying to bomb one hash chain). */
+ if (!early_drop(&ip_conntrack_hash[drop_next++])
+ && !early_drop(&ip_conntrack_hash[hash]))
+ return 1;
+ }
+
+ if (!invert_tuple(&repl_tuple, tuple, protocol)) {
+ DEBUGP("Can't invert tuple.\n");
return 1;
}
+ repl_hash = hash_conntrack(&repl_tuple);
conntrack = kmem_cache_alloc(ip_conntrack_cachep, GFP_ATOMIC);
if (!conntrack) {
DEBUGP("Can't allocate conntrack.\n");
return 1;
}
- hash = hash_conntrack(tuple);
- repl_hash = hash_conntrack(&repl_tuple);
memset(conntrack, 0, sizeof(struct ip_conntrack));
- atomic_set(&conntrack->ct_general.use, 2);
+ atomic_set(&conntrack->ct_general.use, 1);
conntrack->ct_general.destroy = destroy_conntrack;
conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *tuple;
conntrack->tuplehash[IP_CT_DIR_ORIGINAL].ctrack = conntrack;
kmem_cache_free(ip_conntrack_cachep, conntrack);
return 1;
}
+ /* Don't set timer yet: wait for confirmation */
+ init_timer(&conntrack->timeout);
conntrack->timeout.data = (unsigned long)conntrack;
conntrack->timeout.function = death_by_timeout;
- conntrack->timeout.expires = jiffies + extra_jiffies;
- add_timer(&conntrack->timeout);
+ conntrack->timeout.expires = extra_jiffies;
/* Sew in at head of hash list. */
WRITE_LOCK(&ip_conntrack_lock);
/* Check noone else beat us in the race... */
if (__ip_conntrack_find(tuple, NULL)) {
WRITE_UNLOCK(&ip_conntrack_lock);
- printk("ip_conntrack: Wow someone raced us!\n");
kmem_cache_free(ip_conntrack_cachep, conntrack);
return 0;
}
&conntrack->tuplehash[IP_CT_DIR_ORIGINAL]);
list_prepend(&ip_conntrack_hash[repl_hash],
&conntrack->tuplehash[IP_CT_DIR_REPLY]);
+ atomic_inc(&ip_conntrack_count);
WRITE_UNLOCK(&ip_conntrack_lock);
/* Update skb to refer to this connection */
skb->nfct = &conntrack->infos[ctinfo];
- atomic_inc(&ip_conntrack_count);
return 1;
}
-static void
-resolve_normal_ct(struct sk_buff *skb, int create)
+/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
+static inline struct ip_conntrack *
+resolve_normal_ct(struct sk_buff *skb,
+ struct ip_conntrack_protocol *proto,
+ enum ip_conntrack_info *ctinfo)
{
struct ip_conntrack_tuple tuple;
struct ip_conntrack_tuple_hash *h;
- struct ip_conntrack_protocol *proto;
- enum ip_conntrack_info ctinfo;
- proto = find_proto(skb->nh.iph->protocol);
if (!get_tuple(skb->nh.iph, skb->len, &tuple, proto))
- return;
+ return NULL;
/* Loop around search/insert race */
do {
/* look for tuple match */
h = ip_conntrack_find_get(&tuple, NULL);
- if (!h && (!create || init_conntrack(&tuple, proto, skb)))
- return;
+ if (!h && init_conntrack(&tuple, proto, skb))
+ return NULL;
} while (!h);
/* It exists; we have (non-exclusive) reference. */
if (DIRECTION(h) == IP_CT_DIR_REPLY) {
- ctinfo = IP_CT_ESTABLISHED + IP_CT_IS_REPLY;
+ /* Reply on unconfirmed connection => unclassifiable */
+ if (!(h->ctrack->status & IPS_CONFIRMED)) {
+ DEBUGP("Reply on unconfirmed connection\n");
+ ip_conntrack_put(h->ctrack);
+ return NULL;
+ }
+
+ *ctinfo = IP_CT_ESTABLISHED + IP_CT_IS_REPLY;
h->ctrack->status |= IPS_SEEN_REPLY;
} else {
/* Once we've had two way comms, always ESTABLISHED. */
if (h->ctrack->status & IPS_SEEN_REPLY) {
DEBUGP("ip_conntrack_in: normal packet for %p\n",
h->ctrack);
- ctinfo = IP_CT_ESTABLISHED;
+ *ctinfo = IP_CT_ESTABLISHED;
} else if (h->ctrack->status & IPS_EXPECTED) {
DEBUGP("ip_conntrack_in: related packet for %p\n",
h->ctrack);
- ctinfo = IP_CT_RELATED;
+ *ctinfo = IP_CT_RELATED;
} else {
DEBUGP("ip_conntrack_in: new packet for %p\n",
h->ctrack);
- ctinfo = IP_CT_NEW;
+ *ctinfo = IP_CT_NEW;
}
}
- skb->nfct = &h->ctrack->infos[ctinfo];
+ skb->nfct = &h->ctrack->infos[*ctinfo];
+ return h->ctrack;
}
/* Return conntrack and conntrack_info a given skb */
-static struct ip_conntrack *
-__ip_conntrack_get(struct sk_buff *skb,
- enum ip_conntrack_info *ctinfo,
- int create)
+inline struct ip_conntrack *
+ip_conntrack_get(struct sk_buff *skb, enum ip_conntrack_info *ctinfo)
{
- if (!skb->nfct) {
- /* It may be an icmp error... */
- if (!icmp_error_track(skb))
- resolve_normal_ct(skb, create);
- }
-
if (skb->nfct) {
struct ip_conntrack *ct
= (struct ip_conntrack *)skb->nfct->master;
return NULL;
}
-struct ip_conntrack *
-ip_conntrack_get(struct sk_buff *skb, enum ip_conntrack_info *ctinfo)
-{
- return __ip_conntrack_get(skb, ctinfo, 0);
-}
/* Netfilter hook itself. */
unsigned int ip_conntrack_in(unsigned int hooknum,
return NF_STOLEN;
}
- ct = __ip_conntrack_get(*pskb, &ctinfo, 1);
- if (!ct) {
- /* Not valid part of a connection */
- return NF_ACCEPT;
+ proto = find_proto((*pskb)->nh.iph->protocol);
+
+ /* It may be an icmp error... */
+ if ((*pskb)->nh.iph->protocol != IPPROTO_ICMP
+ || !(ct = icmp_error_track(*pskb, &ctinfo))) {
+ if (!(ct = resolve_normal_ct(*pskb, proto, &ctinfo))) {
+ /* Not valid part of a connection */
+ return NF_ACCEPT;
+ }
}
+ IP_NF_ASSERT((*pskb)->nfct);
- proto = find_proto((*pskb)->nh.iph->protocol);
ret = proto->packet(ct, (*pskb)->nh.iph, (*pskb)->len, ctinfo);
-
if (ret == -1) {
/* Invalid */
nf_conntrack_put((*pskb)->nfct);
IP_NF_ASSERT(ct->timeout.data == (unsigned long)ct);
WRITE_LOCK(&ip_conntrack_lock);
- /* Need del_timer for race avoidance (may already be dying). */
- if (del_timer(&ct->timeout)) {
- ct->timeout.expires = jiffies + extra_jiffies;
- add_timer(&ct->timeout);
+ /* Timer may not be active yet */
+ if (!(ct->status & IPS_CONFIRMED))
+ ct->timeout.expires = extra_jiffies;
+ else {
+ /* Need del_timer for race avoidance (may already be dying). */
+ if (del_timer(&ct->timeout)) {
+ ct->timeout.expires = jiffies + extra_jiffies;
+ add_timer(&ct->timeout);
+ }
}
WRITE_UNLOCK(&ip_conntrack_lock);
}
/* Time to push up daises... */
if (del_timer(&h->ctrack->timeout))
death_by_timeout((unsigned long)h->ctrack);
+ else if (!(h->ctrack->status & IPS_CONFIRMED)) {
+ /* Unconfirmed connection. Clean from lists,
+ mark confirmed so it gets cleaned up as soon
+ as a packet comes back. */
+ WRITE_LOCK(&ip_conntrack_lock);
+ if (!(h->ctrack->status & IPS_CONFIRMED)) {
+ clean_from_lists(h->ctrack);
+ h->ctrack->status |= IPS_CONFIRMED;
+ }
+ WRITE_UNLOCK(&ip_conntrack_lock);
+ }
/* ... else the timer will get him soon. */
ip_conntrack_put(h->ctrack);
#ifdef CONFIG_SYSCTL
unregister_sysctl_table(ip_conntrack_sysctl_header);
#endif
+
+ i_see_dead_people:
ip_ct_selective_cleanup(kill_all, NULL);
+ if (atomic_read(&ip_conntrack_count) != 0) {
+ schedule();
+ goto i_see_dead_people;
+ }
+
kmem_cache_destroy(ip_conntrack_cachep);
vfree(ip_conntrack_hash);
nf_unregister_sockopt(&so_getorigdst);
/* FIXME: Examine ipfilter's timeouts and conntrack transitions more
closely. They're more complex. --RR */
+/* We steal a bit to indicate no reply yet (can't use status, because
+ it's set before we get into packet handling). */
+#define TCP_REPLY_BIT 0x1000
+
/* Actually, I believe that neither ipmasq (where this code is stolen
from) nor ipfilter do it exactly right. A new conntrack machine taking
into account packet loss (which creates uncertainty as to exactly
enum tcp_conntrack state;
READ_LOCK(&tcp_lock);
- state = conntrack->proto.tcp_state;
+ state = (conntrack->proto.tcp_state & ~TCP_REPLY_BIT);
READ_UNLOCK(&tcp_lock);
return sprintf(buffer, "%s ", tcp_conntrack_names[state]);
struct iphdr *iph, size_t len,
enum ip_conntrack_info ctinfo)
{
- enum tcp_conntrack newconntrack;
+ enum tcp_conntrack newconntrack, oldtcpstate;
struct tcphdr *tcph = (struct tcphdr *)((u_int32_t *)iph + iph->ihl);
/* We're guaranteed to have the base header, but maybe not the
}
WRITE_LOCK(&tcp_lock);
+ oldtcpstate = conntrack->proto.tcp_state;
newconntrack
= tcp_conntracks
[CTINFO2DIR(ctinfo)]
- [get_conntrack_index(tcph)][conntrack->proto.tcp_state];
+ [get_conntrack_index(tcph)][oldtcpstate & ~TCP_REPLY_BIT];
/* Invalid */
if (newconntrack == TCP_CONNTRACK_MAX) {
}
conntrack->proto.tcp_state = newconntrack;
+ if ((oldtcpstate & TCP_REPLY_BIT)
+ || ctinfo >= IP_CT_IS_REPLY)
+ conntrack->proto.tcp_state |= TCP_REPLY_BIT;
+
WRITE_UNLOCK(&tcp_lock);
- ip_ct_refresh(conntrack, tcp_timeouts[conntrack->proto.tcp_state]);
+ /* If only reply is a RST, we can consider ourselves not to
+ have an established connection: this is a fairly common
+ problem case, so we can delete the conntrack
+ immediately. --RR */
+ if (!(oldtcpstate & TCP_REPLY_BIT) && tcph->rst) {
+ if (del_timer(&conntrack->timeout))
+ conntrack->timeout.function((unsigned long)conntrack);
+ } else
+ ip_ct_refresh(conntrack, tcp_timeouts[newconntrack]);
+
return NF_ACCEPT;
}
len += print_tuple(buffer + len,
&conntrack->tuplehash[IP_CT_DIR_REPLY].tuple,
proto);
+#if 0
+ if (!(conntrack->status & IPS_CONFIRMED))
+ len += sprintf(buffer + len, "[UNCONFIRMED] ");
+ len += sprintf(buffer + len, "use=%u ",
+ atomic_read(&conntrack->ct_general.use));
+#endif
len += sprintf(buffer + len, "\n");
return len;
return len;
}
+/* NF_IP_LOCAL_IN hook: the packet has made it through to the local
+ machine, so confirm the connection it belongs to (if any). */
+static unsigned int ip_confirm(unsigned int hooknum,
+ struct sk_buff **pskb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ /* We've seen it coming out the other side: confirm */
+ if ((*pskb)->nfct) {
+ struct ip_conntrack *ct
+ = (struct ip_conntrack *)(*pskb)->nfct->master;
+ if (!(ct->status & IPS_CONFIRMED))
+ ip_conntrack_confirm(ct);
+ }
+ return NF_ACCEPT;
+}
+
static unsigned int ip_refrag(unsigned int hooknum,
struct sk_buff **pskb,
const struct net_device *in,
{
struct rtable *rt = (struct rtable *)(*pskb)->dst;
+ /* We've seen it coming out the other side: confirm */
+ if ((*pskb)->nfct) {
+ struct ip_conntrack *ct
+ = (struct ip_conntrack *)(*pskb)->nfct->master;
+ if (!(ct->status & IPS_CONFIRMED))
+ ip_conntrack_confirm(ct);
+ }
+
/* Local packets are never produced too large for their
interface. We degfragment them at LOCAL_OUT, however,
so we have to refragment them here. */
/* Refragmenter; last chance. */
static struct nf_hook_ops ip_conntrack_out_ops
= { { NULL, NULL }, ip_refrag, PF_INET, NF_IP_POST_ROUTING, NF_IP_PRI_LAST };
+static struct nf_hook_ops ip_conntrack_local_in_ops
+= { { NULL, NULL }, ip_confirm, PF_INET, NF_IP_LOCAL_IN, NF_IP_PRI_LAST-1 };
static int init_or_cleanup(int init)
{
printk("ip_conntrack: can't register post-routing hook.\n");
goto cleanup_inandlocalops;
}
+ ret = nf_register_hook(&ip_conntrack_local_in_ops);
+ if (ret < 0) {
+ printk("ip_conntrack: can't register local in hook.\n");
+ goto cleanup_inoutandlocalops;
+ }
return ret;
cleanup:
+ nf_unregister_hook(&ip_conntrack_local_in_ops);
+ cleanup_inoutandlocalops:
nf_unregister_hook(&ip_conntrack_out_ops);
cleanup_inandlocalops:
nf_unregister_hook(&ip_conntrack_local_out_ops);
#include <net/route.h>
#include <linux/netfilter_ipv4/compat_firewall.h>
#include <linux/netfilter_ipv4/ip_conntrack.h>
+#include <linux/netfilter_ipv4/ip_conntrack_core.h>
static struct firewall_ops *fwops;
return 0;
}
+/* Confirm the conntrack attached to skb (if any and not yet
+ confirmed); helper shared by the compat-firewall hooks. */
+static inline void
+confirm_connection(struct sk_buff *skb)
+{
+ if (skb->nfct) {
+ struct ip_conntrack *ct
+ = (struct ip_conntrack *)skb->nfct->master;
+
+ if (!(ct->status & IPS_CONFIRMED))
+ ip_conntrack_confirm(ct);
+ }
+}
+
static unsigned int
fw_in(unsigned int hooknum,
struct sk_buff **pskb,
ret = fwops->fw_output(fwops, PF_INET,
(struct net_device *)out,
(*pskb)->nh.raw, &redirpt, pskb);
- if (fwops->fw_acct_out && (ret == FW_ACCEPT || ret == FW_SKIP))
- fwops->fw_acct_out(fwops, PF_INET,
- (struct net_device *)in,
- (*pskb)->nh.raw, &redirpt, pskb);
+ if (ret == FW_ACCEPT || ret == FW_SKIP) {
+ if (fwops->fw_acct_out)
+ fwops->fw_acct_out(fwops, PF_INET,
+ (struct net_device *)in,
+ (*pskb)->nh.raw, &redirpt,
+ pskb);
+ confirm_connection(*pskb);
+ }
break;
}
}
}
+/* NF_IP_LOCAL_IN hook for the compat firewall: confirm the packet's
+ connection and accept unconditionally. */
+static unsigned int fw_confirm(unsigned int hooknum,
+ struct sk_buff **pskb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ confirm_connection(*pskb);
+ return NF_ACCEPT;
+}
+
extern int ip_fw_ctl(int optval, void *user, unsigned int len);
static int sock_fn(struct sock *sk, int optval, void *user, unsigned int len)
static struct nf_hook_ops forward_ops
= { { NULL, NULL }, fw_in, PF_INET, NF_IP_FORWARD, NF_IP_PRI_FILTER };
+static struct nf_hook_ops local_in_ops
+= { { NULL, NULL }, fw_confirm, PF_INET, NF_IP_LOCAL_IN, NF_IP_PRI_LAST - 1 };
+
static struct nf_sockopt_ops sock_ops
= { { NULL, NULL }, PF_INET, 64, 64 + 1024 + 1, &sock_fn, 0, 0, NULL,
0, NULL };
nf_register_hook(&preroute_ops);
nf_register_hook(&postroute_ops);
nf_register_hook(&forward_ops);
+ nf_register_hook(&local_in_ops);
return ret;
nf_unregister_hook(&preroute_ops);
nf_unregister_hook(&postroute_ops);
nf_unregister_hook(&forward_ops);
+ nf_unregister_hook(&local_in_ops);
masq_cleanup();
struct ip_conntrack_protocol *protocol;
struct ip_conntrack_tuple_hash *h;
enum ip_conntrack_info ctinfo;
+ struct ip_conntrack *ct;
int ret;
protocol = find_proto(iph->protocol);
switch (iph->protocol) {
case IPPROTO_ICMP:
/* ICMP errors. */
- if (icmp_error_track(*pskb)) {
- /* If it is valid, tranlsate it */
- if ((*pskb)->nfct) {
- struct ip_conntrack *ct
- = (struct ip_conntrack *)
- (*pskb)->nfct->master;
- enum ip_conntrack_dir dir;
-
- if ((*pskb)->nfct-ct->infos >= IP_CT_IS_REPLY)
- dir = IP_CT_DIR_REPLY;
- else
- dir = IP_CT_DIR_ORIGINAL;
-
- icmp_reply_translation(*pskb,
- ct,
- NF_IP_PRE_ROUTING,
- dir);
- }
+ if ((ct = icmp_error_track(*pskb, &ctinfo))) {
+ icmp_reply_translation(*pskb, ct,
+ NF_IP_PRE_ROUTING,
+ CTINFO2DIR(ctinfo));
return NF_ACCEPT;
}
/* Fall thru... */
case IPPROTO_TCP:
case IPPROTO_UDP:
if (!get_tuple(iph, (*pskb)->len, &tuple, protocol)) {
- printk("ip_fw_compat_masq: Couldn't get tuple\n");
+ if (net_ratelimit())
+ printk("ip_fw_compat_masq: Can't get tuple\n");
return NF_ACCEPT;
}
break;
NF_IP_PRE_ROUTING,
pskb);
} else
- printk("ip_fw_compat_masq: conntrack"
- " didn't like\n");
+ if (net_ratelimit())
+ printk("ip_fw_compat_masq: conntrack"
+ " didn't like\n");
}
} else {
if (h)
if (i && (*i)-- == 0)
return 1;
+ if (m->u.match->destroy)
+ m->u.match->destroy(m->data, m->match_size - sizeof(*m));
+
if (m->u.match->me)
__MOD_DEC_USE_COUNT(m->u.match->me);
/* Cleanup all matches */
IPT_MATCH_ITERATE(e, cleanup_match, NULL);
t = ipt_get_target(e);
+ if (t->u.target->destroy)
+ t->u.target->destroy(t->data, t->target_size - sizeof(*t));
if (t->u.target->me)
__MOD_DEC_USE_COUNT(t->u.target->me);
/* Silent error: too late now. */
copy_to_user(tmp.counters, counters,
sizeof(struct ipt_counters) * tmp.num_counters);
-
+ vfree(counters);
up(&ipt_mutex);
return 0;
}
static struct ipt_target ipt_log_reg
-= { { NULL, NULL }, "LOG", ipt_log_target, ipt_log_checkentry, THIS_MODULE };
+= { { NULL, NULL }, "LOG", ipt_log_target, ipt_log_checkentry, NULL,
+ THIS_MODULE };
static int __init init(void)
{
}
static struct ipt_target ipt_mark_reg
-= { { NULL, NULL }, "MARK", target, checkentry, THIS_MODULE };
+= { { NULL, NULL }, "MARK", target, checkentry, NULL, THIS_MODULE };
static int __init init(void)
{
{
struct ip_conntrack *ct;
enum ip_conntrack_info ctinfo;
- const struct ip_nat_range *r;
+ const struct ip_nat_multi_range *mr;
struct ip_nat_multi_range newrange;
u_int32_t newsrc;
struct rtable *rt;
IP_NF_ASSERT(ct && (ctinfo == IP_CT_NEW
|| ctinfo == IP_CT_RELATED));
- r = targinfo;
+ mr = targinfo;
if (ip_route_output(&rt, (*pskb)->nh.iph->daddr,
0,
/* Transfer from original range. */
newrange = ((struct ip_nat_multi_range)
- { 1, { { r->flags | IP_NAT_RANGE_MAP_IPS,
+ { 1, { { mr->range[0].flags | IP_NAT_RANGE_MAP_IPS,
newsrc, newsrc,
- r->min, r->max } } });
+ mr->range[0].min, mr->range[0].max } } });
/* Hand modified range to generic setup. */
return ip_nat_setup_info(ct, &newrange, hooknum);
};
static struct ipt_target masquerade
-= { { NULL, NULL }, "MASQUERADE", masquerade_target, masquerade_check,
+= { { NULL, NULL }, "MASQUERADE", masquerade_target, masquerade_check, NULL,
THIS_MODULE };
static int __init init(void)
}
static struct ipt_target ipt_mirror_reg
-= { { NULL, NULL }, "MIRROR", ipt_mirror_target, ipt_mirror_checkentry,
+= { { NULL, NULL }, "MIRROR", ipt_mirror_target, ipt_mirror_checkentry, NULL,
THIS_MODULE };
static int __init init(void)
struct ip_conntrack *ct;
enum ip_conntrack_info ctinfo;
u_int32_t newdst;
- const struct ip_nat_range *r = targinfo;
+ const struct ip_nat_multi_range *mr = targinfo;
struct ip_nat_multi_range newrange;
IP_NF_ASSERT(hooknum == NF_IP_PRE_ROUTING
/* Transfer from original range. */
newrange = ((struct ip_nat_multi_range)
- { 1, { { r->flags | IP_NAT_RANGE_MAP_IPS,
+ { 1, { { mr->range[0].flags | IP_NAT_RANGE_MAP_IPS,
newdst, newdst,
- r->min, r->max } } });
+ mr->range[0].min, mr->range[0].max } } });
/* Hand modified range to generic setup. */
return ip_nat_setup_info(ct, &newrange, hooknum);
}
static struct ipt_target redirect_reg
-= { { NULL, NULL }, "REDIRECT", redirect_target, redirect_check, THIS_MODULE };
+= { { NULL, NULL }, "REDIRECT", redirect_target, redirect_check, NULL,
+ THIS_MODULE };
static int __init init(void)
{
}
static struct ipt_target ipt_reject_reg
-= { { NULL, NULL }, "REJECT", reject, check, THIS_MODULE };
+= { { NULL, NULL }, "REJECT", reject, check, NULL, THIS_MODULE };
static int __init init(void)
{
}
static struct ipt_target ipt_tos_reg
-= { { NULL, NULL }, "TOS", target, checkentry, THIS_MODULE };
+= { { NULL, NULL }, "TOS", target, checkentry, NULL, THIS_MODULE };
static int __init init(void)
{
}
static struct ipt_match ipt_limit_reg
-= { { NULL, NULL }, "limit", ipt_limit_match, ipt_limit_checkentry,
+= { { NULL, NULL }, "limit", ipt_limit_match, ipt_limit_checkentry, NULL,
THIS_MODULE };
static int __init init(void)
}
static struct ipt_match mac_match
-= { { NULL, NULL }, "mac", &match, &ipt_mac_checkentry, THIS_MODULE };
+= { { NULL, NULL }, "mac", &match, &ipt_mac_checkentry, NULL, THIS_MODULE };
static int __init init(void)
{
}
static struct ipt_match mark_match
-= { { NULL, NULL }, "mark", &match, &checkentry, THIS_MODULE };
+= { { NULL, NULL }, "mark", &match, &checkentry, NULL, THIS_MODULE };
static int __init init(void)
{
}
static struct ipt_match multiport_match
-= { { NULL, NULL }, "multiport", &match, &checkentry, THIS_MODULE };
+= { { NULL, NULL }, "multiport", &match, &checkentry, NULL, THIS_MODULE };
static int __init init(void)
{
}
static struct ipt_match owner_match
-= { { NULL, NULL }, "owner", &match, &checkentry, THIS_MODULE };
+= { { NULL, NULL }, "owner", &match, &checkentry, NULL, THIS_MODULE };
static int __init init(void)
{
}
static struct ipt_match state_match
-= { { NULL, NULL }, "state", &match, &check, THIS_MODULE };
+= { { NULL, NULL }, "state", &match, &check, NULL, THIS_MODULE };
static int __init init(void)
{
}
static struct ipt_match tos_match
-= { { NULL, NULL }, "tos", &match, &checkentry, THIS_MODULE };
+= { { NULL, NULL }, "tos", &match, &checkentry, NULL, THIS_MODULE };
static int __init init(void)
{
}
static struct ipt_match unclean_match
-= { { NULL, NULL }, "unclean", &match, &checkentry, THIS_MODULE };
+= { { NULL, NULL }, "unclean", &match, &checkentry, NULL, THIS_MODULE };
static int __init init(void)
{
NF_IP_PRI_FILTER }
};
-/* Default to no forward for security reasons. */
-static int forward = NF_DROP;
+/* Default to forward because I got too much mail already. */
+static int forward = NF_ACCEPT;
MODULE_PARM(forward, "i");
static int __init init(void)
ipxh->spx.allocseq = htons(pdata->alloc);
/* Reset/Set WD timer */
- del_timer(&pdata->watchdog);
- pdata->watchdog.expires = jiffies + VERIFY_TIMEOUT;
- add_timer(&pdata->watchdog);
+ mod_timer(&pdata->watchdog, jiffies+VERIFY_TIMEOUT);
switch(type)
{
if (delay == 0)
delay = 1;
- del_timer(&q->wd_timer);
- q->wd_timer.expires = jiffies + delay;
- add_timer(&q->wd_timer);
+ mod_timer(&q->wd_timer, jiffies+delay);
}
/* Maybe we have a shorter packet in the queue,
* The generic interface for RPC authentication on the server side.
*
* Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
+ *
+ * CHANGES
+ * 19-Apr-2000 Chris Evans - Security fix
*/
#include <linux/types.h>
struct svc_buf *argp = &rqstp->rq_argbuf;
struct svc_buf *resp = &rqstp->rq_resbuf;
struct svc_cred *cred = &rqstp->rq_cred;
- u32 *bufp = argp->buf;
- int len = argp->len, slen, i;
+ u32 *bufp = argp->buf, slen, i;
+ int len = argp->len;
if ((len -= 3) < 0) {
*statp = rpc_garbage_args;
bufp++; /* length */
bufp++; /* time stamp */
slen = (ntohl(*bufp++) + 3) >> 2; /* machname length */
- if (slen > 64 || (len -= slen) < 0)
+ if (slen > 64 || (len -= slen + 3) < 0)
goto badcred;
bufp += slen; /* skip machname */