removed from the running kernel whenever you want), say M here and
read Documentation/modules.txt. The module will be called lp.o.
- If you have several parallel ports, you should specify the base
- address for the port to be used by the printer with the "lp" kernel
- command line option. (Try "man bootparam" or see the documentation
- of your boot loader (lilo or loadlin) about how to pass options to
- the kernel at boot time. The lilo procedure is also explained in the
- SCSI-HOWTO, available via FTP (user: anonymous) in
- ftp://metalab.unc.edu/pub/Linux/docs/HOWTO.) The standard base
- addresses as well as the syntax of the "lp" command line option can
- be found in drivers/char/lp.c.
+ If you have several parallel ports, you can specify which ports to
+ use with the "lp" kernel command line option. (Try "man bootparam"
+ or see the documentation of your boot loader (lilo or loadlin)
+ about how to pass options to the kernel at boot time. The lilo
+ procedure is also explained in the SCSI-HOWTO, available via FTP
+ (user: anonymous) in ftp://metalab.unc.edu/pub/Linux/docs/HOWTO.)
+ The syntax of the "lp" command line option can be found in
+ drivers/char/lp.c.
If you have more than 3 printers, you need to increase the LP_NO
variable in lp.c.
If you have configured the /proc filesystem into your kernel, you will
see a new directory entry: /proc/parport. In there will be a
directory entry for each parallel port for which parport is
-configured. In each of those directories are three files describing
+configured. In each of those directories are four files describing
that parallel port. For example:
File: Contents:
-/* $Id: process.c,v 1.132 1999/03/22 02:12:13 davem Exp $
+/* $Id: process.c,v 1.133 1999/03/24 11:42:30 davem Exp $
* linux/arch/sparc/kernel/process.c
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
p->tss.kregs = childregs;
if(regs->psr & PSR_PS) {
- childregs->u_regs[UREG_FP] = p->tss.ksp;
+ new_stack = (struct reg_window *)
+ ((((unsigned long)p) +
+ (TASK_UNION_SIZE)) -
+ (REGWIN_SZ));
+ childregs->u_regs[UREG_FP] = (unsigned long) new_stack;
p->tss.flags |= SPARC_FLAG_KTHREAD;
p->tss.current_ds = KERNEL_DS;
+ memcpy((void *)new_stack,
+ (void *)regs->u_regs[UREG_FP],
+ sizeof(struct reg_window));
childregs->u_regs[UREG_G6] = (unsigned long) p;
} else {
childregs->u_regs[UREG_FP] = sp;
-/* $Id: srmmu.c,v 1.184 1999/03/20 22:02:03 davem Exp $
+/* $Id: srmmu.c,v 1.185 1999/03/24 11:42:35 davem Exp $
* srmmu.c: SRMMU specific routines for memory management.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
static void srmmu_switch_to_context(struct task_struct *tsk)
{
- int set = 0;
-
if(tsk->mm->context == NO_CONTEXT) {
alloc_context(tsk->mm);
- flush_cache_mm(tsk->mm);
ctxd_set(&srmmu_context_table[tsk->mm->context], tsk->mm->pgd);
- flush_tlb_mm(tsk->mm);
- set = 1;
- } else if(tsk->mm != current->mm)
- set = 1;
-
- if(set != 0)
- srmmu_set_context(tsk->mm->context);
+ }
+ srmmu_set_context(tsk->mm->context);
}
static void srmmu_init_new_context(struct mm_struct *mm)
static void viking_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp)
{
if(pgdp != swapper_pg_dir)
- viking_flush_page((unsigned long)pgdp);
+ flush_chunk((unsigned long)pgdp);
if(tsk->mm->context != NO_CONTEXT &&
tsk->mm->pgd != pgdp) {
flush_cache_mm(tsk->mm);
static void hypersparc_switch_to_context(struct task_struct *tsk)
{
- int set = 0;
-
if(tsk->mm->context == NO_CONTEXT) {
ctxd_t *ctxp;
ctxp = &srmmu_context_table[tsk->mm->context];
srmmu_set_entry((pte_t *)ctxp, __pte((SRMMU_ET_PTD | (srmmu_v2p((unsigned long) tsk->mm->pgd) >> 4))));
hypersparc_flush_page_to_ram((unsigned long)ctxp);
- set = 1;
- } else if(tsk->mm != current->mm)
- set = 1;
-
- if(set != 0) {
- hyper_flush_whole_icache();
- srmmu_set_context(tsk->mm->context);
}
+ hyper_flush_whole_icache();
+ srmmu_set_context(tsk->mm->context);
}
static void hypersparc_init_new_context(struct mm_struct *mm)
BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page_for_dma, BTFIXUPCALL_NOP);
}
- /* flush_cache_* are nops */
- BTFIXUPSET_CALL(flush_cache_all, viking_flush_cache_all, BTFIXUPCALL_NOP);
- BTFIXUPSET_CALL(flush_cache_mm, viking_flush_cache_mm, BTFIXUPCALL_NOP);
- BTFIXUPSET_CALL(flush_cache_page, viking_flush_cache_page, BTFIXUPCALL_NOP);
- BTFIXUPSET_CALL(flush_cache_range, viking_flush_cache_range, BTFIXUPCALL_NOP);
+ BTFIXUPSET_CALL(flush_cache_all, viking_flush_cache_all, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_mm, viking_flush_cache_mm, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_page, viking_flush_cache_page, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_range, viking_flush_cache_range, BTFIXUPCALL_NORM);
#ifdef __SMP__
if (sparc_cpu_model == sun4d) {
-/* $Id: viking.S,v 1.12 1999/02/23 13:23:50 jj Exp $
+/* $Id: viking.S,v 1.13 1999/03/24 11:42:32 davem Exp $
* viking.S: High speed Viking cache/mmu operations
*
* Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
clr %o1 ! set counter, 0 - 127
sethi %hi(KERNBASE + PAGE_SIZE - 0x80000000), %o3
sethi %hi(0x80000000), %o4
- sethi %hi(VIKING_PTAG_VALID | VIKING_PTAG_DIRTY), %o5
+ sethi %hi(VIKING_PTAG_VALID), %o5
sethi %hi(2*PAGE_SIZE), %o0
sethi %hi(PAGE_SIZE), %g7
clr %o2 ! block counter, 0 - 3
or %g5, %g4, %g5
ldda [%g5] ASI_M_DATAC_TAG, %g2
cmp %g3, %g1 ! ptag == ppage?
- bne,a 7f
+ bne 7f
inc %o2
- and %g2, %o5, %g3 ! ptag VALID and DIRTY?
- cmp %g3, %o5
- bne,a 7f
- inc %o2
-
- add %g4, %o3, %g2 ! (KERNBASE + PAGE_SIZE) | (set << 5)
+ andcc %g2, %o5, %g0 ! ptag VALID?
+ be 7f
+ add %g4, %o3, %g2 ! (KERNBASE + PAGE_SIZE) | (set << 5)
ld [%g2], %g3
ld [%g2 + %g7], %g3
add %g2, %o0, %g2
ld [%g2 + %g7], %g3
add %g2, %o0, %g2
ld [%g2], %g3
- ld [%g2 + %g7], %g3
-
b 8f
- inc %o1
+ ld [%g2 + %g7], %g3
7:
cmp %o2, 3
ble 6b
sll %o2, 26, %g5 ! block << 26
- inc %o1
-8:
+8: inc %o1
cmp %o1, 0x7f
ble 5b
clr %o2
retl
nop
-viking_flush_cache_all:
+#define WINDOW_FLUSH(tmp1, tmp2) \
+ mov 0, tmp1; \
+98: ld [%g6 + AOFF_task_tss + AOFF_thread_uwinmask], tmp2; \
+ orcc %g0, tmp2, %g0; \
+ add tmp1, 1, tmp1; \
+ bne 98b; \
+ save %sp, -64, %sp; \
+99: subcc tmp1, 1, tmp1; \
+ bne 99b; \
+ restore %g0, %g0, %g0;
+
+viking_flush_cache_page:
+#ifndef __SMP__
+ ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */
+#endif
viking_flush_cache_mm:
viking_flush_cache_range:
-viking_flush_cache_page:
+#ifndef __SMP__
+ ld [%o0 + AOFF_mm_context], %g1
+ cmp %g1, -1
+ bne viking_flush_cache_all
+ nop
+ b,a viking_flush_cache_out
+#endif
+viking_flush_cache_all:
+ WINDOW_FLUSH(%g4, %g5)
+viking_flush_cache_out:
retl
nop
-/* $Id: ioctl32.c,v 1.59 1999/03/12 13:30:21 jj Exp $
+/* $Id: ioctl32.c,v 1.60 1999/03/22 10:40:54 jj Exp $
* ioctl32.c: Conversion between 32bit and 64bit native ioctls.
*
* Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
cmap.transp = kmalloc(cmap.len * sizeof(__u16), GFP_KERNEL);
if (!cmap.transp)
goto out;
- } else
- cmap.transp = NULL;
+ }
if (cmd == FBIOGETCMAP)
break;
/* read drive info out of physical CMOS */
drive=0;
if (!UDP->cmos)
- UDP->cmos= FLOPPY0_TYPE;
+ UDP->cmos = FLOPPY0_TYPE;
drive=1;
if (!UDP->cmos && FLOPPY1_TYPE)
UDP->cmos = FLOPPY1_TYPE;
/* additional physical CMOS drive detection should go here */
for (drive=0; drive < N_DRIVE; drive++){
- if (UDP->cmos >= 16)
- UDP->cmos = 0;
- if (UDP->cmos >= 0 && UDP->cmos <= NUMBER(default_drive_params))
- memcpy((char *) UDP,
- (char *) (&default_drive_params[(int)UDP->cmos].params),
- sizeof(struct floppy_drive_params));
- if (UDP->cmos){
- if (first)
- printk(KERN_INFO "Floppy drive(s): ");
- else
- printk(", ");
- first=0;
- if (UDP->cmos > 0){
+ unsigned int type = UDP->cmos;
+ struct floppy_drive_params *params;
+ const char *name = NULL;
+ static char temparea[32];
+
+ if (type < NUMBER(default_drive_params)) {
+ params = &default_drive_params[type].params;
+ if (type) {
+ name = default_drive_params[type].name;
allowed_drive_mask |= 1 << drive;
- printk("fd%d is %s", drive,
- default_drive_params[(int)UDP->cmos].name);
- } else
- printk("fd%d is unknown type %d",drive,
- UDP->cmos);
+ }
+ } else {
+ params = &default_drive_params[0].params;
+ sprintf(temparea, "unknown type %d (usb?)", type);
+ name = temparea;
}
+ if (name) {
+ const char * prepend = ",";
+ if (first) {
+ prepend = KERN_INFO "Floppy drive(s):";
+ first = 0;
+ }
+ printk("%s fd%d is %s", prepend, drive, name);
+ }
+ *UDP = *params;
}
if (!first)
printk("\n");
}
if (current_drive >= 4 && !FDC2)
FDC2 = 0x370;
- if (ints[2] <= 0 ||
- (ints[2] >= NUMBER(default_drive_params) && ints[2] != 16)){
- DPRINT("bad CMOS code %d\n", ints[2]);
- return;
- }
DP->cmos = ints[2];
DPRINT("setting CMOS code to %d\n", ints[2]);
}
static int rd_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
- int err;
-
if (!inode || !inode->i_rdev)
return -EINVAL;
#ifdef MODULE
+EXPORT_NO_SYMBOLS;
+
int init_module(void)
{
#else
kfree(qcam);
}
+/* The parport parameter controls which parports will be scanned.
+ * Scanning all parports causes some printers to print a garbage page.
+ * -- March 14, 1999 Billy Donahue <billy@escape.com> */
+#ifdef MODULE
+static char *parport[MAX_CAMS] = { NULL, };
+MODULE_PARM(parport, "1-" __MODULE_STRING(MAX_CAMS) "s");
+#endif
+
#ifdef MODULE
int init_module(void)
{
struct parport *port;
-
+ int n;
+ if(parport[0] && strncmp(parport[0], "auto", 4)){
+ /* user gave parport parameters */
+ for(n=0; parport[n] && n<MAX_CAMS; n++){
+ char *ep;
+ unsigned long r;
+ r = simple_strtoul(parport[n], &ep, 0);
+ if(ep == parport[n]){
+ printk(KERN_ERR
+ "bw-qcam: bad port specifier \"%s\"\n",
+ parport[n]);
+ continue;
+ }
+ for (port=parport_enumerate(); port; port=port->next){
+ if(r!=port->number)
+ continue;
+ init_bwqcam(port);
+ break;
+ }
+ }
+ return (num_cams)?0:-ENODEV;
+ }
+ /* no parameter or "auto" */
for (port = parport_enumerate(); port; port=port->next)
init_bwqcam(port);
extern void watchdog_init(void);
extern void wdt_init(void);
extern void acq_init(void);
+extern void dtlk_init(void);
extern void pcwatchdog_init(void);
extern int rtc_init(void);
extern int rtc_DP8570A_init(void);
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/auxio.h>
-#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <linux/string.h>
#include <linux/parport.h>
#include <linux/bitops.h>
-#include <linux/sched.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/auxio.h>
-#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
-/* $Id: sab82532.c,v 1.28 1999/01/02 16:47:35 davem Exp $
+/* $Id: sab82532.c,v 1.30 1999/03/24 11:34:52 davem Exp $
* sab82532.c: ASYNC Driver for the SIEMENS SAB82532 DUSCC.
*
* Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
__initfunc(static inline void show_serial_version(void))
{
- char *revision = "$Revision: 1.28 $";
+ char *revision = "$Revision: 1.30 $";
char *version, *p;
version = strchr(revision, ' ');
restore_flags(flags);
for (i = 0; i < NR_PORTS; i++) {
- if (sab82532_table[i].type != PORT_UNKNOWN)
- release_region(sab82532_table[i].port, 8);
+ struct sab82532 *info = (struct sab82532 *)sab82532_table[i]->driver_data;
+ if (info->type != PORT_UNKNOWN)
+ release_region((unsigned long)info->regs,
+ sizeof(union sab82532_async_regs));
}
if (tmp_buf) {
free_page((unsigned long) tmp_buf);
}
#include "ppa.h"
-#include <linux/parport.h>
#define NO_HOSTS 4
static ppa_struct ppa_hosts[NO_HOSTS] =
* 16.12.98 0.16 Don't wake up app until there are fragsize bytes to read/write
* 06.01.99 0.17 remove the silly SA_INTERRUPT flag.
* hopefully killed the egcs section type conflict
+ * 12.03.99 0.18 cinfo.blocks should be reset after GETxPTR ioctl.
+ * reported by Johan Maes <joma@telindus.be>
+ * 22.03.99 0.19 return EAGAIN instead of EBUSY when O_NONBLOCK
+ * read/write cannot be executed
*
* some important things missing in Ensoniq documentation:
*
if (cnt <= 0) {
start_adc(s);
if (file->f_flags & O_NONBLOCK)
- return ret ? ret : -EBUSY;
+ return ret ? ret : -EAGAIN;
interruptible_sleep_on(&s->dma_adc.wait);
if (signal_pending(current))
return ret ? ret : -ERESTARTSYS;
if (cnt <= 0) {
start_dac2(s);
if (file->f_flags & O_NONBLOCK)
- return ret ? ret : -EBUSY;
+ return ret ? ret : -EAGAIN;
interruptible_sleep_on(&s->dma_dac2.wait);
if (signal_pending(current))
return ret ? ret : -ERESTARTSYS;
spin_lock_irqsave(&s->lock, flags);
es1370_update_ptr(s);
cinfo.bytes = s->dma_adc.total_bytes;
- cinfo.blocks = s->dma_adc.total_bytes >> s->dma_adc.fragshift;
+ cinfo.blocks = s->dma_adc.count >> s->dma_adc.fragshift;
cinfo.ptr = s->dma_adc.hwptr;
if (s->dma_adc.mapped)
s->dma_adc.count &= s->dma_adc.fragsize-1;
spin_lock_irqsave(&s->lock, flags);
es1370_update_ptr(s);
cinfo.bytes = s->dma_dac2.total_bytes;
- cinfo.blocks = s->dma_dac2.total_bytes >> s->dma_dac2.fragshift;
+ cinfo.blocks = s->dma_dac2.count >> s->dma_dac2.fragshift;
cinfo.ptr = s->dma_dac2.hwptr;
if (s->dma_dac2.mapped)
s->dma_dac2.count &= s->dma_dac2.fragsize-1;
if (cnt <= 0) {
start_dac1(s);
if (file->f_flags & O_NONBLOCK)
- return ret ? ret : -EBUSY;
+ return ret ? ret : -EAGAIN;
interruptible_sleep_on(&s->dma_dac1.wait);
if (signal_pending(current))
return ret ? ret : -ERESTARTSYS;
spin_lock_irqsave(&s->lock, flags);
es1370_update_ptr(s);
cinfo.bytes = s->dma_dac1.total_bytes;
- cinfo.blocks = s->dma_dac1.total_bytes >> s->dma_dac1.fragshift;
+ cinfo.blocks = s->dma_dac1.count >> s->dma_dac1.fragshift;
cinfo.ptr = s->dma_dac1.hwptr;
if (s->dma_dac1.mapped)
s->dma_dac1.count &= s->dma_dac1.fragsize-1;
cnt = count;
if (cnt <= 0) {
if (file->f_flags & O_NONBLOCK)
- return ret ? ret : -EBUSY;
+ return ret ? ret : -EAGAIN;
interruptible_sleep_on(&s->midi.iwait);
if (signal_pending(current))
return ret ? ret : -ERESTARTSYS;
cnt = count;
if (cnt <= 0) {
if (file->f_flags & O_NONBLOCK)
- return ret ? ret : -EBUSY;
+ return ret ? ret : -EAGAIN;
interruptible_sleep_on(&s->midi.owait);
if (signal_pending(current))
return ret ? ret : -ERESTARTSYS;
if (!pci_present()) /* No PCI bus in this machine! */
return -ENODEV;
- printk(KERN_INFO "es1370: version v0.17 time " __TIME__ " " __DATE__ "\n");
+ printk(KERN_INFO "es1370: version v0.19 time " __TIME__ " " __DATE__ "\n");
while (index < NR_DEVICE &&
(pcidev = pci_find_device(PCI_VENDOR_ID_ENSONIQ, PCI_DEVICE_ID_ENSONIQ_ES1370, pcidev))) {
if (pcidev->base_address[0] == 0 ||
* Don't wake up app until there are fragsize bytes to read/write
* 06.01.99 0.8 remove the silly SA_INTERRUPT flag.
* hopefully killed the egcs section type conflict
+ * 12.03.99 0.9 cinfo.blocks should be reset after GETxPTR ioctl.
+ * reported by Johan Maes <joma@telindus.be>
+ * 22.03.99 0.10 return EAGAIN instead of EBUSY when O_NONBLOCK
+ * read/write cannot be executed
*
*/
if (cnt <= 0) {
start_adc(s);
if (file->f_flags & O_NONBLOCK)
- return ret ? ret : -EBUSY;
+ return ret ? ret : -EAGAIN;
interruptible_sleep_on(&s->dma_adc.wait);
if (signal_pending(current))
return ret ? ret : -ERESTARTSYS;
if (cnt <= 0) {
start_dac2(s);
if (file->f_flags & O_NONBLOCK)
- return ret ? ret : -EBUSY;
+ return ret ? ret : -EAGAIN;
interruptible_sleep_on(&s->dma_dac2.wait);
if (signal_pending(current))
return ret ? ret : -ERESTARTSYS;
spin_lock_irqsave(&s->lock, flags);
es1371_update_ptr(s);
cinfo.bytes = s->dma_adc.total_bytes;
- cinfo.blocks = s->dma_adc.total_bytes >> s->dma_adc.fragshift;
+ cinfo.blocks = s->dma_adc.count >> s->dma_adc.fragshift;
cinfo.ptr = s->dma_adc.hwptr;
if (s->dma_adc.mapped)
s->dma_adc.count &= s->dma_adc.fragsize-1;
spin_lock_irqsave(&s->lock, flags);
es1371_update_ptr(s);
cinfo.bytes = s->dma_dac2.total_bytes;
- cinfo.blocks = s->dma_dac2.total_bytes >> s->dma_dac2.fragshift;
+ cinfo.blocks = s->dma_dac2.count >> s->dma_dac2.fragshift;
cinfo.ptr = s->dma_dac2.hwptr;
if (s->dma_dac2.mapped)
s->dma_dac2.count &= s->dma_dac2.fragsize-1;
if (cnt <= 0) {
start_dac1(s);
if (file->f_flags & O_NONBLOCK)
- return ret ? ret : -EBUSY;
+ return ret ? ret : -EAGAIN;
interruptible_sleep_on(&s->dma_dac1.wait);
if (signal_pending(current))
return ret ? ret : -ERESTARTSYS;
spin_lock_irqsave(&s->lock, flags);
es1371_update_ptr(s);
cinfo.bytes = s->dma_dac1.total_bytes;
- cinfo.blocks = s->dma_dac1.total_bytes >> s->dma_dac1.fragshift;
+ cinfo.blocks = s->dma_dac1.count >> s->dma_dac1.fragshift;
cinfo.ptr = s->dma_dac1.hwptr;
if (s->dma_dac1.mapped)
s->dma_dac1.count &= s->dma_dac1.fragsize-1;
cnt = count;
if (cnt <= 0) {
if (file->f_flags & O_NONBLOCK)
- return ret ? ret : -EBUSY;
+ return ret ? ret : -EAGAIN;
interruptible_sleep_on(&s->midi.iwait);
if (signal_pending(current))
return ret ? ret : -ERESTARTSYS;
cnt = count;
if (cnt <= 0) {
if (file->f_flags & O_NONBLOCK)
- return ret ? ret : -EBUSY;
+ return ret ? ret : -EAGAIN;
interruptible_sleep_on(&s->midi.owait);
if (signal_pending(current))
return ret ? ret : -ERESTARTSYS;
if (!pci_present()) /* No PCI bus in this machine! */
return -ENODEV;
- printk(KERN_INFO "es1371: version v0.8 time " __TIME__ " " __DATE__ "\n");
+ printk(KERN_INFO "es1371: version v0.10 time " __TIME__ " " __DATE__ "\n");
while (index < NR_DEVICE &&
(pcidev = pci_find_device(PCI_VENDOR_ID_ENSONIQ, PCI_DEVICE_ID_ENSONIQ_ES1371, pcidev))) {
if (pcidev->base_address[0] == 0 ||
* 16.12.98 0.9 Fix a few f_file & FMODE_ bugs
* 06.01.99 0.10 remove the silly SA_INTERRUPT flag.
* hopefully killed the egcs section type conflict
+ * 12.03.99 0.11 cinfo.blocks should be reset after GETxPTR ioctl.
+ * reported by Johan Maes <joma@telindus.be>
+ * 22.03.99 0.12 return EAGAIN instead of EBUSY when O_NONBLOCK
+ * read/write cannot be executed
*
*/
if (cnt <= 0) {
start_adc(s);
if (file->f_flags & O_NONBLOCK)
- return ret ? ret : -EBUSY;
+ return ret ? ret : -EAGAIN;
interruptible_sleep_on(&s->dma_adc.wait);
if (signal_pending(current))
return ret ? ret : -ERESTARTSYS;
if (cnt <= 0) {
start_dac(s);
if (file->f_flags & O_NONBLOCK)
- return ret ? ret : -EBUSY;
+ return ret ? ret : -EAGAIN;
interruptible_sleep_on(&s->dma_dac.wait);
if (signal_pending(current))
return ret ? ret : -ERESTARTSYS;
spin_lock_irqsave(&s->lock, flags);
sv_update_ptr(s);
cinfo.bytes = s->dma_adc.total_bytes;
- cinfo.blocks = s->dma_adc.total_bytes >> s->dma_adc.fragshift;
+ cinfo.blocks = s->dma_adc.count >> s->dma_adc.fragshift;
cinfo.ptr = s->dma_adc.hwptr;
if (s->dma_adc.mapped)
s->dma_adc.count &= s->dma_adc.fragsize-1;
spin_lock_irqsave(&s->lock, flags);
sv_update_ptr(s);
cinfo.bytes = s->dma_dac.total_bytes;
- cinfo.blocks = s->dma_dac.total_bytes >> s->dma_dac.fragshift;
+ cinfo.blocks = s->dma_dac.count >> s->dma_dac.fragshift;
cinfo.ptr = s->dma_dac.hwptr;
if (s->dma_dac.mapped)
s->dma_dac.count &= s->dma_dac.fragsize-1;
cnt = count;
if (cnt <= 0) {
if (file->f_flags & O_NONBLOCK)
- return ret ? ret : -EBUSY;
+ return ret ? ret : -EAGAIN;
interruptible_sleep_on(&s->midi.iwait);
if (signal_pending(current))
return ret ? ret : -ERESTARTSYS;
cnt = count;
if (cnt <= 0) {
if (file->f_flags & O_NONBLOCK)
- return ret ? ret : -EBUSY;
+ return ret ? ret : -EAGAIN;
interruptible_sleep_on(&s->midi.owait);
if (signal_pending(current))
return ret ? ret : -ERESTARTSYS;
if (!pci_present()) /* No PCI bus in this machine! */
return -ENODEV;
- printk(KERN_INFO "sv: version v0.10 time " __TIME__ " " __DATE__ "\n");
+ printk(KERN_INFO "sv: version v0.12 time " __TIME__ " " __DATE__ "\n");
#if 0
if (!(wavetable_mem = __get_free_pages(GFP_KERNEL, 20-PAGE_SHIFT)))
printk(KERN_INFO "sv: cannot allocate 1MB of contiguous nonpageable memory for wavetable data\n");
#define inode_dirindex(idx) (((idx) & 0xff) * 26 - 21)
#define frag_id(x) (((x) >> 8) & 0x7fff)
-#define off(x) (((x) & 0xff) ? ((x) & 0xff) - 1 : 0)
+#define off(x) (((x) & 0xff) ? (((x) & 0xff) - 1) << sb->u.adfs_sb.s_dr->log2sharesize : 0)
static inline int adfs_inode_validate_no (struct super_block *sb, unsigned int inode_no)
{
return 0;
}
+ if (block < 0) {
+ adfs_error(sb, "adfs_bmap", "block(%d) < 0", block);
+ return 0;
+ }
+
+ if (block > inode->i_blocks)
+ return 0;
+
+ block += off(inode->u.adfs_i.file_id);
+
if (frag_id(inode->u.adfs_i.file_id) == ADFS_ROOT_FRAG)
- blk = sb->u.adfs_sb.s_map_block + off(inode_frag (inode->i_ino)) + block;
+ blk = sb->u.adfs_sb.s_map_block + block;
else
- blk = adfs_map_lookup (sb, frag_id(inode->u.adfs_i.file_id),
- off (inode->u.adfs_i.file_id) + block);
+ blk = adfs_map_lookup (sb, frag_id(inode->u.adfs_i.file_id), block);
return blk;
}
fragment = inode_frag (inode->i_ino);
if (frag_id (fragment) == ADFS_ROOT_FRAG)
- blk = sb->u.adfs_sb.s_map_block + off (fragment) + block;
+ blk = sb->u.adfs_sb.s_map_block + off(fragment) + block;
else
- blk = adfs_map_lookup (sb, frag_id (fragment), off (fragment) + block);
+ blk = adfs_map_lookup (sb, frag_id (fragment), off(fragment) + block);
return blk;
}
-static int adfs_atts2mode (unsigned char mode, unsigned int filetype)
+static int adfs_atts2mode(struct super_block *sb, unsigned char mode, unsigned int filetype)
{
int omode = 0;
S_IRGRP|S_IWGRP|S_IXGRP|
S_IROTH|S_IWOTH|S_IXOTH;
} else {
- if (mode & ADFS_NDA_DIRECTORY)
- omode |= S_IFDIR|S_IRUSR|S_IXUSR|S_IXGRP|S_IXOTH;
- else
+ if (mode & ADFS_NDA_DIRECTORY) {
+ omode |= S_IRUGO & sb->u.adfs_sb.s_owner_mask;
+ omode |= S_IFDIR|S_IXUSR|S_IXGRP|S_IXOTH;
+ } else
omode |= S_IFREG;
+
if (mode & ADFS_NDA_OWNER_READ) {
- omode |= S_IRUSR;
+ omode |= S_IRUGO & sb->u.adfs_sb.s_owner_mask;
if (filetype == 0xfe6 /* UnixExec */)
- omode |= S_IXUSR;
+ omode |= S_IXUGO & sb->u.adfs_sb.s_owner_mask;
}
+
if (mode & ADFS_NDA_OWNER_WRITE)
- omode |= S_IWUSR;
+ omode |= S_IWUGO & sb->u.adfs_sb.s_owner_mask;
+
if (mode & ADFS_NDA_PUBLIC_READ) {
- omode |= S_IRGRP | S_IROTH;
- if (filetype == 0xfe6)
- omode |= S_IXGRP | S_IXOTH;
+ omode |= S_IRUGO & sb->u.adfs_sb.s_other_mask;
+ if (filetype == 0xfe6 /* UnixExec */)
+ omode |= S_IXUGO & sb->u.adfs_sb.s_other_mask;
}
+
if (mode & ADFS_NDA_PUBLIC_WRITE)
- omode |= S_IWGRP | S_IWOTH;
+ omode |= S_IWUGO & sb->u.adfs_sb.s_other_mask;
}
return omode;
}
int buffers;
sb = inode->i_sb;
- inode->i_uid = 0;
- inode->i_gid = 0;
+ inode->i_uid = sb->u.adfs_sb.s_uid;
+ inode->i_gid = sb->u.adfs_sb.s_gid;
inode->i_version = ++event;
if (adfs_inode_validate_no (sb, inode->i_ino & 0xffffff00)) {
goto bad;
}
adfs_dir_free (bh, buffers);
- inode->i_mode = adfs_atts2mode (ide.mode, ide.filetype);
+ inode->i_mode = adfs_atts2mode(sb, ide.mode, ide.filetype);
inode->i_nlink = 2;
inode->i_size = ide.size;
inode->i_blksize = PAGE_SIZE;
return;
bad:
- inode->i_mode = 0;
- inode->i_nlink = 1;
- inode->i_size = 0;
- inode->i_blksize = 0;
- inode->i_blocks = 0;
- inode->i_mtime =
- inode->i_atime =
- inode->i_ctime = 0;
- inode->i_op = NULL;
+ make_bad_inode(inode);
}
#include <stdarg.h>
-static void adfs_put_super (struct super_block *sb);
-static int adfs_statfs (struct super_block *sb, struct statfs *buf, int bufsiz);
-void adfs_read_inode (struct inode *inode);
+static void adfs_put_super(struct super_block *sb);
+static int adfs_remount(struct super_block *sb, int *flags, char *data);
+static int adfs_statfs(struct super_block *sb, struct statfs *buf, int bufsiz);
+void adfs_read_inode(struct inode *inode);
-void adfs_error (struct super_block *sb, const char *function, const char *fmt, ...)
+void adfs_error(struct super_block *sb, const char *function, const char *fmt, ...)
{
char error_buf[128];
va_list args;
- va_start (args, fmt);
- vsprintf (error_buf, fmt, args);
- va_end (args);
+ va_start(args, fmt);
+ vsprintf(error_buf, fmt, args);
+ va_end(args);
- printk (KERN_CRIT "ADFS-fs error (device %s)%s%s: %s\n",
- kdevname (sb->s_dev), function ? ": " : "",
+ printk(KERN_CRIT "ADFS-fs error (device %s)%s%s: %s\n",
+ kdevname(sb->s_dev), function ? ": " : "",
function ? function : "", error_buf);
}
-unsigned char adfs_calccrosscheck (struct super_block *sb, char *map)
+static unsigned char adfs_calczonecheck(struct super_block *sb, char *map)
{
unsigned int v0, v1, v2, v3;
int i;
return v0 ^ v1 ^ v2 ^ v3;
}
-static int adfs_checkmap (struct super_block *sb)
+static int adfs_checkmap(struct super_block *sb)
{
unsigned char crosscheck = 0, zonecheck = 1;
int i;
char *map;
map = sb->u.adfs_sb.s_map[i]->b_data;
- if (adfs_calccrosscheck (sb, map) != map[0]) {
- adfs_error (sb, "adfs_checkmap", "zone %d fails zonecheck", i);
+ if (adfs_calczonecheck(sb, map) != map[0]) {
+ adfs_error(sb, "adfs_checkmap", "zone %d fails zonecheck", i);
zonecheck = 0;
}
crosscheck ^= map[3];
}
if (crosscheck != 0xff)
- adfs_error (sb, "adfs_checkmap", "crosscheck != 0xff");
+ adfs_error(sb, "adfs_checkmap", "crosscheck != 0xff");
return crosscheck == 0xff && zonecheck;
}
adfs_put_super,
NULL,
adfs_statfs,
- NULL
+ adfs_remount
};
-static void adfs_put_super (struct super_block *sb)
+static void adfs_put_super(struct super_block *sb)
{
int i;
for (i = 0; i < sb->u.adfs_sb.s_map_size; i++)
- brelse (sb->u.adfs_sb.s_map[i]);
- kfree (sb->u.adfs_sb.s_map);
- brelse (sb->u.adfs_sb.s_sbh);
+ brelse(sb->u.adfs_sb.s_map[i]);
+ kfree(sb->u.adfs_sb.s_map);
+ brelse(sb->u.adfs_sb.s_sbh);
MOD_DEC_USE_COUNT;
}
-struct super_block *adfs_read_super (struct super_block *sb, void *data, int silent)
+static int parse_options(struct super_block *sb, char *options)
+{
+ char *value, *opt;
+
+ if (!options)
+ return 0;
+
+ for (opt = strtok(options, ","); opt != NULL; opt = strtok(NULL, ",")) {
+ value = strchr(opt, '=');
+ if (value)
+ *value++ = '\0';
+
+ if (!strcmp(opt, "uid")) { /* owner of all files */
+ if (!value || !*value)
+ return -EINVAL;
+ sb->u.adfs_sb.s_uid = simple_strtoul(value, &value, 0);
+ if (*value)
+ return -EINVAL;
+ } else
+ if (!strcmp(opt, "gid")) { /* group owner of all files */
+ if (!value || !*value)
+ return -EINVAL;
+ sb->u.adfs_sb.s_gid = simple_strtoul(value, &value, 0);
+ if (*value)
+ return -EINVAL;
+ } else
+ if (!strcmp(opt, "ownmask")) { /* owner permission mask */
+ if (!value || !*value)
+ return -EINVAL;
+ sb->u.adfs_sb.s_owner_mask = simple_strtoul(value, &value, 8);
+ if (*value)
+ return -EINVAL;
+ } else
+ if (!strcmp(opt, "othmask")) { /* others permission mask */
+ if (!value || !*value)
+ return -EINVAL;
+ sb->u.adfs_sb.s_other_mask = simple_strtoul(value, &value, 8);
+ if (*value)
+ return -EINVAL;
+ } else { /* eh? say again. */
+ printk("ADFS-fs: unrecognised mount option %s\n", opt);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static int adfs_remount(struct super_block *sb, int *flags, char *data)
+{
+ return parse_options(sb, data);
+}
+
+struct super_block *adfs_read_super(struct super_block *sb, void *data, int silent)
{
struct adfs_discrecord *dr;
struct buffer_head *bh;
kdev_t dev = sb->s_dev;
int i, j;
+ /* set default options */
+ sb->u.adfs_sb.s_uid = 0;
+ sb->u.adfs_sb.s_gid = 0;
+ sb->u.adfs_sb.s_owner_mask = S_IRWXU;
+ sb->u.adfs_sb.s_other_mask = S_IRWXG | S_IRWXO;
+
+ if (parse_options(sb, data))
+ goto error;
+
MOD_INC_USE_COUNT;
- lock_super (sb);
- set_blocksize (dev, BLOCK_SIZE);
- if (!(bh = bread (dev, ADFS_DISCRECORD / BLOCK_SIZE, BLOCK_SIZE))) {
- unlock_super (sb);
- adfs_error (sb, NULL, "unable to read superblock");
- MOD_DEC_USE_COUNT;
- return NULL;
+ lock_super(sb);
+ set_blocksize(dev, BLOCK_SIZE);
+ if (!(bh = bread(dev, ADFS_DISCRECORD / BLOCK_SIZE, BLOCK_SIZE))) {
+ adfs_error(sb, NULL, "unable to read superblock");
+ goto error_unlock;
}
b_data = bh->b_data + (ADFS_DISCRECORD % BLOCK_SIZE);
- if (adfs_checkbblk (b_data)) {
+ if (adfs_checkbblk(b_data)) {
if (!silent)
- printk ("VFS: Can't find an adfs filesystem on dev "
+ printk("VFS: Can't find an adfs filesystem on dev "
"%s.\n", kdevname(dev));
-failed_mount:
- unlock_super (sb);
- if (bh)
- brelse (bh);
- MOD_DEC_USE_COUNT;
- return NULL;
+ goto error_free_bh;
}
dr = (struct adfs_discrecord *)(b_data + ADFS_DR_OFFSET);
(sb->s_blocksize == 512 || sb->s_blocksize == 1024 ||
sb->s_blocksize == 2048 || sb->s_blocksize == 4096)) {
- brelse (bh);
- set_blocksize (dev, sb->s_blocksize);
- bh = bread (dev, ADFS_DISCRECORD / sb->s_blocksize, sb->s_blocksize);
+ brelse(bh);
+ set_blocksize(dev, sb->s_blocksize);
+ bh = bread(dev, ADFS_DISCRECORD / sb->s_blocksize, sb->s_blocksize);
if (!bh) {
- adfs_error (sb, NULL, "couldn't read superblock on "
+ adfs_error(sb, NULL, "couldn't read superblock on "
"2nd try.");
- goto failed_mount;
+ goto error_unlock;
}
b_data = bh->b_data + (ADFS_DISCRECORD % sb->s_blocksize);
- if (adfs_checkbblk (b_data)) {
- adfs_error (sb, NULL, "disc record mismatch, very weird!");
- goto failed_mount;
+ if (adfs_checkbblk(b_data)) {
+ adfs_error(sb, NULL, "disc record mismatch, very weird!");
+ goto error_free_bh;
}
dr = (struct adfs_discrecord *)(b_data + ADFS_DR_OFFSET);
}
if (sb->s_blocksize != bh->b_size) {
if (!silent)
- printk (KERN_ERR "VFS: Unsupported blocksize on dev "
- "%s.\n", kdevname (dev));
- goto failed_mount;
+ printk(KERN_ERR "VFS: Unsupported blocksize on dev "
+ "%s.\n", kdevname(dev));
+ goto error_free_bh;
}
/* blocksize on this device should now be set to the adfs log2secsize */
else
sb->u.adfs_sb.s_map_block >>= -sb->u.adfs_sb.s_map2blk;
- printk (KERN_DEBUG "ADFS: zone size %d, IDs per zone %d, map address %X size %d sectors\n",
+ printk(KERN_DEBUG "ADFS: zone size %d, IDs per zone %d, map address %X size %d sectors\n",
sb->u.adfs_sb.s_zone_size, sb->u.adfs_sb.s_ids_per_zone,
sb->u.adfs_sb.s_map_block, sb->u.adfs_sb.s_map_size);
- printk (KERN_DEBUG "ADFS: sector size %d, map bit size %d\n",
- 1 << dr->log2secsize, 1 << dr->log2bpmb);
+ printk(KERN_DEBUG "ADFS: sector size %d, map bit size %d, share size %d\n",
+ 1 << dr->log2secsize, 1 << dr->log2bpmb,
+ 1 << (dr->log2secsize + dr->log2sharesize));
sb->s_magic = ADFS_SUPER_MAGIC;
- sb->s_flags |= MS_RDONLY; /* we don't support writing yet */
- sb->u.adfs_sb.s_map = kmalloc (sb->u.adfs_sb.s_map_size *
- sizeof (struct buffer_head *), GFP_KERNEL);
+ sb->u.adfs_sb.s_map = kmalloc(sb->u.adfs_sb.s_map_size *
+ sizeof(struct buffer_head *), GFP_KERNEL);
if (sb->u.adfs_sb.s_map == NULL) {
- adfs_error (sb, NULL, "not enough memory");
- goto failed_mount;
+ adfs_error(sb, NULL, "not enough memory");
+ goto error_free_bh;
}
for (i = 0; i < sb->u.adfs_sb.s_map_size; i++) {
- sb->u.adfs_sb.s_map[i] = bread (dev,
+ sb->u.adfs_sb.s_map[i] = bread(dev,
sb->u.adfs_sb.s_map_block + i,
sb->s_blocksize);
if (!sb->u.adfs_sb.s_map[i]) {
for (j = 0; j < i; j++)
- brelse (sb->u.adfs_sb.s_map[j]);
- kfree (sb->u.adfs_sb.s_map);
- adfs_error (sb, NULL, "unable to read map");
- goto failed_mount;
+ brelse(sb->u.adfs_sb.s_map[j]);
+ kfree(sb->u.adfs_sb.s_map);
+ adfs_error(sb, NULL, "unable to read map");
+ goto error_free_bh;
}
}
- if (!adfs_checkmap (sb)) {
+ if (!adfs_checkmap(sb)) {
for (i = 0; i < sb->u.adfs_sb.s_map_size; i++)
- brelse (sb->u.adfs_sb.s_map[i]);
- adfs_error (sb, NULL, "map corrupted");
- goto failed_mount;
+ brelse(sb->u.adfs_sb.s_map[i]);
+ adfs_error(sb, NULL, "map corrupted");
+ goto error_free_bh;
}
dr = (struct adfs_discrecord *)(sb->u.adfs_sb.s_map[0]->b_data + 4);
- unlock_super (sb);
+ unlock_super(sb);
/*
* set up enough so that it can read an inode
*/
sb->s_op = &adfs_sops;
- sb->u.adfs_sb.s_root = adfs_inode_generate (dr->root, 0);
+ sb->u.adfs_sb.s_root = adfs_inode_generate(dr->root, 0);
sb->s_root = d_alloc_root(iget(sb, sb->u.adfs_sb.s_root), NULL);
if (!sb->s_root) {
- sb->s_dev = 0;
for (i = 0; i < sb->u.adfs_sb.s_map_size; i++)
- brelse (sb->u.adfs_sb.s_map[i]);
- brelse (bh);
- adfs_error (sb, NULL, "get root inode failed\n");
- MOD_DEC_USE_COUNT;
- return NULL;
+ brelse(sb->u.adfs_sb.s_map[i]);
+ brelse(bh);
+ adfs_error(sb, NULL, "get root inode failed\n");
+ goto error_dec_use;
}
return sb;
+
+error_free_bh:
+ if (bh)
+ brelse(bh);
+error_unlock:
+ unlock_super(sb);
+error_dec_use:
+ MOD_DEC_USE_COUNT;
+error:
+ sb->s_dev = 0;
+ return NULL;
}
-static int adfs_statfs (struct super_block *sb, struct statfs *buf, int bufsiz)
+static int adfs_statfs(struct super_block *sb, struct statfs *buf, int bufsiz)
{
struct statfs tmp;
const unsigned int nidlen = sb->u.adfs_sb.s_idlen + 1;
tmp.f_type = ADFS_SUPER_MAGIC;
tmp.f_bsize = sb->s_blocksize;
- tmp.f_blocks = (sb->u.adfs_sb.s_dr->disc_size) >> (sb->s_blocksize_bits);
+ tmp.f_blocks = sb->u.adfs_sb.s_dr->disc_size_high << (32 - sb->s_blocksize_bits) |
+ sb->u.adfs_sb.s_dr->disc_size >> sb->s_blocksize_bits;
tmp.f_files = tmp.f_blocks >> nidlen;
{
unsigned int i, j = 0;
if (freelink <= nidlen) break;
} while (mapindex < 8 * sb->s_blocksize);
if (mapindex > 8 * sb->s_blocksize)
- adfs_error (sb, NULL, "oversized free fragment\n");
+ adfs_error(sb, NULL, "oversized free fragment\n");
else if (freelink)
- adfs_error (sb, NULL, "undersized free fragment\n");
+ adfs_error(sb, NULL, "undersized free fragment\n");
}
tmp.f_bfree = tmp.f_bavail = j <<
(sb->u.adfs_sb.s_dr->log2bpmb - sb->s_blocksize_bits);
}
tmp.f_ffree = tmp.f_bfree >> nidlen;
tmp.f_namelen = ADFS_NAME_LEN;
- return copy_to_user (buf, &tmp, bufsiz) ? -EFAULT : 0;
+ return copy_to_user(buf, &tmp, bufsiz) ? -EFAULT : 0;
}
static struct file_system_type adfs_fs_type = {
"adfs", FS_REQUIRES_DEV, adfs_read_super, NULL
};
-__initfunc(int init_adfs_fs (void))
+__initfunc(int init_adfs_fs(void))
{
- return register_filesystem (&adfs_fs_type);
+ return register_filesystem(&adfs_fs_type);
}
#ifdef MODULE
-int init_module (void)
+int init_module(void)
{
return init_adfs_fs();
}
-void cleanup_module (void)
+void cleanup_module(void)
{
- unregister_filesystem (&adfs_fs_type);
+ unregister_filesystem(&adfs_fs_type);
}
#endif
kfree(file);
return;
}
- fp = &file->f_next;
+ fp = &f->f_next;
}
printk(KERN_WARNING "lockd: attempt to release unknown file!\n");
poll_table wait_table;
struct poll_table_entry entry;
int init_timeout, max_timeout;
- int timeout; long tmp_timeout;
+ int timeout;
int retrans;
int major_timeout_seen;
int acknowledge_seen;
#define MIN(a,b) (((a)<(b))?(a):(b))
#define MAX(a,b) (((a)>(b))?(a):(b))
-#include <linux/fs.h>
-#include <linux/qnx4_fs.h>
static int qnx4_readpage(struct file *file, struct page *page);
#define destroy_context(mm) BTFIXUP_CALL(destroy_context)(mm)
-/* After we have set current->mm to a new value, this activates
- * the context for the new mm so we see the new mappings.
+/* This need not do anything on Sparc32. The switch happens
+ * properly later as a side effect of calling flush_thread.
*/
-#define activate_context(tsk) switch_to_context(tsk)
+#define activate_context(tsk) do { } while(0)
#endif /* !(__SPARC_MMU_CONTEXT_H) */
-/* $Id: processor.h,v 1.69 1999/01/19 07:57:44 davem Exp $
+/* $Id: processor.h,v 1.70 1999/03/24 11:42:44 davem Exp $
* include/asm-sparc/processor.h
*
* Copyright (C) 1994 David S. Miller (davem@caip.rutgers.edu)
extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
-#define copy_segments(nr, tsk, mm) do { } while (0)
+#define copy_segments(__nr, __tsk, __mm) \
+ if((__tsk) == current && \
+ (__mm) != NULL) \
+ flush_user_windows()
#define release_segments(mm) do { } while (0)
#define forget_segments() do { } while (0)
-/* $Id: termios.h,v 1.28 1999/01/02 16:50:22 davem Exp $ */
+/* $Id: termios.h,v 1.29 1999/03/25 09:11:18 davem Exp $ */
#ifndef _SPARC_TERMIOS_H
#define _SPARC_TERMIOS_H
-/* $Id: unistd.h,v 1.53 1999/02/21 02:34:54 anton Exp $ */
+/* $Id: unistd.h,v 1.54 1999/03/25 00:40:12 davem Exp $ */
#ifndef _SPARC_UNISTD_H
#define _SPARC_UNISTD_H
static __inline__ _syscall0(int,sync)
static __inline__ _syscall0(pid_t,setsid)
static __inline__ _syscall3(int,write,int,fd,__const__ char *,buf,off_t,count)
+static __inline__ _syscall3(int,read,int,fd,char *,buf,off_t,count)
+static __inline__ _syscall3(off_t,lseek,int,fd,off_t,offset,int,count)
static __inline__ _syscall1(int,dup,int,fd)
static __inline__ _syscall3(int,execve,__const__ char *,file,char **,argv,char **,envp)
static __inline__ _syscall3(int,open,__const__ char *,file,int,flag,int,mode)
-/* $Id: termios.h,v 1.7 1999/01/02 16:50:29 davem Exp $ */
+/* $Id: termios.h,v 1.8 1999/03/25 09:11:26 davem Exp $ */
#ifndef _SPARC64_TERMIOS_H
#define _SPARC64_TERMIOS_H
-/* $Id: unistd.h,v 1.26 1999/02/10 22:24:35 jj Exp $ */
+/* $Id: unistd.h,v 1.27 1999/03/25 00:40:14 davem Exp $ */
#ifndef _SPARC64_UNISTD_H
#define _SPARC64_UNISTD_H
static __inline__ _syscall0(pid_t,setsid)
static __inline__ _syscall3(int,write,int,fd,__const__ char *,buf,off_t,count)
static __inline__ _syscall3(int,read,int,fd,char *,buf,off_t,count)
+static __inline__ _syscall3(off_t,lseek,int,fd,off_t,offset,int,count)
static __inline__ _syscall1(int,dup,int,fd)
static __inline__ _syscall3(int,execve,__const__ char *,file,char **,argv,char **,envp)
static __inline__ _syscall3(int,open,__const__ char *,file,int,flag,int,mode)
struct adfs_sb_info {
struct buffer_head *s_sbh; /* buffer head containing disc record */
struct adfs_discrecord *s_dr; /* pointer to disc record in s_sbh */
+ uid_t s_uid; /* owner uid */
+ gid_t s_gid; /* owner gid */
+ int s_owner_mask; /* ADFS Owner perm -> unix perm */
+ int s_other_mask; /* ADFS Other perm -> unix perm */
__u16 s_zone_size; /* size of a map zone in bits */
__u16 s_ids_per_zone; /* max. no ids in one zone */
__u32 s_idlen; /* length of ID in map */
void hippi_setup(struct device *dev);
extern struct device *init_hippi_dev(struct device *, int);
+extern void unregister_hipdev(struct device *dev);
#endif
#endif /* _LINUX_HIPPIDEVICE_H */
#define ARPHRD_ASH 781 /* Nexus 64Mbps Ash */
#define ARPHRD_ECONET 782 /* Acorn Econet */
#define ARPHRD_IRDA 783 /* Linux/IR */
+/* ARP works differently on different FC media .. so */
+#define ARPHRD_FCPP 784 /* Point to point fibrechanel */
+#define ARPHRD_FCAL 785 /* Fibrechannel arbitrated loop */
+#define ARPHRD_FCPL 786 /* Fibrechannel public loop */
+#define ARPHRD_FCFABRIC 787 /* Fibrechannel fabric */
+ /* 788->799 reserved for fibrechannel media types */
+
/* ARP protocol opcodes. */
#define ARPOP_REQUEST 1 /* ARP request */
void nfsd_fh_init(void);
void nfsd_fh_free(void);
+void expire_all(void);
+void expire_by_dentry(struct dentry *);
+
static __inline__ struct svc_fh *
fh_copy(struct svc_fh *dst, struct svc_fh *src)
{
extern __inline__ unsigned long cls_set_class(unsigned long *clp, unsigned long cl)
{
- net_serialize_enter();
cl = xchg(clp, cl);
- net_serialize_leave();
+ synchronize_bh();
return cl;
}
return in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
}
-#ifdef __SMP__
-#define net_serialize_enter() start_bh_atomic()
-#define net_serialize_leave() end_bh_atomic()
-#else
-#define net_serialize_enter() barrier();
-#define net_serialize_leave() barrier();
-#endif
-
/*
* Enable debug/info messages
*/
#define uidhashfn(uid) (((uid >> 8) ^ uid) & (UIDHASH_SZ - 1))
+/*
+ * These routines must be called with the uidhash spinlock held!
+ */
static inline void uid_hash_insert(struct user_struct *up, unsigned int hashent)
{
- spin_lock(&uidhash_lock);
if((up->next = uidhash[hashent]) != NULL)
uidhash[hashent]->pprev = &up->next;
up->pprev = &uidhash[hashent];
uidhash[hashent] = up;
- spin_unlock(&uidhash_lock);
}
static inline void uid_hash_remove(struct user_struct *up)
{
- spin_lock(&uidhash_lock);
if(up->next)
up->next->pprev = up->pprev;
*up->pprev = up->next;
- spin_unlock(&uidhash_lock);
}
-static inline struct user_struct *uid_find(unsigned short uid, unsigned int hashent)
+static inline struct user_struct *uid_hash_find(unsigned short uid, unsigned int hashent)
{
- struct user_struct *up;
-
- spin_lock(&uidhash_lock);
- for(up = uidhash[hashent]; (up && up->uid != uid); up = up->next)
- ;
- spin_unlock(&uidhash_lock);
+ struct user_struct *up, *next;
+
+ next = uidhash[hashent];
+ for (;;) {
+ up = next;
+ if (next) {
+ next = up->next;
+ if (up->uid != uid)
+ continue;
+ atomic_inc(&up->count);
+ }
+ break;
+ }
return up;
}
if (up) {
p->user = NULL;
if (atomic_dec_and_test(&up->count)) {
+ spin_lock(&uidhash_lock);
uid_hash_remove(up);
+ spin_unlock(&uidhash_lock);
kmem_cache_free(uid_cachep, up);
}
}
int alloc_uid(struct task_struct *p)
{
unsigned int hashent = uidhashfn(p->uid);
- struct user_struct *up = uid_find(p->uid, hashent);
+ struct user_struct *up;
+
+ spin_lock(&uidhash_lock);
+ up = uid_hash_find(p->uid, hashent);
+ spin_unlock(&uidhash_lock);
- p->user = up;
if (!up) {
- up = kmem_cache_alloc(uid_cachep, SLAB_KERNEL);
- if (!up)
+ struct user_struct *new;
+
+ new = kmem_cache_alloc(uid_cachep, SLAB_KERNEL);
+ if (!new)
return -EAGAIN;
- p->user = up;
- up->uid = p->uid;
- atomic_set(&up->count, 0);
- uid_hash_insert(up, hashent);
- }
+ new->uid = p->uid;
+ atomic_set(&new->count, 1);
- atomic_inc(&up->count);
+ /*
+ * Before adding this, check whether we raced
+ * on adding the same user already..
+ */
+ spin_lock(&uidhash_lock);
+ up = uid_hash_find(p->uid, hashent);
+ if (up) {
+ kmem_cache_free(uid_cachep, new);
+ } else {
+ uid_hash_insert(new, hashent);
+ up = new;
+ }
+ spin_unlock(&uidhash_lock);
+
+ }
+ p->user = up;
return 0;
}
}
sigaddset(&t->signal, sig);
- if (!sigismember(&t->blocked, sig))
+ if (!sigismember(&t->blocked, sig)) {
t->sigpending = 1;
+#ifdef __SMP__
+ /*
+ * If the task is running on a different CPU
+ * force a reschedule on the other CPU - note that
+ * the code below is a tad loose and might occasionally
+ * kick the wrong CPU if we catch the process in the
+ * process of changing - but no harm is done by that
+ * other than doing an extra (lightweight) IPI interrupt.
+ *
+ * note that we rely on the previous spin_lock to
+ * lock interrupts for us! No need to set need_resched
+ * since signal event passing goes through ->blocked.
+ */
+ spin_lock(&runqueue_lock);
+ if (t->has_cpu && t->processor != smp_processor_id())
+ smp_send_reschedule(t->processor);
+ spin_unlock(&runqueue_lock);
+#endif /* __SMP__ */
+ }
out:
spin_unlock_irqrestore(&t->sigmask_lock, flags);
/*
* Scan the networks.
*/
-
+ atif->status |= ATIF_PROBE;
for(netct = 0; netct <= netrange; netct++)
{
/*
*/
aarp_probe_network(atif);
- if(!(atif->status & ATIF_PROBE_FAIL))
+ if(!(atif->status & ATIF_PROBE_FAIL)) {
+ atif->status &= ~ATIF_PROBE;
return (0);
+ }
}
atif->status &= ~ATIF_PROBE_FAIL;
}
if(probe_net > ntohs(atif->nets.nr_lastnet))
probe_net = ntohs(atif->nets.nr_firstnet);
}
-
+ atif->status &= ~ATIF_PROBE;
return (-EADDRINUSE); /* Network is full... */
}
{
if(pt==(*pt1))
{
- net_serialize_enter();
*pt1=pt->next;
- net_serialize_leave();
+ synchronize_bh();
#ifdef CONFIG_NET_FASTROUTE
if (pt->data)
netdev_fastroute_obstacles--;
/* And unlink it from device chain. */
for (dp = &dev_base; (d=*dp) != NULL; dp=&d->next) {
if (d == dev) {
- net_serialize_enter();
*dp = d->next;
- net_serialize_leave();
+ synchronize_bh();
d->next = NULL;
if (dev->destructor)
/*
* It failed to come up. Unhook it.
*/
- net_serialize_enter();
*dp = dev->next;
- net_serialize_leave();
+ synchronize_bh();
}
else
{
if ((err = sk_chk_filter(fp->insns, fp->len))==0) {
struct sk_filter *old_fp = sk->filter;
- net_serialize_enter();
sk->filter = fp;
- net_serialize_leave();
+ synchronize_bh();
fp = old_fp;
}
for (np = &tbl->phash_buckets[hash_val]; (n=*np) != NULL; np = &n->next) {
if (memcmp(n->key, pkey, key_len) == 0 && n->dev == dev) {
- net_serialize_enter();
*np = n->next;
- net_serialize_leave();
+ synchronize_bh();
if (tbl->pdestructor)
tbl->pdestructor(n);
kfree(n);
np = &tbl->phash_buckets[h];
for (np = &tbl->phash_buckets[h]; (n=*np) != NULL; np = &n->next) {
if (n->dev == dev || dev == NULL) {
- net_serialize_enter();
*np = n->next;
- net_serialize_leave();
+ synchronize_bh();
if (tbl->pdestructor)
tbl->pdestructor(n);
kfree(n);
return;
for (p = &tbl->parms.next; *p; p = &(*p)->next) {
if (*p == parms) {
- net_serialize_enter();
*p = parms->next;
- net_serialize_leave();
+ synchronize_bh();
#ifdef CONFIG_SYSCTL
neigh_sysctl_unregister(parms);
#endif
printk(KERN_CRIT "neighbour leakage\n");
for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
if (*tp == tbl) {
- net_serialize_enter();
*tp = tbl->next;
- net_serialize_leave();
+ synchronize_bh();
break;
}
}
* handler for protocols to use and generic option handler.
*
*
- * Version: $Id: sock.c,v 1.77 1999/03/21 05:22:26 davem Exp $
+ * Version: $Id: sock.c,v 1.78 1999/03/25 10:03:55 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
filter = sk->filter;
- net_serialize_enter();
sk->filter = NULL;
- net_serialize_leave();
+ synchronize_bh();
if (filter)
sk_filter_release(sk, filter);
*
* PF_INET protocol family socket handler.
*
- * Version: $Id: af_inet.c,v 1.85 1999/03/21 05:22:28 davem Exp $
+ * Version: $Id: af_inet.c,v 1.86 1999/03/25 00:38:15 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
lock_sock(sk);
tcp_set_state(sk, TCP_CLOSE);
release_sock(sk);
+ sk->zapped = 0;
}
sock->state = SS_UNCONNECTED;
return sock_error(sk);
/*
* NET3 IP device support routines.
*
- * Version: $Id: devinet.c,v 1.26 1999/03/21 05:22:31 davem Exp $
+ * Version: $Id: devinet.c,v 1.27 1999/03/25 10:04:06 davem Exp $
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
#ifdef CONFIG_SYSCTL
devinet_sysctl_unregister(&in_dev->cnf);
#endif
- net_serialize_enter();
in_dev->dev->ip_ptr = NULL;
- net_serialize_leave();
+ synchronize_bh();
neigh_parms_release(&arp_tbl, in_dev->arp_parms);
kfree(in_dev);
}
ifap1 = &ifa->ifa_next;
continue;
}
- net_serialize_enter();
*ifap1 = ifa->ifa_next;
- net_serialize_leave();
+ synchronize_bh();
rtmsg_ifa(RTM_DELADDR, ifa);
notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa);
/* 2. Unlink it */
- net_serialize_enter();
*ifap = ifa1->ifa_next;
- net_serialize_leave();
+ synchronize_bh();
/* 3. Announce address deletion */
}
ifa->ifa_next = *ifap;
- net_serialize_enter();
+ wmb();
*ifap = ifa;
- net_serialize_leave();
/* Send message first, then call notifier.
Notifier will trigger FIB update, so that
*
* IPv4 FIB: lookup engine and maintenance routines.
*
- * Version: $Id: fib_hash.c,v 1.7 1999/03/21 05:22:32 davem Exp $
+ * Version: $Id: fib_hash.c,v 1.8 1999/03/25 10:04:17 davem Exp $
*
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
*
if (del_fp) {
f = *del_fp;
/* Unlink replaced node */
- net_serialize_enter();
*del_fp = f->fn_next;
- net_serialize_leave();
+ synchronize_bh();
if (!(f->fn_state&FN_S_ZOMBIE))
rtmsg_fib(RTM_DELROUTE, f, z, tb->tb_id, n, req);
rtmsg_fib(RTM_DELROUTE, f, z, tb->tb_id, n, req);
if (matched != 1) {
- net_serialize_enter();
*del_fp = f->fn_next;
- net_serialize_leave();
+ synchronize_bh();
if (f->fn_state&FN_S_ACCESSED)
rt_cache_flush(-1);
struct fib_info *fi = FIB_INFO(f);
if (fi && ((f->fn_state&FN_S_ZOMBIE) || (fi->fib_flags&RTNH_F_DEAD))) {
- net_serialize_enter();
*fp = f->fn_next;
- net_serialize_leave();
+ synchronize_bh();
fn_free_node(f);
found++;
*
* IPv4 Forwarding Information Base: policy rules.
*
- * Version: $Id: fib_rules.c,v 1.8 1999/03/21 05:22:33 davem Exp $
+ * Version: $Id: fib_rules.c,v 1.9 1999/03/25 10:04:23 davem Exp $
*
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
*
(!rtm->rtm_table || (r && rtm->rtm_table == r->r_table))) {
if (r == &local_rule)
return -EPERM;
- net_serialize_enter();
+
*rp = r->r_next;
- net_serialize_leave();
+ synchronize_bh();
+
if (r != &default_rule && r != &main_rule)
kfree(r);
return 0;
* the older version didn't come out right using gcc 2.5.8, the newer one
* seems to fall out with gcc 2.6.2.
*
- * Version: $Id: igmp.c,v 1.29 1999/03/21 05:22:36 davem Exp $
+ * Version: $Id: igmp.c,v 1.30 1999/03/25 10:04:10 davem Exp $
*
* Authors:
* Alan Cox <Alan.Cox@linux.org>
for (ip=&in_dev->mc_list; (i=*ip)!=NULL; ip=&i->next) {
if (i->multiaddr==addr) {
if (--i->users == 0) {
- net_serialize_enter();
*ip = i->next;
- net_serialize_leave();
+ synchronize_bh();
+
igmp_group_dropped(i);
if (in_dev->dev->flags & IFF_UP)
ip_rt_multicast_event(in_dev);
struct in_device *in_dev;
if (--iml->count)
return 0;
- net_serialize_enter();
+
*imlp = iml->next;
- net_serialize_leave();
+ synchronize_bh();
+
in_dev = inetdev_by_index(iml->multi.imr_ifindex);
if (in_dev)
ip_mc_dec_group(in_dev, imr->imr_multiaddr.s_addr);
{
struct ip_tunnel **tp = ipgre_bucket(t);
- net_serialize_enter();
t->next = *tp;
+ wmb();
*tp = t;
- net_serialize_leave();
}
static void ipgre_tunnel_unlink(struct ip_tunnel *t)
for (tp = ipgre_bucket(t); *tp; tp = &(*tp)->next) {
if (t == *tp) {
- net_serialize_enter();
*tp = t->next;
- net_serialize_leave();
+ synchronize_bh();
break;
}
}
*
* The Internet Protocol (IP) output module.
*
- * Version: $Id: ip_output.c,v 1.66 1999/03/21 05:22:41 davem Exp $
+ * Version: $Id: ip_output.c,v 1.67 1999/03/25 00:43:00 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
into account). Actually, tcp should make it. --ANK (980801)
*/
iph->frag_off |= __constant_htons(IP_DF);
- printk(KERN_DEBUG "sending pkt_too_big to self\n");
+ NETDEBUG(printk(KERN_DEBUG "sending pkt_too_big to self\n"));
/* icmp_send is not reenterable, so that bh_atomic... --ANK */
start_bh_atomic();
*
* The IP to API glue.
*
- * Version: $Id: ip_sockglue.c,v 1.40 1999/03/21 05:22:42 davem Exp $
+ * Version: $Id: ip_sockglue.c,v 1.41 1999/03/25 10:04:29 davem Exp $
*
* Authors: see ip.c
*
kfree(new_ra);
return -EADDRINUSE;
}
- net_serialize_enter();
*rap = ra->next;
- net_serialize_leave();
+ synchronize_bh();
+
if (ra->destructor)
ra->destructor(sk);
kfree(ra);
return -ENOBUFS;
new_ra->sk = sk;
new_ra->destructor = destructor;
+
new_ra->next = ra;
- net_serialize_enter();
+ wmb();
*rap = new_ra;
- net_serialize_leave();
+
return 0;
}
/*
* Linux NET3: IP/IP protocol decoder.
*
- * Version: $Id: ipip.c,v 1.25 1999/03/21 05:22:43 davem Exp $
+ * Version: $Id: ipip.c,v 1.26 1999/03/25 10:04:32 davem Exp $
*
* Authors:
* Sam Lantinga (slouken@cs.ucdavis.edu) 02/01/95
for (tp = ipip_bucket(t); *tp; tp = &(*tp)->next) {
if (t == *tp) {
- net_serialize_enter();
*tp = t->next;
- net_serialize_leave();
+ synchronize_bh();
break;
}
}
{
struct ip_tunnel **tp = ipip_bucket(t);
- net_serialize_enter();
t->next = *tp;
+ wmb();
*tp = t;
- net_serialize_leave();
}
struct ip_tunnel * ipip_tunnel_locate(struct ip_tunnel_parm *parms, int create)
static void ipip_tunnel_destroy(struct device *dev)
{
if (dev == &ipip_fb_tunnel_dev) {
- net_serialize_enter();
tunnels_wc[0] = NULL;
- net_serialize_leave();
+ synchronize_bh();
} else {
ipip_tunnel_unlink((struct ip_tunnel*)dev->priv);
kfree(dev);
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
- * Version: $Id: ipmr.c,v 1.39 1999/03/21 05:22:44 davem Exp $
+ * Version: $Id: ipmr.c,v 1.40 1999/03/25 10:04:25 davem Exp $
*
* Fixes:
* Michael Chastain : Incorrect size of copying.
{
if (sk == mroute_socket) {
ipv4_devconf.mc_forwarding = 0;
- net_serialize_enter();
+
mroute_socket=NULL;
- net_serialize_leave();
+ synchronize_bh();
+
mroute_close(sk);
}
}
*
* ROUTE - implementation of the IP router.
*
- * Version: $Id: route.c,v 1.64 1999/03/23 21:21:13 davem Exp $
+ * Version: $Id: route.c,v 1.65 1999/03/25 10:04:35 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
rt_deadline = 0;
- net_serialize_enter();
+ start_bh_atomic();
for (i=0; i<RT_HASH_DIVISOR; i++) {
if ((rth = xchg(&rt_hash_table[i], NULL)) == NULL)
continue;
- net_serialize_leave();
+ end_bh_atomic();
for (; rth; rth=next) {
next = rth->u.rt_next;
rt_free(rth);
}
- net_serialize_enter();
+ start_bh_atomic();
}
- net_serialize_leave();
+ end_bh_atomic();
}
void rt_cache_flush(int delay)
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
- * $Id: addrconf.c,v 1.47 1999/03/21 05:22:50 davem Exp $
+ * $Id: addrconf.c,v 1.48 1999/03/25 10:04:43 davem Exp $
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
for (; iter; iter = iter->lst_next) {
if (iter == ifp) {
- net_serialize_enter();
*back = ifp->lst_next;
- net_serialize_leave();
+ synchronize_bh();
+
ifp->lst_next = NULL;
break;
}
for (; iter; iter = iter->if_next) {
if (iter == ifp) {
- net_serialize_enter();
*back = ifp->if_next;
- net_serialize_leave();
+ synchronize_bh();
+
ifp->if_next = NULL;
break;
}
*
* Based on linux/net/ipv4/ip_sockglue.c
*
- * $Id: ipv6_sockglue.c,v 1.25 1999/03/21 05:22:54 davem Exp $
+ * $Id: ipv6_sockglue.c,v 1.26 1999/03/25 10:04:53 davem Exp $
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
kfree(new_ra);
return -EADDRINUSE;
}
- net_serialize_enter();
+
*rap = ra->next;
- net_serialize_leave();
+ synchronize_bh();
+
if (ra->destructor)
ra->destructor(sk);
kfree(ra);
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
- * $Id: mcast.c,v 1.18 1999/03/21 05:22:55 davem Exp $
+ * $Id: mcast.c,v 1.19 1999/03/25 10:04:50 davem Exp $
*
* Based on linux/ipv4/igmp.c and linux/ipv4/ip_sockglue.c
*
if (mc_lst->ifindex == ifindex &&
ipv6_addr_cmp(&mc_lst->addr, addr) == 0) {
struct device *dev;
- net_serialize_enter();
+
*lnk = mc_lst->next;
- net_serialize_leave();
+ synchronize_bh();
+
if ((dev = dev_get_by_index(ifindex)) != NULL)
ipv6_dev_mc_dec(dev, &mc_lst->addr);
sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
for (lnk = &idev->mc_list; (iter = *lnk) != NULL; lnk = &iter->if_next) {
if (iter == ma) {
- net_serialize_enter();
*lnk = iter->if_next;
- net_serialize_leave();
+ synchronize_bh();
return;
}
}
if (ipv6_addr_cmp(&ma->mca_addr, addr) == 0 && ma->dev == dev) {
if (atomic_dec_and_test(&ma->mca_users)) {
igmp6_group_dropped(ma);
- net_serialize_enter();
+
*lnk = ma->next;
- net_serialize_leave();
+ synchronize_bh();
+
ipv6_mca_remove(dev, ma);
kfree(ma);
}
for (lnk = &inet6_mcast_lst[hash]; *lnk; lnk = &(*lnk)->next) {
if (*lnk == i) {
- net_serialize_enter();
*lnk = i->next;
- net_serialize_leave();
+ synchronize_bh();
break;
}
}
* Pedro Roque <roque@di.fc.ul.pt>
* Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
*
- * $Id: sit.c,v 1.30 1999/03/21 05:22:58 davem Exp $
+ * $Id: sit.c,v 1.31 1999/03/25 10:04:55 davem Exp $
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
for (tp = ipip6_bucket(t); *tp; tp = &(*tp)->next) {
if (t == *tp) {
- net_serialize_enter();
*tp = t->next;
- net_serialize_leave();
+ synchronize_bh();
break;
}
}
{
struct ip_tunnel **tp = ipip6_bucket(t);
- net_serialize_enter();
t->next = *tp;
+ wmb();
*tp = t;
- net_serialize_leave();
}
struct ip_tunnel * ipip6_tunnel_locate(struct ip_tunnel_parm *parms, int create)
static void ipip6_tunnel_destroy(struct device *dev)
{
if (dev == &ipip6_fb_tunnel_dev) {
- net_serialize_enter();
tunnels_wc[0] = NULL;
- net_serialize_leave();
+ synchronize_bh();
return;
} else {
ipip6_tunnel_unlink((struct ip_tunnel*)dev->priv);
{
struct socket *sock = netlink_kernel[unit];
- net_serialize_enter();
netlink_kernel[unit] = NULL;
- net_serialize_leave();
+ synchronize_bh();
+
sock_release(sock);
}
if (fh == 0) {
if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) {
- net_serialize_enter();
*back = tp->next;
- net_serialize_leave();
+ synchronize_bh();
+
tp->ops->destroy(tp);
kfree(tp);
err = 0;
if (*fp == f) {
unsigned long cl;
- net_serialize_enter();
*fp = f->next;
- net_serialize_leave();
+ synchronize_bh();
if ((cl = cls_set_class(&f->res.class, 0)) != 0)
tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
#ifdef CONFIG_NET_CLS_POLICE
if (tb[TCA_FW_POLICE-1]) {
struct tcf_police *police = tcf_police_locate(tb[TCA_FW_POLICE-1], tca[TCA_RATE-1]);
- net_serialize_enter();
+
police = xchg(&f->police, police);
- net_serialize_leave();
+ synchronize_bh();
+
tcf_police_release(police);
}
#endif
if (head == NULL)
return -ENOBUFS;
memset(head, 0, sizeof(*head));
- net_serialize_enter();
+
tp->root = head;
- net_serialize_leave();
+ synchronize_bh();
}
f = kmalloc(sizeof(struct fw_filter), GFP_KERNEL);
#endif
f->next = head->ht[fw_hash(handle)];
- net_serialize_enter();
+ wmb();
head->ht[fw_hash(handle)] = f;
- net_serialize_leave();
+
*arg = (unsigned long)f;
return 0;
t->tcm_handle = f->id;
- if (!f->res.classid && !f->police)
+ if (!f->res.classid
+#ifdef CONFIG_NET_CLS_POLICE
+ && !f->police
+#endif
+ )
return skb->len;
rta = (struct rtattr*)b;
if (*fp == f) {
unsigned long cl;
- net_serialize_enter();
*fp = f->next;
- net_serialize_leave();
+ synchronize_bh();
+
route4_reset_fastmap(head, f->id);
if ((cl = cls_set_class(&f->res.class, 0)) != 0)
return 0;
/* OK, session has no flows */
- net_serialize_enter();
head->table[to_hash(h)] = NULL;
- net_serialize_leave();
+ synchronize_bh();
+
kfree(b);
return 0;
}
#ifdef CONFIG_NET_CLS_POLICE
if (tb[TCA_ROUTE4_POLICE-1]) {
struct tcf_police *police = tcf_police_locate(tb[TCA_ROUTE4_POLICE-1], tca[TCA_RATE-1]);
- net_serialize_enter();
+
police = xchg(&f->police, police);
- net_serialize_leave();
+ synchronize_bh();
+
tcf_police_release(police);
}
#endif
if (head == NULL)
return -ENOBUFS;
memset(head, 0, sizeof(struct route4_head));
- net_serialize_enter();
+
tp->root = head;
- net_serialize_leave();
+ synchronize_bh();
}
f = kmalloc(sizeof(struct route4_filter), GFP_KERNEL);
if (b == NULL)
goto errout;
memset(b, 0, sizeof(*b));
- net_serialize_enter();
+
head->table[h1] = b;
- net_serialize_leave();
+ synchronize_bh();
}
f->bkt = b;
#endif
f->next = f1;
- net_serialize_enter();
+ wmb();
*ins_f = f;
- net_serialize_leave();
+
route4_reset_fastmap(head, f->id);
*arg = (unsigned long)f;
return 0;
struct iphdr *nhptr = skb->nh.iph;
#endif
-#if !defined( __i386__) && !defined(__m68k__)
+#if !defined( __i386__) && !defined(__mc68000__)
if ((unsigned long)nhptr & 3)
return -1;
#endif
if (*fp == f) {
unsigned long cl;
- net_serialize_enter();
+
*fp = f->next;
- net_serialize_leave();
+ synchronize_bh();
if ((cl = cls_set_class(&f->res.class, 0)) != 0)
tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
for (sp = &((struct rsvp_head*)tp->root)->ht[h&0xFF];
*sp; sp = &(*sp)->next) {
if (*sp == s) {
- net_serialize_enter();
*sp = s->next;
- net_serialize_leave();
+ synchronize_bh();
+
kfree(s);
return 0;
}
#ifdef CONFIG_NET_CLS_POLICE
if (tb[TCA_RSVP_POLICE-1]) {
struct tcf_police *police = tcf_police_locate(tb[TCA_RSVP_POLICE-1], tca[TCA_RATE-1]);
- net_serialize_enter();
+
police = xchg(&f->police, police);
- net_serialize_leave();
+ synchronize_bh();
+
tcf_police_release(police);
}
#endif
if (((*fp)->spi.mask&f->spi.mask) != f->spi.mask)
break;
f->next = *fp;
- net_serialize_enter();
+ wmb();
*fp = f;
- net_serialize_leave();
+
*arg = (unsigned long)f;
return 0;
}
break;
}
s->next = *sp;
- net_serialize_enter();
+ wmb();
*sp = s;
- net_serialize_leave();
goto insert;
int sel = 0;
int i;
-#if !defined(__i386__) && !defined(__m68k__)
+#if !defined(__i386__) && !defined(__mc68000__)
if ((unsigned long)ptr & 3)
return -1;
#endif
if (ht) {
for (kp = &ht->ht[TC_U32_HASH(key->handle)]; *kp; kp = &(*kp)->next) {
if (*kp == key) {
- net_serialize_enter();
*kp = key->next;
- net_serialize_leave();
+ synchronize_bh();
u32_destroy_key(tp, key);
return 0;
for (h=0; h<=ht->divisor; h++) {
while ((n = ht->ht[h]) != NULL) {
- net_serialize_enter();
ht->ht[h] = n->next;
- net_serialize_leave();
+ synchronize_bh();
+
u32_destroy_key(tp, n);
}
}
ht_down->refcnt++;
}
- net_serialize_enter();
ht_down = xchg(&n->ht_down, ht_down);
- net_serialize_leave();
+ synchronize_bh();
if (ht_down)
ht_down->refcnt--;
#ifdef CONFIG_NET_CLS_POLICE
if (tb[TCA_U32_POLICE-1]) {
struct tcf_police *police = tcf_police_locate(tb[TCA_U32_POLICE-1], est);
- net_serialize_enter();
+
police = xchg(&n->police, police);
- net_serialize_leave();
+ synchronize_bh();
+
tcf_police_release(police);
}
#endif
for (ins = &ht->ht[TC_U32_HASH(handle)]; *ins; ins = &(*ins)->next)
if (TC_U32_NODE(handle) < TC_U32_NODE((*ins)->handle))
break;
- net_serialize_enter();
+
n->next = *ins;
+ wmb();
*ins = n;
- net_serialize_leave();
+
*arg = (unsigned long)n;
return 0;
}
pest = &est->next;
continue;
}
- net_serialize_enter();
+
*pest = est->next;
- net_serialize_leave();
+ synchronize_bh();
+
kfree(est);
killed++;
}
struct Qdisc *child;
child = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
if (child) {
- net_serialize_enter();
child = xchg(&q->queues[band], child);
- net_serialize_leave();
+ synchronize_bh();
+
if (child != &noop_qdisc)
qdisc_destroy(child);
}
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
- * Version: $Id: af_unix.c,v 1.74 1999/03/21 05:23:16 davem Exp $
+ * Version: $Id: af_unix.c,v 1.75 1999/03/22 05:02:45 davem Exp $
*
* Fixes:
* Linus Torvalds : Assorted bug cures.