Also, don't forget http://www.linuxhq.com/ for all your Linux kernel
needs.
-Last updated: April 27, 1998
+Last updated: May 5, 1998
Current Author: Chris Ricker (kaboom@gatech.edu).
Current Minimal Requirements
- Bash 1.14.7 ; bash -version
- Ncpfs 2.1.1 ; ncpmount -v
- Pcmcia-cs 3.0.0
-- PPP 2.3.3 ; pppd -v
+- PPP 2.3.5 ; pppd -v
Upgrade notes
*************
to find out the proper way to upgrade it. No, the instruction to "rm
`which encaps`" is not a joke.
+The last public release of the binutils 2.8.x series is 2.8.1.0.23.
+Binutils 2.8.1.0.25 to 2.9.1.0.2 are all very buggy; do not use them.
+Binutils 2.9.1 (note the absence of a suffix) is all right, and binutils
+2.9.1.0.3 (and presumably later revisions) will probably work, too.
+Stick with 2.8.1.0.23 to be safe.
+
Gnu C
=====
Pcmcia-cs
=========
-The 3.0.0 release:
-ftp://hyper.stanford.edu/pub/pcmcia/pcmcia-cs-3.0.0.tar.gz
+The May 4, 1998 release:
+ftp://hyper.stanford.edu/pub/pcmcia/NEW/pcmcia-cs.04-May-98.tar.gz
PPP
===
-The 2.3.3 release:
-ftp://cs.anu.edu.au/pub/software/ppp/ppp-2.3.3.tar.gz
+The 2.3.5 release:
+ftp://cs.anu.edu.au/pub/software/ppp/ppp-2.3.5.tar.gz
Other Info
==========
unsigned int uint;
long err = 0;
- if (!suser())
+ if (!capable(CAP_SYS_ADMIN))
return -EPERM;
lock_kernel();
unsigned int uint;
long err = 0;
- if (!suser())
+ if (!capable(CAP_SYS_ADMIN))
return -EPERM;
lock_kernel();
(current->uid != child->uid) ||
(current->gid != child->egid) ||
(current->gid != child->sgid) ||
- (current->gid != child->gid)) && !suser())
+ (current->gid != child->gid)) && !capable(CAP_SYS_PTRACE))
goto out;
/* the same process cannot be attached many times */
if (child->flags & PF_PTRACED)
if (from + num > IO_BITMAP_SIZE*32)
return -EINVAL;
#endif
- if (!suser())
+ if (!capable(CAP_SYS_RAWIO))
return -EPERM;
#ifdef IODEBUG
if (level > 3)
return -EINVAL;
- if (!suser())
+ if (!capable(CAP_SYS_RAWIO))
return -EPERM;
*(&eflags) = (eflags & 0xffffcfff) | (level << 12);
return 0;
(current->uid != child->uid) ||
(current->gid != child->egid) ||
(current->gid != child->sgid) ||
- (current->gid != child->gid)) && !suser())
+ (current->gid != child->gid)) && !capable(CAP_SYS_PTRACE))
goto out;
/* the same process cannot be attached many times */
if (child->flags & PF_PTRACED)
.long SYMBOL_NAME(sys_pwrite)
.long SYMBOL_NAME(sys_chown)
.long SYMBOL_NAME(sys_getcwd)
+ .long SYMBOL_NAME(sys_capget)
+ .long SYMBOL_NAME(sys_capset) /* 185 */
- .rept NR_syscalls-182
+ .rept NR_syscalls-184
.long SYMBOL_NAME(sys_ni_syscall)
.endr
if ((from + num <= from) || (from + num > IO_BITMAP_SIZE*32))
return -EINVAL;
- if (!suser())
+ if (!capable(CAP_SYS_RAWIO))
return -EPERM;
/*
* If it's the first ioperm() call in this thread's lifetime, set the
if (level > 3)
return -EINVAL;
- if (!suser())
+ if (!capable(CAP_SYS_RAWIO))
return -EPERM;
regs->eflags = (regs->eflags & 0xffffcfff) | (level << 12);
return 0;
p += sprintf(p, "%10u ",
kstat.irqs[cpu_logical_map(j)][i]);
#endif
-
if (IO_APIC_IRQ(i)) {
p += sprintf(p, " IO-APIC");
+#ifdef __SMP__
if (irq_desc[i].handler == &ioapic_level_irq_type)
p += sprintf(p, "-level ");
else
p += sprintf(p, "-edge ");
+#endif
} else
p += sprintf(p, " XT-PIC ");
p += sprintf(p, " %s", action->name);
{
}
+/*
+ * if we enable this, why does it cause a hang in the BusLogic
+ * driver, when level triggered PCI IRQs are used?
+ */
+#define NOT_BROKEN 0
+
static void enable_level_ioapic_irq(unsigned int irq)
{
+#if NOT_BROKEN
enable_IO_APIC_irq(irq);
+#endif
self_IPI(irq);
}
static void disable_level_ioapic_irq(unsigned int irq)
{
+#if NOT_BROKEN
disable_IO_APIC_irq(irq);
+#endif
}
/*
* in the IO-APIC, then we 'early ACK' the IRQ, then we
* handle it and enable the IRQ when finished.
*/
+#if NOT_BROKEN
disable_IO_APIC_irq(irq);
+#endif
ack_APIC_irq();
desc->ipi = 0;
(current->uid != child->uid) ||
(current->gid != child->egid) ||
(current->gid != child->sgid) ||
- (current->gid != child->gid)) && !suser())
+ (current->gid != child->gid)) && !capable(CAP_SYS_PTRACE))
goto out;
/* the same process cannot be attached many times */
if (child->flags & PF_PTRACED)
{
unsigned long cfg;
+ /* local APIC has default address */
+ mp_lapic_addr = 0xFEE00000;
/*
* We need to know what the local
* APIC id of the boot CPU is!
lmsw %ax # into protected mode
jmp flush_instr
flush_instr:
- ljmp $__KERNEL_CS, $0x00100000
+ ljmpl $__KERNEL_CS, $0x00100000
# jump to startup_32
idt_48:
int sig = irqnumber >> 8;
int irq = irqnumber & 255;
handle_irq_zombies();
- if (!suser()) return -EPERM;
+ if (!capable(CAP_SYS_ADMIN)) return -EPERM;
if (!((1 << sig) & ALLOWED_SIGS)) return -EPERM;
if ( (irq<3) || (irq>15) ) return -EPERM;
if (vm86_irqs[irq].tsk) return -EPERM;
(current->uid != child->uid) ||
(current->gid != child->egid) ||
(current->gid != child->sgid) ||
- (current->gid != child->gid)) && !suser())
+ (current->gid != child->gid)) && !capable(CAP_SYS_PTRACE))
goto out;
/* the same process cannot be attached many times */
if (child->flags & PF_PTRACED)
if (scope == FLUSH_SCOPE_ALL) {
/* Only the superuser may flush the whole cache. */
ret = -EPERM;
- if (!suser ())
+ if (!capable(CAP_SYS_ADMIN))
goto out;
} else {
/* Verify that the specified address region actually belongs to
(current->uid != child->uid) ||
(current->gid != child->egid) ||
(current->gid != child->sgid) ||
- (current->gid != child->gid)) && !suser()) {
+ (current->gid != child->gid)) &&
+ !capable(CAP_SYS_PTRACE)) {
res = -EPERM;
goto out;
}
current->comm, current->pid, (unsigned long) value);
if(value > RLIM_INFINITY)
value = RLIM_INFINITY;
- if(suser()) {
+ if(capable(CAP_SYS_ADMIN)) {
current->rlim[RLIMIT_STACK].rlim_max =
current->rlim[RLIMIT_STACK].rlim_cur = value;
error = value;
int ret;
lock_kernel();
- if(!suser()) {
+ if(!capable(CAP_SYS_TIME)) {
ret = -EPERM;
goto out;
}
{
case SETNAME:
retval = -EPERM;
- if (!suser())
+ if (!capable(CAP_SYS_ADMIN))
goto out;
name = (char *) arg1;
(current->uid != child->euid) ||
(current->uid != child->uid) ||
(current->gid != child->egid) ||
- (current->gid != child->gid)) && !suser())
+ (current->gid != child->gid)) && !capable(CAP_SYS_PTRACE))
goto out;
/* the same process cannot be attached many times */
if (child->flags & PF_PTRACED)
{
#ifndef CONFIG_8xx
mm->context = NO_CONTEXT;
- if (mm == current->mm) {
- get_mmu_context(current);
- /* done by get_mmu_context() now -- Cort */
- /*set_context(current->mm->context);*/
- }
+ if (mm == current->mm)
+ activate_context(current);
#else
asm volatile ("tlbia" : : );
#endif
(current->uid != child->euid) ||
(current->uid != child->uid) ||
(current->gid != child->egid) ||
- (current->gid != child->gid)) && !suser()) {
+ (current->gid != child->gid)) &&
+ !capable(CAP_SYS_PTRACE)) {
pt_error_return(regs, EPERM);
goto out;
}
unsigned int uint;
int err = 0;
- if(!suser())
+ if(!capable(CAP_SYS_ADMIN))
return -EPERM;
lock_kernel();
unsigned int uint;
int err = 0;
- if(!suser())
+ if(!capable(CAP_SYS_ADMIN))
return -EPERM;
lock_kernel();
(current->uid != child->euid) ||
(current->uid != child->uid) ||
(current->gid != child->egid) ||
- (current->gid != child->gid)) && !suser()) {
+ (current->gid != child->gid)) &&
+ !capable(CAP_SYS_PTRACE)) {
pt_error_return(regs, EPERM);
goto out;
}
unsigned long type_page;
int err, is_smb, is_ncp;
- if(!suser())
+ if(!capable(CAP_SYS_ADMIN))
return -EPERM;
is_smb = is_ncp = 0;
err = copy_mount_stuff_to_kernel((const void *)A(type), &type_page);
regs->tpc = regs->tnpc;
regs->tnpc = regs->tnpc + 4;
- if (!suser()) return;
+ if (!capable(CAP_SYS_ADMIN)) return;
size >>= PAGE_SHIFT;
addr = PAGE_OFFSET - PAGE_SIZE;
page = mem_map - 1;
val <<= 9;
lock_kernel();
if (val > current->rlim[RLIMIT_FSIZE].rlim_max) {
- if (!suser()) {
+ if (!capable(CAP_SYS_RESOURCE)) {
unlock_kernel();
return -EPERM;
}
case FDFLUSH:
return invalidate_drive(drive);
}
- if (!suser())
+ if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (drive < 0 || drive > 3)
return -EINVAL;
return 0;
case BLKFLSBUF:
- if (!suser())
+ if (!capable(CAP_SYS_ADMIN))
return -EACCES;
fsync_dev(dev);
invalidate_buffers(dev);
return 0;
case BLKRASET:
- if (!suser())
+ if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (arg > 0xff)
return -EINVAL;
return put_user (mfm[minor].nr_sects, (long *)arg);
case BLKFRASET:
- if (!suser())
+ if (!capable(CAP_SYS_ADMIN))
return -EACCES;
max_readahead[major][minor] = arg;
return 0;
return put_user(max_sectors[major][minor], (long *) arg);
case BLKRRPART:
- if (!suser())
+ if (!capable(CAP_SYS_ADMIN))
return -EACCES;
return mfm_reread_partitions(dev);
case BLKRRPART:
printk("\tBLKRRPART\n");
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
return ddv_revalidate(inode->i_rdev,&ddv_gendisk);
case BLKGETSIZE: /* Return device size */
(long *) arg);
case BLKFLSBUF:
- if(!suser()) return -EACCES;
+ if(!capable(CAP_SYS_ADMIN)) return -EACCES;
if(!inode->i_rdev) return -EINVAL;
fsync_dev(inode->i_rdev);
invalidate_buffers(inode->i_rdev);
return 0;
case BLKRRPART: /* Re-read partition tables */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
return revalidate_acsidisk(inode->i_rdev, 1);
RO_IOCTLS(inode->i_rdev,arg);
default:
/* stuff timing parameters into controller registers */
driveNum = (HWIF(drive)->index << 1) + drive->select.b.unit;
- save_flags(flags);
- cli();
+ save_flags(flags); /* all CPUs */
+ cli(); /* all CPUs */
outb_p(regOn, basePort);
outReg(param1, regTab[driveNum].reg1);
outReg(param2, regTab[driveNum].reg2);
outReg(param3, regTab[driveNum].reg3);
outReg(param4, regTab[driveNum].reg4);
outb_p(regOff, basePort);
- restore_flags(flags);
+ restore_flags(flags); /* all CPUs */
}
/*
byte t;
unsigned long flags;
- save_flags(flags);
- cli();
+ __save_flags(flags); /* local CPU only */
+ __cli(); /* local CPU only */
for (i = 0; i < ALI_NUM_PORTS; ++i) {
basePort = ports[i];
regOff = inb(basePort);
dataPort = basePort + 8;
t = inReg(0) & 0xf0;
outb_p(regOff, basePort);
- restore_flags(flags);
+ __restore_flags(flags); /* local CPU only */
if (t != 0x50)
return 0;
return 1; /* success */
}
outb_p(regOff, basePort);
}
- restore_flags(flags);
+ __restore_flags(flags); /* local CPU only */
return 0;
}
byte t;
unsigned long flags;
- save_flags(flags);
- cli();
+ __save_flags(flags); /* local CPU only */
+ __cli(); /* local CPU only */
outb_p(regOn, basePort);
for (p = initData; p->reg != 0; ++p)
outReg(p->data, p->reg);
outb_p(0x01, regPort);
t = inb(regPort) & 0x01;
outb_p(regOff, basePort);
- restore_flags(flags);
+ __restore_flags(flags); /* local CPU only */
return t;
}
return -EFAULT;
return 0;
case BLKRASET:
- if (!suser())
+ if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (param > 0xff)
return -EINVAL;
return put_user(read_ahead[MAJOR(inode->i_rdev)],
(int *) param);
case BLKFLSBUF:
- if (!suser())
+ if (!capable(CAP_SYS_ADMIN))
return -EACCES;
fsync_dev(inode->i_rdev);
invalidate_buffers(inode->i_rdev);
pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
if (pio >= 3) {
- save_flags(flags);
- cli();
+ save_flags(flags); /* all CPUs */
+ cli(); /* all CPUs */
/*
* This enables PIO mode4 (3?) on the first interface
*/
sub22(1,0xc3);
sub22(0,0xa0);
- restore_flags(flags);
+ restore_flags(flags); /* all CPUs */
} else {
/* we don't know how to set it back again.. */
}
{
unsigned long flags;
- save_flags(flags);
- cli();
+ __save_flags(flags); /* local CPU only */
+ __cli(); /* local CPU only */
/*
* This enables the second interface
*/
sub22(1,0xc3);
sub22(0,0xa0);
#endif
- restore_flags(flags);
+ __restore_flags(flags); /* local CPU only */
ide_hwifs[0].serialized = 1;
ide_hwifs[1].serialized = 1;
if (!initialising)
show_floppy();
cancel_activity();
- sti();
floppy_enable_hlt();
fd_disable_dma();
printk("sect=%ld cmd=%d\n", CURRENT->sector, CURRENT->cmd);
return;
}
- sti();
if (fdc_busy){
/* fdc busy, this new request will be treated when the
current one is done */
(g->stretch&~(FD_STRETCH|FD_SWAPSIDES)) != 0)
return -EINVAL;
if (type){
- if (!suser())
+ if (!capable(CAP_SYS_ADMIN))
return -EPERM;
LOCK_FDC(drive,1);
for (cnt = 0; cnt < N_DRIVE; cnt++){
return _COPYOUT(loc);
}
case BLKRASET:
- if(!suser()) return -EACCES;
+ if(!capable(CAP_SYS_ADMIN)) return -EACCES;
if(param > 0xff) return -EINVAL;
read_ahead[MAJOR(inode->i_rdev)] = param;
return 0;
return put_user(read_ahead[MAJOR(inode->i_rdev)],
(long *) param);
case BLKFLSBUF:
- if(!suser()) return -EACCES;
+ if(!capable(CAP_SYS_ADMIN)) return -EACCES;
fsync_dev(inode->i_rdev);
invalidate_buffers(inode->i_rdev);
return 0;
return copy_to_user(loc, &g, sizeof g) ? -EFAULT : 0;
}
case BLKRASET:
- if(!suser()) return -EACCES;
+ if(!capable(CAP_SYS_ADMIN)) return -EACCES;
if(arg > 0xff) return -EINVAL;
read_ahead[MAJOR(inode->i_rdev)] = arg;
return 0;
return put_user(hd[MINOR(inode->i_rdev)].nr_sects,
(long *) arg);
case BLKFLSBUF:
- if(!suser()) return -EACCES;
+ if(!capable(CAP_SYS_ADMIN)) return -EACCES;
fsync_dev(inode->i_rdev);
invalidate_buffers(inode->i_rdev);
return 0;
case BLKRRPART: /* Re-read partition tables */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
return revalidate_hddisk(inode->i_rdev, 1);
RO_IOCTLS(inode->i_rdev,arg);
if (select != current_select || timing != current_timing) {
current_select = select;
current_timing = timing;
- save_flags (flags);
- cli();
+ __save_flags (flags); /* local CPU only */
+ __cli(); /* local CPU only */
(void) inb(HT_SELECT_PORT);
(void) inb(HT_SELECT_PORT);
(void) inb(HT_SELECT_PORT);
*/
outb (timing, IDE_SELECT_REG);
(void) inb (IDE_STATUS_REG);
- restore_flags (flags);
+ __restore_flags (flags); /* local CPU only */
#ifdef DEBUG
printk("ht6560b: %s: select=%#x timing=%#x\n", drive->name, t, timing);
#endif
/* From Table 124 of the ATAPI 1.2 spec.
Unchanged in Table 140 of the ATAPI 2.6 draft standard. */
-const char *sense_key_texts[16] = {
+const char * const sense_key_texts[16] = {
"No sense data",
"Recovered error",
"Not ready",
/* From Table 37 of the ATAPI 2.6 draft standard. */
-struct {
+const struct {
unsigned short packet_command;
- const char *text;
+ const char * const text;
} packet_command_texts[] = {
{ TEST_UNIT_READY, "Test Unit Ready" },
{ REQUEST_SENSE, "Request Sense" },
/* From Table 125 of the ATAPI 1.2 spec.,
with additions from Tables 141 and 142 of the ATAPI 2.6 draft standard. */
-struct {
+const struct {
unsigned short asc_ascq;
- const char *text;
+ const char * const text;
} sense_data_texts[] = {
{ 0x0000, "No additional sense information" },
* Version 1.04 add /proc configurable settings and S.M.A.R.T support
* Version 1.05 add capacity support for ATA3 >= 8GB
* Version 1.06 get boot-up messages to show full cyl count
+ * Version 1.07 disable door-locking if it fails
*/
-#define IDEDISK_VERSION "1.06"
+#define IDEDISK_VERSION "1.07"
#undef REALLY_SLOW_IO /* most systems can safely undef this */
}
} else
error = 1;
-
out:
-
if (error)
ide_error(drive, "write_intr", stat);
}
}
} else
error = 1;
-
out:
-
if (error)
ide_error(drive, "multwrite_intr", stat);
}
return;
}
if (!drive->unmask)
- __cli();
+ __cli(); /* local CPU only */
if (drive->mult_count) {
HWGROUP(drive)->wrq = *rq; /* scratchpad */
ide_set_handler (drive, &multwrite_intr, WAIT_CMD);
* since the open() has already succeeded,
* and the door_lock is irrelevant at this point.
*/
- (void) ide_wait_cmd(drive, WIN_DOORLOCK, 0, 0, 0, NULL);
+ if (drive->doorlocking && ide_wait_cmd(drive, WIN_DOORLOCK, 0, 0, 0, NULL))
+ drive->doorlocking = 0;
}
return 0;
}
{
if (drive->removable && !drive->usage) {
invalidate_buffers(inode->i_rdev);
- (void) ide_wait_cmd(drive, WIN_DOORUNLOCK, 0, 0, 0, NULL);
+ if (drive->doorlocking && ide_wait_cmd(drive, WIN_DOORUNLOCK, 0, 0, 0, NULL))
+ drive->doorlocking = 0;
}
MOD_DEC_USE_COUNT;
}
static int set_nowerr(ide_drive_t *drive, int arg)
{
+ unsigned long flags;
+
+ if (ide_spin_wait_hwgroup("set_nowerr", drive, &flags))
+ return -EBUSY;
drive->nowerr = arg;
drive->bad_wstat = arg ? BAD_R_STAT : BAD_W_STAT;
+ spin_unlock_irqrestore(&HWGROUP(drive)->spinlock, flags);
return 0;
}
/* check for removable disks (eg. SYQUEST), ignore 'WD' drives */
if (id->config & (1<<7)) { /* removable disk ? */
- if (id->model[0] != 'W' || id->model[1] != 'D')
+ if (id->model[0] != 'W' || id->model[1] != 'D') {
drive->removable = 1;
+ drive->doorlocking = 1;
+ }
}
- /* SunDisk drives: treat as non-removable; can mess up non-Sun systems! FIXME */
- if (id->model[0] == 'S' && id->model[1] == 'u')
- drive->removable = 0;
-
/* Extract geometry if we did not already have one for the drive */
if (!drive->cyl || !drive->head || !drive->sect) {
drive->cyl = drive->bios_cyl = id->cyls;
if (drive->cyl > drive->bios_cyl)
drive->bios_cyl = drive->cyl;
}
+#if 0 /* done instead for entire identify block in arch/ide.h stuff */
/* fix byte-ordering of buffer size field */
id->buf_size = le16_to_cpu(id->buf_size);
-
+#endif
printk (KERN_INFO "%s: %.40s, %ldMB w/%dkB Cache, CHS=%d/%d/%d",
drive->name, id->model, idedisk_capacity(drive)/2048L, id->buf_size/2,
drive->bios_cyl, drive->bios_head, drive->bios_sect);
*/
const char *good_dma_drives[] = {"Micropolis 2112A",
"CONNER CTMA 4000",
- "ST34342A",
+ "ST34342A", /* for Sun Ultra */
NULL};
/*
}
printk("%s: dma_intr: bad DMA status\n", drive->name);
}
- sti();
+ ide__sti(); /* local CPU only */
ide_error(drive, "dma_intr", stat);
}
struct hd_driveid *id = drive->id;
ide_hwif_t *hwif = HWIF(drive);
- if (id && (id->capability & 1) && !HWIF(drive)->no_autodma) {
+ if (id && (id->capability & 1) && !hwif->no_autodma) {
/* Enable DMA on any drive that has UltraDMA (mode 0/1/2) enabled */
if (id->field_valid & 4) /* UltraDMA */
if ((id->dma_ultra & (id->dma_ultra >> 8) & 7))
ide_hwif_t *hwif = HWIF(drive);
unsigned long dma_base = hwif->dma_base;
unsigned int count, reading = 0;
+ byte dma_stat;
switch (func) {
case ide_dma_off:
return 1; /* try PIO instead of DMA */
outl(virt_to_bus(hwif->dmatable), dma_base + 4); /* PRD table */
outb(reading, dma_base); /* specify r/w */
- outb(inb(dma_base+2)|0x06, dma_base+2); /* clear status bits */
+ outb(inb(dma_base+2)|6, dma_base+2); /* clear INTR & ERROR flags */
+ drive->waiting_for_dma = 1;
if (drive->media != ide_disk)
return 0;
ide_set_handler(drive, &ide_dma_intr, WAIT_CMD);/* issue cmd to drive */
OUT_BYTE(reading ? WIN_READDMA : WIN_WRITEDMA, IDE_COMMAND_REG);
case ide_dma_begin:
+ /* Note that this is done *after* the cmd has
+ * been issued to the drive, as per the BM-IDE spec.
+ * The Promise Ultra33 doesn't work correctly when
+ * we do this part before issuing the drive cmd.
+ */
outb(inb(dma_base)|1, dma_base); /* start DMA */
return 0;
case ide_dma_end: /* returns 1 on error, 0 otherwise */
- {
- byte dma_stat = inb(dma_base+2);
- int rc = (dma_stat & 7) != 4;
+ drive->waiting_for_dma = 0;
+ dma_stat = inb(dma_base+2);
outb(inb(dma_base)&~1, dma_base); /* stop DMA */
outb(dma_stat|6, dma_base+2); /* clear the INTR & ERROR bits */
- return rc; /* verify good DMA status */
- }
+ return (dma_stat & 7) != 4; /* verify good DMA status */
+ case ide_dma_test_irq: /* returns 1 if dma irq issued, 0 otherwise */
+ dma_stat = inb(dma_base+2);
+ return (dma_stat & 4) == 4; /* return 1 if INTR asserted */
default:
printk("ide_dmaproc: unsupported func: %d\n", func);
return 1;
/*
* Fetch the DMA Bus-Master-I/O-Base-Address (BMIBA) from PCI space:
*/
-__initfunc(unsigned long ide_get_or_set_dma_base (struct pci_dev *dev, ide_hwif_t *hwif, int extra, const char *name))
+__initfunc(unsigned long ide_get_or_set_dma_base (ide_hwif_t *hwif, int extra, const char *name))
{
- unsigned long dma_base = 0;
+ unsigned long dma_base = 0;
+ struct pci_dev *dev = hwif->pci_dev;
if (hwif->mate && hwif->mate->dma_base) {
dma_base = hwif->mate->dma_base - (hwif->channel ? 0 : 8);
#endif /* IDEFLOPPY_DEBUG_LOG */
clear_bit (PC_DMA_IN_PROGRESS, &pc->flags);
- ide_sti();
+ ide__sti(); /* local CPU only */
if (status.b.check || test_bit (PC_DMA_ERROR, &pc->flags)) { /* Error detected */
#if IDEFLOPPY_DEBUG_LOG
#define DEVID_NS87415 ((ide_pci_devid_t){PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87415})
#define DEVID_HT6565 ((ide_pci_devid_t){PCI_VENDOR_ID_HOLTEK, PCI_DEVICE_ID_HOLTEK_6565})
#define DEVID_AEC6210 ((ide_pci_devid_t){0x1191, 0x0005})
+#define DEVID_W82C105 ((ide_pci_devid_t){PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_82C105})
#define IDE_IGNORE ((void *)-1)
extern void ide_init_cmd646(ide_hwif_t *);
#define INIT_CMD646 &ide_init_cmd646
#else
+#ifdef __sparc_v9__
#define INIT_CMD646 IDE_IGNORE
+#else
+#define INIT_CMD646 NULL
+#endif
+#endif
+
+#ifdef CONFIG_BLK_DEV_SL82C105
+extern void ide_init_sl82c105(ide_hwif_t *);
+#define INIT_W82C105 &ide_init_sl82c105
+#else
+#define INIT_W82C105 IDE_IGNORE
#endif
#ifdef CONFIG_BLK_DEV_RZ1000
{DEVID_TRM290, "TRM290", INIT_TRM290, {{0x00,0x00,0x00}, {0x00,0x00,0x00}} },
{DEVID_NS87415, "NS87415", INIT_NS87415, {{0x00,0x00,0x00}, {0x00,0x00,0x00}} },
{DEVID_AEC6210, "AEC6210", NULL, {{0x00,0x00,0x00}, {0x00,0x00,0x00}} },
+ {DEVID_W82C105, "W82C105", INIT_W82C105, {{0x40,0x01,0x01}, {0x40,0x10,0x10}} },
{IDE_PCI_DEVID_NULL, "PCI_IDE", NULL, {{0x00,0x00,0x00}, {0x00,0x00,0x00}} }};
/*
ide_pci_enablebit_t *e = &(d->enablebits[port]);
if (e->reg && (pci_read_config_byte(dev, e->reg, &tmp) || (tmp & e->mask) != e->val))
continue; /* port not enabled */
- ctl = dev->base_address[1+2*port] & PCI_BASE_ADDRESS_IO_MASK;
+ ctl = dev->base_address[(2*port)+1] & PCI_BASE_ADDRESS_IO_MASK;
if (!ctl)
ctl = port ? 0x374 : 0x3f4; /* use default value */
base = dev->base_address[2*port] & ~7;
if (IDE_PCI_DEVID_EQ(d->devid, DEVID_PDC20246) ||
((dev->class >> 8) == PCI_CLASS_STORAGE_IDE && (dev->class & 0x80))) {
unsigned int extra = (!mate && IDE_PCI_DEVID_EQ(d->devid, DEVID_PDC20246)) ? 16 : 0;
- unsigned long dma_base = ide_get_or_set_dma_base(dev, hwif, extra, d->name);
+ unsigned long dma_base = ide_get_or_set_dma_base(hwif, extra, d->name);
if (dma_base && !(pcicmd & PCI_COMMAND_MASTER)) {
/*
* Set up BM-DMA capability (PnP BIOS should have done this)
id = drive->id = kmalloc (SECTOR_WORDS*4, GFP_KERNEL);
ide_input_data(drive, id, SECTOR_WORDS); /* read 512 bytes of id info */
- sti();
+ ide__sti(); /* local CPU only */
ide_fix_driveid(id);
#if defined (CONFIG_SCSI_EATA_DMA) || defined (CONFIG_SCSI_EATA_PIO) || defined (CONFIG_SCSI_EATA)
delay_50ms(); /* wait for IRQ and DRQ_STAT */
if (OK_STAT(GET_STAT(),DRQ_STAT,BAD_R_STAT)) {
unsigned long flags;
- save_flags(flags);
- cli(); /* some systems need this */
+ __save_flags(flags); /* local CPU only */
+ __cli(); /* local CPU only; some systems need this */
do_identify(drive, cmd); /* drive returned ID */
rc = 0; /* drive responded with ID */
(void) GET_STAT(); /* clear drive IRQ */
- restore_flags(flags);
+ __restore_flags(flags); /* local CPU only */
} else
rc = 2; /* drive refused ID */
if (!HWIF(drive)->irq) {
return;
}
- save_flags(flags);
- sti(); /* needed for jiffies and irq probing */
+ __save_flags(flags); /* local CPU only */
+ __sti(); /* local CPU only; needed for jiffies and irq probing */
/*
* Second drive should only exist if first drive was found,
* but a lot of cdrom drives are configured as single slaves.
} while ((stat & BUSY_STAT) && 0 < (signed long)(timeout - jiffies));
}
- restore_flags(flags);
+ __restore_flags(flags); /* local CPU only */
for (unit = 0; unit < MAX_DRIVES; ++unit) {
ide_drive_t *drive = &hwif->drives[unit];
if (drive->present) {
ide_hwgroup_t *hwgroup;
ide_hwif_t *match = NULL;
- save_flags(flags);
- cli();
+ save_flags(flags); /* all CPUs */
+ cli(); /* all CPUs */
hwif->hwgroup = NULL;
#if MAX_HWIFS > 1
if (h->hwgroup) { /* scan only initialized hwif's */
if (hwif->irq == h->irq) {
hwif->sharing_irq = h->sharing_irq = 1;
- save_match(hwif, h, &match);
+ if (hwif->chipset != ide_pci || h->chipset != ide_pci) {
+ save_match(hwif, h, &match);
+ }
}
if (hwif->serialized) {
if (hwif->mate && hwif->mate->irq == h->irq)
} else {
hwgroup = kmalloc(sizeof(ide_hwgroup_t), GFP_KERNEL);
memset(hwgroup, 0, sizeof(ide_hwgroup_t));
- hwgroup->hwif = hwif->next = hwif;
- hwgroup->rq = NULL;
- hwgroup->handler = NULL;
- hwgroup->drive = NULL;
+ hwgroup->hwif = hwif->next = hwif;
+ hwgroup->rq = NULL;
+ hwgroup->handler = NULL;
+ hwgroup->drive = NULL;
+ hwgroup->busy = 0;
+ hwgroup->spinlock = (spinlock_t)SPIN_LOCK_UNLOCKED;
+#if (DEBUG_SPINLOCK > 0)
+ printk("hwgroup(%s) spinlock is %p\n", hwif->name, &hwgroup->spinlock); /* FIXME */
+#endif
init_timer(&hwgroup->timer);
hwgroup->timer.function = &ide_timer_expiry;
hwgroup->timer.data = (unsigned long) hwgroup;
* Allocate the irq, if not already obtained for another hwif
*/
if (!match || match->irq != hwif->irq) {
- if (ide_request_irq(hwif->irq, &ide_intr, SA_INTERRUPT, hwif->name, hwgroup)) {
+ int sa = (hwif->chipset == ide_pci) ? SA_INTERRUPT|SA_SHIRQ : SA_INTERRUPT;
+ if (ide_request_irq(hwif->irq, &ide_intr, sa, hwif->name, hwgroup)) {
if (!match)
kfree(hwgroup);
- restore_flags(flags);
+ restore_flags(flags); /* all CPUs */
return 1;
}
}
hwgroup->drive->next = drive;
}
hwgroup->hwif = HWIF(hwgroup->drive);
- restore_flags(flags); /* safe now that hwif->hwgroup is set up */
+ restore_flags(flags); /* all CPUs; safe now that hwif->hwgroup is set up */
#ifndef __mc68000__
printk("%s at 0x%03x-0x%03x,0x%03x on irq %d", hwif->name,
read_ahead[hwif->major] = 8; /* (4kB) */
hwif->present = 1; /* success */
}
+#if (DEBUG_SPINLOCK > 0)
+{
+ static int done = 0;
+ if (!done++)
+ printk("io_request_lock is %p\n", &io_request_lock); /* FIXME */
+}
+#endif
return hwif->present;
}
-
-int ideprobe_init(void);
-
-
+int ideprobe_init (void);
static ide_module_t ideprobe_module = {
IDE_PROBE_MODULE,
ideprobe_init,
unsigned long startn = 0, n, flags;
const char *start = NULL, *msg = NULL;
- if (!suser())
+ if (!capable(CAP_SYS_ADMIN))
return -EACCES;
/*
* Skip over leading whitespace
* Do one full pass to verify all parameters,
* then do another to actually write the regs.
*/
- save_flags(flags);
+ save_flags(flags); /* all CPUs */
do {
const char *p;
if (for_real) {
ide_hwgroup_t *mategroup = NULL;
if (hwif->mate && hwif->mate->hwgroup)
mategroup = (ide_hwgroup_t *)(hwif->mate->hwgroup);
- cli(); /* ensure all writes are done together */
- while (mygroup->active || (mategroup && mategroup->active)) {
- restore_flags(flags);
+ cli(); /* all CPUs; ensure all writes are done together */
+ while (mygroup->busy || (mategroup && mategroup->busy)) {
+ sti(); /* all CPUs */
if (0 < (signed long)(jiffies - timeout)) {
printk("/proc/ide/%s/config: channel(s) busy, cannot write\n", hwif->name);
+ restore_flags(flags); /* all CPUs */
return -EBUSY;
}
- cli();
+ cli(); /* all CPUs */
}
}
p = buffer;
break;
case 'P': is_pci = 1;
#ifdef CONFIG_BLK_DEV_IDEPCI
- if (!IDE_PCI_DEVID_EQ(hwif->pci_devid, IDE_PCI_DEVID_NULL))
+ if (hwif->pci_dev && !IDE_PCI_DEVID_EQ(hwif->pci_devid, IDE_PCI_DEVID_NULL))
break;
#endif /* CONFIG_BLK_DEV_IDEPCI */
msg = "not a PCI device";
msg = "bad/missing register number";
goto parse_error;
}
- if (--n < 0 || *p++ != ':') {
+ if (n-- == 0 || *p++ != ':') {
msg = "missing ':'";
goto parse_error;
}
break;
}
if (rc) {
- restore_flags(flags);
+ restore_flags(flags); /* all CPUs */
printk("proc_ide_write_config: error writing %s at bus %02x dev %02x reg 0x%x value 0x%x\n",
msg, dev->bus->number, dev->devfn, reg, val);
printk("proc_ide_write_config: error %d\n", rc);
}
}
} while (!for_real++);
- restore_flags(flags);
+ restore_flags(flags); /* all CPUs */
return count;
parse_error:
- restore_flags(flags);
+ restore_flags(flags); /* all CPUs */
printk("parse error\n");
return xx_xx_parse_error(start, startn, msg);
}
#ifdef CONFIG_BLK_DEV_IDEPCI
ide_hwif_t *hwif = (ide_hwif_t *)data;
- int reg = 0;
+ struct pci_dev *dev = hwif->pci_dev;
+ if (!IDE_PCI_DEVID_EQ(hwif->pci_devid, IDE_PCI_DEVID_NULL) && dev && dev->bus) {
+ int reg = 0;
- struct pci_dev *dev = hwif->pci_dev;
-
- out += sprintf(out, "pci bus %02x device %02x vid %04x did %04x channel %d\n",
- dev->bus->number, dev->devfn, hwif->pci_devid.vid, hwif->pci_devid.did, hwif->channel);
- do {
- byte val;
- int rc = pci_read_config_byte(dev, reg, &val);
- if (rc) {
- printk("proc_ide_read_config: error reading bus %02x dev %02x reg 0x%02x\n",
- dev->bus->number, dev->devfn, reg);
- printk("proc_ide_read_config: error %d\n", rc);
- return -EIO;
- out += sprintf(out, "??%c", (++reg & 0xf) ? ' ' : '\n');
- } else
- out += sprintf(out, "%02x%c", val, (++reg & 0xf) ? ' ' : '\n');
- } while (reg < 0x100);
-#else /* CONFIG_BLK_DEV_IDEPCI */
- out += sprintf(out, "(none)\n");
+ out += sprintf(out, "pci bus %02x device %02x vid %04x did %04x channel %d\n",
+ dev->bus->number, dev->devfn, hwif->pci_devid.vid, hwif->pci_devid.did, hwif->channel);
+ do {
+ byte val;
+ int rc = pci_read_config_byte(dev, reg, &val);
+ if (rc) {
+ printk("proc_ide_read_config: error %d reading bus %02x dev %02x reg 0x%02x\n",
+ rc, dev->bus->number, dev->devfn, reg);
+ out += sprintf(out, "??%c", (++reg & 0xf) ? ' ' : '\n');
+ } else
+ out += sprintf(out, "%02x%c", val, (++reg & 0xf) ? ' ' : '\n');
+ } while (reg < 0x100);
+ } else
#endif /* CONFIG_BLK_DEV_IDEPCI */
+ out += sprintf(out, "(none)\n");
len = out - page;
PROC_IDE_READ_RETURN(page,start,off,count,eof,len);
}
(struct file *file, const char *buffer, unsigned long count, void *data)
{
ide_drive_t *drive = (ide_drive_t *) data;
- ide_hwif_t *hwif = HWIF(drive);
char name[MAX_LEN + 1];
int for_real = 0, len;
- unsigned long n, flags;
+ unsigned long n;
const char *start = NULL;
ide_settings_t *setting;
- if (!suser())
+ if (!capable(CAP_SYS_ADMIN))
return -EACCES;
/*
* Skip over leading whitespace
}
/*
* Do one full pass to verify all parameters,
- * then do another to actually write the pci regs.
+ * then do another to actually write the new settings.
*/
- save_flags(flags);
do {
const char *p;
- if (for_real) {
- unsigned long timeout = jiffies + (3 * HZ);
- ide_hwgroup_t *mygroup = (ide_hwgroup_t *)(hwif->hwgroup);
- ide_hwgroup_t *mategroup = NULL;
- if (hwif->mate && hwif->mate->hwgroup)
- mategroup = (ide_hwgroup_t *)(hwif->mate->hwgroup);
- cli(); /* ensure all writes are done together */
- while (mygroup->active || (mategroup && mategroup->active)) {
- restore_flags(flags);
- if (0 < (signed long)(jiffies - timeout)) {
- printk("/proc/ide/%s/settings: channel(s) busy, cannot write\n", drive->name);
- return -EBUSY;
- }
- cli();
- }
- }
p = buffer;
n = count;
while (n > 0) {
ide_write_setting(drive, setting, val * setting->div_factor / setting->mul_factor);
}
} while (!for_real++);
- restore_flags(flags);
return count;
parse_error:
- restore_flags(flags);
printk("proc_ide_write_settings(): parse error\n");
return -EINVAL;
}
{
ide_drive_t *drive = (ide_drive_t *) data;
- if (!suser())
+ if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (ide_replace_subdriver(drive, buffer))
return -EINVAL;
#if IDETAPE_DEBUG_LOG
printk (KERN_INFO "Reached idetape_add_stage_tail\n");
#endif /* IDETAPE_DEBUG_LOG */
- save_flags (flags);
- cli ();
+ save_flags (flags); /* all CPUs (overkill?) */
+ cli(); /* all CPUs (overkill?) */
stage->next=NULL;
if (tape->last_stage != NULL)
tape->last_stage->next=stage;
tape->next_stage=tape->last_stage;
tape->nr_stages++;
tape->nr_pending_stages++;
- restore_flags (flags);
+ restore_flags (flags); /* all CPUs (overkill?) */
}
/*
#endif /* IDETAPE_DEBUG_LOG */
clear_bit (PC_DMA_IN_PROGRESS, &pc->flags);
- ide_sti();
+ ide__sti(); /* local CPU only */
if (status.b.check || test_bit (PC_DMA_ERROR, &pc->flags)) { /* Error detected */
#if IDETAPE_DEBUG_LOG
*/
return (idetape_queue_rw_tail (drive, IDETAPE_READ_RQ, blocks, tape->merge_stage->bh));
}
- save_flags (flags);
- cli ();
+ save_flags (flags); /* all CPUs (overkill?) */
+ cli(); /* all CPUs (overkill?) */
if (tape->active_stage == tape->first_stage)
idetape_wait_for_request (tape->active_data_request);
- restore_flags (flags);
+ restore_flags (flags); /* all CPUs (overkill?) */
rq_ptr = &tape->first_stage->rq;
bytes_read = tape->tape_block_size * (rq_ptr->nr_sectors - rq_ptr->current_nr_sectors);
* Pay special attention to possible race conditions.
*/
while ((new_stage = idetape_kmalloc_stage (tape)) == NULL) {
- save_flags (flags);
- cli ();
+ save_flags (flags); /* all CPUs (overkill?) */
+ cli(); /* all CPUs (overkill?) */
if (idetape_pipeline_active (tape)) {
idetape_wait_for_request (tape->active_data_request);
- restore_flags (flags);
+ restore_flags (flags); /* all CPUs (overkill?) */
} else {
- restore_flags (flags);
+ restore_flags (flags); /* all CPUs (overkill?) */
idetape_insert_pipeline_into_queue (drive);
if (idetape_pipeline_active (tape))
continue;
if (tape->first_stage == NULL)
return;
- save_flags (flags);
- cli ();
+ save_flags (flags); /* all CPUs (overkill?) */
+ cli(); /* all CPUs (overkill?) */
tape->next_stage = NULL;
if (idetape_pipeline_active (tape))
idetape_wait_for_request (tape->active_data_request);
- restore_flags (flags);
+ restore_flags (flags); /* all CPUs (overkill?) */
while (tape->first_stage != NULL)
idetape_remove_stage_head (drive);
if (!idetape_pipeline_active (tape))
idetape_insert_pipeline_into_queue (drive);
- save_flags (flags);
- cli ();
+ save_flags (flags); /* all CPUs (overkill?) */
+ cli(); /* all CPUs (overkill?) */
if (!idetape_pipeline_active (tape))
goto abort;
#if IDETAPE_DEBUG_BUGS
#endif /* IDETAPE_DEBUG_BUGS */
idetape_wait_for_request (&tape->last_stage->rq);
abort:
- restore_flags (flags);
+ restore_flags (flags); /* all CPUs (overkill?) */
}
static void idetape_pad_zeros (ide_drive_t *drive, int bcount)
* Wait until the first read-ahead request
* is serviced.
*/
- save_flags (flags);
- cli ();
+ save_flags (flags); /* all CPUs (overkill?) */
+ cli(); /* all CPUs (overkill?) */
if (tape->active_stage == tape->first_stage)
idetape_wait_for_request (tape->active_data_request);
- restore_flags (flags);
+ restore_flags (flags); /* all CPUs (overkill?) */
if (tape->first_stage->rq.errors == IDETAPE_ERROR_FILEMARK)
count++;
int minor = tape->minor;
unsigned long flags;
- save_flags (flags);
- cli ();
+ save_flags (flags); /* all CPUs (overkill?) */
+ cli(); /* all CPUs (overkill?) */
if (test_bit (IDETAPE_BUSY, &tape->flags) || tape->first_stage != NULL || tape->merge_stage_size || drive->usage) {
- restore_flags(flags);
+ restore_flags(flags); /* all CPUs (overkill?) */
return 1;
}
idetape_chrdevs[minor].drive = NULL;
- restore_flags (flags);
+ restore_flags (flags); /* all CPUs (overkill?) */
DRIVER(drive)->busy = 0;
(void) ide_unregister_subdriver (drive);
drive->driver_data = NULL;
* Version 6.12 integrate ioctl and proc interfaces
* fix parsing of "idex=" command line parameter
* Version 6.13 add support for ide4/ide5 courtesy rjones@orchestream.com
+ * Version 6.14 fixed IRQ sharing among PCI devices
+ * Version 6.15 added SMP awareness to IDE drivers
+ * Version 6.16 fixed various bugs; even more SMP friendly
*
* Some additional driver compile-time options are in ide.h
*
unsigned long t, flags;
int i;
- __save_flags(flags);
- __cli();
+ __save_flags(flags); /* local CPU only */
+ __cli(); /* local CPU only */
t = jiffies * 11932;
outb_p(0, 0x43);
i = inb_p(0x40);
i |= inb(0x40) << 8;
- __restore_flags(flags);
+ __restore_flags(flags); /* local CPU only */
return (t - i);
}
#endif /* DISK_RECOVERY_TIME */
*/
static void init_hwif_data (unsigned int index)
{
- byte *p;
unsigned int unit;
ide_hwif_t *hwif = &ide_hwifs[index];
/* bulk initialize hwif & drive info with zeros */
- p = ((byte *) hwif) + sizeof(ide_hwif_t);
- do {
- *--p = 0;
- } while (p > (byte *) hwif);
+ memset(hwif, 0, sizeof(ide_hwif_t));
/* fill in any non-zero initial values */
hwif->index = index;
#if SUPPORT_VLB_SYNC
if (io_32bit & 2) {
unsigned long flags;
- __save_flags(flags);
- __cli();
+ __save_flags(flags); /* local CPU only */
+ __cli(); /* local CPU only */
do_vlb_sync(IDE_NSECTOR_REG);
insl(IDE_DATA_REG, buffer, wcount);
- __restore_flags(flags);
+ __restore_flags(flags); /* local CPU only */
} else
#endif /* SUPPORT_VLB_SYNC */
insl(IDE_DATA_REG, buffer, wcount);
#if SUPPORT_VLB_SYNC
if (io_32bit & 2) {
unsigned long flags;
- __save_flags(flags);
- __cli();
+ __save_flags(flags); /* local CPU only */
+ __cli(); /* local CPU only */
do_vlb_sync(IDE_NSECTOR_REG);
outsl(IDE_DATA_REG, buffer, wcount);
- __restore_flags(flags);
+ __restore_flags(flags); /* local CPU only */
} else
#endif /* SUPPORT_VLB_SYNC */
outsl(IDE_DATA_REG, buffer, wcount);
outsw (IDE_DATA_REG, ((byte *)buffer) + (bytecount & ~0x03), 1);
}
+/*
+ * Needed for PCI irq sharing
+ *
+ * drive_is_ready() - cheap, non-destructive check of whether this drive
+ * could plausibly be the source of a pending interrupt, used to filter
+ * interrupts arriving on a shared PCI INT# line.
+ *
+ * For an active DMA transfer we defer to the chipset-specific dmaproc
+ * (ide_dma_test_irq), which can inspect the DMA status register without
+ * touching the drive.  Otherwise we read the status register: a drive
+ * asserting BUSY cannot be interrupting; a non-busy drive *might* be.
+ * NOTE(review): reading GET_STAT() here assumes the drive is already
+ * selected and >400ns have elapsed since the last command -- the udelay
+ * that would guarantee the latter is compiled out below.
+ */
+static inline int drive_is_ready (ide_drive_t *drive)
+{
+	if (drive->waiting_for_dma)
+		return HWIF(drive)->dmaproc(ide_dma_test_irq, drive);
+#if 0
+	udelay(1);	/* need to guarantee 400ns since last command was issued */
+#endif
+	if (GET_STAT() & BUSY_STAT)
+		return 0;	/* drive busy:  definitely not interrupting */
+	return 1;	/* drive ready: *might* be interrupting */
+}
+
+#if !defined(__SMP__) && defined(DEBUG_SPINLOCKS) && (DEBUG_SPINLOCKS > 1)
+
+/*
+ * Debugging variants of the spinlock wrappers, for uniprocessor kernels
+ * only: instead of really spinning, they just set/clear the lock word
+ * with interrupts disabled and complain to the console when a lock is
+ * taken while already held, or released while not held.  Each complaint
+ * site is rate-limited to 20 messages via its static __babble counter.
+ */
+
+/*
+ * Map a spinlock address back to a printable name (the owning hwif,
+ * or "io_request_lock") for the diagnostic messages below.
+ */
+static const char *ide_lock_name(spinlock_t *spinlock)
+{
+	int index;
+
+	if (spinlock == &io_request_lock)
+		return "io_request_lock";
+	for (index = 0; index < MAX_HWIFS; index++) {
+		ide_hwif_t *hwif = &ide_hwifs[index];
+		ide_hwgroup_t *hwgroup = hwif->hwgroup;
+		if (spinlock == &hwgroup->spinlock)
+			return hwif->name;
+	}
+	return "?";
+}
+
+/* Disable local irqs, warn if the lock was already held, then "take" it. */
+#define IDE_SPIN_LOCK_IRQ(msg,spinlock) \
+{ \
+	static int __babble = 20; \
+	__cli(); \
+	if ((spinlock)->lock && __babble) { \
+		__babble--; \
+		printk("ide_lock: %s: already locked (%s)\n", msg, ide_lock_name(spinlock)); \
+	} \
+	/* spin_lock_irq(spinlock); */ \
+	(spinlock)->lock = 1; \
+}
+
+/* Same as above, but saves the irq flags into "flags" first. */
+#define IDE_SPIN_LOCK_IRQSAVE(msg,spinlock,flags) \
+{ \
+	__save_flags(flags); \
+	IDE_SPIN_LOCK_IRQ(msg,spinlock); \
+}
+
+/* Warn if the lock was not held, release it, and restore irq flags. */
+#define IDE_SPIN_UNLOCK_IRQRESTORE(msg,spinlock,flags) \
+{ \
+	static int __babble = 20; \
+	__cli(); \
+	if (!((spinlock)->lock) && __babble) { \
+		__babble--; \
+		printk("ide_unlock: %s: not locked (%s)\n", msg, ide_lock_name(spinlock)); \
+	} \
+	/* spin_unlock_irqrestore(msg,spinlock,flags); */ \
+	(spinlock)->lock = 0; \
+	restore_flags(flags); \
+}
+
+/* Unlock without changing the caller's irq state on exit. */
+#define IDE_SPIN_UNLOCK(msg,spinlock) \
+{ \
+	unsigned long __flags; \
+	__save_flags(__flags); \
+	IDE_SPIN_UNLOCK_IRQRESTORE(msg,spinlock,__flags); \
+}
+
+#else /* DEBUG_SPINLOCKS */
+
+/* Production build: the wrappers are the real spinlock primitives. */
+#define IDE_SPIN_LOCK_IRQ(msg,spinlock)			spin_lock_irq(spinlock)
+#define IDE_SPIN_LOCK_IRQSAVE(msg,spinlock,flags)	spin_lock_irqsave(spinlock,flags)
+#define IDE_SPIN_UNLOCK(msg,spinlock)			spin_unlock(spinlock)
+#define IDE_SPIN_UNLOCK_IRQRESTORE(msg,spinlock,flags)	spin_unlock_irqrestore(spinlock,flags)
+
+#endif /* DEBUG_SPINLOCKS */
+
/*
* This should get invoked any time we exit the driver to
* wait for an interrupt response from a drive. handler() points
*/
void ide_set_handler (ide_drive_t *drive, ide_handler_t *handler, unsigned int timeout)
{
+ unsigned long flags;
ide_hwgroup_t *hwgroup = HWGROUP(drive);
+
+ IDE_SPIN_LOCK_IRQSAVE("ide_set_handler", &hwgroup->spinlock, flags);
#ifdef DEBUG
if (hwgroup->handler != NULL) {
printk("%s: ide_set_handler: handler not null; old=%p, new=%p\n",
hwgroup->handler = handler;
hwgroup->timer.expires = jiffies + timeout;
add_timer(&(hwgroup->timer));
+ IDE_SPIN_UNLOCK_IRQRESTORE("ide_set_handler", &hwgroup->spinlock, flags);
}
/*
ide_hwif_t *hwif = HWIF(drive);
ide_hwgroup_t *hwgroup = HWGROUP(drive);
- __save_flags(flags);
- __cli(); /* Why ? */
+ __save_flags(flags); /* local CPU only */
+ __cli(); /* local CPU only */
/* For an ATAPI device, first try an ATAPI SRST. */
if (drive->media != ide_disk && !do_not_try_atapi) {
OUT_BYTE (WIN_SRST, IDE_COMMAND_REG);
hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
ide_set_handler (drive, &atapi_reset_pollfunc, HZ/20);
- __restore_flags (flags);
+ __restore_flags (flags); /* local CPU only */
return;
}
ide_set_handler (drive, &reset_pollfunc, HZ/20);
#endif /* OK_TO_RESET_CONTROLLER */
- __restore_flags (flags);
+ __restore_flags (flags); /* local CPU only */
}
/*
args[2] = IN_BYTE(IDE_NSECTOR_REG);
}
}
- __save_flags(flags);
- __cli();
+ IDE_SPIN_LOCK_IRQSAVE("ide_end_drive_cmd", &io_request_lock, flags);
drive->queue = rq->next;
blk_dev[MAJOR(rq->rq_dev)].current_request = NULL;
HWGROUP(drive)->rq = NULL;
rq->rq_status = RQ_INACTIVE;
+ IDE_SPIN_UNLOCK_IRQRESTORE("ide_end_drive_cmd", &io_request_lock, flags);
+ save_flags(flags); /* all CPUs; overkill? */
+ cli(); /* all CPUs; overkill? */
if (rq->sem != NULL)
- up(rq->sem);
- __restore_flags(flags);
+ up(rq->sem); /* inform originator that rq has been serviced */
+ restore_flags(flags); /* all CPUs; overkill? */
}
/*
unsigned long flags;
byte err = 0;
- __save_flags (flags);
- /* ide_sti(); HACK */
+ __save_flags (flags); /* local CPU only */
+ ide__sti(); /* local CPU only */
printk("%s: %s: status=0x%02x", drive->name, msg, stat);
#if FANCY_STATUS_DUMPS
printk(" { ");
#endif /* FANCY_STATUS_DUMPS */
printk("\n");
}
- __restore_flags (flags);
+ __restore_flags (flags); /* local CPU only */
return err;
}
byte err;
err = ide_dump_status(drive, msg, stat);
- if ((rq = HWGROUP(drive)->rq) == NULL || drive == NULL)
+ if (drive == NULL || (rq = HWGROUP(drive)->rq) == NULL)
return;
/* retry only "normal" I/O: */
if (rq->cmd == IDE_DRIVE_CMD) {
void ide_cmd(ide_drive_t *drive, byte cmd, byte nsect, ide_handler_t *handler)
{
ide_set_handler (drive, handler, WAIT_CMD);
- OUT_BYTE(drive->ctl,IDE_CONTROL_REG);
+ OUT_BYTE(drive->ctl,IDE_CONTROL_REG); /* clear nIEN */
OUT_BYTE(nsect,IDE_NSECTOR_REG);
OUT_BYTE(cmd,IDE_COMMAND_REG);
}
byte stat = GET_STAT();
int retries = 10;
- /* ide_sti(); HACK */
+ ide__sti(); /* local CPU only */
if ((stat & DRQ_STAT) && args && args[3]) {
byte io_32bit = drive->io_32bit;
drive->io_32bit = 0;
udelay(1); /* spec allows drive 400ns to assert "BUSY" */
if ((stat = GET_STAT()) & BUSY_STAT) {
- __save_flags(flags);
- /* ide_sti(); HACK */
+ __save_flags(flags); /* local CPU only */
+ ide__sti(); /* local CPU only */
timeout += jiffies;
while ((stat = GET_STAT()) & BUSY_STAT) {
if (0 < (signed long)(jiffies - timeout)) {
- __restore_flags(flags);
+ __restore_flags(flags); /* local CPU only */
ide_error(drive, "status timeout", stat);
return 1;
}
}
- __restore_flags(flags);
+ __restore_flags(flags); /* local CPU only */
}
udelay(1); /* allow status to settle, then read it again */
if (OK_STAT((stat = GET_STAT()), good, bad))
}
/*
- * do_request() initiates handling of a new I/O request
+ * start_request() initiates handling of a new I/O request
*/
-static inline void do_request (ide_hwgroup_t *hwgroup, ide_hwif_t *hwif, ide_drive_t *drive)
+static inline void start_request (ide_drive_t *drive)
{
unsigned long block, blockend;
struct request *rq = drive->queue;
unsigned int minor = MINOR(rq->rq_dev), unit = minor >> PARTN_BITS;
+ ide_hwif_t *hwif = HWIF(drive);
- /* ide_sti(); HACK */
+ ide__sti(); /* local CPU only */
#ifdef DEBUG
- printk("%s: do_request: current=0x%08lx\n", hwif->name, (unsigned long) rq);
+ printk("%s: start_request: current=0x%08lx\n", hwif->name, (unsigned long) rq);
#endif
if (unit >= MAX_DRIVES) {
printk("%s: bad device number: %s\n", hwif->name, kdevname(rq->rq_dev));
#if (DISK_RECOVERY_TIME > 0)
while ((read_timer() - hwif->last_time) < DISK_RECOVERY_TIME);
#endif
-
- hwgroup->hwif = hwif;
- hwgroup->drive = drive;
SELECT_DRIVE(hwif, drive);
if (ide_wait_stat(drive, drive->ready_stat, BUSY_STAT|DRQ_STAT, WAIT_READY)) {
printk("%s: drive not ready for command\n", drive->name);
return;
}
-
if (!drive->special.all) {
if (rq->cmd == IDE_DRIVE_CMD) {
execute_drive_cmd(drive, rq);
if (drive->driver != NULL)
DRIVER(drive)->end_request(0, HWGROUP(drive));
else
- ide_end_request(0, hwgroup);
+ ide_end_request(0, HWGROUP(drive));
}
/*
best = NULL;
drive = hwgroup->drive;
do {
- if (!drive->queue)
- continue;
- if (drive->sleep && 0 < (signed long)(drive->sleep - jiffies))
- continue;
- if (!best) {
- best = drive;
- continue;
+ if (drive->queue && (!drive->sleep || 0 <= (signed long)(jiffies - drive->sleep))) {
+ if (!best
+ || (drive->sleep && (!best->sleep || 0 < (signed long)(best->sleep - drive->sleep)))
+ || (!best->sleep && 0 < (signed long)(WAKEUP(best) - WAKEUP(drive))))
+ {
+ struct blk_dev_struct *bdev = &blk_dev[HWIF(drive)->major];
+ if (bdev->current_request != &bdev->plug)
+ best = drive;
+ }
}
- if (drive->sleep && (!best->sleep || drive->sleep < best->sleep))
- best = drive;
- if (!best->sleep && WAKEUP(drive) < WAKEUP(best))
- best = drive;
} while ((drive = drive->next) != hwgroup->drive);
- if (best != hwgroup->drive && best && best->service_time > WAIT_MIN_SLEEP && !best->sleep && best->nice1) {
- long t = (signed) (WAKEUP(best) - jiffies); /* BUGGY? */
+ if (best && best->nice1 && !best->sleep && best != hwgroup->drive && best->service_time > WAIT_MIN_SLEEP) {
+ long t = (signed long)(WAKEUP(best) - jiffies);
if (t >= WAIT_MIN_SLEEP) {
/*
* We *may* have some time to spare, but first let's see if
*/
drive = best->next;
do {
- if (drive->sleep) /* this drive tried to be nice to us */
- continue;
- if (WAKEUP(drive) > (jiffies - best->service_time) && WAKEUP(drive) < (jiffies + t)) { /* BUGGY? */
+ if (!drive->sleep
+ && 0 < (signed long)(WAKEUP(drive) - (jiffies - best->service_time))
+ && 0 < (signed long)((jiffies + t) - WAKEUP(drive)))
+ {
ide_stall_queue(best, IDE_MIN(t, 10 * WAIT_MIN_SLEEP));
goto repeat;
}
return best;
}
-static inline void ide_leave_hwgroup (ide_hwgroup_t *hwgroup)
-{
- ide_drive_t *drive = hwgroup->drive;
- unsigned long sleep = 0;
-
- hwgroup->rq = NULL;
- do {
- blk_dev[HWIF(drive)->major].current_request = NULL;
- if (!drive->sleep)
- continue;
- if (!sleep) {
- sleep = drive->sleep;
- continue;
- }
- if (drive->sleep < sleep)
- sleep = drive->sleep;
- } while ((drive = drive->next) != hwgroup->drive);
- if (sleep) {
- if (0 < (signed long)(jiffies + WAIT_MIN_SLEEP - sleep))
- sleep = jiffies + WAIT_MIN_SLEEP;
- hwgroup->timer.expires = sleep;
- add_timer(&hwgroup->timer);
- } else /* Ugly, but how can we sleep for the lock otherwise? perhaps from tq_scheduler? */
- ide_release_lock(&ide_lock);
- hwgroup->active = 0;
-}
-
/*
* The driver enables interrupts as much as possible. In order to do this,
* (a) the device-interrupt is always masked before entry, and
* tolerance for latency during I/O. For devices which don't suffer from
* this problem (most don't), the unmask flag can be set using the "hdparm"
* utility, to permit other interrupts during data/cmd transfers.
+ *
+ * Caller must have already acquired spinlock using *spinflags
+ *
*/
-void ide_do_request (ide_hwgroup_t *hwgroup)
+static void ide_do_request (ide_hwgroup_t *hwgroup, unsigned long *hwgroup_flags, int masked_irq)
{
- __cli(); /* paranoia */
- if (hwgroup->handler != NULL) {
- printk("%s: EEeekk!! handler not NULL in ide_do_request()\n", hwgroup->hwif->name);
- return;
- }
- do {
- ide_drive_t *drive = choose_drive(hwgroup);
- if (drive != NULL) {
- ide_hwif_t *hwif = HWIF(drive);
- if (hwgroup->hwif->sharing_irq && hwif != hwgroup->hwif)
- OUT_BYTE(hwgroup->drive->ctl|2, hwgroup->hwif->io_ports[IDE_CONTROL_OFFSET]);
- drive->sleep = 0;
- blk_dev[hwif->major].current_request = hwgroup->rq = drive->queue;
- drive->service_start = jiffies;
- do_request(hwgroup, hwif, drive);
- __cli();
- } else {
- ide_leave_hwgroup(hwgroup); /* no work left for this hwgroup */
+ struct blk_dev_struct *bdev;
+ ide_drive_t *drive;
+ ide_hwif_t *hwif;
+ unsigned long io_flags;
+
+ hwgroup->busy = 1;
+ while (hwgroup->handler == NULL) {
+ IDE_SPIN_LOCK_IRQSAVE("ide_do_request1", &io_request_lock, io_flags);
+ drive = choose_drive(hwgroup);
+ if (drive == NULL) {
+ unsigned long sleep = 0;
+
+ hwgroup->rq = NULL;
+ drive = hwgroup->drive;
+ do {
+ bdev = &blk_dev[HWIF(drive)->major];
+ if (bdev->current_request != &bdev->plug) /* FIXME: this will do for now */
+ bdev->current_request = NULL; /* (broken since patch-2.1.15) */
+ if (drive->sleep && (!sleep || 0 < (signed long)(sleep - drive->sleep)))
+ sleep = drive->sleep;
+ } while ((drive = drive->next) != hwgroup->drive);
+ IDE_SPIN_UNLOCK_IRQRESTORE("ide_do_request2", &io_request_lock, io_flags);
+ if (sleep) {
+ if (0 < (signed long)(jiffies + WAIT_MIN_SLEEP - sleep))
+ sleep = jiffies + WAIT_MIN_SLEEP;
+ hwgroup->timer.expires = sleep;
+ add_timer(&hwgroup->timer);
+ } else {
+ /* Ugly, but how can we sleep for the lock otherwise? perhaps from tq_scheduler? */
+ ide_release_lock(&ide_lock); /* for atari only */
+ }
+ hwgroup->busy = 0;
return;
}
- } while (hwgroup->handler == NULL);
+ hwif = HWIF(drive);
+ if (hwgroup->hwif->sharing_irq && hwif != hwgroup->hwif) /* set nIEN for previous hwif */
+ OUT_BYTE(hwgroup->drive->ctl|2, hwgroup->hwif->io_ports[IDE_CONTROL_OFFSET]);
+ hwgroup->hwif = hwif;
+ hwgroup->drive = drive;
+ drive->sleep = 0;
+ drive->service_start = jiffies;
+
+ bdev = &blk_dev[hwif->major];
+ if (bdev->current_request == &bdev->plug) /* FIXME: paranoia */
+ printk("%s: Huh? nuking plugged queue\n", drive->name);
+ bdev->current_request = hwgroup->rq = drive->queue;
+ IDE_SPIN_UNLOCK_IRQRESTORE("ide_do_request3", &io_request_lock, io_flags);
+
+ if (hwif->irq != masked_irq)
+ disable_irq(hwif->irq);
+ IDE_SPIN_UNLOCK_IRQRESTORE("ide_do_request4", &hwgroup->spinlock, *hwgroup_flags);
+ start_request(drive);
+ IDE_SPIN_LOCK_IRQSAVE("ide_do_request5", &hwgroup->spinlock, *hwgroup_flags);
+ if (hwif->irq != masked_irq)
+ enable_irq(hwif->irq);
+ }
}
/*
*/
struct request **ide_get_queue (kdev_t dev)
{
- struct blk_dev_struct *bdev = blk_dev + MAJOR(dev);
- ide_hwif_t *hwif = bdev->data;
+ ide_hwif_t *hwif = (ide_hwif_t *)blk_dev[MAJOR(dev)].data;
return &hwif->drives[DEVICE_NR(dev) & 1].queue;
}
/*
- * do_hwgroup_request() invokes ide_do_request() after first masking
- * all possible interrupts for the current hwgroup. This prevents race
- * conditions in the event that an unexpected interrupt occurs while
- * we are in the driver.
- *
- * Note that the io-request lock will guarantee that the driver never gets
- * re-entered even on another interrupt level, so we no longer need to
- * mask the irq's.
+ * do_hwgroup_request() invokes ide_do_request() after claiming hwgroup->busy.
*/
-static void do_hwgroup_request (ide_hwgroup_t *hwgroup)
+static void do_hwgroup_request (const char *msg, ide_hwgroup_t *hwgroup)
{
- if (hwgroup->handler == NULL) {
- del_timer(&hwgroup->timer);
- ide_get_lock(&ide_lock, ide_intr, hwgroup);
- hwgroup->active = 1;
- ide_do_request (hwgroup);
+ unsigned long flags;
+
+ IDE_SPIN_LOCK_IRQSAVE(msg, &hwgroup->spinlock, flags);
+ if (hwgroup->busy) {
+ IDE_SPIN_UNLOCK_IRQRESTORE(msg, &hwgroup->spinlock, flags);
+ return;
}
+ del_timer(&hwgroup->timer);
+ ide_get_lock(&ide_lock, ide_intr, hwgroup); /* for atari only */
+ ide_do_request(hwgroup, &flags, 0);
+ IDE_SPIN_UNLOCK_IRQRESTORE(msg, &hwgroup->spinlock, flags);
+}
+
+/*
+ * As of linux-2.1.95, ll_rw_blk.c invokes our do_idex_request()
+ * functions with the io_request_lock (spinlock) already grabbed.
+ * Since we also need to take that lock internally, on paths that
+ * do not necessarily originate through the do_idex_request() path,
+ * we have to undo the spinlock on entry, and restore it again on exit.
+ * Fortunately, this is mostly a nop for non-SMP kernels.
+ */
+static inline void unlock_do_hwgroup_request (ide_hwgroup_t *hwgroup)
+{
+	IDE_SPIN_UNLOCK("unlock_do_hwgroup_request", &io_request_lock);
+	do_hwgroup_request ("from unlock_do_hwgroup_request", hwgroup);
+	IDE_SPIN_LOCK_IRQ("unlock_do_hwgroup_request", &io_request_lock);
+}
-void do_ide0_request (void) /* invoked with __cli() */
+void do_ide0_request (void)
{
- do_hwgroup_request (ide_hwifs[0].hwgroup);
+ unlock_do_hwgroup_request (ide_hwifs[0].hwgroup);
}
#if MAX_HWIFS > 1
-void do_ide1_request (void) /* invoked with __cli() */
+void do_ide1_request (void)
{
- do_hwgroup_request (ide_hwifs[1].hwgroup);
+ unlock_do_hwgroup_request (ide_hwifs[1].hwgroup);
}
#endif /* MAX_HWIFS > 1 */
#if MAX_HWIFS > 2
-void do_ide2_request (void) /* invoked with __cli() */
+void do_ide2_request (void)
{
- do_hwgroup_request (ide_hwifs[2].hwgroup);
+ unlock_do_hwgroup_request (ide_hwifs[2].hwgroup);
}
#endif /* MAX_HWIFS > 2 */
#if MAX_HWIFS > 3
-void do_ide3_request (void) /* invoked with __cli() */
+void do_ide3_request (void)
{
- do_hwgroup_request (ide_hwifs[3].hwgroup);
+ unlock_do_hwgroup_request (ide_hwifs[3].hwgroup);
}
#endif /* MAX_HWIFS > 3 */
#if MAX_HWIFS > 4
-void do_ide4_request (void) /* invoked with cli() */
+void do_ide4_request (void)
{
- do_hwgroup_request (ide_hwifs[4].hwgroup);
+ unlock_do_hwgroup_request (ide_hwifs[4].hwgroup);
}
#endif /* MAX_HWIFS > 4 */
#if MAX_HWIFS > 5
-void do_ide5_request (void) /* invoked with cli() */
+void do_ide5_request (void)
{
- do_hwgroup_request (ide_hwifs[5].hwgroup);
+ unlock_do_hwgroup_request (ide_hwifs[5].hwgroup);
}
#endif /* MAX_HWIFS > 5 */
+/*
+ * start_next_request() - account for the just-completed request and
+ * kick off the next one, unless a handler is already armed (meaning the
+ * previous operation is still mid-flight and will re-enter the state
+ * machine from its own interrupt).
+ *
+ * Updates the per-hwif recovery timer and the drive's service_time
+ * (used by choose_drive() for fairness) before calling ide_do_request().
+ * masked_irq is passed through so ide_do_request() can skip
+ * disable_irq/enable_irq on the irq that is already masked by the
+ * caller's context (0 = none masked).
+ */
+static void start_next_request (ide_hwgroup_t *hwgroup, int masked_irq)
+{
+	unsigned long flags;
+	ide_drive_t *drive;
+
+	IDE_SPIN_LOCK_IRQSAVE("start_next_request", &hwgroup->spinlock, flags);
+	if (hwgroup->handler != NULL) {
+		IDE_SPIN_UNLOCK_IRQRESTORE("start_next_request", &hwgroup->spinlock, flags);
+		return;
+	}
+	drive = hwgroup->drive;
+	set_recovery_timer(HWIF(drive));
+	drive->service_time = jiffies - drive->service_start;
+	ide_do_request(hwgroup, &flags, masked_irq);
+	IDE_SPIN_UNLOCK_IRQRESTORE("start_next_request", &hwgroup->spinlock, flags);
+}
+
void ide_timer_expiry (unsigned long data)
{
ide_hwgroup_t *hwgroup = (ide_hwgroup_t *) data;
- ide_drive_t *drive = hwgroup->drive;
+ ide_drive_t *drive;
ide_handler_t *handler;
unsigned long flags;
- __save_flags(flags);
- __cli();
-
- if ((handler = hwgroup->handler) != NULL) {
- hwgroup->handler = NULL;
- if (hwgroup->poll_timeout != 0) /* polling in progress? */
- handler(drive);
- else { /* abort the operation */
- if (hwgroup->hwif->dmaproc)
- (void) hwgroup->hwif->dmaproc (ide_dma_end, drive);
- ide_error(drive, "irq timeout", GET_STAT());
- }
- __cli();
- if (hwgroup->handler == NULL) {
- set_recovery_timer(HWIF(drive));
- drive->service_time = jiffies - drive->service_start;
- do_hwgroup_request (hwgroup);
+ IDE_SPIN_LOCK_IRQSAVE("ide_timer_expiry1", &hwgroup->spinlock, flags);
+ drive = hwgroup->drive;
+ if ((handler = hwgroup->handler) == NULL) {
+ IDE_SPIN_UNLOCK_IRQRESTORE("ide_timer_expiry2", &hwgroup->spinlock, flags);
+ do_hwgroup_request("timer do_hwgroup_request", hwgroup);
+ return;
+ }
+ hwgroup->busy = 1; /* should already be "1" */
+ hwgroup->handler = NULL;
+ if (hwgroup->poll_timeout != 0) { /* polling in progress? */
+ IDE_SPIN_UNLOCK_IRQRESTORE("ide_timer_expiry3", &hwgroup->spinlock, flags);
+ handler(drive);
+ } else if (drive_is_ready(drive)) {
+ printk("%s: lost interrupt\n", drive->name);
+ IDE_SPIN_UNLOCK_IRQRESTORE("ide_timer_expiry4", &hwgroup->spinlock, flags);
+ handler(drive);
+ } else {
+ if (drive->waiting_for_dma) {
+ (void) hwgroup->hwif->dmaproc(ide_dma_end, drive);
+ printk("%s: timeout waiting for DMA\n", drive->name);
}
- } else
- do_hwgroup_request (hwgroup);
- __restore_flags(flags);
+ IDE_SPIN_UNLOCK_IRQRESTORE("ide_timer_expiry5", &hwgroup->spinlock, flags);
+ ide_error(drive, "irq timeout", GET_STAT());
+ }
+ del_timer(&hwgroup->timer);
+ start_next_request(hwgroup, 0);
}
/*
stat = IN_BYTE(hwif->io_ports[IDE_STATUS_OFFSET]);
if (!OK_STAT(stat, READY_STAT, BAD_STAT)) {
/* Try to not flood the console with msgs */
- static unsigned long last_msgtime = 0;
+ static unsigned long last_msgtime = 0, count = 0;
+ ++count;
if (0 < (signed long)(jiffies - (last_msgtime + HZ))) {
last_msgtime = jiffies;
- printk("%s%s: unexpected interrupt, status=0x%02x\n",
- hwif->name, (hwif->next == hwgroup->hwif) ? "" : "(?)", stat);
+ printk("%s%s: unexpected interrupt, status=0x%02x, count=%ld\n",
+ hwif->name, (hwif->next == hwgroup->hwif) ? "" : "(?)", stat, count);
}
}
}
} while ((hwif = hwif->next) != hwgroup->hwif);
}
-
-#ifdef __sparc_v9__
-#define IDE_IRQ_EQUAL(irq1, irq2) (1)
-#else
-#define IDE_IRQ_EQUAL(irq1, irq2) ((irq1) == (irq2))
-#endif
-
-static void do_ide_intr (int irq, void *dev_id, struct pt_regs *regs)
-{
- ide_hwgroup_t *hwgroup = dev_id;
- ide_hwif_t *hwif = hwgroup->hwif;
- ide_handler_t *handler;
-
- if (!ide_ack_intr (hwif->io_ports[IDE_STATUS_OFFSET], hwif->io_ports[IDE_IRQ_OFFSET]))
- return;
-
- if (IDE_IRQ_EQUAL(irq, hwif->irq)
- && (handler = hwgroup->handler) != NULL) {
- ide_drive_t *drive = hwgroup->drive;
-#if 1 /* temporary, remove later -- FIXME */
- {
- struct request *rq = hwgroup->rq;
- if (rq != NULL
- &&( MAJOR(rq->rq_dev) != HWIF(drive)->major
- || (MINOR(rq->rq_dev) >> PARTN_BITS) != drive->select.b.unit))
- {
- printk("ide_intr: got IRQ from wrong device: email mlord@pobox.com!!\n");
- return;
- }
- }
-#endif /* temporary */
- hwgroup->handler = NULL;
- del_timer(&(hwgroup->timer));
- /* if (drive->unmask)
- ide_sti(); HACK */
- handler(drive);
- /* this is necessary, as next rq may be different irq */
- if (hwgroup->handler == NULL) {
- set_recovery_timer(HWIF(drive));
- drive->service_time = jiffies - drive->service_start;
- ide_do_request(hwgroup);
- }
- } else {
- unexpected_intr(irq, hwgroup);
- }
- __cli();
- hwif = hwgroup->hwif;
-}
-
/*
* entry point for all interrupts, caller does __cli() for us
*/
void ide_intr (int irq, void *dev_id, struct pt_regs *regs)
{
unsigned long flags;
+ ide_hwgroup_t *hwgroup = (ide_hwgroup_t *)dev_id;
+ ide_hwif_t *hwif;
+ ide_drive_t *drive;
+ ide_handler_t *handler;
- spin_lock_irqsave(&io_request_lock, flags);
- do_ide_intr(irq, dev_id, regs);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ __cli(); /* local CPU only */
+ IDE_SPIN_LOCK_IRQSAVE("ide_intr1", &hwgroup->spinlock, flags);
+ hwif = hwgroup->hwif;
+ if ((handler = hwgroup->handler) == NULL || hwgroup->poll_timeout != 0) {
+ /*
+ * Not expecting an interrupt from this drive.
+ * That means this could be:
+ * (1) an interrupt from another PCI device
+ * sharing the same PCI INT# as us.
+ * or (2) a drive just entered sleep or standby mode,
+ * and is interrupting to let us know.
+ * or (3) a spurious interrupt of unknown origin.
+ *
+ * For PCI, we cannot tell the difference,
+ * so in that case we just ignore it and hope it goes away.
+ */
+#ifdef CONFIG_BLK_DEV_IDEPCI
+ if (IDE_PCI_DEVID_EQ(hwif->pci_devid, IDE_PCI_DEVID_NULL))
+#endif /* CONFIG_BLK_DEV_IDEPCI */
+ {
+ /*
+ * Probably not a shared PCI interrupt,
+ * so we can safely try to do something about it:
+ */
+ (void)ide_ack_intr(hwif->io_ports[IDE_STATUS_OFFSET], hwif->io_ports[IDE_IRQ_OFFSET]);
+ unexpected_intr(irq, hwgroup);
+ }
+ IDE_SPIN_UNLOCK_IRQRESTORE("ide_intr2", &hwgroup->spinlock, flags);
+ return;
+ }
+ drive = hwgroup->drive;
+ if (!drive || !drive_is_ready(drive)) {
+ IDE_SPIN_UNLOCK_IRQRESTORE("ide_intr3", &hwgroup->spinlock, flags);
+ return;
+ }
+ hwgroup->handler = NULL;
+ (void)ide_ack_intr(hwif->io_ports[IDE_STATUS_OFFSET], hwif->io_ports[IDE_IRQ_OFFSET]);
+ del_timer(&(hwgroup->timer));
+ IDE_SPIN_UNLOCK_IRQRESTORE("ide_intr4", &hwgroup->spinlock, flags);
+ if (drive->unmask)
+ ide__sti(); /* local CPU only */
+ handler(drive); /* service this interrupt, may set handler for next interrupt */
+ /*
+ * Note that handler() may have set things up for another
+ * interrupt to occur soon, but it cannot happen until
+ * we exit from this routine, because it will be the
+ * same irq as is currently being serviced here,
+ * and Linux won't allow another (on any CPU) until we return.
+ */
+ start_next_request(hwgroup, hwif->irq);
}
/*
if (action == ide_wait)
rq->sem = &sem;
- __save_flags(flags);
- __cli();
+ IDE_SPIN_LOCK_IRQSAVE("ide_do_drive_cmd", &io_request_lock, flags);
cur_rq = drive->queue;
-
if (cur_rq == NULL || action == ide_preempt) {
rq->next = cur_rq;
drive->queue = rq;
rq->next = cur_rq->next;
cur_rq->next = rq;
}
- if (!hwgroup->active) {
- do_hwgroup_request(hwgroup);
- __cli();
- }
- if (action == ide_wait && rq->rq_status != RQ_INACTIVE)
+ IDE_SPIN_UNLOCK_IRQRESTORE("ide_do_drive_cmd", &io_request_lock, flags);
+ do_hwgroup_request("drive_cmd do_hwgroup_request", hwgroup);
+ save_flags(flags); /* all CPUs; overkill? */
+ cli(); /* all CPUs; overkill? */
+ if (action == ide_wait && rq->rq_status != RQ_INACTIVE)
down(&sem); /* wait for it to be serviced */
- __restore_flags(flags);
+ restore_flags(flags); /* all CPUs; overkill? */
return rq->errors ? -EIO : 0; /* return -EIO if errors */
}
int ide_revalidate_disk(kdev_t i_rdev)
{
ide_drive_t *drive;
+ ide_hwgroup_t *hwgroup;
unsigned int p, major, minor;
long flags;
return -ENODEV;
major = MAJOR(i_rdev);
minor = drive->select.b.unit << PARTN_BITS;
- __save_flags(flags);
- __cli();
+ hwgroup = HWGROUP(drive);
+ IDE_SPIN_LOCK_IRQSAVE("ide_revalidate_disk", &hwgroup->spinlock, flags);
if (drive->busy || (drive->usage > 1)) {
- __restore_flags(flags);
+ IDE_SPIN_UNLOCK_IRQRESTORE("ide_revalidate_disk", &hwgroup->spinlock, flags);
return -EBUSY;
};
drive->busy = 1;
MOD_INC_USE_COUNT;
- __restore_flags(flags);
+ IDE_SPIN_UNLOCK_IRQRESTORE("ide_revalidate_disk", &hwgroup->spinlock, flags);
for (p = 0; p < (1<<PARTN_BITS); ++p) {
if (drive->part[p].nr_sects > 0) {
if (index >= MAX_HWIFS)
return;
- __save_flags(flags);
- __cli();
+ save_flags(flags); /* all CPUs */
+ cli(); /* all CPUs */
hwif = &ide_hwifs[index];
if (!hwif->present)
goto abort;
}
init_hwif_data (index); /* restore hwif data to pristine status */
abort:
- __restore_flags(flags);
+ restore_flags(flags); /* all CPUs */
}
int ide_register (int arg1, int arg2, int irq)
return hwif->present ? index : -1;
}
-void ide_add_setting(ide_drive_t *drive, char *name, int rw, int read_ioctl, int write_ioctl, int data_type, int min, int max, int mul_factor, int div_factor, void *data, ide_procset_t *set)
+void ide_add_setting(ide_drive_t *drive, const char *name, int rw, int read_ioctl, int write_ioctl, int data_type, int min, int max, int mul_factor, int div_factor, void *data, ide_procset_t *set)
{
ide_settings_t **p = (ide_settings_t **) &drive->settings, *setting = NULL;
int ide_read_setting(ide_drive_t *drive, ide_settings_t *setting)
{
- if (!(setting->rw & SETTING_READ))
- return -EINVAL;
- switch(setting->data_type) {
- case TYPE_BYTE:
- return *((u8 *) setting->data);
- case TYPE_SHORT:
- return *((u16 *) setting->data);
- case TYPE_INT:
- case TYPE_INTA:
- return *((u32 *) setting->data);
- default:
- return -EINVAL;
+ int val = -EINVAL;
+ unsigned long flags;
+
+ if ((setting->rw & SETTING_READ)) {
+ IDE_SPIN_LOCK_IRQSAVE("ide_read_setting", &HWGROUP(drive)->spinlock, flags);
+ switch(setting->data_type) {
+ case TYPE_BYTE:
+ val = *((u8 *) setting->data);
+ break;
+ case TYPE_SHORT:
+ val = *((u16 *) setting->data);
+ break;
+ case TYPE_INT:
+ case TYPE_INTA:
+ val = *((u32 *) setting->data);
+ break;
+ }
+ IDE_SPIN_UNLOCK_IRQRESTORE("ide_read_setting", &HWGROUP(drive)->spinlock, flags);
+ }
+ return val;
+}
+
+/*
+ * ide_spin_wait_hwgroup() - busy-wait (up to 3*HZ jiffies) for the
+ * drive's hwgroup to become idle, then return with the hwgroup
+ * spinlock held and the irq state saved in *flags.
+ *
+ * Returns 0 with the lock held on success; -EBUSY (lock NOT held,
+ * after printing a "channel busy" message) if the hwgroup stays busy
+ * past the timeout.  Caller must IDE_SPIN_UNLOCK_IRQRESTORE(*flags)
+ * after a successful return.  msg tags the diagnostics.
+ */
+int ide_spin_wait_hwgroup (const char *msg, ide_drive_t *drive, unsigned long *flags)
+{
+	ide_hwgroup_t *hwgroup = HWGROUP(drive);
+	unsigned long timeout = jiffies + (3 * HZ);
+
+	IDE_SPIN_LOCK_IRQSAVE(msg, &hwgroup->spinlock, *flags);
+	while (hwgroup->busy) {
+		IDE_SPIN_UNLOCK_IRQRESTORE(msg, &hwgroup->spinlock, *flags);
+		__sti();	/* local CPU only; needed for jiffies */
+		if (0 < (signed long)(jiffies - timeout)) {
+			printk("%s: %s: channel busy\n", drive->name, msg);
+			return -EBUSY;
+		}
+		IDE_SPIN_LOCK_IRQSAVE(msg, &hwgroup->spinlock, *flags);
	}
+	return 0;
}
int ide_write_setting(ide_drive_t *drive, ide_settings_t *setting, int val)
{
unsigned long flags;
- int i, rc = 0;
+ int i;
u32 *p;
- if (!suser())
+ if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (!(setting->rw & SETTING_WRITE))
return -EPERM;
if (val < setting->min || val > setting->max)
return -EINVAL;
- __save_flags(flags);
- __cli();
if (setting->set)
- rc = setting->set(drive, val);
- else switch (setting->data_type) {
+ return setting->set(drive, val);
+ if (ide_spin_wait_hwgroup("ide_write_settings", drive, &flags))
+ return -EBUSY;
+ switch (setting->data_type) {
case TYPE_BYTE:
*((u8 *) setting->data) = val;
break;
*p = val;
break;
}
- __restore_flags(flags);
- return rc;
+ IDE_SPIN_UNLOCK_IRQRESTORE("ide_write_setting4", &HWGROUP(drive)->spinlock, flags);
+ return 0;
}
static int set_io_32bit(ide_drive_t *drive, int arg)
return 0;
}
case BLKFLSBUF:
- if (!suser()) return -EACCES;
+ if (!capable(CAP_SYS_ADMIN)) return -EACCES;
fsync_dev(inode->i_rdev);
invalidate_buffers(inode->i_rdev);
return 0;
return put_user(drive->part[MINOR(inode->i_rdev)&PARTN_MASK].nr_sects, (long *) arg);
case BLKRRPART: /* Re-read partition tables */
- if (!suser()) return -EACCES;
+ if (!capable(CAP_SYS_ADMIN)) return -EACCES;
return ide_revalidate_disk(inode->i_rdev);
+ case HDIO_OBSOLETE_IDENTITY:
case HDIO_GET_IDENTITY:
if (MINOR(inode->i_rdev) & PARTN_MASK)
return -EINVAL;
if (drive->id == NULL)
return -ENOMSG;
-#if 0
- if (copy_to_user((char *)arg, (char *)drive->id, sizeof(*drive->id)))
+ if (copy_to_user((char *)arg, (char *)drive->id, (cmd == HDIO_GET_IDENTITY) ? sizeof(*drive->id) : 142))
return -EFAULT;
-#else
- if (copy_to_user((char *)arg, (char *)drive->id, 142))
- return -EFAULT;
-#endif
return 0;
case HDIO_GET_NICE:
{
byte args[4], *argbuf = args;
int argsize = 4;
- if (!suser()) return -EACCES;
+ if (!capable(CAP_SYS_ADMIN)) return -EACCES;
if (NULL == (void *) arg)
return ide_do_drive_cmd(drive, &rq, ide_wait);
if (copy_from_user(args, (void *)arg, 4))
case HDIO_SCAN_HWIF:
{
int args[3];
- if (!suser()) return -EACCES;
+ if (!capable(CAP_SYS_ADMIN)) return -EACCES;
if (copy_from_user(args, (void *)arg, 3 * sizeof(int)))
return -EFAULT;
if (ide_register(args[0], args[1], args[2]) == -1)
return 0;
}
case HDIO_SET_NICE:
- if (!suser()) return -EACCES;
+ if (!capable(CAP_SYS_ADMIN)) return -EACCES;
if (drive->driver == NULL)
return -EPERM;
if (arg != (arg & ((1 << IDE_NICE_DSC_OVERLAP) | (1 << IDE_NICE_1))))
goto bad_option; /* chipset already specified */
if (i <= -7 && hw != 0)
goto bad_hwif; /* chipset drivers are for "ide0=" only */
- if (ide_hwifs[hw^1].chipset != ide_unknown)
+ if (i <= -7 && ide_hwifs[hw^1].chipset != ide_unknown)
goto bad_option; /* chipset for 2nd port already specified */
printk("\n");
}
ide_probe_for_rz100x();
}
#endif /* CONFIG_BLK_DEV_RZ1000 */
-#ifdef CONFIG_BLK_DEV_SL82C105
- {
- extern void ide_probe_for_sl82c105(void);
- ide_probe_for_sl82c105();
- }
-#endif /* CONFIG_BLK_DEV_SL82C105 */
#endif /* CONFIG_BLK_DEV_IDEPCI */
}
#endif /* CONFIG_PCI */
probe_for_hwifs ();
#ifdef CONFIG_BLK_DEV_IDE
-#ifdef __mc68000__
+#if defined(__mc68000__) || defined(CONFIG_APUS)
if (ide_hwifs[0].io_ports[IDE_DATA_OFFSET]) {
- ide_get_lock(&ide_lock, NULL, NULL);
+ ide_get_lock(&ide_lock, NULL, NULL); /* for atari only */
disable_irq(ide_hwifs[0].irq);
}
-#endif /* __mc68000__ */
+#endif /* __mc68000__ || CONFIG_APUS */
(void) ideprobe_init();
-#ifdef __mc68000__
+#if defined(__mc68000__) || defined(CONFIG_APUS)
if (ide_hwifs[0].io_ports[IDE_DATA_OFFSET]) {
enable_irq(ide_hwifs[0].irq);
- ide_release_lock(&ide_lock);
+ ide_release_lock(&ide_lock); /* for atari only */
}
-#endif /* __mc68000__ */
+#endif /* __mc68000__ || CONFIG_APUS */
#endif /* CONFIG_BLK_DEV_IDE */
#ifdef CONFIG_PROC_FS
{
unsigned long flags;
- __save_flags(flags);
- __cli();
- if (version != IDE_SUBDRIVER_VERSION || !drive->present || drive->driver != NULL ||
- drive->busy || drive->usage) {
- __restore_flags(flags);
+ save_flags(flags); /* all CPUs */
+ cli(); /* all CPUs */
+ if (version != IDE_SUBDRIVER_VERSION || !drive->present || drive->driver != NULL || drive->busy || drive->usage) {
+ restore_flags(flags); /* all CPUs */
return 1;
}
drive->driver = driver;
setup_driver_defaults(drive);
- __restore_flags(flags);
+ restore_flags(flags); /* all CPUs */
if (drive->autotune != 2) {
if (driver->supports_dma && HWIF(drive)->dmaproc != NULL)
(void) (HWIF(drive)->dmaproc(ide_dma_check, drive));
{
unsigned long flags;
- __save_flags(flags);
- __cli();
+ save_flags(flags); /* all CPUs */
+ cli(); /* all CPUs */
if (drive->usage || drive->busy || drive->driver == NULL || DRIVER(drive)->busy) {
- __restore_flags(flags);
+ restore_flags(flags); /* all CPUs */
return 1;
}
#ifdef CONFIG_PROC_FS
#endif
auto_remove_settings(drive);
drive->driver = NULL;
- __restore_flags(flags);
+ restore_flags(flags); /* all CPUs */
return 0;
}
unsigned long service_start; /* time we started last request */
unsigned long service_time; /* service time of last request */
special_t special; /* special action flags */
+ byte keep_settings; /* restore settings after drive reset */
+ byte using_dma; /* disk is using dma for read/write */
+ byte waiting_for_dma; /* dma currently in progress */
+ byte unmask; /* flag: okay to unmask other irqs */
+ byte slow; /* flag: slow data port */
+ byte bswap; /* flag: byte swap data */
+ byte dsc_overlap; /* flag: DSC overlap */
+ byte nice1; /* flag: give potential excess bandwidth */
unsigned present : 1; /* drive is physically present */
unsigned noprobe : 1; /* from: hdx=noprobe */
- byte keep_settings; /* restore settings after drive reset */
unsigned busy : 1; /* currently doing revalidate_disk() */
unsigned removable : 1; /* 1 if need to do check_media_change */
- byte using_dma; /* disk is using dma for read/write */
unsigned forced_geom : 1; /* 1 if hdx=c,h,s was given at boot */
- byte unmask; /* flag: okay to unmask other irqs */
unsigned no_unmask : 1; /* disallow setting unmask bit */
unsigned no_io_32bit : 1; /* disallow enabling 32bit I/O */
unsigned nobios : 1; /* flag: do not probe bios for drive */
- byte slow; /* flag: slow data port */
- unsigned autotune : 2; /* 1=autotune, 2=noautotune, 0=default */
unsigned revalidate : 1; /* request revalidation */
- byte bswap; /* flag: byte swap data */
- byte dsc_overlap; /* flag: DSC overlap */
unsigned atapi_overlap : 1; /* flag: ATAPI overlap (not supported) */
unsigned nice0 : 1; /* flag: give obvious excess bandwidth */
- byte nice1; /* flag: give potential excess bandwidth */
unsigned nice2 : 1; /* flag: give a share in our own bandwidth */
+ unsigned doorlocking : 1; /* flag: for removable only: door lock/unlock works */
+ unsigned autotune : 2; /* 1=autotune, 2=noautotune, 0=default */
#if FAKE_FDISK_FOR_EZDRIVE
unsigned remap_0_to_1 : 1; /* flag: partitioned with ezdrive */
#endif /* FAKE_FDISK_FOR_EZDRIVE */
* should either try again later, or revert to PIO for the current request.
*/
typedef enum { ide_dma_read, ide_dma_write, ide_dma_begin, ide_dma_end,
- ide_dma_check, ide_dma_on, ide_dma_off, ide_dma_off_quietly
+ ide_dma_check, ide_dma_on, ide_dma_off, ide_dma_off_quietly,
+ ide_dma_test_irq
} ide_dma_action_t;
typedef int (ide_dmaproc_t)(ide_dma_action_t, ide_drive_t *);
unsigned reset : 1; /* reset after probe */
unsigned no_autodma : 1; /* don't automatically enable DMA at boot */
byte channel; /* for dual-port chips: 0=primary, 1=secondary */
- struct pci_dev *pci_dev; /* for pci chipsets */
+ struct pci_dev *pci_dev; /* for pci chipsets */
ide_pci_devid_t pci_devid; /* for pci chipsets: {VID,DID} */
#if (DISK_RECOVERY_TIME > 0)
unsigned long last_time; /* time when previous rq was done */
typedef void (ide_handler_t)(ide_drive_t *);
typedef struct hwgroup_s {
+ spinlock_t spinlock; /* protects "busy" and "handler" */
ide_handler_t *handler;/* irq handler, if active */
+ int busy; /* BOOL: protects all fields below */
ide_drive_t *drive; /* current drive */
ide_hwif_t *hwif; /* ptr to current hwif in linked-list */
struct request *rq; /* current request */
struct timer_list timer; /* failsafe timer */
struct request wrq; /* local copy of current write rq */
unsigned long poll_timeout; /* timeout value during long polls */
- int active; /* set when servicing requests */
} ide_hwgroup_t;
/*
struct ide_settings_s *next;
} ide_settings_t;
-void ide_add_setting(ide_drive_t *drive, char *name, int rw, int read_ioctl, int write_ioctl, int data_type, int min, int max, int mul_factor, int div_factor, void *data, ide_procset_t *set);
+void ide_add_setting(ide_drive_t *drive, const char *name, int rw, int read_ioctl, int write_ioctl, int data_type, int min, int max, int mul_factor, int div_factor, void *data, ide_procset_t *set);
void ide_remove_setting(ide_drive_t *drive, char *name);
ide_settings_t *ide_find_setting_by_name(ide_drive_t *drive, char *name);
int ide_read_setting(ide_drive_t *t, ide_settings_t *setting);
*/
struct request **ide_get_queue (kdev_t dev);
+int ide_spin_wait_hwgroup(const char *msg, ide_drive_t *drive, unsigned long *flags);
void ide_timer_expiry (unsigned long data);
void ide_intr (int irq, void *dev_id, struct pt_regs *regs);
void ide_geninit (struct gendisk *gd);
#endif
#ifdef _IDE_C
+#ifdef CONFIG_BLK_DEV_IDE
+int ideprobe_init (void);
+#endif /* CONFIG_BLK_DEV_IDE */
#ifdef CONFIG_BLK_DEV_IDEDISK
int idedisk_init (void);
#endif /* CONFIG_BLK_DEV_IDEDISK */
void ide_dma_intr (ide_drive_t *drive);
int ide_dmaproc (ide_dma_action_t func, ide_drive_t *drive);
void ide_setup_dma (ide_hwif_t *hwif, unsigned long dmabase, unsigned int num_ports) __init;
-unsigned long ide_get_or_set_dma_base (struct pci_dev *dev, ide_hwif_t *hwif, int extra, const char *name) __init;
+unsigned long ide_get_or_set_dma_base (ide_hwif_t *hwif, int extra, const char *name) __init;
#endif
-#ifdef CONFIG_BLK_DEV_IDE
-int ideprobe_init (void);
-#endif /* CONFIG_BLK_DEV_IDE */
-
#ifdef CONFIG_BLK_DEV_PDC4030
#include "pdc4030.h"
#define IS_PDC4030_DRIVE (HWIF(drive)->chipset == ide_pdc4030)
info.lo_flags = lo->lo_flags;
strncpy(info.lo_name, lo->lo_name, LO_NAME_SIZE);
info.lo_encrypt_type = lo->lo_encrypt_type;
- if (lo->lo_encrypt_key_size && suser()) {
+ if (lo->lo_encrypt_key_size && capable(CAP_SYS_ADMIN)) {
info.lo_encrypt_key_size = lo->lo_encrypt_key_size;
memcpy(info.lo_encrypt_key, lo->lo_encrypt_key,
lo->lo_encrypt_key_size);
int minor, err;
struct hd_geometry *loc = (struct hd_geometry *) arg;
- if (!suser())
+ if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (((minor=MINOR(inode->i_rdev)) & 0x80) &&
struct nbd_device *lo;
int dev, error;
- if (!suser())
+ if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (!inode)
return -EINVAL;
struct pci_dev *dev = hwif->pci_dev;
unsigned long flags;
- save_flags(flags); cli();
+ __save_flags(flags); /* local CPU only */
+ __cli(); /* local CPU only */
new = *old;
/* adjust IRQ enable bit */
*old = new;
(void) pci_write_config_dword(dev, 0x40, new);
}
- restore_flags(flags);
+ __restore_flags(flags); /* local CPU only */
}
static void ns87415_selectproc (ide_drive_t *drive)
static int ns87415_dmaproc(ide_dma_action_t func, ide_drive_t *drive)
{
- ide_hwif_t *hwif = HWIF(drive);
+ ide_hwif_t *hwif = HWIF(drive);
+ byte dma_stat;
switch (func) {
case ide_dma_end: /* returns 1 on error, 0 otherwise */
- {
- byte dma_stat = inb(hwif->dma_base+2);
- int rc = (dma_stat & 7) != 4;
- /* from errata: stop DMA, clear INTR & ERROR */
- outb(7, hwif->dma_base);
- /* clear the INTR & ERROR bits */
- outb(dma_stat|6, hwif->dma_base+2);
- /* verify good DMA status */
- return rc;
- }
+ drive->waiting_for_dma = 0;
+ dma_stat = inb(hwif->dma_base+2);
+ outb(7, hwif->dma_base); /* from errata: stop DMA, clear INTR & ERROR */
+ outb(dma_stat|6, hwif->dma_base+2); /* clear the INTR & ERROR bits */
+ return (dma_stat & 7) != 4; /* verify good DMA status */
case ide_dma_write:
case ide_dma_read:
- /* select DMA xfer */
- ns87415_prepare_drive(drive, 1);
- /* use standard DMA stuff */
- if (!ide_dmaproc(func, drive))
+ ns87415_prepare_drive(drive, 1); /* select DMA xfer */
+ if (!ide_dmaproc(func, drive)) /* use standard DMA stuff */
return 0;
- /* DMA failed: select PIO xfer */
- ns87415_prepare_drive(drive, 0);
+ ns87415_prepare_drive(drive, 0); /* DMA failed: select PIO xfer */
return 1;
default:
- /* use standard DMA stuff */
- return ide_dmaproc(func, drive);
+ return ide_dmaproc(func, drive); /* use standard DMA stuff */
}
}
{
struct pci_dev *dev = hwif->pci_dev;
unsigned int ctrl, using_inta;
- byte progif, stat;
- int timeout;
+ byte progif;
/*
* We cannot probe for IRQ: both ports share common IRQ on INTA.
pci_write_config_byte(dev, 0x55, 0xee);
#ifdef __sparc_v9__
+{
+ int timeout;
+ byte stat;
/*
* XXX: Reset the device, if we don't it will not respond
* to SELECT_DRIVE() properly during first probe_hwif().
if (stat == 0xff)
break;
} while ((stat & BUSY_STAT) && --timeout);
+}
#endif
}
if (!using_inta)
hwif->name, ax, second.data_time, second.recovery_time, drdy);
#endif
- save_flags(flags);
- cli();
+ save_flags(flags); /* all CPUs */
+ cli(); /* all CPUs */
reg_base = hwif->io_ports[IDE_DATA_OFFSET];
outb(0xc0, reg_base+CNTRL_REG); /* allow Register-B */
write_reg(misc, MISC_REG); /* set address setup, DRDY timings, */
/* and read prefetch for both drives */
- restore_flags(flags);
+ restore_flags(flags); /* all CPUs */
}
/*
put_user(pd_hd[dev].start_sect,(long *)&geo->start);
return 0;
case BLKRASET:
- if(!suser()) return -EACCES;
+ if(!capable(CAP_SYS_ADMIN)) return -EACCES;
if(!(inode->i_rdev)) return -EINVAL;
if(arg > 0xff) return -EINVAL;
read_ahead[MAJOR(inode->i_rdev)] = arg;
put_user(pd_hd[dev].nr_sects,(long *) arg);
return (0);
case BLKFLSBUF:
- if(!suser()) return -EACCES;
+ if(!capable(CAP_SYS_ADMIN)) return -EACCES;
if(!(inode->i_rdev)) return -EINVAL;
fsync_dev(inode->i_rdev);
invalidate_buffers(inode->i_rdev);
return 0;
case BLKRRPART:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
return pd_revalidate(inode->i_rdev);
RO_IOCTLS(inode->i_rdev,arg);
default:
put_user(0,(long *)&geo->start);
return 0;
case BLKRASET:
- if(!suser()) return -EACCES;
+ if(!capable(CAP_SYS_ADMIN)) return -EACCES;
if(!(inode->i_rdev)) return -EINVAL;
if(arg > 0xff) return -EINVAL;
read_ahead[MAJOR(inode->i_rdev)] = arg;
put_user(PF.capacity,(long *) arg);
return (0);
case BLKFLSBUF:
- if(!suser()) return -EACCES;
+ if(!capable(CAP_SYS_ADMIN)) return -EACCES;
if(!(inode->i_rdev)) return -EINVAL;
fsync_dev(inode->i_rdev);
invalidate_buffers(inode->i_rdev);
do {
stat=GET_STAT();
if(stat & DRQ_STAT) {
-/* unsigned long flags;
- save_flags(flags);
- cli();
disable_irq(HWIF(drive)->irq);
-*/
ide_intr(HWIF(drive)->irq,HWGROUP(drive),NULL);
-/* enable_irq(HWIF(drive)->irq);
- restore_flags(flags);
-*/
+ enable_irq(HWIF(drive)->irq);
return;
}
if(IN_BYTE(IDE_SELECT_REG) & 0x01)
return;
}
if (!drive->unmask)
- cli();
+ __cli(); /* local CPU only */
HWGROUP(drive)->wrq = *rq; /* scratchpad */
promise_write(drive);
return;
}
break;
case BLKRASET:
- if (!suser())
+ if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (!inode->i_rdev)
return -EINVAL;
}
break;
case BLKFLSBUF:
- if (!suser())
+ if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (!inode->i_rdev)
return -EINVAL;
return 0;
case BLKRRPART:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
return (ps2esdi_reread_partitions(inode->i_rdev));
RO_IOCTLS(inode->i_rdev, arg);
}
pio = ide_get_best_pio_mode(drive, pio, 3, NULL);
- save_flags(flags);
- cli();
+ save_flags(flags); /* all CPUs */
+ cli(); /* all CPUs */
outb_p(0x8d,0xb0);
outb_p(0x0 ,0xb2);
outb_p(((pio+1)<<4)|0x0f,0xb3);
inb(0x3f6);
- restore_flags(flags);
+ restore_flags(flags); /* all CPUs */
}
void init_qd6580 (void)
switch (cmd) {
case BLKFLSBUF:
- if (!suser()) return -EACCES;
+ if (!capable(CAP_SYS_ADMIN)) return -EACCES;
invalidate_buffers(inode->i_rdev);
break;
case BLKGETSIZE: /* Return device size */
ide_ioreg_t chrp_ide_regbase[MAX_HWIFS];
ide_ioreg_t chrp_idedma_regbase;
-void ide_init_sl82c105(struct pci_dev *dev) {
-
+void ide_init_sl82c105(ide_hwif_t *hwif)
+{
+ struct pci_dev *dev = hwif->pci_dev;
unsigned short t16;
unsigned int t32;
pci_write_config_dword(dev, 0x40, 0x10ff08a1);
}
-
-void ide_probe_for_sl82c105(void)
-{
- struct pci_dev *dev = NULL;
-
- while ((dev = pci_find_device(PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_82C105, dev)))
- ide_init_sl82c105(dev);
-}
-
+#if 0 /* nobody ever calls these.. ?? */
void chrp_ide_probe(void) {
struct pci_dev *pdev = pci_find_device(PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_82C105, NULL);
if (irq != NULL)
*irq = chrp_ide_irq;
}
-
+#endif
/* select PIO or DMA */
reg = use_dma ? (0x21 | 0x82) : (0x21 & ~0x82);
- save_flags(flags);
- cli();
+ __save_flags(flags); /* local CPU only */
+ __cli(); /* local CPU only */
if (reg != hwif->select_data) {
hwif->select_data = reg;
outw(reg, hwif->config_data+3);
}
- restore_flags(flags);
+ __restore_flags(flags); /* local CPU only */
}
static void trm290_selectproc (ide_drive_t *drive)
break; /* try PIO instead of DMA */
trm290_prepare_drive(drive, 1); /* select DMA xfer */
outl(virt_to_bus(hwif->dmatable)|reading|writing, hwif->dma_base);
+ drive->waiting_for_dma = 1;
outw((count * 2) - 1, hwif->dma_base+2); /* start DMA */
if (drive->media != ide_disk)
return 0;
ide_set_handler(drive, &ide_dma_intr, WAIT_CMD);
OUT_BYTE(reading ? WIN_READDMA : WIN_WRITEDMA, IDE_COMMAND_REG);
+ return 0;
case ide_dma_begin:
return 0;
case ide_dma_end:
+ drive->waiting_for_dma = 0;
return (inw(hwif->dma_base+2) != 0x00ff);
+ case ide_dma_test_irq:
+ return (inw(hwif->dma_base+2) == 0x00ff);
default:
return ide_dmaproc(func, drive);
}
printk("TRM290: using default config base at 0x%04lx\n", hwif->config_data);
}
- save_flags(flags);
- cli();
+ __save_flags(flags); /* local CPU only */
+ __cli(); /* local CPU only */
/* put config reg into first byte of hwif->select_data */
outb(0x51|(hwif->channel<<3), hwif->config_data+1);
hwif->select_data = 0x21; /* select PIO as default */
reg = inb(hwif->config_data+3); /* get IRQ info */
reg = (reg & 0x10) | 0x03; /* mask IRQs for both ports */
outb(reg, hwif->config_data+3);
- restore_flags(flags);
+ __restore_flags(flags); /* local CPU only */
if ((reg & 0x10))
hwif->irq = hwif->channel ? 15 : 14; /* legacy mode */
else if (!hwif->irq && hwif->mate && hwif->mate->irq)
hwif->irq = hwif->mate->irq; /* sharing IRQ with mate */
- ide_setup_dma(hwif, (hwif->config_data + 4) ^ (hwif->channel ? 0x0080 : 0x0000), 2);
+ ide_setup_dma(hwif, (hwif->config_data + 4) ^ (hwif->channel ? 0x0080 : 0x0000), 3);
hwif->dmaproc = &trm290_dmaproc;
hwif->selectproc = &trm290_selectproc;
hwif->no_autodma = 1; /* play it safe for now */
pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
printk("%s: setting umc8672 to PIO mode%d (speed %d)\n", drive->name, pio, pio_to_umc[pio]);
- save_flags(flags);
- cli();
+ save_flags(flags); /* all CPUs */
+ cli(); /* all CPUs */
if (hwgroup && hwgroup->handler != NULL) {
printk("umc8672: other interface is busy: exiting tune_umc()\n");
} else {
current_speeds[drive->name[2] - 'a'] = pio_to_umc[pio];
umc_set_speeds (current_speeds);
}
- restore_flags(flags);
+ restore_flags(flags); /* all CPUs */
}
void init_umc8672 (void) /* called from ide.c */
{
unsigned long flags;
- save_flags(flags);
- cli ();
+ __save_flags(flags); /* local CPU only */
+ __cli(); /* local CPU only */
if (check_region(0x108, 2)) {
- restore_flags(flags);
+ __restore_flags(flags);
printk("\numc8672: PORTS 0x108-0x109 ALREADY IN USE\n");
return;
}
outb_p (0x5A,0x108); /* enable umc */
if (in_umc (0xd5) != 0xa0)
{
- restore_flags(flags);
+ __restore_flags(flags); /* local CPU only */
printk ("umc8672: not found\n");
return;
}
outb_p (0xa5,0x108); /* disable umc */
umc_set_speeds (current_speeds);
- restore_flags(flags);
+ __restore_flags(flags); /* local CPU only */
request_region(0x108, 2, "umc8672");
ide_hwifs[0].chipset = ide_umc8672;
return copy_to_user(geometry, &g, sizeof g) ? -EFAULT : 0;
}
case BLKRASET:
- if(!suser()) return -EACCES;
+ if(!capable(CAP_SYS_ADMIN)) return -EACCES;
if(arg > 0xff) return -EINVAL;
read_ahead[MAJOR(inode->i_rdev)] = arg;
return 0;
if (!arg) return -EINVAL;
return put_user(xd_struct[MINOR(inode->i_rdev)].nr_sects,(long *) arg);
case BLKFLSBUF: /* Return devices size */
- if(!suser()) return -EACCES;
+ if(!capable(CAP_SYS_ADMIN)) return -EACCES;
fsync_dev(inode->i_rdev);
invalidate_buffers(inode->i_rdev);
return 0;
case HDIO_SET_DMA:
- if (!suser()) return -EACCES;
+ if (!capable(CAP_SYS_ADMIN)) return -EACCES;
if (xdc_busy) return -EBUSY;
nodma = !arg;
if (nodma && xd_dma_buffer) {
case HDIO_GET_MULTCOUNT:
return put_user(xd_maxsectors, (long *) arg);
case BLKRRPART:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
return xd_reread_partitions(inode->i_rdev);
RO_IOCTLS(inode->i_rdev,arg);
default:
switch (cmd) /* Sun-compatible */
{
case DDIOCSDBG: /* DDI Debug */
- if (!suser()) RETURN_UP(-EPERM);
+ if (!capable(CAP_SYS_ADMIN)) RETURN_UP(-EPERM);
i=sbpcd_dbg_ioctl(arg,1);
RETURN_UP(i);
case CDROMRESET: /* hard reset the drive */
} /* end of CDROMREADAUDIO */
case BLKRASET:
- if(!suser()) RETURN_UP(-EACCES);
+ if(!capable(CAP_SYS_ADMIN)) RETURN_UP(-EACCES);
if(!(cdi->dev)) RETURN_UP(-EINVAL);
if(arg > 0xff) RETURN_UP(-EINVAL);
read_ahead[MAJOR(cdi->dev)] = arg;
* we might close the device immediately without doing a
* privileged operation -- cevans
*/
- as->suser = suser();
+ as->suser = capable(CAP_SYS_ADMIN);
as->next = user_list;
user_list = as;
filp->private_data = as;
case VIDIOCSFBUF:
{
struct video_buffer v;
- if(!suser())
+ if(!capable(CAP_SYS_ADMIN))
return -EPERM;
if(copy_from_user(&v, arg,sizeof(v)))
return -EFAULT;
return 0;
case BTTV_WRITEE:
- if(!suser())
+ if(!capable(CAP_SYS_ADMIN))
return -EPERM;
if(copy_from_user((void *) eedata, (void *) arg, 256))
return -EFAULT;
return 0;
case BTTV_READEE:
- if(!suser())
+ if(!capable(CAP_SYS_ADMIN))
return -EPERM;
readee(&(btv->i2c), eedata);
if(copy_to_user((void *) arg, (void *) eedata, 256))
long p, q;
/* prevent users from taking too much memory */
- if (i >= MAX_NR_USER_CONSOLES && !suser())
+ if (i >= MAX_NR_USER_CONSOLES && !capable(CAP_SYS_RESOURCE))
return -EPERM;
/* due to the granularity of kmalloc, we waste some memory here */
"esp serial", info);
if (retval) {
- if (suser()) {
+ if (capable(CAP_SYS_ADMIN)) {
if (info->tty)
set_bit(TTY_IO_ERROR,
&info->tty->flags);
if (change_irq && (info->line % 8))
return -EINVAL;
- if (!suser()) {
+ if (!capable(CAP_SYS_ADMIN)) {
if (change_irq ||
(new_serial.close_delay != info->close_delay) ||
((new_serial.flags & ~ASYNC_USR_MASK) !=
TRACE_FUN(ft_t_flow);
TRACE(ft_t_noise, "Mag tape ioctl command: MTIOCFTCMD");
- if (!suser()) {
+ if (!capable(CAP_SYS_ADMIN)) {
TRACE_ABORT(-EPERM, ft_t_info,
- "only the superuser may send raw qic-117 commands");
+ "need CAP_SYS_ADMIN capability to send raw qic-117 commands");
}
if (zft_qic_mode) {
TRACE_ABORT(-EACCES, ft_t_info,
#endif
copy_from_user(&sio, sp, sizeof(struct serial_struct));
- if (!suser()) {
+ if (!capable(CAP_SYS_ADMIN)) {
if ((sio.baud_base != portp->baud_base) ||
(sio.close_delay != portp->close_delay) ||
((sio.flags & ~ASYNC_USR_MASK) !=
* lp_read (Status readback) support added by Carsten Gross,
* carsten@sol.wohnheim.uni-ulm.de
* Support for parport by Philip Blundell <Philip.Blundell@pobox.com>
- * parport_sharing hacking by Andrea Arcangeli <arcangeli@mbox.queen.it>
+ * Parport sharing hacking by Andrea Arcangeli <arcangeli@mbox.queen.it>
* Fixed kernel_(to/from)_user memory copy to check for errors
* by Riccardo Facchetti <fizban@tin.it>
*/
#include <linux/delay.h>
#include <linux/parport.h>
+#undef LP_STATS
+#undef LP_NEED_CAREFUL
#include <linux/lp.h>
#include <asm/irq.h>
struct lp_struct lp_table[LP_NO] =
{
[0 ... LP_NO-1] = {NULL, 0, LP_INIT_CHAR, LP_INIT_TIME, LP_INIT_WAIT,
- NULL, 0, 0, 0, {0}}
+ NULL,
+#ifdef LP_STATS
+ 0, 0, {0},
+#endif
+ NULL, 0}
};
/* Test if printer is ready (and optionally has no error conditions) */
+#ifdef LP_NEED_CAREFUL
#define LP_READY(minor, status) \
- ((LP_F(minor) & LP_CAREFUL) ? _LP_CAREFUL_READY(status) : (status & LP_PBUSY))
-#define LP_CAREFUL_READY(minor, status) \
- ((LP_F(minor) & LP_CAREFUL) ? _LP_CAREFUL_READY(status) : 1)
+ ((LP_F(minor) & LP_CAREFUL) ? _LP_CAREFUL_READY(status) : ((status) & LP_PBUSY))
#define _LP_CAREFUL_READY(status) \
- (status & (LP_PBUSY|LP_POUTPA|LP_PSELECD|LP_PERRORP)) == \
+ ((status) & (LP_PBUSY|LP_POUTPA|LP_PSELECD|LP_PERRORP)) == \
(LP_PBUSY|LP_PSELECD|LP_PERRORP)
+#else
+#define LP_READY(minor, status) ((status) & LP_PBUSY)
+#endif
#undef LP_DEBUG
#undef LP_READ_DEBUG
{
struct lp_struct *lps = (struct lp_struct *)handle;
- if (waitqueue_active (&lps->dev->wait_q))
- wake_up_interruptible(&lps->dev->wait_q);
+ if (waitqueue_active (&lps->wait_q))
+ wake_up_interruptible(&lps->wait_q);
/* Don't actually release the port now */
return 1;
static inline int lp_char(char lpchar, int minor)
{
- int status;
+ unsigned char status;
unsigned int wait = 0;
unsigned long count = 0;
+#ifdef LP_STATS
struct lp_stats *stats;
+#endif
- for (;;) {
+ for (;;)
+ {
lp_yield(minor);
status = r_str (minor);
- if (++count == LP_CHAR(minor))
+ if (LP_READY(minor, status))
+ break;
+ if (!LP_POLLED(minor) || ++count == LP_CHAR(minor) ||
+ signal_pending(current))
return 0;
- if (LP_POLLING(minor))
- {
- if (LP_READY(minor, status))
- break;
- } else {
- if (!LP_READY(minor, status))
- return 0;
- else
- break;
- }
}
w_dtr(minor, lpchar);
+#ifdef LP_STATS
stats = &LP_STAT(minor);
stats->chars++;
+#endif
/* must wait before taking strobe high, and after taking strobe
low, according spec. Some printers need it, others don't. */
#ifndef __sparc__
#endif
/* take strobe low */
w_ctr(minor, LP_PSELECP | LP_PINITP);
+
+#ifdef LP_STATS
/* update waittime statistics */
if (count > stats->maxwait) {
#ifdef LP_DEBUG
stats->meanwait - count;
stats->meanwait = (255 * stats->meanwait + count + 128) / 256;
stats->mdev = ((127 * stats->mdev) + wait + 64) / 128;
+#endif
return 1;
}
{
struct lp_struct *lp_dev = (struct lp_struct *) dev_id;
- if (waitqueue_active (&lp_dev->dev->wait_q))
- wake_up_interruptible(&lp_dev->dev->wait_q);
+ if (waitqueue_active (&lp_dev->wait_q))
+ wake_up_interruptible(&lp_dev->wait_q);
}
static void lp_error(int minor)
{
- if (LP_POLLING(minor) || LP_PREEMPTED(minor)) {
+ if (LP_POLLED(minor) || LP_PREEMPTED(minor)) {
current->state = TASK_INTERRUPTIBLE;
current->timeout = jiffies + LP_TIMEOUT_POLLED;
lp_parport_release(minor);
}
static int lp_check_status(int minor) {
- static unsigned char last = 0;
+ unsigned int last = lp_table[minor].last_error;
unsigned char status = r_str(minor);
if ((status & LP_POUTPA)) {
if (last != LP_POUTPA) {
}
else last = 0;
+ lp_table[minor].last_error = last;
+
if (last != 0) {
if (LP_F(minor) & LP_ABORT)
return 1;
return 0;
}
-static inline int lp_write_buf(unsigned int minor, const char *buf, int count)
+static int lp_write_buf(unsigned int minor, const char *buf, int count)
{
unsigned long copy_size;
unsigned long total_bytes_written = 0;
if (minor >= LP_NO)
return -ENXIO;
- if (lp_table[minor].dev == NULL)
+ if (lp->dev == NULL)
return -ENXIO;
+ lp_table[minor].last_error = 0;
+
do {
bytes_written = 0;
copy_size = (count <= LP_BUFFER_SIZE ? count : LP_BUFFER_SIZE);
if (lp_char(lp->lp_buffer[bytes_written], minor)) {
--copy_size;
++bytes_written;
- lp_table[minor].runchars++;
+#ifdef LP_STATS
+ lp->runchars++;
+#endif
} else {
int rc = total_bytes_written + bytes_written;
- if (lp_table[minor].runchars > LP_STAT(minor).maxrun)
- LP_STAT(minor).maxrun = lp_table[minor].runchars;
+
+#ifdef LP_STATS
+ if (lp->runchars > LP_STAT(minor).maxrun)
+ LP_STAT(minor).maxrun = lp->runchars;
LP_STAT(minor).sleeps++;
+#endif
- if (LP_POLLING(minor)) {
- lp_polling:
+ if (signal_pending(current)) {
+ if (total_bytes_written + bytes_written)
+ return total_bytes_written + bytes_written;
+ else
+ return -EINTR;
+ }
+
+#ifdef LP_STATS
+ lp->runchars = 0;
+#endif
+
+ if (LP_POLLED(minor)) {
if (lp_check_status(minor))
return rc ? rc : -EIO;
-#ifdef LP_DEBUG
- printk(KERN_DEBUG "lp%d sleeping at %d characters for %d jiffies\n", minor, lp_table[minor].runchars, LP_TIME(minor));
+ lp_polling:
+#if defined(LP_DEBUG) && defined(LP_STATS)
+ printk(KERN_DEBUG "lp%d sleeping at %d characters for %d jiffies\n", minor, lp->runchars, LP_TIME(minor));
#endif
current->state = TASK_INTERRUPTIBLE;
current->timeout = jiffies + LP_TIME(minor);
lp_schedule (minor);
} else {
cli();
- if (LP_PREEMPTED(minor)) {
+ if (LP_PREEMPTED(minor))
+ {
+ /*
+ * We can't sleep on the interrupt
+ * since another pardevice needs the port.
+ */
sti();
goto lp_polling;
}
- enable_irq(lp->dev->port->irq);
- w_ctr(minor, LP_PSELECP|LP_PINITP|LP_PINTEN);
+ w_ctr(minor, LP_PSELECP | LP_PINITP | LP_PINTEN);
status = r_str(minor);
- if ((!(status & LP_PACK) || (status & LP_PBUSY))
- && LP_CAREFUL_READY(minor, status)) {
+ if (!(status & LP_PACK) || (status & LP_PBUSY))
+ {
+ /*
+ * The interrupt happened in the
+ * meantime, so don't wait for it.
+ */
w_ctr(minor, LP_PSELECP | LP_PINITP);
sti();
continue;
}
current->timeout = jiffies + LP_TIMEOUT_INTERRUPT;
- interruptible_sleep_on(&lp->dev->wait_q);
- disable_irq(lp->dev->port->irq);
+ interruptible_sleep_on(&lp->wait_q);
w_ctr(minor, LP_PSELECP | LP_PINITP);
sti();
if (lp_check_status(minor))
return rc ? rc : -EIO;
}
-
- lp_table[minor].runchars = 0;
-
- if (signal_pending(current)) {
- if (total_bytes_written + bytes_written)
- return total_bytes_written + bytes_written;
- else
- return -EINTR;
- }
}
}
unsigned int minor = MINOR(file->f_dentry->d_inode->i_rdev);
ssize_t retv;
+#ifdef LP_STATS
if (jiffies-lp_table[minor].lastcall > LP_TIME(minor))
lp_table[minor].runchars = 0;
lp_table[minor].lastcall = jiffies;
+#endif
/* Claim Parport or sleep until it becomes available
*/
return -ENXIO;
if ((LP_F(minor) & LP_EXIST) == 0)
return -ENXIO;
- if (LP_F(minor) & LP_BUSY)
+ if (test_and_set_bit(LP_BUSY_BIT_POS, &LP_F(minor)) & LP_BUSY)
return -EBUSY;
- LP_F(minor) |= LP_BUSY;
MOD_INC_USE_COUNT;
kfree_s(lp_table[minor].lp_buffer, LP_BUFFER_SIZE);
lp_table[minor].lp_buffer = NULL;
- LP_F(minor) &= ~LP_BUSY;
MOD_DEC_USE_COUNT;
+ LP_F(minor) &= ~LP_BUSY;
return 0;
}
else
LP_F(minor) &= ~LP_ABORTOPEN;
break;
+#ifdef LP_NEED_CAREFUL
case LPCAREFUL:
if (arg)
LP_F(minor) |= LP_CAREFUL;
else
LP_F(minor) &= ~LP_CAREFUL;
break;
+#endif
case LPWAIT:
LP_WAIT(minor) = arg;
break;
case LPRESET:
lp_reset(minor);
break;
+#ifdef LP_STATS
case LPGETSTATS:
if (copy_to_user((int *) arg, &LP_STAT(minor),
sizeof(struct lp_stats)))
memset(&LP_STAT(minor), 0,
sizeof(struct lp_stats));
break;
+#endif
case LPGETFLAGS:
status = LP_F(minor);
if (copy_to_user((int *) arg, &status, sizeof(int)))
switch( cmd ) {
case NVRAM_INIT: /* initialize NVRAM contents and checksum */
- if (!suser())
+ if (!capable(CAP_SYS_ADMIN))
return( -EACCES );
save_flags(flags);
case NVRAM_SETCKS: /* just set checksum, contents unchanged
* (maybe useful after checksum garbaged
* somehow...) */
- if (!suser())
+ if (!capable(CAP_SYS_ADMIN))
return( -EACCES );
save_flags(flags);
put_user(ent_count, (int *) arg);
return 0;
case RNDADDTOENTCNT:
- if (!suser())
+ if (!capable(CAP_SYS_ADMIN))
return -EPERM;
retval = verify_area(VERIFY_READ, (void *) arg, sizeof(int));
if (retval)
wake_up_interruptible(&random_read_wait);
return 0;
case RNDGETPOOL:
- if (!suser())
+ if (!capable(CAP_SYS_ADMIN))
return -EPERM;
p = (int *) arg;
retval = verify_area(VERIFY_WRITE, (void *) p, sizeof(int));
return -EFAULT;
return 0;
case RNDADDENTROPY:
- if (!suser())
+ if (!capable(CAP_SYS_ADMIN))
return -EPERM;
p = (int *) arg;
retval = verify_area(VERIFY_READ, (void *) p, 2*sizeof(int));
wake_up_interruptible(&random_read_wait);
return 0;
case RNDZAPENTCNT:
- if (!suser())
+ if (!capable(CAP_SYS_ADMIN))
return -EPERM;
random_state.entropy_count = 0;
return 0;
case RNDCLEARPOOL:
/* Clear the entropy pool and associated counters. */
- if (!suser())
+ if (!capable(CAP_SYS_ADMIN))
return -EPERM;
rand_clear_pool();
return 0;
change_speed = ((port->flags & ASYNC_SPD_MASK) !=
(tmp.flags & ASYNC_SPD_MASK));
- if (!suser()) {
+ if (!capable(CAP_SYS_ADMIN)) {
if ((tmp.close_delay != port->close_delay) ||
(tmp.closing_wait != port->closing_wait) ||
((tmp.flags & ~ASYNC_USR_MASK) !=
if (copy_from_user(&new_serial, new_info, sizeof(new_serial)))
return -EFAULT;
- if (!suser()) {
+ if (!capable(CAP_SYS_ADMIN)) {
if ((new_serial.flags & ~ROCKET_USR_MASK) !=
(info->flags & ~ROCKET_USR_MASK))
return -EPERM;
* We don't really want Joe User enabling more
* than 64Hz of interrupts on a multi-user machine.
*/
- if ((rtc_freq > 64) && (!suser()))
+ if ((rtc_freq > 64) && (!capable(CAP_SYS_RESOURCE)))
return -EACCES;
if (!(rtc_status & RTC_TIMER_ON)) {
unsigned int yrs;
unsigned long flags;
- if (!suser())
+ if (!capable(CAP_SYS_TIME))
return -EACCES;
if (copy_from_user(&rtc_tm, (struct rtc_time*)arg,
* We don't really want Joe User generating more
* than 64Hz of interrupts on a multi-user machine.
*/
- if ((arg > 64) && (!suser()))
+ if ((arg > 64) && (!capable(CAP_SYS_RESOURCE)))
return -EACCES;
while (arg > (1<<tmp))
if (arg < 1900)
return -EINVAL;
- if (!suser())
+ if (!capable(CAP_SYS_TIME))
return -EACCES;
epoch = arg;
* here.
*/
if (serial_inp(info, UART_LSR) == 0xff) {
- if (suser()) {
+ if (capable(CAP_SYS_ADMIN)) {
if (info->tty)
set_bit(TTY_IO_ERROR, &info->tty->flags);
} else
retval = request_irq(state->irq, handler, IRQ_T(info),
"serial", NULL);
if (retval) {
- if (suser()) {
+ if (capable(CAP_SYS_ADMIN)) {
if (info->tty)
set_bit(TTY_IO_ERROR,
&info->tty->flags);
change_port = (new_serial.port != state->port) ||
(new_serial.hub6 != state->hub6);
- if (!suser()) {
+ if (!capable(CAP_SYS_ADMIN)) {
if (change_irq || change_port ||
(new_serial.baud_base != state->baud_base) ||
(new_serial.type != state->type) ||
{
int retval;
- if (!suser())
+ if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (info->state->count > 1)
int retval;
void (*handler)(int, void *, struct pt_regs *);
- if (!suser())
+ if (!capable(CAP_SYS_ADMIN))
return -EPERM;
state = info->state;
change_speed = ((port->flags & ASYNC_SPD_MASK) !=
(tmp.flags & ASYNC_SPD_MASK));
- if (!suser()) {
+ if (!capable(CAP_SYS_ADMIN)) {
if ((tmp.close_delay != port->close_delay) ||
(tmp.closing_wait != port->closing_wait) ||
((tmp.flags & ~ASYNC_USR_MASK) !=
#endif
copy_from_user(&sio, sp, sizeof(struct serial_struct));
- if (!suser()) {
+ if (!capable(CAP_SYS_ADMIN)) {
if ((sio.baud_base != portp->baud_base) ||
(sio.close_delay != portp->close_delay) ||
((sio.flags & ~ASYNC_USR_MASK) !=
if (!(key_map = key_maps[s])) {
int j;
- if (keymap_count >= MAX_NR_OF_USER_KEYMAPS && !suser())
+ if (keymap_count >= MAX_NR_OF_USER_KEYMAPS &&
+ !capable(CAP_SYS_RESOURCE))
return -EPERM;
key_map = (ushort *) kmalloc(sizeof(plain_map),
/*
* Attention Key.
*/
- if (((ov == K_SAK) || (v == K_SAK)) && !suser())
+ if (((ov == K_SAK) || (v == K_SAK)) && !capable(CAP_SYS_ADMIN))
return -EPERM;
key_map[i] = U(v);
if (!s && (KTYP(ov) == KT_SHIFT || KTYP(v) == KT_SHIFT))
struct capi_manufacturer_cmd mcmd;
if (minor)
return -EINVAL;
- if (!suser())
+ if (!capable(CAP_SYS_ADMIN))
return -EPERM;
retval = copy_from_user((void *) &mcmd, (void *) arg,
sizeof(mcmd));
copy_from_user(&new_serial,new_info,sizeof(new_serial));
old_info = *info;
- if (!suser()) {
+ if (!capable(CAP_SYS_ADMIN)) {
if ((new_serial.baud_base != info->baud_base) ||
(new_serial.type != info->type) ||
(new_serial.close_delay != info->close_delay) ||
2. A better lp.c:
- a) It's a _mess_
-
- b) ECP support would be nice. This can only work if both the port and
+ a) ECP support would be nice. This can only work if both the port and
the printer support it.
- c) Errors could do with being handled better. There's no point logging a
- message every 10 seconds when the printer is out of paper.
-
- d) Handle status readback automatically. IEEE1284 printers can post status
+ b) Handle status readback automatically. IEEE1284 printers can post status
bits when they have something to say. We should read out and deal
with (maybe just log) whatever the printer wants to tell the world.
3. Support more hardware (eg m68k, Sun bpp).
4. A better PLIP (make use of bidirectional/ECP/EPP ports).
-
-
/* 3c509.c: A 3c509 EtherLink3 ethernet driver for linux. */
/*
- Written 1993-1995 by Donald Becker.
+ Written 1993-1997 by Donald Becker.
- Copyright 1994,1995 by Donald Becker.
+ Copyright 1994-1997 by Donald Becker.
Copyright 1993 United States Government as represented by the
Director, National Security Agency. This software may be used and
distributed according to the terms of the GNU Public License,
FIXES:
Alan Cox: Removed the 'Unexpected interrupt' bug.
Michael Meskes: Upgraded to Donald Becker's version 1.07.
- Phil Blundell: Media selection support.
+ Alan Cox: Increased the eeprom delay. Regardless of
+ what the docs say some people definitely
+ get problems with lower (but in card spec)
+ delays
+ v1.10 4/21/97 Fixed module code so that multiple cards may be detected,
+ other cleanups. -djb
+ Andrea Arcangeli: Upgraded to Donald Becker's version 1.12.
*/
-static char *version = "3c509.c:1.07 6/15/95 becker@cesdis.gsfc.nasa.gov\n";
+static char *version = "3c509.c:1.12 6/4/97 becker@cesdis.gsfc.nasa.gov\n";
+/* A few values that may be tweaked. */
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (400*HZ/1000)
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+#define INTR_WORK 10
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h> /* for udelay() */
-#include <linux/init.h>
#include <asm/bitops.h>
#include <asm/io.h>
#ifdef EL3_DEBUG
-static int el3_debug = EL3_DEBUG;
+int el3_debug = EL3_DEBUG;
#else
-static int el3_debug = 2;
+int el3_debug = 2;
#endif
/* To minimize the size of the driver source I only define operating
constants if they are used several times. You'll need the manual
- if you want to understand driver details. */
+ anyway if you want to understand driver details. */
/* Offsets from base I/O address. */
#define EL3_DATA 0x00
#define EL3_CMD 0x0e
#define SKB_QUEUE_SIZE 64
struct el3_private {
- struct net_device_stats stats;
+ struct enet_statistics stats;
+ struct device *next_dev;
/* skb send-queue */
int head, size;
struct sk_buff *queue[SKB_QUEUE_SIZE];
};
static int id_port = 0x100;
+static struct device *el3_root_dev = NULL;
static ushort id_read_eeprom(int index);
static ushort read_eeprom(short ioaddr, int index);
static int el3_start_xmit(struct sk_buff *skb, struct device *dev);
static void el3_interrupt(int irq, void *dev_id, struct pt_regs *regs);
static void update_stats(int addr, struct device *dev);
-static struct net_device_stats *el3_get_stats(struct device *dev);
+static struct enet_statistics *el3_get_stats(struct device *dev);
static int el3_rx(struct device *dev);
static int el3_close(struct device *dev);
-static int el3_set_config(struct device *dev, struct ifmap *map);
-#ifdef HAVE_MULTICAST
static void set_multicast_list(struct device *dev);
-#endif
\f
-__initfunc(int el3_probe(struct device *dev))
+int el3_probe(struct device *dev)
{
short lrs_state = 0xff, i;
- ushort ioaddr, irq, port;
- short *phys_addr = (short *)dev->dev_addr;
+ ushort ioaddr, irq, if_port;
+ short phys_addr[3];
static int current_tag = 0;
- static int el3_portmap[] = {
- IF_PORT_10BASET,
- IF_PORT_AUI,
- IF_PORT_UNKNOWN,
- IF_PORT_10BASE2
- };
/* First check all slots of the EISA bus. The next slot address to
probe is kept in 'eisa_addr' to support multiple probe() calls. */
outw(SelectWindow | 0, ioaddr + 0xC80 + EL3_CMD);
irq = inw(ioaddr + WN0_IRQ) >> 12;
- port = inw(ioaddr + 6)>>14;
+ if_port = inw(ioaddr + 6)>>14;
for (i = 0; i < 3; i++)
phys_addr[i] = htons(read_eeprom(ioaddr, i));
}
}
-/*
- * This has to be coded according to Documentation/mca.txt before
- * this driver can be used with the 3c529 MCA cards.
- */
-#if 0 /* #ifdef CONFIG_MCA */
+#ifdef CONFIG_MCA
if (MCA_bus) {
mca_adaptor_select_mode(1);
for (i = 0; i < 8; i++)
if ((mca_adaptor_id(i) | 1) == 0x627c) {
ioaddr = mca_pos_base_addr(i);
irq = inw(ioaddr + WN0_IRQ) >> 12;
- port = inw(ioaddr + 6)>>14;
+ if_port = inw(ioaddr + 6)>>14;
for (i = 0; i < 3; i++)
phys_addr[i] = htons(read_eeprom(ioaddr, i));
outb(0x02, 0xA79); /* Return to WaitForKey state. */
/* Select an open I/O location at 0x1*0 to do contention select. */
for (id_port = 0x100; id_port < 0x200; id_port += 0x10) {
+ if (check_region(id_port, 1))
+ continue;
outb(0x00, id_port);
outb(0xff, id_port);
if (inb(id_port) & 0x01)
on cards as they are found. Cards with their tag set will not
respond to subsequent ID sequences. */
- if (check_region(id_port,1)) {
- static int once = 1;
- if (once) printk("3c509: Somebody has reserved 0x%x, can't do ID_PORT lookup, nor card auto-probing\n",id_port);
- once = 0;
- return -ENODEV;
- }
-
outb(0x00, id_port);
outb(0x00, id_port);
for(i = 0; i < 255; i++) {
{
unsigned short iobase = id_read_eeprom(8);
- port = iobase >> 14;
+ if_port = iobase >> 14;
ioaddr = 0x200 + ((iobase & 0x1f) << 4);
}
- if (dev->irq > 1 && dev->irq < 16)
+ if (dev && dev->irq > 1 && dev->irq < 16)
irq = dev->irq;
else
irq = id_read_eeprom(9) >> 12;
- if (dev->base_addr != 0
- && dev->base_addr != (unsigned short)ioaddr) {
+ if (dev && dev->base_addr != 0
+ && dev->base_addr != (unsigned short)ioaddr) {
return -ENODEV;
}
/* Free the interrupt so that some other card can use it. */
outw(0x0f00, ioaddr + WN0_IRQ);
found:
+ if (dev == NULL) {
+ dev = init_etherdev(dev, sizeof(struct el3_private));
+ }
+ memcpy(dev->dev_addr, phys_addr, sizeof(phys_addr));
dev->base_addr = ioaddr;
dev->irq = irq;
- dev->if_port = el3_portmap[port];
+ dev->if_port = (dev->mem_start & 0x1f) ? dev->mem_start & 3 : if_port;
+
request_region(dev->base_addr, EL3_IO_EXTENT, "3c509");
{
- static const char *if_names[] = {"10baseT", "AUI", "undefined", "BNC"};
+ const char *if_names[] = {"10baseT", "AUI", "undefined", "BNC"};
printk("%s: 3c509 at %#3.3lx tag %d, %s port, address ",
- dev->name, dev->base_addr, current_tag, if_names[port]);
+ dev->name, dev->base_addr, current_tag, if_names[dev->if_port]);
}
/* Read in the station address. */
printk(", IRQ %d.\n", dev->irq);
/* Make up a EL3-specific-data structure. */
- dev->priv = kmalloc(sizeof(struct el3_private), GFP_KERNEL);
+ if (dev->priv == NULL)
+ dev->priv = kmalloc(sizeof(struct el3_private), GFP_KERNEL);
if (dev->priv == NULL)
return -ENOMEM;
memset(dev->priv, 0, sizeof(struct el3_private));
+ ((struct el3_private *)dev->priv)->next_dev = el3_root_dev;
+ el3_root_dev = dev;
+
if (el3_debug > 0)
printk(version);
dev->hard_start_xmit = &el3_start_xmit;
dev->stop = &el3_close;
dev->get_stats = &el3_get_stats;
- dev->set_config = &el3_set_config;
-#ifdef HAVE_MULTICAST
- dev->set_multicast_list = &set_multicast_list;
-#endif
+ dev->set_multicast_list = &set_multicast_list;
/* Fill in the generic fields of the device structure. */
ether_setup(dev);
- dev->flags |= IFF_PORTSEL;
return 0;
}
/* Read a word from the EEPROM using the regular EEPROM access register.
Assume that we are in register window zero.
*/
-__initfunc(static ushort read_eeprom(short ioaddr, int index))
+static ushort read_eeprom(short ioaddr, int index)
{
outw(EEPROM_READ + index, ioaddr + 10);
/* Pause for at least 162 us. for the read to take place. */
- udelay (300);
+ udelay (500);
return inw(ioaddr + 12);
}
/* Read a word from the EEPROM when in the ISA ID probe state. */
-__initfunc(static ushort id_read_eeprom(int index))
+static ushort id_read_eeprom(int index)
{
int bit, word = 0;
outb(EEPROM_READ + index, id_port);
/* Pause for at least 162 us. for the read to take place. */
- udelay (300);
-
+ udelay (500);
+
for (bit = 15; bit >= 0; bit--)
word = (word << 1) + (inb(id_port) & 0x01);
}
-\f
-static int
-el3_set_config(struct device *dev, struct ifmap *map)
-{
- int ioaddr = dev->base_addr;
- if (map->port != dev->if_port) {
- switch (map->port) {
- case IF_PORT_10BASE2:
- case IF_PORT_10BASET:
- case IF_PORT_AUI:
- if (dev->start) {
- if (dev->if_port == IF_PORT_10BASE2)
- /* Turn off thinnet power. */
- outw(StopCoax, ioaddr + EL3_CMD);
- else if (dev->if_port == IF_PORT_10BASET) {
- /* Disable link beat and jabber */
- EL3WINDOW(4);
- outw(inw(ioaddr + WN4_MEDIA) & ~MEDIA_TP, ioaddr + WN4_MEDIA);
- EL3WINDOW(1);
- }
- }
- printk(KERN_INFO "%s: %s port selected.\n", dev->name,
- if_port_text[map->port]);
- dev->if_port = map->port;
- if (dev->start) {
- if (dev->if_port == IF_PORT_10BASE2)
- /* Start the thinnet transceiver. We should really wait 50ms...*/
- outw(StartCoax, ioaddr + EL3_CMD);
- else if (dev->if_port == IF_PORT_10BASET) {
- /* 10baseT interface, enabled link beat and jabber check. */
- EL3WINDOW(4);
- outw(inw(ioaddr + WN4_MEDIA) | MEDIA_TP, ioaddr + WN4_MEDIA);
- EL3WINDOW(1);
- }
- }
- break;
- default:
- printk(KERN_ERR "%s: %s port not supported.\n", dev->name,
- if_port_text[map->port]);
- return -EINVAL;
- }
- }
- if (map->irq != dev->irq) {
- printk(KERN_ERR "%s: cannot change interrupt.\n", dev->name);
- return -EINVAL;
- }
- if (map->base_addr != dev->base_addr) {
- printk(KERN_ERR "%s: cannot change base address.\n", dev->name);
- return -EINVAL;
- }
- return 0;
-}
-
\f
static int
el3_open(struct device *dev)
for (i = 0; i < 6; i++)
outb(dev->dev_addr[i], ioaddr + i);
- if (dev->if_port == IF_PORT_10BASE2)
+ if (dev->if_port == 3)
/* Start the thinnet transceiver. We should really wait 50ms...*/
outw(StartCoax, ioaddr + EL3_CMD);
- else if (dev->if_port == IF_PORT_10BASET) {
+ else if (dev->if_port == 0) {
/* 10baseT interface, enabled link beat and jabber check. */
EL3WINDOW(4);
outw(inw(ioaddr + WN4_MEDIA) | MEDIA_TP, ioaddr + WN4_MEDIA);
return 0; /* Always succeed */
}
-static int el3_start_xmit(struct sk_buff *skb, struct device *dev)
+static int
+el3_start_xmit(struct sk_buff *skb, struct device *dev)
{
struct el3_private *lp = (struct el3_private *)dev->priv;
int ioaddr = dev->base_addr;
/* Transmitter timeout, serious problems. */
if (dev->tbusy) {
int tickssofar = jiffies - dev->trans_start;
- if (tickssofar < 40*HZ/100)
+ if (tickssofar < TX_TIMEOUT)
return 1;
printk("%s: transmit timed out, Tx_status %2.2x status %4.4x "
"Tx FIFO room %d.\n",
if (test_and_set_bit(0, (void*)&dev->tbusy) != 0)
printk("%s: Transmitter access conflict.\n", dev->name);
else {
- lp->stats.tx_bytes+=skb->len;
/* Put out the doubleword header... */
outw(skb->len, ioaddr + TX_FIFO);
outw(0x00, ioaddr + TX_FIFO);
/* ... and the packet rounded to a doubleword. */
+#ifdef __powerpc__
+ outsl_unswapped(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
+#else
outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
+#endif
dev->trans_start = jiffies;
if (inw(ioaddr + TX_FREE) > 1536) {
{
struct device *dev = (struct device *)dev_id;
int ioaddr, status;
- int i = 0;
+ int i = INTR_WORK;
if (dev == NULL) {
printk ("el3_interrupt(): irq %d for unknown device.\n", irq);
}
}
- if (++i > 10) {
+ if (--i < 0) {
printk("%s: Infinite loop in interrupt, status %4.4x.\n",
dev->name, status);
/* Clear all interrupts. */
}
-static struct net_device_stats *el3_get_stats(struct device *dev)
+static struct enet_statistics *
+el3_get_stats(struct device *dev)
{
struct el3_private *lp = (struct el3_private *)dev->priv;
unsigned long flags;
pkt_len, rx_status);
if (skb != NULL) {
skb->dev = dev;
- skb_reserve(skb,2); /* Align IP on 16 byte */
+ skb_reserve(skb, 2); /* Align IP on 16 byte */
/* 'skb->data' points to the start of sk_buff data area. */
- insl(ioaddr+RX_FIFO, skb_put(skb,pkt_len),
- (pkt_len + 3) >> 2);
+#ifdef __powerpc__
+ insl_unswapped(ioaddr+RX_FIFO, skb_put(skb,pkt_len),
+ (pkt_len + 3) >> 2);
+#else
+ insl(ioaddr + RX_FIFO, skb_put(skb,pkt_len),
+ (pkt_len + 3) >> 2);
+#endif
- skb->protocol=eth_type_trans(skb,dev);
+ skb->protocol = eth_type_trans(skb,dev);
netif_rx(skb);
outw(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */
- lp->stats.rx_bytes+=skb->len;
lp->stats.rx_packets++;
- lp->stats.rx_bytes+=pkt_len;
continue;
} else if (el3_debug)
printk("%s: Couldn't allocate a sk_buff of size %d.\n",
return 0;
}
-#ifdef HAVE_MULTICAST
/*
* Set or clear the multicast filter for this adaptor.
*/
else
outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD);
}
-#endif
static int
el3_close(struct device *dev)
outw(RxDisable, ioaddr + EL3_CMD);
outw(TxDisable, ioaddr + EL3_CMD);
- if (dev->if_port == IF_PORT_10BASE2)
+ if (dev->if_port == 3)
/* Turn off thinnet power. Green! */
outw(StopCoax, ioaddr + EL3_CMD);
- else if (dev->if_port == IF_PORT_10BASET) {
+ else if (dev->if_port == 0) {
/* Disable link beat and jabber, if_port may change ere next open(). */
EL3WINDOW(4);
outw(inw(ioaddr + WN4_MEDIA) & ~MEDIA_TP, ioaddr + WN4_MEDIA);
}
#ifdef MODULE
-#define MAX_3C_CARDS 4 /* Max number of NE cards per module */
-#define NAMELEN 8 /* # of chars for storing dev->name */
-static char namelist[NAMELEN * MAX_3C_CARDS] = { 0, };
-static struct device dev_3c509[MAX_3C_CARDS] = {
- {
- NULL, /* assign a chunk of namelist[] below */
- 0, 0, 0, 0,
- 0, 0,
- 0, 0, 0, NULL, NULL
- },
-};
-
-static int io[MAX_3C_CARDS] = { 0, };
-static int irq[MAX_3C_CARDS] = { 0, };
-MODULE_PARM(io, "1-" __MODULE_STRING(MAX_3C_CARDS) "i");
-MODULE_PARM(irq, "1-" __MODULE_STRING(MAX_3C_CARDS) "i");
+/* Parameter that may be passed into the module. */
+static int debug = -1;
+static int irq[] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int xcvr[] = {-1, -1, -1, -1, -1, -1, -1, -1};
int
init_module(void)
{
- int this_dev, found = 0;
-
- for (this_dev = 0; this_dev < MAX_3C_CARDS; this_dev++) {
- struct device *dev = &dev_3c509[this_dev];
- dev->name = namelist+(NAMELEN*this_dev);
- dev->irq = irq[this_dev];
- dev->base_addr = io[this_dev];
- dev->init = el3_probe;
- if (io[this_dev] == 0) {
- if (this_dev != 0) break; /* only complain once */
- printk("3c509: WARNING! Module load-time probing works reliably only for EISA bus!!\n");
- }
- if (register_netdev(dev) != 0) {
- printk(KERN_WARNING "3c509.c: No 3c509 card found (i/o = 0x%x).\n", io[this_dev]);
- if (found != 0) return 0; /* Got at least one. */
- return -ENXIO;
- }
- found++;
+ int el3_cards = 0;
+
+ if (debug >= 0)
+ el3_debug = debug;
+
+ el3_root_dev = NULL;
+ while (el3_probe(0) == 0) {
+ if (irq[el3_cards] > 1)
+ el3_root_dev->irq = irq[el3_cards];
+ if (xcvr[el3_cards] >= 0)
+ el3_root_dev->if_port = xcvr[el3_cards];
+ el3_cards++;
}
- return 0;
+
+ return el3_cards ? 0 : -ENODEV;
}
void
cleanup_module(void)
{
- int this_dev;
-
- for (this_dev = 0; this_dev < MAX_3C_CARDS; this_dev++) {
- struct device *dev = &dev_3c509[this_dev];
- if (dev->priv != NULL) {
- unregister_netdev(dev);
- kfree_s(dev->priv,sizeof(struct el3_private));
- dev->priv = NULL;
- free_irq(dev->irq, dev);
- release_region(dev->base_addr, EL3_IO_EXTENT);
- }
+ struct device *next_dev;
+
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ while (el3_root_dev) {
+ next_dev = ((struct el3_private *)el3_root_dev->priv)->next_dev;
+ unregister_netdev(el3_root_dev);
+ release_region(el3_root_dev->base_addr, EL3_IO_EXTENT);
+ kfree(el3_root_dev);
+ el3_root_dev = next_dev;
}
}
#endif /* MODULE */
\f
/*
* Local variables:
- * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c 3c509.c"
+ * compile-command: "gcc -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c 3c509.c"
* version-control: t
* kept-new-versions: 5
* tab-width: 4
data[3] = mdio_read(ioaddr, data[0] & 0x1f, data[1] & 0x1f);
return 0;
case SIOCDEVPRIVATE+2: /* Write the specified MII register */
- if (!suser())
+ if (!capable(CAP_NET_ADMIN))
return -EPERM;
mdio_write(ioaddr, data[0] & 0x1f, data[1] & 0x1f, data[2]);
return 0;
if (status)
break;
status = -EPERM;
- if (!suser())
+ if (!capable(CAP_NET_ADMIN))
break;
status = 0;
copy_from_user(tmp.addr, ioc->data, ETH_ALEN);
break;
case DE4X5_SET_PROM: /* Set Promiscuous Mode */
- if (suser()) {
+ if (capable(CAP_NET_ADMIN)) {
omr = inl(DE4X5_OMR);
omr |= OMR_PR;
outl(omr, DE4X5_OMR);
break;
case DE4X5_CLR_PROM: /* Clear Promiscuous Mode */
- if (suser()) {
+ if (capable(CAP_NET_ADMIN)) {
omr = inl(DE4X5_OMR);
omr &= ~OMR_PR;
outb(omr, DE4X5_OMR);
break;
case DE4X5_MCA_EN: /* Enable pass all multicast addressing */
- if (suser()) {
+ if (capable(CAP_NET_ADMIN)) {
omr = inl(DE4X5_OMR);
omr |= OMR_PM;
outl(omr, DE4X5_OMR);
break;
case DE4X5_CLR_STATS: /* Zero out the driver statistics */
- if (suser()) {
+ if (capable(CAP_NET_ADMIN)) {
cli();
memset(&lp->pktStats, 0, sizeof(lp->pktStats));
sti();
break;
case DE4X5_SET_OMR: /* Set the OMR Register contents */
- if (suser()) {
+ if (capable(CAP_NET_ADMIN)) {
if (!(status = verify_area(VERIFY_READ, (void *)ioc->data, 1))) {
copy_from_user(tmp.addr, ioc->data, 1);
outl(tmp.addr[0], DE4X5_OMR);
}
break;
case DEPCA_SET_HWADDR: /* Set the hardware address */
- if (suser()) {
+ if (capable(CAP_NET_ADMIN)) {
if (!(status = verify_area(VERIFY_READ, (void *) ioc->data, ETH_ALEN))) {
copy_from_user(tmp.addr, ioc->data, ETH_ALEN);
for (i = 0; i < ETH_ALEN; i++) {
break;
case DEPCA_SET_PROM: /* Set Promiscuous Mode */
- if (suser()) {
+ if (capable(CAP_NET_ADMIN)) {
while (dev->tbusy); /* Stop ring access */
set_bit(0, (void *) &dev->tbusy);
while (lp->tx_old != lp->tx_new); /* Wait for the ring to empty */
break;
case DEPCA_CLR_PROM: /* Clear Promiscuous Mode */
- if (suser()) {
+ if (capable(CAP_NET_ADMIN)) {
while (dev->tbusy); /* Stop ring access */
set_bit(0, (void *) &dev->tbusy);
while (lp->tx_old != lp->tx_new); /* Wait for the ring to empty */
}
break;
case DEPCA_SET_MCA: /* Set a multicast address */
- if (suser()) {
+ if (capable(CAP_NET_ADMIN)) {
if (!(status = verify_area(VERIFY_READ, ioc->data, ETH_ALEN * ioc->len))) {
copy_from_user(tmp.addr, ioc->data, ETH_ALEN * ioc->len);
set_multicast_list(dev);
break;
case DEPCA_CLR_MCA: /* Clear all multicast addresses */
- if (suser()) {
+ if (capable(CAP_NET_ADMIN)) {
set_multicast_list(dev);
} else {
status = -EPERM;
break;
case DEPCA_MCA_EN: /* Enable pass all multicast addressing */
- if (suser()) {
+ if (capable(CAP_NET_ADMIN)) {
set_multicast_list(dev);
} else {
status = -EPERM;
break;
case DEPCA_CLR_STATS: /* Zero out the driver statistics */
- if (suser()) {
+ if (capable(CAP_NET_ADMIN)) {
cli();
memset(&lp->pktStats, 0, sizeof(lp->pktStats));
sti();
{
struct dlci_local *dlp;
- if (!suser())
+ if (!capable(CAP_NET_ADMIN))
return(-EPERM);
dlp = dev->priv;
struct dlci_add add;
int err;
- if (!suser())
+ if (!capable(CAP_NET_ADMIN))
return(-EPERM);
if(copy_from_user(&add, arg, sizeof(struct dlci_add)))
data[3] = mdio_read(ioaddr, data[0], data[1]);
return 0;
case SIOCDEVPRIVATE+2: /* Write the specified MII register */
- if (!suser())
+ if (!capable(CAP_NET_ADMIN))
return -EPERM;
mdio_write(ioaddr, data[0], data[1], data[2]);
return 0;
static int eql_ioctl(struct device *dev, struct ifreq *ifr, int cmd)
{
- if(cmd!=EQL_GETMASTRCFG && cmd!=EQL_GETSLAVECFG && !suser())
+ if(cmd!=EQL_GETMASTRCFG && cmd!=EQL_GETSLAVECFG &&
+ !capable(CAP_NET_ADMIN))
return -EPERM;
switch (cmd)
{
}
break;
case EWRK3_SET_HWADDR: /* Set the hardware address */
- if (suser()) {
+ if (capable(CAP_NET_ADMIN)) {
if (!(status = verify_area(VERIFY_READ, (void *) ioc->data, ETH_ALEN))) {
csr = inb(EWRK3_CSR);
csr |= (CSR_TXD | CSR_RXD);
break;
case EWRK3_SET_PROM: /* Set Promiscuous Mode */
- if (suser()) {
+ if (capable(CAP_NET_ADMIN)) {
csr = inb(EWRK3_CSR);
csr |= CSR_PME;
csr &= ~CSR_MCE;
break;
case EWRK3_CLR_PROM: /* Clear Promiscuous Mode */
- if (suser()) {
+ if (capable(CAP_NET_ADMIN)) {
csr = inb(EWRK3_CSR);
csr &= ~CSR_PME;
outb(csr, EWRK3_CSR);
break;
case EWRK3_SET_MCA: /* Set a multicast address */
- if (suser()) {
+ if (capable(CAP_NET_ADMIN)) {
if (!(status = verify_area(VERIFY_READ, ioc->data, ETH_ALEN * ioc->len))) {
copy_from_user(tmp.addr, ioc->data, ETH_ALEN * ioc->len);
set_multicast_list(dev);
break;
case EWRK3_CLR_MCA: /* Clear all multicast addresses */
- if (suser()) {
+ if (capable(CAP_NET_ADMIN)) {
set_multicast_list(dev);
} else {
status = -EPERM;
break;
case EWRK3_MCA_EN: /* Enable multicast addressing */
- if (suser()) {
+ if (capable(CAP_NET_ADMIN)) {
csr = inb(EWRK3_CSR);
csr |= CSR_MCE;
csr &= ~CSR_PME;
break;
case EWRK3_CLR_STATS: /* Zero out the driver statistics */
- if (suser()) {
+ if (capable(CAP_NET_ADMIN)) {
cli();
memset(&lp->pktStats, 0, sizeof(lp->pktStats));
sti();
}
break;
case EWRK3_SET_CSR: /* Set the CSR Register contents */
- if (suser()) {
+ if (capable(CAP_NET_ADMIN)) {
if (!(status = verify_area(VERIFY_READ, ioc->data, 1))) {
copy_from_user(tmp.addr, ioc->data, 1);
outb(tmp.addr[0], EWRK3_CSR);
break;
case EWRK3_GET_EEPROM: /* Get the EEPROM contents */
- if (suser()) {
+ if (capable(CAP_NET_ADMIN)) {
for (i = 0; i < (EEPROM_MAX >> 1); i++) {
tmp.val[i] = (short) Read_EEPROM(iobase, i);
}
break;
case EWRK3_SET_EEPROM: /* Set the EEPROM contents */
- if (suser()) {
+ if (capable(CAP_NET_ADMIN)) {
if (!(status = verify_area(VERIFY_READ, ioc->data, EEPROM_MAX))) {
copy_from_user(tmp.addr, ioc->data, EEPROM_MAX);
for (i = 0; i < (EEPROM_MAX >> 1); i++) {
{
struct ipddp_route *rt = (struct ipddp_route *)ifr->ifr_data;
- if(!suser())
+ if(!capable(CAP_NET_ADMIN))
return -EPERM;
switch(cmd)
/*
* The user must have an euid of root to do these requests.
*/
- if (!suser ())
+ if (!capable(CAP_NET_ADMIN))
return -EPERM;
/*
* Set the MRU value
* undefined locations (think of Intel PIIX4 as a typical example).
*/
- if (fsuser())
+ if (capable(CAP_SYS_ADMIN))
size = PCI_CFG_SPACE_SIZE;
else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
size = 128;
case RTCSET:
- if (!suser())
+ if (!capable(CAP_SYS_TIME))
return -EPERM;
copy_from_user_ret(&rtc_tm, (struct rtc_time*)arg, sizeof(struct rtc_time), -EFAULT);
unsigned char *buffer;
int ret;
- if(!suser()) return -EPERM;
+ if(!capable(CAP_SYS_ADMIN)) return -EPERM;
switch(cmd) {
case VFC_I2C_SEND:
return -EFAULT;
old_info = *info;
- if (!suser()) {
+ if (!capable(CAP_SYS_ADMIN)) {
if ((new_serial.baud_base != info->baud_base) ||
(new_serial.type != info->type) ||
(new_serial.close_delay != info->close_delay) ||
if ((status & DRQ_STAT) == 0) { /* No more interrupts */
if (test_bit(IDESCSI_LOG_CMD, &scsi->log))
printk (KERN_INFO "Packet command completed, %d bytes transferred\n", pc->actually_transferred);
- ide_sti();
+ ide__sti();
if (status & ERR_STAT)
rq->errors++;
idescsi_end_request (1, HWGROUP(drive));
#include "ppa.h"
#include <linux/parport.h>
-#ifdef CONFIG_KMOD
-#include <linux/kmod.h>
-#ifndef PARPORT_MODULES
-#define PARPORT_MODULES "parport_pc"
-#endif
-#endif
-
#define NO_HOSTS 4
static ppa_struct ppa_hosts[NO_HOSTS] =
{PPA_EMPTY, PPA_EMPTY, PPA_EMPTY, PPA_EMPTY};
return 1;
}
- PPA_BASE(host_no) = ppa_hosts[host_no].dev->port->base;
if (ppa_hosts[host_no].cur_cmd)
ppa_hosts[host_no].cur_cmd->SCp.phase++;
return 0;
nhosts = 0;
try_again = 0;
-#ifdef CONFIG_KMOD
- if (!pb) {
- request_module(PARPORT_MODULES);
+ if (!pb)
pb = parport_enumerate();
- }
-#endif
if (!pb) {
printk("ppa: parport reports no devices.\n");
if (ppa_pb_claim(i))
while (ppa_hosts[i].p_busy)
schedule(); /* Whe can safe schedule() here */
- ppb = PPA_BASE(i);
+ ppb = PPA_BASE(i) = ppa_hosts[i].dev->port->base;
w_ctr(ppb, 0x0c);
modes = ppa_hosts[i].dev->port->modes;
put_user( dev->host->host_no, (int *) arg);
return 0;
case SCSI_IOCTL_TAGGED_ENABLE:
- if(!suser()) return -EACCES;
+ if(!capable(CAP_SYS_ADMIN)) return -EACCES;
if(!dev->tagged_supported) return -EINVAL;
dev->tagged_queue = 1;
dev->current_tag = 1;
return 0;
case SCSI_IOCTL_TAGGED_DISABLE:
- if(!suser()) return -EACCES;
+ if(!capable(CAP_SYS_ADMIN)) return -EACCES;
if(!dev->tagged_supported) return -EINVAL;
dev->tagged_queue = 0;
dev->current_tag = 0;
case SCSI_IOCTL_PROBE_HOST:
return ioctl_probe(dev->host, arg);
case SCSI_IOCTL_SEND_COMMAND:
- if(!suser()) return -EACCES;
+ if(!capable(CAP_SYS_ADMIN)) return -EACCES;
return scsi_ioctl_send_command((Scsi_Device *) dev,
(Scsi_Ioctl_Command *) arg);
case SCSI_IOCTL_DOORLOCK:
return 0;
case BLKRASET:
- if (!suser())
+ if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if(!(inode->i_rdev)) return -EINVAL;
if(arg > 0xff) return -EINVAL;
return 0;
case BLKFLSBUF:
- if(!suser()) return -EACCES;
+ if(!capable(CAP_SYS_ADMIN)) return -EACCES;
if(!(inode->i_rdev)) return -EINVAL;
fsync_dev(inode->i_rdev);
invalidate_buffers(inode->i_rdev);
return 0;
case BLKRRPART: /* Re-read partition tables */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
return revalidate_scsidisk(dev, 1);
RO_IOCTLS(dev, arg);
return 0;
case BLKRASET:
- if(!suser())
+ if(!capable(CAP_SYS_ADMIN))
return -EACCES;
if(!(cdi->dev))
return -EINVAL;
RO_IOCTLS(cdi->dev,arg);
case BLKFLSBUF:
- if(!suser())
+ if(!capable(CAP_SYS_ADMIN))
return -EACCES;
if(!(cdi->dev))
return -EINVAL;
if (i)
return (-EFAULT);
- if (mtc.mt_op == MTSETDRVBUFFER && !suser()) {
+ if (mtc.mt_op == MTSETDRVBUFFER && !capable(CAP_SYS_ADMIN)) {
printk(KERN_WARNING "st%d: MTSETDRVBUFFER only allowed for root.\n", dev);
return (-EPERM);
}
copy_from_user(&new_serial,new_info,sizeof(new_serial));
old_info = *info;
- if (!suser()) {
+ if (!capable(CAP_SYS_ADMIN)) {
if ((new_serial.baud_base != info->baud_base) ||
(new_serial.type != info->type) ||
(new_serial.close_delay != info->close_delay) ||
if (S_ISDIR(inode->i_mode))
goto unlink_done;
if (current->fsuid != inode->i_uid &&
- current->fsuid != dir->i_uid && !fsuser())
+ current->fsuid != dir->i_uid && !capable(CAP_FOWNER))
goto unlink_done;
if ((retval = affs_remove_header(bh,inode)) < 0)
retval = -EPERM;
if (current->fsuid != inode->i_uid &&
- current->fsuid != dir->i_uid && !fsuser())
+ current->fsuid != dir->i_uid && !capable(CAP_FOWNER))
goto rmdir_done;
if (inode->i_dev != dir->i_dev)
goto rmdir_done;
/* Make sure a caller can chown. */
if ((ia_valid & ATTR_UID) &&
(current->fsuid != inode->i_uid ||
- attr->ia_uid != inode->i_uid) && !fsuser())
+ attr->ia_uid != inode->i_uid) && !capable(CAP_CHOWN))
goto error;
/* Make sure caller can chgrp. */
if ((ia_valid & ATTR_GID) &&
(!in_group_p(attr->ia_gid) && attr->ia_gid != inode->i_gid) &&
- !fsuser())
+ !capable(CAP_CHOWN))
goto error;
/* Make sure a caller can chmod. */
if (ia_valid & ATTR_MODE) {
- if ((current->fsuid != inode->i_uid) && !fsuser())
+ if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
goto error;
/* Also check the setgid bit! */
if (!in_group_p((ia_valid & ATTR_GID) ? attr->ia_gid :
- inode->i_gid) && !fsuser())
+ inode->i_gid) && !capable(CAP_FSETID))
attr->ia_mode &= ~S_ISGID;
}
/* Check for setting the inode time. */
if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET)) {
- if (current->fsuid != inode->i_uid && !fsuser())
+ if (current->fsuid != inode->i_uid && !capable(CAP_FOWNER))
goto error;
}
fine:
inode->i_ctime = attr->ia_ctime;
if (ia_valid & ATTR_MODE) {
inode->i_mode = attr->ia_mode;
- if (!in_group_p(inode->i_gid) && !fsuser())
+ if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
inode->i_mode &= ~S_ISGID;
}
mark_inode_dirty(inode);
_IOC_NR(cmd) - _IOC_NR(AUTOFS_IOC_FIRST) >= AUTOFS_IOC_COUNT )
return -ENOTTY;
- if ( !autofs_oz_mode(sbi) && !fsuser() )
+ if ( !autofs_oz_mode(sbi) && !capable(CAP_SYS_ADMIN) )
return -EPERM;
switch(cmd) {
int i, error = -EPERM;
lock_kernel();
- if (!suser())
+ if (!capable(CAP_SYS_ADMIN))
goto out;
if (func == 1) {
if (inodes <= 0 || dquot->dq_flags & DQ_FAKE)
return(QUOTA_OK);
if (dquot->dq_ihardlimit &&
- (dquot->dq_curinodes + inodes) > dquot->dq_ihardlimit && !fsuser()) {
+ (dquot->dq_curinodes + inodes) > dquot->dq_ihardlimit &&
+ !capable(CAP_SYS_RESOURCE)) {
if ((dquot->dq_flags & DQ_INODES) == 0 &&
need_print_warning(type, dquot)) {
sprintf(quotamessage, "%s: write failed, %s file limit reached\r\n",
}
if (dquot->dq_isoftlimit &&
(dquot->dq_curinodes + inodes) > dquot->dq_isoftlimit &&
- dquot->dq_itime && CURRENT_TIME >= dquot->dq_itime && !fsuser()) {
+ dquot->dq_itime && CURRENT_TIME >= dquot->dq_itime &&
+ !capable(CAP_SYS_RESOURCE)) {
if (need_print_warning(type, dquot)) {
sprintf(quotamessage, "%s: warning, %s file quota exceeded too long.\r\n",
dquot->dq_mnt->mnt_dirname, quotatypes[type]);
}
if (dquot->dq_isoftlimit &&
(dquot->dq_curinodes + inodes) > dquot->dq_isoftlimit &&
- dquot->dq_itime == 0 && !fsuser()) {
+ dquot->dq_itime == 0 &&
+ !capable(CAP_SYS_RESOURCE)) {
if (need_print_warning(type, dquot)) {
sprintf(quotamessage, "%s: warning, %s file quota exceeded\r\n",
dquot->dq_mnt->mnt_dirname, quotatypes[type]);
if (blocks <= 0 || dquot->dq_flags & DQ_FAKE)
return(QUOTA_OK);
if (dquot->dq_bhardlimit &&
- (dquot->dq_curblocks + blocks) > dquot->dq_bhardlimit && !fsuser()) {
+ (dquot->dq_curblocks + blocks) > dquot->dq_bhardlimit &&
+ !capable(CAP_SYS_RESOURCE)) {
if ((dquot->dq_flags & DQ_BLKS) == 0 &&
need_print_warning(type, dquot)) {
sprintf(quotamessage, "%s: write failed, %s disk limit reached.\r\n",
}
if (dquot->dq_bsoftlimit &&
(dquot->dq_curblocks + blocks) > dquot->dq_bsoftlimit &&
- dquot->dq_btime && CURRENT_TIME >= dquot->dq_btime && !fsuser()) {
+ dquot->dq_btime && CURRENT_TIME >= dquot->dq_btime &&
+ !capable(CAP_SYS_RESOURCE)) {
if (need_print_warning(type, dquot)) {
sprintf(quotamessage, "%s: write failed, %s disk quota exceeded too long.\r\n",
dquot->dq_mnt->mnt_dirname, quotatypes[type]);
}
if (dquot->dq_bsoftlimit &&
(dquot->dq_curblocks + blocks) > dquot->dq_bsoftlimit &&
- dquot->dq_btime == 0 && !fsuser()) {
+ dquot->dq_btime == 0 &&
+ !capable(CAP_SYS_RESOURCE)) {
if (need_print_warning(type, dquot)) {
sprintf(quotamessage, "%s: warning, %s disk quota exceeded\r\n",
dquot->dq_mnt->mnt_dirname, quotatypes[type]);
break;
case Q_GETQUOTA:
if (((type == USRQUOTA && current->uid != id) ||
- (type == GRPQUOTA && in_group_p(id))) && !fsuser())
+ (type == GRPQUOTA && in_group_p(id))) &&
+ !capable(CAP_SYS_ADMIN))
goto out;
break;
default:
- if (!fsuser())
+ if (!capable(CAP_SYS_ADMIN))
goto out;
}
retval = new_page_tables(current);
if (retval)
goto fail_restore;
+ activate_context(current);
up(&mm->mmap_sem);
mmput(old_mm);
return 0;
int prepare_binprm(struct linux_binprm *bprm)
{
int mode;
- int retval,id_change;
+ int retval,id_change,cap_raised;
struct inode * inode = bprm->dentry->d_inode;
mode = inode->i_mode;
bprm->e_uid = current->euid;
bprm->e_gid = current->egid;
- id_change = 0;
+ id_change = cap_raised = 0;
/* Set-uid? */
if (mode & S_ISUID) {
cap_set_full(bprm->cap_effective);
}
- /* We use a conservative definition of suid for capabilities.
- * The process is suid if the permitted set is not a subset of
- * the current permitted set after the exec call.
- * new permitted set = forced | (allowed & inherited)
- * pP' = fP | (fI & pI)
- */
-
- if ((bprm->cap_permitted.cap |
- (current->cap_inheritable.cap &
- bprm->cap_inheritable.cap)) &
- ~current->cap_permitted.cap) {
- id_change = 1;
+ /* Only if pP' is _not_ a subset of pP, do we consider there
+ * has been a capability related "change of capability". In
+ * such cases, we need to check that the elevation of
+ * privilege does not go against other system constraints.
+ * The new Permitted set is defined below -- see (***). */
+ {
+ kernel_cap_t working =
+ cap_combine(bprm->cap_permitted,
+ cap_intersect(bprm->cap_inheritable,
+ current->cap_inheritable));
+ if (!cap_issubset(working, current->cap_permitted)) {
+ cap_raised = 1;
+ }
}
- if (id_change) {
+
+
+
+ if (id_change || cap_raised) {
/* We can't suid-execute if we're sharing parts of the executable */
/* or if we're being traced (or if suid execs are not allowed) */
/* (current->mm->count > 1 is ok, as we'll get a new mm anyway) */
|| (current->fs->count > 1)
|| (atomic_read(&current->sig->count) > 1)
|| (current->files->count > 1)) {
- if (!suser())
- return -EPERM;
+ if (id_change && !capable(CAP_SETUID))
+ return -EPERM;
+ if (cap_raised && !capable(CAP_SETPCAP))
+ return -EPERM;
}
}
* The formula used for evolving capabilities is:
*
* pI' = pI
- * pP' = fP | (fI & pI)
+ * (***) pP' = fP | (fI & pI)
* pE' = pP' & fE [NB. fE is 0 or ~0]
*
* I=Inheritable, P=Permitted, E=Effective // p=process, f=file
void compute_creds(struct linux_binprm *bprm)
{
- int new_permitted = bprm->cap_permitted.cap |
- (bprm->cap_inheritable.cap & current->cap_inheritable.cap);
-
- current->cap_permitted.cap = new_permitted;
- current->cap_effective.cap = new_permitted & bprm->cap_effective.cap;
+ /* For init, we want to retain the capabilities set
+ * in the init_task struct. Thus we skip the usual
+ * capability rules */
+ if (current->pid != 1) {
+ int new_permitted = bprm->cap_permitted.cap |
+ (bprm->cap_inheritable.cap &
+ current->cap_inheritable.cap);
+
+ current->cap_permitted.cap = new_permitted;
+ current->cap_effective.cap = new_permitted &
+ bprm->cap_effective.cap;
+ }
/* AUD: Audit candidate if current->cap_effective is set */
* Access is always granted for root. We now check last,
* though, for BSD process accounting correctness
*/
- if (((mode & mask & S_IRWXO) == mask) || fsuser())
+ if (((mode & mask & S_IRWXO) == mask) || capable(CAP_DAC_OVERRIDE))
return 0;
- else
- return -EACCES;
+ if ((mask == S_IROTH) ||
+ (S_ISDIR(mode) && !(mask & ~(S_IROTH | S_IXOTH))))
+ if (capable(CAP_DAC_READ_SEARCH))
+ return 0;
+ return -EACCES;
}
if (le32_to_cpu(es->s_free_blocks_count) <= le32_to_cpu(es->s_r_blocks_count) &&
((sb->u.ext2_sb.s_resuid != current->fsuid) &&
(sb->u.ext2_sb.s_resgid == 0 ||
- !in_group_p (sb->u.ext2_sb.s_resgid)) && !fsuser())) {
+ !in_group_p (sb->u.ext2_sb.s_resgid)) &&
+ !capable(CAP_SYS_RESOURCE))) {
unlock_super (sb);
return 0;
}
/* was any of the uid bits set? */
mode &= inode->i_mode;
- if (mode && !suser()) {
+ if (mode && !capable(CAP_FSETID)) {
inode->i_mode &= ~mode;
mark_inode_dirty(inode);
}
(ATTR_FLAG_APPEND | ATTR_FLAG_IMMUTABLE)) ^
(inode->u.ext2_i.i_flags &
(EXT2_APPEND_FL | EXT2_IMMUTABLE_FL))) {
- if (!fsuser())
+ if (!capable(CAP_LINUX_IMMUTABLE))
goto out;
- } else if ((current->fsuid != inode->i_uid) && !fsuser())
+ } else if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
goto out;
retval = inode_change_ok(inode, iattr);
(inode->u.ext2_i.i_flags &
(EXT2_APPEND_FL | EXT2_IMMUTABLE_FL))) {
/* This test looks nicer. Thanks to Pauline Middelink */
- if (!fsuser())
+ if (!capable(CAP_LINUX_IMMUTABLE))
return -EPERM;
} else
- if ((current->fsuid != inode->i_uid) && !fsuser())
+ if ((current->fsuid != inode->i_uid) &&
+ !capable(CAP_FOWNER))
return -EPERM;
if (IS_RDONLY(inode))
return -EROFS;
case EXT2_IOC_GETVERSION:
return put_user(inode->u.ext2_i.i_version, (int *) arg);
case EXT2_IOC_SETVERSION:
- if ((current->fsuid != inode->i_uid) && !fsuser())
+ if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
return -EPERM;
if (IS_RDONLY(inode))
return -EROFS;
retval = -EPERM;
if ((dir->i_mode & S_ISVTX) &&
current->fsuid != inode->i_uid &&
- current->fsuid != dir->i_uid && !fsuser())
+ current->fsuid != dir->i_uid && !capable(CAP_FOWNER))
goto end_rmdir;
if (inode == dir) /* we may not delete ".", but "../dir" is ok */
goto end_rmdir;
goto end_unlink;
if ((dir->i_mode & S_ISVTX) &&
current->fsuid != inode->i_uid &&
- current->fsuid != dir->i_uid && !fsuser())
+ current->fsuid != dir->i_uid && !capable(CAP_FOWNER))
goto end_unlink;
retval = -EIO;
retval = -EPERM;
if ((old_dir->i_mode & S_ISVTX) &&
current->fsuid != old_inode->i_uid &&
- current->fsuid != old_dir->i_uid && !fsuser())
+ current->fsuid != old_dir->i_uid && !capable(CAP_FOWNER))
goto end_rename;
if (IS_APPEND(old_inode) || IS_IMMUTABLE(old_inode))
goto end_rename;
if (new_inode) {
if ((new_dir->i_mode & S_ISVTX) &&
current->fsuid != new_inode->i_uid &&
- current->fsuid != new_dir->i_uid && !fsuser())
+ current->fsuid != new_dir->i_uid && !capable(CAP_FOWNER))
goto end_rename;
if (IS_APPEND(new_inode) || IS_IMMUTABLE(new_inode))
goto end_rename;
if ((dir->i_mode & S_ISVTX) &&
current->fsuid != inode->i_uid &&
- current->fsuid != dir->i_uid && !fsuser())
+ current->fsuid != dir->i_uid && !capable(CAP_FOWNER))
goto end_rmdir;
if (inode->i_dev != dir->i_dev)
goto end_rmdir;
}
if ((dir->i_mode & S_ISVTX) &&
current->fsuid != inode->i_uid &&
- current->fsuid != dir->i_uid && !fsuser())
+ current->fsuid != dir->i_uid && !capable(CAP_FOWNER))
goto end_unlink;
if (de->inode != inode->i_ino) {
retval = -ENOENT;
retval = -EPERM;
if ((old_dir->i_mode & S_ISVTX) &&
current->fsuid != old_inode->i_uid &&
- current->fsuid != old_dir->i_uid && !fsuser())
+ current->fsuid != old_dir->i_uid && !capable(CAP_FOWNER))
goto end_rename;
new_inode = new_dentry->d_inode;
new_bh = minix_find_entry(new_dir, new_dentry->d_name.name,
retval = -EPERM;
if (new_inode && (new_dir->i_mode & S_ISVTX) &&
current->fsuid != new_inode->i_uid &&
- current->fsuid != new_dir->i_uid && !fsuser())
+ current->fsuid != new_dir->i_uid && !capable(CAP_FOWNER))
goto end_rename;
if (S_ISDIR(old_inode->i_mode)) {
retval = -ENOTDIR;
mode >>= 6;
else if (in_group_p(inode->i_gid))
mode >>= 3;
- if (((mode & mask & 0007) == mask) || fsuser())
+ if (((mode & mask & S_IRWXO) == mask) || capable(CAP_DAC_OVERRIDE))
return 0;
+ /* read and search access */
+ if ((mask == S_IROTH) ||
+ (S_ISDIR(mode) && !(mask & ~(S_IROTH | S_IXOTH))))
+ if (capable(CAP_DAC_READ_SEARCH))
+ return 0;
return -EACCES;
}
lock_kernel();
error = -EPERM;
- if (S_ISDIR(mode) || (!S_ISFIFO(mode) && !fsuser()))
+ if (S_ISDIR(mode) || (!S_ISFIFO(mode) && !capable(CAP_SYS_ADMIN)))
goto out;
error = -EINVAL;
switch (mode & S_IFMT) {
if (!initialized)
nfsd_init();
err = -EPERM;
- if (!suser()) {
+ if (!capable(CAP_SYS_ADMIN)) {
goto done;
}
err = -EFAULT;
/*
* access() needs to use the real uid/gid, not the effective uid/gid.
- * We do this by temporarily setting fsuid/fsgid to the wanted values
+ * We do this by temporarily clearing all FS-related capabilities and
+ * switching the fsuid/fsgid around to the real ones.
*/
asmlinkage int sys_access(const char * filename, int mode)
{
struct dentry * dentry;
int old_fsuid, old_fsgid;
+ kernel_cap_t old_cap;
int res = -EINVAL;
lock_kernel();
goto out;
old_fsuid = current->fsuid;
old_fsgid = current->fsgid;
+ old_cap = current->cap_effective;
+
current->fsuid = current->uid;
current->fsgid = current->gid;
+ /* Clear the capabilities if we switch to a non-root user */
+ if (current->uid)
+ cap_clear(current->cap_effective);
+
dentry = namei(filename);
res = PTR_ERR(dentry);
if (!IS_ERR(dentry)) {
current->fsuid = old_fsuid;
current->fsgid = old_fsgid;
+ current->cap_effective = old_cap;
out:
unlock_kernel();
return res;
goto dput_and_out;
error = -EPERM;
- if (!fsuser())
+ if (!capable(CAP_SYS_CHROOT))
goto dput_and_out;
/* exchange dentries */
int ret = -EPERM;
lock_kernel();
- if (!suser())
+ if (!capable(CAP_SYS_TTY_CONFIG))
goto out;
/* If there is a controlling tty, hang it up */
if (current->tty)
return buffer;
}
+extern inline char *task_cap(struct task_struct *p, char *buffer)
+{
+ return buffer + sprintf(buffer, "CapInh:\t%016x\n"
+ "CapPrm:\t%016x\n"
+ "CapEff:\t%016x\n",
+ p->cap_inheritable.cap,
+ p->cap_permitted.cap,
+ p->cap_effective.cap);
+}
+
+
static int get_status(int pid, char * buffer)
{
char * orig = buffer;
buffer = task_state(tsk, buffer);
buffer = task_mem(tsk, buffer);
buffer = task_sig(tsk, buffer);
+ buffer = task_cap(tsk, buffer);
return buffer - orig;
}
mode >>= 6;
else if (in_group_p(inode->i_gid))
mode >>= 3;
- if (((mode & mask & 0007) == mask) || fsuser())
+ if (((mode & mask & S_IRWXO) == mask) || capable(CAP_DAC_OVERRIDE))
return 0;
+ /* read and search access */
+ if ((mask == S_IROTH) ||
+ (S_ISDIR(mode) && !(mask & ~(S_IROTH | S_IXOTH))))
+ if (capable(CAP_DAC_READ_SEARCH))
+ return 0;
return -EACCES;
}
goto out;
error = -EACCES;
- if (current->uid != server->mnt->mounted_uid && !suser())
+ if (current->uid != server->mnt->mounted_uid &&
+ !capable(CAP_SYS_ADMIN))
goto out;
error = -EBADF;
struct dentry * dentry;
int retval;
- if (!suser())
+ if (!capable(CAP_SYS_ADMIN))
return -EPERM;
lock_kernel();
struct file dummy; /* allows read-write or read-only flag */
lock_kernel();
- if (!suser())
+ if (!capable(CAP_SYS_ADMIN))
goto out;
if ((new_flags &
(MS_MGC_MSK | MS_REMOUNT)) == (MS_MGC_VAL | MS_REMOUNT)) {
if ((dir->i_mode & S_ISVTX) &&
current->fsuid != inode->i_uid &&
- current->fsuid != dir->i_uid && !fsuser())
+ current->fsuid != dir->i_uid && !capable(CAP_FOWNER))
goto end_rmdir;
if (inode->i_dev != dir->i_dev)
goto end_rmdir;
}
if ((dir->i_mode & S_ISVTX) &&
current->fsuid != inode->i_uid &&
- current->fsuid != dir->i_uid && !fsuser())
+ current->fsuid != dir->i_uid && !capable(CAP_FOWNER))
goto end_unlink;
if (de->inode != inode->i_ino) {
retval = -ENOENT;
retval = -EPERM;
if ((old_dir->i_mode & S_ISVTX) &&
current->fsuid != old_inode->i_uid &&
- current->fsuid != old_dir->i_uid && !fsuser())
+ current->fsuid != old_dir->i_uid && !capable(CAP_FOWNER))
goto end_rename;
new_inode = new_dentry->d_inode;
new_bh = sysv_find_entry(new_dir, new_dentry->d_name.name,
retval = -EPERM;
if (new_inode && (new_dir->i_mode & S_ISVTX) &&
current->fsuid != new_inode->i_uid &&
- current->fsuid != new_dir->i_uid && !fsuser())
+ current->fsuid != new_dir->i_uid && !capable(CAP_FOWNER))
goto end_rename;
if (S_ISDIR(old_inode->i_mode)) {
retval = -ENOTDIR;
Printk (("ret %d ",ret));
if (ret == 0){
/* check sticky bit on old_dir */
- if ( !(old_dir->i_mode & S_ISVTX) || fsuser() ||
+ if ( !(old_dir->i_mode & S_ISVTX) || capable(CAP_FOWNER) ||
current->fsuid == old_info.entry.uid ||
current->fsuid == old_dir->i_uid ) {
/* Does new_name already exist? */
PRINTK(("new findentry "));
ret = umsdos_findentry(new_dir,&new_info,0);
if (ret != 0 || /* if destination file exists, are we allowed to replace it ? */
- !(new_dir->i_mode & S_ISVTX) || fsuser() ||
+ !(new_dir->i_mode & S_ISVTX) || capable(CAP_FOWNER) ||
current->fsuid == new_info.entry.uid ||
current->fsuid == new_dir->i_uid ) {
PRINTK (("new newentry "));
umsdos_real_lookup (dir, tdentry); /* fill inode part */
Printk (("isempty %d i_count %d ",empty,sdir->i_count));
/* check sticky bit */
- if ( !(dir->i_mode & S_ISVTX) || fsuser() ||
+ if ( !(dir->i_mode & S_ISVTX) || capable(CAP_FOWNER) ||
current->fsuid == sdir->i_uid ||
current->fsuid == dir->i_uid ) {
if (empty == 1){
if (ret == 0){
Printk (("UMSDOS_unlink %.*s ",info.fake.len,info.fake.fname));
/* check sticky bit */
- if ( !(dir->i_mode & S_ISVTX) || fsuser() ||
+ if ( !(dir->i_mode & S_ISVTX) || capable(CAP_FOWNER) ||
current->fsuid == info.entry.uid ||
current->fsuid == dir->i_uid ) {
if (info.entry.flags & UMSDOS_HLINK){
#define MAX_HWIFS 4
#endif
-#define ide_sti() sti()
+#define ide__sti() __sti()
static __inline__ int ide_default_irq(ide_ioreg_t base)
{
#define destroy_context(mm) do { } while(0)
+/*
+ * After we have set current->mm to a new value, this activates
+ * the context for the new mm so we see the new mappings.
+ * Ideally this would be an extern inline function, but reload_context
+ * is declared in pgtable.h, which includes this file. :-(
+ */
+#define activate_context(tsk) \
+ do { \
+ get_mmu_context(tsk); \
+ reload_context(tsk); \
+ } while (0)
+
#endif
#define MAX_HWIFS 4
#endif
-#define ide_sti() sti()
+#define ide__sti() __sti()
#include <asm/arch/ide.h>
#define init_new_context(mm) do { } while(0)
#define destroy_context(mm) do { } while(0)
+#define activate_context(tsk) do { } while(0)
#endif
#define MAX_HWIFS 6
#endif
-#define ide_sti() sti()
+#define ide__sti() __sti()
static __inline__ int ide_default_irq(ide_ioreg_t base)
{
#define init_new_context(mm) do { } while(0)
#define destroy_context(mm) do { } while(0)
+#define activate_context(tsk) do { } while(0)
#endif
#ifndef __SMP__
+#define DEBUG_SPINLOCKS 0 /* 0 == no debugging, 1 == maintain lock state, 2 == full debug */
+
+#if (DEBUG_SPINLOCKS < 1)
+
/*
* Your basic spinlocks, allowing only a single CPU anywhere
*/
#define spin_lock_init(lock) do { } while(0)
#define spin_lock(lock) do { } while(0)
-#define spin_trylock(lock) do { } while(0)
+#define spin_trylock(lock) (1)
#define spin_unlock_wait(lock) do { } while(0)
#define spin_unlock(lock) do { } while(0)
#define spin_lock_irq(lock) cli()
#define spin_unlock_irqrestore(lock, flags) \
restore_flags(flags)
+#elif (DEBUG_SPINLOCKS < 2)
+
+typedef struct {
+ volatile unsigned int lock;
+} spinlock_t;
+#define SPIN_LOCK_UNLOCKED { 0 }
+
+#define spin_lock_init(x) do { (x)->lock = 0; } while (0)
+#define spin_trylock(lock) (!test_and_set_bit(0,(lock)))
+
+#define spin_lock(x) do { (x)->lock = 1; } while (0)
+#define spin_unlock_wait(x) do { } while (0)
+#define spin_unlock(x) do { (x)->lock = 0; } while (0)
+#define spin_lock_irq(x) do { cli(); spin_lock(x); } while (0)
+#define spin_unlock_irq(x) do { spin_unlock(x); sti(); } while (0)
+
+#define spin_lock_irqsave(x, flags) \
+ do { save_flags(flags); spin_lock_irq(x); } while (0)
+#define spin_unlock_irqrestore(x, flags) \
+ do { spin_unlock(x); restore_flags(flags); } while (0)
+
+#else /* (DEBUG_SPINLOCKS >= 2) */
+
+typedef struct {
+ volatile unsigned int lock;
+ volatile unsigned int babble;
+ const char *module;
+} spinlock_t;
+#define SPIN_LOCK_UNLOCKED { 0, 25, __BASE_FILE__ }
+
+#include <linux/kernel.h>
+
+#define spin_lock_init(x) do { (x)->lock = 0; } while (0)
+#define spin_trylock(lock) (!test_and_set_bit(0,(lock)))
+
+#define spin_lock(x) do {unsigned long __spinflags; save_flags(__spinflags); cli(); if ((x)->lock&&(x)->babble) {printk("%s: spin_lock(%s:%p) already locked\n", __BASE_FILE__, (x)->module, (x));(x)->babble--;} (x)->lock = 1; restore_flags(__spinflags);} while (0)
+#define spin_unlock_wait(x) do {unsigned long __spinflags; save_flags(__spinflags); cli(); if ((x)->lock&&(x)->babble) {printk("%s: spin_unlock_wait(%s:%p) deadlock\n", __BASE_FILE__, (x)->module, (x));(x)->babble--;} restore_flags(__spinflags);} while (0)
+#define spin_unlock(x) do {unsigned long __spinflags; save_flags(__spinflags); cli(); if (!(x)->lock&&(x)->babble) {printk("%s: spin_unlock(%s:%p) not locked\n", __BASE_FILE__, (x)->module, (x));(x)->babble--;} (x)->lock = 0; restore_flags(__spinflags);} while (0)
+#define spin_lock_irq(x) do {cli(); if ((x)->lock&&(x)->babble) {printk("%s: spin_lock_irq(%s:%p) already locked\n", __BASE_FILE__, (x)->module, (x));(x)->babble--;} (x)->lock = 1;} while (0)
+#define spin_unlock_irq(x)	do {cli(); if (!(x)->lock&&(x)->babble) {printk("%s: spin_unlock_irq(%s:%p) not locked\n", __BASE_FILE__, (x)->module, (x));(x)->babble--;} (x)->lock = 0; sti();} while (0)
+
+#define spin_lock_irqsave(x,flags) do {save_flags(flags); cli(); if ((x)->lock&&(x)->babble) {printk("%s: spin_lock_irqsave(%s:%p) already locked\n", __BASE_FILE__, (x)->module, (x));(x)->babble--;} (x)->lock = 1;} while (0)
+#define spin_unlock_irqrestore(x,flags) do {cli(); if (!(x)->lock&&(x)->babble) {printk("%s: spin_unlock_irqrestore(%s:%p) not locked\n", __BASE_FILE__, (x)->module, (x));(x)->babble--;} (x)->lock = 0; restore_flags(flags);} while (0)
+
+#endif /* DEBUG_SPINLOCKS */
+
/*
* Read-write spinlocks, allowing multiple readers
* but only one writer.
#define write_unlock_irqrestore(lock, flags) \
restore_flags(flags)
-#else
+#else /* __SMP__ */
/*
- * Simple spin lock operations. There are two variants, one clears IRQ's
- * on the local processor, one does not.
- *
- * We make no fairness assumptions. They have a cost.
+ * Your basic spinlocks, allowing only a single CPU anywhere
*/
typedef struct {
#define SPIN_LOCK_UNLOCKED { 0 }
#define spin_lock_init(x) do { (x)->lock = 0; } while(0)
+/*
+ * Simple spin lock operations. There are two variants, one clears IRQ's
+ * on the local processor, one does not.
+ *
+ * We make no fairness assumptions. They have a cost.
+ */
+
#define spin_unlock_wait(x) do { barrier(); } while(((volatile spinlock_t *)(x))->lock)
typedef struct { unsigned long a[100]; } __dummy_lock_t;
#define write_unlock_irqrestore(lock, flags) \
do { write_unlock(lock); __restore_flags(flags); } while (0)
-#endif /* SMP */
+#endif /* __SMP__ */
#endif /* __ASM_SPINLOCK_H */
#define __NR_pwrite 181
#define __NR_chown 182
#define __NR_getcwd 183
+#define __NR_capget 184
+#define __NR_capset 185
/* user-visible error numbers are in the range -1 - -122: see <asm-i386/errno.h> */
* works. (Roman)
*/
#if defined(CONFIG_ATARI) && !defined(CONFIG_AMIGA)
-#define ide_sti() \
+#define ide__sti() \
do { \
- if (!in_interrupt()) sti(); \
+ if (!in_interrupt()) __sti(); \
} while(0)
#elif defined(CONFIG_ATARI)
-#define ide_sti() \
+#define ide__sti() \
do { \
if (!MACH_IS_ATARI || !in_interrupt()) sti(); \
} while(0)
#else /* !defined(CONFIG_ATARI) */
-#define ide_sti() sti()
+#define ide__sti() __sti()
#endif
#endif /* __KERNEL__ */
#define init_new_context(mm) do { } while(0)
#define destroy_context(mm) do { } while(0)
+#define activate_context(tsk) do { } while(0)
#endif
#define MAX_HWIFS 4
#endif
-#define ide_sti() sti()
+#define ide__sti() __sti()
static __inline__ int ide_default_irq(ide_ioreg_t base)
{
mm->context = 0;
}
+/*
+ * After we have set current->mm to a new value, this activates
+ * the context for the new mm so we see the new mappings.
+ */
+extern inline void activate_context(struct task_struct *tsk)
+{
+ get_mmu_context(tsk);
+ /* XXX here we presumably need to set some cpu register - paulus. */
+}
+
#endif /* __ASM_MIPS_MMU_CONTEXT_H */
#define SUPPORT_VLB_SYNC 0
-#define ide_sti() sti()
+#define ide__sti() __sti()
typedef unsigned int ide_ioreg_t;
void ide_init_hwif_ports(ide_ioreg_t *p, ide_ioreg_t base, int *irq);
#ifdef CONFIG_8xx
#define NO_CONTEXT 16
#define LAST_CONTEXT 15
+#define MUNGE_CONTEXT(n) (n)
+
#else
+
+/* PPC 6xx, 7xx CPUs */
#define NO_CONTEXT 0
#define LAST_CONTEXT 0xfffff
+
+/*
+ * Allocating context numbers this way tends to spread out
+ * the entries in the hash table better than a simple linear
+ * allocation.
+ */
+#define MUNGE_CONTEXT(n) (((n) * 897) & LAST_CONTEXT)
#endif
extern int next_mmu_context;
#define set_context(context) do { } while (0)
#endif
-#ifndef CONFIG_8xx
-/*
- * Allocating context numbers this way tends to spread out
- * the entries in the hash table better than a simple linear
- * allocation.
- */
-#define MUNGE_CONTEXT(n) (((n) * 897) & LAST_CONTEXT)
-#else
-#define MUNGE_CONTEXT(n) (n)
-#endif
-
/*
* Get a new mmu context for task tsk if necessary.
*/
if (next_mmu_context == LAST_CONTEXT) \
mmu_context_overflow(); \
mm->context = MUNGE_CONTEXT(++next_mmu_context);\
- if ( tsk == current ) \
- set_context(mm->context); \
} \
} while (0)
/*
* We're finished using the context for an address space.
*/
-#ifdef CONFIG_8xx
#define destroy_context(mm) ((mm)->context = NO_CONTEXT)
-#else
-#define destroy_context(mm) do { } while (0)
-#endif
+
+/*
+ * After we have set current->mm to a new value, this activates
+ * the context for the new mm so we see the new mappings.
+ */
+extern inline void activate_context(struct task_struct *tsk)
+{
+ get_mmu_context(tsk);
+ set_context(tsk->mm->context);
+}
/*
* compute the vsid from the context and segment
#define destroy_context(mm) BTFIXUP_CALL(destroy_context)(mm)
+/*
+ * After we have set current->mm to a new value, this activates
+ * the context for the new mm so we see the new mappings.
+ * XXX this presumably needs a sensible implementation - paulus.
+ */
+#define activate_context(tsk) do { } while(0)
+
#endif /* !(__SPARC_MMU_CONTEXT_H) */
#undef MAX_HWIFS
#define MAX_HWIFS 2
-#define ide_sti() sti()
+#define ide__sti() __sti()
static __inline__ int ide_default_irq(ide_ioreg_t base)
{
for ( ; i < 10; i++)
*p++ = 0;
/* PCI code needs to figure out this. */
- if(irq != NULL)
+ if (irq != NULL)
*irq = 0;
}
: "o4");
}
+/*
+ * After we have set current->mm to a new value, this activates
+ * the context for the new mm so we see the new mappings.
+ */
+#define activate_context(tsk) get_mmu_context(tsk)
+
#endif /* !(__ASSEMBLY__) */
#endif /* !(__SPARC64_MMU_CONTEXT_H) */
#endif
#define RO_IOCTLS(dev,where) \
- case BLKROSET: { int __val; if (!suser()) return -EACCES; \
+ case BLKROSET: { int __val; if (!capable(CAP_SYS_ADMIN)) return -EACCES; \
if (get_user(__val, (int *)(where))) return -EFAULT; \
set_device_ro((dev),__val); return 0; } \
case BLKROGET: { int __val = (is_read_only(dev) != 0) ; \
#ifdef IDE_DRIVER
void ide_end_request(byte uptodate, ide_hwgroup_t *hwgroup) {
- struct request *req = hwgroup->rq;
+ int nsect;
+ struct buffer_head *bh;
+ struct request *req;
+ unsigned long flags;
+
+ spin_lock_irqsave(&io_request_lock,flags);
+ req = hwgroup->rq;
#else
static void end_request(int uptodate) {
- struct request *req = CURRENT;
-#endif /* IDE_DRIVER */
- struct buffer_head * bh;
int nsect;
+ struct buffer_head *bh;
+ struct request *req = CURRENT;
+#endif /* IDE_DRIVER */
req->errors = 0;
if (!uptodate) {
printk("end_request: I/O error, dev %s, sector %lu\n",
printk("end_request: buffer-list destroyed\n");
}
req->buffer = bh->b_data;
+#ifdef IDE_DRIVER
+ spin_unlock_irqrestore(&io_request_lock,flags);
+#endif /* IDE_DRIVER */
return;
}
}
up(req->sem);
req->rq_status = RQ_INACTIVE;
wake_up(&wait_for_request);
+#ifdef IDE_DRIVER
+ spin_unlock_irqrestore(&io_request_lock,flags);
+#endif /* IDE_DRIVER */
}
#endif /* defined(IDE_DRIVER) && !defined(_IDE_C) */
#endif /* ! SCSI_BLK_MAJOR(MAJOR_NR) */
kernel might be somewhat backwards compatible, but don't bet on
it. */
+/* XXX - Note, cap_t, is defined by POSIX to be an "opaque" pointer to
+ a set of three capability sets. The transposition of 3*the
+ following structure to such a composite is better handled in a user
+ library since the draft standard requires the use of malloc/free
+ etc.. */
+
#define _LINUX_CAPABILITY_VERSION 0x19980330
-typedef struct _user_cap_struct {
+typedef struct __user_cap_header_struct {
__u32 version;
- __u32 size;
- __u8 cap[1];
-} *cap_t;
-
+ int pid;
+} *cap_user_header_t;
+
+typedef struct __user_cap_data_struct {
+ __u32 effective;
+ __u32 permitted;
+ __u32 inheritable;
+} *cap_user_data_t;
+
#ifdef __KERNEL__
typedef struct kernel_cap_struct {
- int cap;
+ __u32 cap;
} kernel_cap_t;
+
+#define _USER_CAP_HEADER_SIZE (2*sizeof(__u32))
+#define _KERNEL_CAP_T_SIZE (sizeof(kernel_cap_t))
#endif
/* Override all DAC access, including ACL execute access if
[_POSIX_ACL] is defined. Excluding DAC access covered by
- CAP_LINUX_IMMUTABLE */
+ CAP_LINUX_IMMUTABLE. */
#define CAP_DAC_OVERRIDE 1
/* Overrides all DAC restrictions regarding read and search on files
and directories, including ACL restrictions if [_POSIX_ACL] is
- defined. Excluding DAC access covered by CAP_LINUX_IMMUTABLE */
+ defined. Excluding DAC access covered by CAP_LINUX_IMMUTABLE. */
#define CAP_DAC_READ_SEARCH 2
#define CAP_KILL 5
/* Allows setgid(2) manipulation */
+/* Allows setgroups(2) */
+/* Allows forged gids on socket credentials passing. */
#define CAP_SETGID 6
-/* Allows setuid(2) manipulation */
+/* Allows set*uid(2) manipulation (including fsuid). */
+/* Allows forged pids on socket credentials passing. */
#define CAP_SETUID 7
#define CAP_NET_BROADCAST 11
/* Allow interface configuration */
-/* Allow configuring of firewall stuff */
+/* Allow administration of IP firewall, masquerading and accounting */
/* Allow setting debug option on sockets */
/* Allow modification of routing tables */
+/* Allow setting arbitrary process / process group ownership on
+ sockets */
+/* Allow binding to any address for transparent proxying */
+/* Allow setting TOS (type of service) */
+/* Allow setting promiscuous mode */
+/* Allow clearing driver statistics */
+/* Allow multicasting */
+/* Allow read/write of device-specific registers */
#define CAP_NET_ADMIN 12
#define CAP_NET_RAW 13
-/* Allow locking of segments in memory */
+/* Allow locking of shared memory segments */
+/* Allow mlock and mlockall (which doesn't really have anything to do
+ with IPC) */
#define CAP_IPC_LOCK 14
/* Allow configuration of the secure attention key */
/* Allow administration of the random device */
-/* Allow device administration */
+/* Allow device administration (mknod)*/
/* Allow examination and configuration of disk quotas */
-/* System Admin functions: mount et al */
+/* Allow configuring the kernel's syslog (printk behaviour) */
+/* Allow sending a signal to any process */
+/* Allow setting the domainname */
+/* Allow setting the hostname */
+/* Allow calling bdflush() */
+/* Allow mount() and umount(), setting up new smb connection */
+/* Allow some autofs root ioctls */
+/* Allow nfsservctl */
+/* Allow VM86_REQUEST_IRQ */
+/* Allow to read/write pci config on alpha */
+/* Allow irix_prctl on mips (setstacksize) */
+/* Allow flushing all cache on m68k (sys_cacheflush) */
+/* Allow removing semaphores */
+/* Used instead of CAP_CHOWN to "chown" IPC message queues, semaphores
+ and shared memory */
+/* Allow locking/unlocking of shared memory segment */
+/* Allow turning swap on/off */
+/* Allow forged pids on socket credentials passing */
+/* Allow setting readahead and flushing buffers on block devices */
+/* Allow setting geometry in floppy driver */
+/* Allow turning DMA on/off in xd driver */
+/* Allow administration of md devices (mostly the above, but some
+ extra ioctls) */
+/* Allow tuning the ide driver */
+/* Allow access to the nvram device */
+/* Allow administration of apm_bios, serial and bttv (TV) device */
+/* Allow manufacturer commands in isdn CAPI support driver */
+/* Allow reading non-standardized portions of pci configuration space */
+/* Allow DDI debug ioctl on sbpcd driver */
+/* Allow setting up serial ports */
+/* Allow sending raw qic-117 commands */
+/* Allow enabling/disabling tagged queuing on SCSI controllers and sending
+ arbitrary SCSI commands */
+/* Allow setting encryption key on loopback filesystem */
#define CAP_SYS_ADMIN 21
#define CAP_SYS_BOOT 22
-/* Allow use of renice() on others, and raising of priority */
+/* Allow raising priority and setting priority on other (different
+ UID) processes */
+/* Allow use of FIFO and round-robin (realtime) scheduling on own
+ processes and setting the scheduling algorithm used by another
+ process. */
#define CAP_SYS_NICE 23
-/* Override resource limits */
+/* Override resource limits. Set resource limits. */
+/* Override quota limits. */
+/* Override reserved space on ext2 filesystem */
+/* NOTE: ext2 honors fsuid when checking for resource overrides, so
+ you can override using fsuid too */
+/* Override size restrictions on IPC message queues */
+/* Allow more than 64hz interrupts from the real-time clock */
+/* Override max number of consoles on console allocation */
+/* Override max number of keymaps */
#define CAP_SYS_RESOURCE 24
/* Allow manipulation of system clock */
+/* Allow irix_stime on mips */
+/* Allow setting the real-time clock */
#define CAP_SYS_TIME 25
/* Allow configuration of tty devices */
+/* Allow vhangup() of tty */
#define CAP_SYS_TTY_CONFIG 26
#define CAP_EMPTY_SET { 0 }
#define CAP_FULL_SET { ~0 }
+#define CAP_INIT_EFF_SET { ~0 & ~CAP_TO_MASK(CAP_SETPCAP) }
+#define CAP_INIT_INH_SET { ~0 & ~CAP_TO_MASK(CAP_SETPCAP) }
#define CAP_TO_MASK(x) (1 << (x))
-#define cap_raise(c, flag) (c.cap |= CAP_TO_MASK(flag))
-#define cap_lower(c, flag) (c.cap &= ~CAP_TO_MASK(flag))
-#define cap_raised(c, flag) (c.cap & CAP_TO_MASK(flag))
-
-#define cap_isclear(c) (!c.cap)
-
-#define cap_copy(dest,src) do { (dest).cap = (src).cap; } while(0)
-#define cap_clear(c) do { c.cap = 0; } while(0)
-#define cap_set_full(c) do { c.cap = ~0; } while(0)
+#define cap_raise(c, flag) ((c).cap |= CAP_TO_MASK(flag))
+#define cap_lower(c, flag) ((c).cap &= ~CAP_TO_MASK(flag))
+#define cap_raised(c, flag) ((c).cap & CAP_TO_MASK(flag))
+
+/* Return the union (bitwise OR) of capability sets a and b. */
+static inline kernel_cap_t cap_combine(kernel_cap_t a, kernel_cap_t b)
+{
+	kernel_cap_t dest;
+	dest.cap = a.cap | b.cap;
+	return dest;
+}
+
+/* Return the intersection (bitwise AND) of capability sets a and b. */
+static inline kernel_cap_t cap_intersect(kernel_cap_t a, kernel_cap_t b)
+{
+	kernel_cap_t dest;
+	dest.cap = a.cap & b.cap;
+	return dest;
+}
+
+/* Return a with every capability present in 'drop' removed (a & ~drop). */
+static inline kernel_cap_t cap_drop(kernel_cap_t a, kernel_cap_t drop)
+{
+	kernel_cap_t dest;
+	dest.cap = a.cap & ~drop.cap;
+	return dest;
+}
+
+/* Return the bitwise complement of capability set c. */
+static inline kernel_cap_t cap_invert(kernel_cap_t c)
+{
+	kernel_cap_t dest;
+	dest.cap = ~c.cap;
+	return dest;
+}
+
+#define cap_isclear(c) (!(c).cap)
+#define cap_issubset(a,set) (!((a).cap & ~(set).cap))
+
+#define cap_clear(c) do { (c).cap = 0; } while(0)
+#define cap_set_full(c) do { (c).cap = ~0; } while(0)
+#define cap_mask(c,mask) do { (c).cap &= (mask).cap; } while(0)
#define cap_is_fs_cap(c) ((c) & CAP_FS_MASK)
#define WIN_VERIFY 0x40
#define WIN_FORMAT 0x50
#define WIN_INIT 0x60
-#define WIN_SEEK 0x70
+#define WIN_SEEK 0x70
#define WIN_DIAGNOSE 0x90
#define WIN_SPECIFY 0x91 /* set drive geometry translation */
#define WIN_SETIDLE1 0xE3
#define HDIO_GETGEO 0x0301 /* get device geometry */
#define HDIO_GET_UNMASKINTR 0x0302 /* get current unmask setting */
#define HDIO_GET_MULTCOUNT 0x0304 /* get current IDE blockmode setting */
-#define HDIO_GET_IDENTITY 0x0307 /* get IDE identification info */
-#define HDIO_GET_KEEPSETTINGS 0x0308 /* get keep-settings-on-reset flag */
-#define HDIO_GET_32BIT 0x0309 /* get current io_32bit setting */
+#define HDIO_OBSOLETE_IDENTITY 0x0307 /* OBSOLETE, DO NOT USE: returns 142 bytes */
+#define HDIO_GET_KEEPSETTINGS 0x0308 /* get keep-settings-on-reset flag */
+#define HDIO_GET_32BIT 0x0309 /* get current io_32bit setting */
#define HDIO_GET_NOWERR 0x030a /* get ignore-write-error flag */
#define HDIO_GET_DMA 0x030b /* get use-dma flag */
#define HDIO_GET_NICE 0x030c /* get nice flags */
+#define HDIO_GET_IDENTITY 0x030d /* get IDE identification info */
#define HDIO_DRIVE_CMD 0x031f /* execute a special drive command */
/* hd/ide ctl's that pass (arg) non-ptr values are numbered 0x032n/0x033n */
#define LP_EXIST 0x0001
#define LP_SELEC 0x0002
#define LP_BUSY 0x0004
+#define LP_BUSY_BIT_POS 2
#define LP_OFFL 0x0008
#define LP_NOPA 0x0010
#define LP_ERR 0x0020
#define LP_ABORT 0x0040
+#ifdef LP_NEED_CAREFUL
#define LP_CAREFUL 0x0080
+#endif
#define LP_ABORTOPEN 0x0100
/* timeout for each character. This is relative to bus cycles -- it
or 0 for polling (no IRQ) */
#define LPGETIRQ 0x0606 /* get the current IRQ number */
#define LPWAIT 0x0608 /* corresponds to LP_INIT_WAIT */
+#ifdef LP_NEED_CAREFUL
#define LPCAREFUL 0x0609 /* call with TRUE arg to require out-of-paper, off-
line, and error indicators good on all writes,
FALSE to ignore them. Default is ignore. */
+#endif
#define LPABORTOPEN 0x060a /* call with TRUE arg to abort open() on error,
FALSE to ignore error. Default is ignore. */
#define LPGETSTATUS 0x060b /* return LP_S(minor) */
#define LPRESET 0x060c /* reset printer */
+#ifdef LP_STATS
#define LPGETSTATS 0x060d /* get statistics (struct lp_stats) */
+#endif
#define LPGETFLAGS 0x060e /* get status flags */
/* timeout for printk'ing a timeout, in jiffies (100ths of a second).
#define LP_WAIT(minor) lp_table[(minor)].wait /* strobe wait */
#define LP_IRQ(minor) lp_table[(minor)].dev->port->irq /* interrupt # */
/* 0 means polled */
+#ifdef LP_STATS
#define LP_STAT(minor) lp_table[(minor)].stats /* statistics area */
+#endif
#define LP_BUFFER_SIZE 256
#define LP_BASE(x) lp_table[(x)].dev->port->base
+#ifdef LP_STATS
struct lp_stats {
unsigned long chars;
unsigned long sleeps;
unsigned int meanwait;
unsigned int mdev;
};
+#endif
struct lp_struct {
struct pardevice *dev;
unsigned int time;
unsigned int wait;
char *lp_buffer;
+#ifdef LP_STATS
unsigned int lastcall;
unsigned int runchars;
- unsigned int waittime;
struct lp_stats stats;
+#endif
+ struct wait_queue *wait_q;
+ unsigned int last_error;
};
/*
*/
#define LP_DELAY 50
-#define LP_POLLING(minor) (lp_table[(minor)].dev->port->irq == PARPORT_IRQ_NONE)
+#define LP_POLLED(minor) (lp_table[(minor)].dev->port->irq == PARPORT_IRQ_NONE)
#define LP_PREEMPTED(minor) (lp_table[(minor)].dev->port->waithead != NULL)
/*
extern struct inode_operations proc_file_inode_operations;
extern struct inode_operations proc_net_inode_operations;
extern struct inode_operations proc_netdir_inode_operations;
-extern struct inode_operations proc_scsi_inode_operations;
extern struct inode_operations proc_openprom_inode_operations;
extern struct inode_operations proc_mem_inode_operations;
extern struct inode_operations proc_sys_inode_operations;
extern struct inode_operations proc_omirr_inode_operations;
extern struct inode_operations proc_ppc_htab_inode_operations;
-#endif
-
/*
* generic.c
*/
* proc_devtree.c
*/
extern void proc_device_tree_init(void);
+
+#endif /* _LINUX_PROC_FS_H */
/* process credentials */ \
/* uid etc */ 0,0,0,0,0,0,0,0, \
/* suppl grps*/ 0, {0,}, \
-/* caps */ CAP_FULL_SET, CAP_FULL_SET, CAP_FULL_SET, \
+/* caps */ CAP_INIT_EFF_SET,CAP_INIT_INH_SET,CAP_FULL_SET, \
/* rlimits */ INIT_RLIMITS, \
/* math */ 0, \
/* comm */ "swapper", \
case IPC_SET:
err = -EPERM;
if (current->euid != ipcp->cuid &&
- current->euid != ipcp->uid && !suser())
+ current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN))
+ /* We _could_ check for CAP_CHOWN above, but we don't */
goto out;
- if (tbuf.msg_qbytes > MSGMNB && !suser())
+ if (tbuf.msg_qbytes > MSGMNB && !capable(CAP_SYS_RESOURCE))
goto out;
msq->msg_qbytes = tbuf.msg_qbytes;
ipcp->uid = tbuf.msg_perm.uid;
case IPC_RMID:
err = -EPERM;
if (current->euid != ipcp->cuid &&
- current->euid != ipcp->uid && !suser())
+ current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN))
goto out;
freeque (id);
goto out;
break;
case IPC_RMID:
- if (current->euid == ipcp->cuid || current->euid == ipcp->uid || suser()) {
+ if (current->euid == ipcp->cuid ||
+ current->euid == ipcp->uid || capable(CAP_SYS_ADMIN)) {
freeary (id);
err = 0;
goto out;
update_queue(sma);
break;
case IPC_SET:
- if (current->euid == ipcp->cuid || current->euid == ipcp->uid || suser()) {
+ if (current->euid == ipcp->cuid ||
+ current->euid == ipcp->uid || capable(CAP_SYS_ADMIN)) {
ipcp->uid = tbuf.sem_perm.uid;
ipcp->gid = tbuf.sem_perm.gid;
ipcp->mode = (ipcp->mode & ~S_IRWXUGO)
switch (cmd) {
case SHM_UNLOCK:
err = -EPERM;
- if (!suser())
+ if (!capable(CAP_IPC_LOCK))
goto out;
err = -EINVAL;
if (!(ipcp->mode & SHM_LOCKED))
/* Should the pages be faulted in here or leave it to user? */
/* need to determine interaction with current->swappable */
err = -EPERM;
- if (!suser())
+ if (!capable(CAP_IPC_LOCK))
goto out;
err = -EINVAL;
if (ipcp->mode & SHM_LOCKED)
break;
case IPC_SET:
if (current->euid == shp->shm_perm.uid ||
- current->euid == shp->shm_perm.cuid || suser()) {
+ current->euid == shp->shm_perm.cuid ||
+ capable(CAP_SYS_ADMIN)) {
ipcp->uid = tbuf.shm_perm.uid;
ipcp->gid = tbuf.shm_perm.gid;
ipcp->mode = (ipcp->mode & ~S_IRWXUGO)
goto out;
case IPC_RMID:
if (current->euid == shp->shm_perm.uid ||
- current->euid == shp->shm_perm.cuid || suser()) {
+ current->euid == shp->shm_perm.cuid ||
+ capable(CAP_SYS_ADMIN)) {
shp->shm_perm.mode |= SHM_DEST;
if (shp->shm_nattch <= 0)
killseg (id);
else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
granted_mode >>= 3;
/* is there some bit set in requested_mode but not in granted_mode? */
- if ((requested_mode & ~granted_mode & 0007) && !suser())
+ if ((requested_mode & ~granted_mode & 0007) &&
+ !capable(CAP_IPC_OWNER))
return -1;
+
return 0;
}
O_TARGET := kernel.o
O_OBJS = sched.o dma.o fork.o exec_domain.o panic.o printk.o sys.o \
module.o exit.o itimer.o info.o time.o softirq.o resource.o \
- sysctl.o acct.o
+ sysctl.o acct.o capability.o
OX_OBJS += signal.o
int error = -EPERM;
lock_kernel();
- if (!suser())
+ if (!capable(CAP_SYS_PACCT))
goto out;
if (name == (char *)NULL) {
--- /dev/null
+/*
+ * linux/kernel/capability.c
+ *
+ * Copyright (C) 1997 Andrew Main <zefram@fysh.org>
+ * Integrated into 2.1.97+, Andrew G. Morgan <morgan@transmeta.com>
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/capability.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+
+#include <asm/uaccess.h>
+
+/* Copy one capability set from user space into *k.
+ * NOTE(review): the copy_from_user() return value (number of bytes NOT
+ * copied) is discarded here, so a faulting user pointer is silently
+ * ignored; callers must have validated the range with access_ok() first
+ * -- confirm every caller does. */
+static inline void cap_fromuser(kernel_cap_t *k, __u32 *u)
+{
+	copy_from_user(k, u, sizeof(*k));
+}
+
+
+/* Copy one capability set *k out to user space at u.
+ * NOTE(review): as with cap_fromuser(), the copy_to_user() return value
+ * is discarded; callers are expected to have done access_ok() checks. */
+static inline void cap_touser(__u32 *u, const kernel_cap_t *k)
+{
+	copy_to_user(u, k, sizeof(*k));
+}
+
+#ifdef __SMP__
+/* Serializes capability queries/updates across processes.  Must start in
+ * the unlocked state: the original declaration left it uninitialized,
+ * unlike every other spinlock in this patch (uidhash_lock, lastpid_lock,
+ * page_alloc_lock), which are all initialized with SPIN_LOCK_UNLOCKED. */
+static spinlock_t task_capability_lock = SPIN_LOCK_UNLOCKED;
+#endif
+
+/*
+ * For sys_capget() and sys_capset(), any of the three capability set
+ * pointers may be NULL -- indicating that that set is uninteresting
+ * and/or not to be changed.  (NOTE: the implementations below do not
+ * currently special-case NULL set pointers -- verify before relying
+ * on this.)
+ */
+
+/*
+ * sys_capget - read the capability sets of a process.
+ *
+ * header: interface version stamp plus the pid to query (0 == current).
+ * data:   destination for the effective/inheritable/permitted sets.
+ *
+ * On an unusable or mismatched header the kernel writes its own version
+ * number back (when the version field is writable) and returns -EINVAL,
+ * so user space can probe the supported interface version.
+ */
+asmlinkage int sys_capget(cap_user_header_t header, cap_user_data_t data)
+{
+	int error = -EINVAL, pid;
+	__u32 version;
+	struct task_struct *target;
+
+	if (!access_ok(VERIFY_WRITE, &header->version, sizeof(*header))) {
+		/* Header is not fully writable.  If at least the version
+		 * field is writable, still report our version (all_done);
+		 * otherwise fail outright.
+		 * BUGFIX: this inner test was missing the '!', returning
+		 * without reporting the kernel version precisely when the
+		 * field *was* writable; sys_capset already had the
+		 * correct sense. */
+		if (!access_ok(VERIFY_WRITE, &header->version,
+			       sizeof(header->version))) {
+			return error;
+		}
+		goto all_done;
+	}
+
+	copy_from_user(&version, &header->version, sizeof(header->version));
+	if (version != _LINUX_CAPABILITY_VERSION) {
+		/* if enough space for kernel version, write that */
+
+	all_done:
+		version = _LINUX_CAPABILITY_VERSION;
+		copy_to_user(&header->version, &version,
+			     sizeof(header->version));
+		return error;	/* -EINVAL: unsupported version */
+	}
+
+	if (!access_ok(VERIFY_WRITE, data, sizeof(*data))) {
+		return error;
+	}
+
+	copy_from_user(&pid, &header->pid, sizeof(header->pid));
+	if (pid < 0) {
+		return error;
+	}
+
+	spin_lock(&task_capability_lock);
+
+	if (pid && pid != current->pid) {
+		read_lock(&tasklist_lock);
+		target = find_task_by_pid(pid);	/* identify target of query */
+		if (!target) {
+			error = -ESRCH;
+			goto out;
+		}
+	} else {
+		target = current;
+	}
+
+	/* NOTE(review): these copy-outs can fault (and thus sleep) while
+	 * task_capability_lock and possibly tasklist_lock are held --
+	 * verify this is acceptable, or move the copies outside the
+	 * locked region. */
+	cap_touser(&data->permitted, &target->cap_permitted);
+	cap_touser(&data->inheritable, &target->cap_inheritable);
+	cap_touser(&data->effective, &target->cap_effective);
+
+	error = 0;
+
+out:
+	if (target != current) {
+		read_unlock(&tasklist_lock);
+	}
+	spin_unlock(&task_capability_lock);
+	return error;
+}
+
+/* Set the three capability sets of every process belonging to process
+ * group 'pgrp'.  No per-task permission check is done here: the caller
+ * (sys_capset) has already required CAP_SETPCAP for pid != 0. */
+
+static void cap_set_pg(int pgrp,
+		       kernel_cap_t *effective,
+		       kernel_cap_t *inheritable,
+		       kernel_cap_t *permitted)
+{
+	struct task_struct *target;
+
+	/* FIXME: do we need to have a write lock here..? */
+	read_lock(&tasklist_lock);
+	for_each_task(target) {
+		if (target->pgrp != pgrp)
+			continue;
+		target->cap_effective = *effective;
+		target->cap_inheritable = *inheritable;
+		target->cap_permitted = *permitted;
+	}
+	read_unlock(&tasklist_lock);
+}
+
+/* Set the three capability sets of every process except 'init' (pid 1)
+ * and the calling process itself. */
+
+static void cap_set_all(kernel_cap_t *effective,
+			kernel_cap_t *inheritable,
+			kernel_cap_t *permitted)
+{
+	struct task_struct *target;
+
+	/* FIXME: do we need to have a write lock here..? */
+	read_lock(&tasklist_lock);
+	/* ALL means everyone other than self or 'init' */
+	for_each_task(target) {
+		if (target == current || target->pid == 1)
+			continue;
+		target->cap_effective = *effective;
+		target->cap_inheritable = *inheritable;
+		target->cap_permitted = *permitted;
+	}
+	read_unlock(&tasklist_lock);
+}
+
+/*
+ * The restrictions on setting capabilities are specified as:
+ *
+ * [pid is for the 'target' task.  'current' is the calling task.]
+ *
+ * I: any raised capabilities must be a subset of the (old current) permitted
+ * P: any raised capabilities must be a subset of the (old current) permitted
+ * E: must be set to a subset of (new target) Permitted
+ */
+
+asmlinkage int sys_capset(cap_user_header_t header, const cap_user_data_t data)
+{
+	kernel_cap_t inheritable, permitted, effective;
+	__u32 version;
+	struct task_struct *target;
+	int error = -EINVAL, pid;
+
+	if (!access_ok(VERIFY_WRITE, &header->version, sizeof(*header))) {
+		/* not large enough for current header so indicate error */
+		if (!access_ok(VERIFY_WRITE, &header->version,
+			       sizeof(header->version))) {
+			/* cannot even report our version: plain error */
+			return error;
+		}
+		goto all_done;
+	}
+
+	copy_from_user(&version, &header->version, sizeof(header->version));
+	if (version != _LINUX_CAPABILITY_VERSION) {
+
+	all_done:
+		/* report the version we support, then fail with -EINVAL */
+		version = _LINUX_CAPABILITY_VERSION;
+		copy_to_user(&header->version, &version,
+			     sizeof(header->version));
+		return error;
+	}
+
+	if (!access_ok(VERIFY_READ, data, sizeof(*data))) {
+		return error;
+	}
+
+	/* pid 0 means current; any other target (including pid < 0 for
+	 * process groups / all processes) requires CAP_SETPCAP below */
+	copy_from_user(&pid, &header->pid, sizeof(pid));
+
+	error = -EPERM;
+	if (pid && !capable(CAP_SETPCAP))
+		return error;
+
+	spin_lock(&task_capability_lock);
+
+	if (pid > 0 && pid != current->pid) {
+		read_lock(&tasklist_lock);
+		target = find_task_by_pid(pid);	/* identify target of update */
+		if (!target) {
+			error = -ESRCH;
+			goto out;
+		}
+	} else {
+		/* pid == 0, pid == current->pid, or pid < 0 (group/all):
+		 * validate the new sets against current itself */
+		target = current;
+	}
+
+	/* copy from userspace
+	 * NOTE(review): these copies can fault (and thus sleep) while
+	 * task_capability_lock -- and possibly tasklist_lock -- are
+	 * held; verify this is acceptable, or copy before locking. */
+	cap_fromuser(&effective, &data->effective);
+	cap_fromuser(&inheritable, &data->inheritable);
+	cap_fromuser(&permitted, &data->permitted);
+
+	/* verify restrictions on target's new Inheritable set
+	 * (rule I above); failure returns -EPERM via 'out' */
+	if (!cap_issubset(inheritable,
+			  cap_combine(target->cap_inheritable,
+				      current->cap_permitted))) {
+		goto out;
+	}
+
+	/* verify restrictions on target's new Permitted set (rule P) */
+	if (!cap_issubset(permitted,
+			  cap_combine(target->cap_permitted,
+				      current->cap_permitted))) {
+		goto out;
+	}
+
+	/* verify the _new_Effective_ is a subset of the _new_Permitted_
+	 * (rule E) */
+	if (!cap_issubset(effective, permitted)) {
+		goto out;
+	}
+
+	/* having verified that the proposed changes are legal,
+	   we now put them into effect. */
+	error = 0;
+
+	if (pid < 0) {
+		if (pid == -1)	/* all procs other than current and init */
+			cap_set_all(&effective, &inheritable, &permitted);
+
+		else		/* all procs in process group */
+			cap_set_pg(-pid, &effective, &inheritable, &permitted);
+		/* tasklist_lock is not held on this path (target==current),
+		 * so skip the 'out' unlock logic */
+		goto spin_out;
+	} else {
+		/* FIXME: do we need to have a write lock here..? */
+		target->cap_effective = effective;
+		target->cap_inheritable = inheritable;
+		target->cap_permitted = permitted;
+	}
+
+out:
+	if (target != current) {
+		read_unlock(&tasklist_lock);
+	}
+spin_out:
+	spin_unlock(&task_capability_lock);
+	return error;
+}
int task_count;
} *uidhash[UIDHASH_SZ];
-#ifdef __SMP__
-static spinlock_t uidhash_lock = SPIN_LOCK_UNLOCKED;
-#endif
+spinlock_t uidhash_lock = SPIN_LOCK_UNLOCKED;
kmem_cache_t *uid_cachep;
return -EAGAIN;
}
-#ifdef __SMP__
/* Protects next_safe and last_pid. */
-static spinlock_t lastpid_lock = SPIN_LOCK_UNLOCKED;
-#endif
+spinlock_t lastpid_lock = SPIN_LOCK_UNLOCKED;
static int get_pid(unsigned long flags)
{
struct module *mod;
lock_kernel();
- if (!suser()) {
+ if (!capable(CAP_SYS_MODULE)) {
error = -EPERM;
goto err0;
}
struct module_ref *dep;
lock_kernel();
- if (!suser())
+ if (!capable(CAP_SYS_MODULE))
goto err0;
if ((namelen = get_mod_name(name_user, &name)) < 0) {
error = namelen;
int something_changed;
lock_kernel();
- if (!suser())
+ if (!capable(CAP_SYS_MODULE))
goto out;
if (name_user) {
int error = -EPERM;
lock_kernel();
- if ((type != 3) && !suser())
+ if ((type != 3) && !capable(CAP_SYS_ADMIN))
goto out;
error = 0;
switch (type) {
newprio = increment;
if (increment < 0) {
- if (!suser())
+ if (!capable(CAP_SYS_NICE))
return -EPERM;
newprio = -increment;
increase = 1;
goto out_unlock;
retval = -EPERM;
- if ((policy == SCHED_FIFO || policy == SCHED_RR) && !suser())
+ if ((policy == SCHED_FIFO || policy == SCHED_RR) &&
+ !capable(CAP_SYS_NICE))
goto out_unlock;
if ((current->euid != p->euid) && (current->euid != p->uid) &&
- !suser())
+ !capable(CAP_SYS_NICE))
goto out_unlock;
retval = 0;
&& ((sig != SIGCONT) || (current->session != t->session))
&& (current->euid ^ t->suid) && (current->euid ^ t->uid)
&& (current->uid ^ t->suid) && (current->uid ^ t->uid)
- && !suser())
+ && !capable(CAP_SYS_ADMIN))
goto out_nolock;
/* The null signal is a permissions and process existance probe.
if (!proc_sel(p, which, who))
continue;
if (p->uid != current->euid &&
- p->uid != current->uid && !suser()) {
+ p->uid != current->uid && !capable(CAP_SYS_NICE)) {
error = EPERM;
continue;
}
if (error == ESRCH)
error = 0;
- if (priority > p->priority && !suser())
+ if (priority > p->priority && !capable(CAP_SYS_NICE))
error = EACCES;
else
p->priority = priority;
char buffer[256];
/* We only trust the superuser with rebooting the system. */
- if (!suser())
+ if (!capable(CAP_SYS_BOOT))
return -EPERM;
/* For safety, we require "magic" arguments. */
if (rgid != (gid_t) -1) {
if ((old_rgid == rgid) ||
(current->egid==rgid) ||
- suser())
+ capable(CAP_SETGID))
current->gid = rgid;
else
return -EPERM;
if ((old_rgid == egid) ||
(current->egid == egid) ||
(current->sgid == egid) ||
- suser())
+ capable(CAP_SETGID))
current->fsgid = current->egid = egid;
else {
current->gid = old_rgid;
{
int old_egid = current->egid;
- if (suser())
+ if (capable(CAP_SETGID))
current->gid = current->egid = current->sgid = current->fsgid = gid;
else if ((gid == current->gid) || (gid == current->sgid))
current->egid = current->fsgid = gid;
return 0;
}
+/*
+ * cap_emulate_setxuid() fixes the effective / permitted capabilities of
+ * a process after a call to setuid, setreuid, or setresuid.
+ *
+ * 1) When set*uiding _from_ one of {r,e,s}uid == 0 _to_ all of
+ *    {r,e,s}uid != 0, the permitted and effective capabilities are
+ *    cleared.
+ *
+ * 2) When set*uiding _from_ euid == 0 _to_ euid != 0, the effective
+ *    capabilities of the process are cleared.
+ *
+ * 3) When set*uiding _from_ euid != 0 _to_ euid == 0, the effective
+ *    capabilities are set to the permitted capabilities.
+ *
+ * fsuid is handled elsewhere. fsuid == 0 and {r,e,s}uid!= 0 should
+ * never happen.
+ *
+ * -astor
+ */
+extern inline void cap_emulate_setxuid(int old_ruid, int old_euid,
+				       int old_suid)
+{
+	/* rule 1: dropped the last root id -> lose all capabilities */
+	if ((old_ruid == 0 || old_euid == 0 || old_suid == 0) &&
+	    (current->uid != 0 && current->euid != 0 && current->suid != 0)) {
+		cap_clear(current->cap_permitted);
+		cap_clear(current->cap_effective);
+	}
+	/* rule 2: euid left 0 -> effective set cleared */
+	if (old_euid == 0 && current->euid != 0) {
+		cap_clear(current->cap_effective);
+	}
+	/* rule 3: euid became 0 -> effective raised to permitted */
+	if (old_euid != 0 && current->euid == 0) {
+		current->cap_effective = current->cap_permitted;
+	}
+}
+
/*
* Unprivileged users may change the real uid to the effective uid
* or vice versa. (BSD-style)
*/
asmlinkage int sys_setreuid(uid_t ruid, uid_t euid)
{
- int old_ruid, old_euid, new_ruid;
+ int old_ruid, old_euid, old_suid, new_ruid;
new_ruid = old_ruid = current->uid;
old_euid = current->euid;
+ old_suid = current->suid;
if (ruid != (uid_t) -1) {
if ((old_ruid == ruid) ||
(current->euid==ruid) ||
- suser())
+ capable(CAP_SETUID))
new_ruid = ruid;
else
return -EPERM;
if ((old_ruid == euid) ||
(current->euid == euid) ||
(current->suid == euid) ||
- suser())
+ capable(CAP_SETUID))
current->fsuid = current->euid = euid;
else
return -EPERM;
if(new_ruid)
charge_uid(current, 1);
}
+
+ if (!issecure(SECURE_NO_SETUID_FIXUP)) {
+ cap_emulate_setxuid(old_ruid, old_euid, old_suid);
+ }
+
return 0;
}
+
+
/*
* setuid() is implemented like SysV w/ SAVED_IDS
*
asmlinkage int sys_setuid(uid_t uid)
{
int old_euid = current->euid;
- int old_ruid, new_ruid;
+ int old_ruid, old_suid, new_ruid;
old_ruid = new_ruid = current->uid;
- if (suser())
+ old_suid = current->suid;
+ if (capable(CAP_SETUID))
new_ruid = current->euid = current->suid = current->fsuid = uid;
else if ((uid == current->uid) || (uid == current->suid))
current->fsuid = current->euid = uid;
if(new_ruid)
charge_uid(current, 1);
}
+
+ if (!issecure(SECURE_NO_SETUID_FIXUP)) {
+ cap_emulate_setxuid(old_ruid, old_euid, old_suid);
+ }
+
return 0;
}
*/
asmlinkage int sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
{
+ int old_ruid = current->uid;
+ int old_euid = current->euid;
+ int old_suid = current->suid;
if (current->uid != 0 && current->euid != 0 && current->suid != 0) {
if ((ruid != (uid_t) -1) && (ruid != current->uid) &&
(ruid != current->euid) && (ruid != current->suid))
}
if (suid != (uid_t) -1)
current->suid = suid;
+
+ if (!issecure(SECURE_NO_SETUID_FIXUP)) {
+ cap_emulate_setxuid(old_ruid, old_euid, old_suid);
+ }
+
return 0;
}
old_fsuid = current->fsuid;
if (uid == current->uid || uid == current->euid ||
- uid == current->suid || uid == current->fsuid || suser())
+ uid == current->suid || uid == current->fsuid ||
+ capable(CAP_SETUID))
current->fsuid = uid;
if (current->fsuid != old_fsuid)
current->dumpable = 0;
+ /* We emulate fsuid by essentially doing a scaled-down version
+ * of what we did in setresuid and friends. However, we only
+ * operate on the fs-specific bits of the process' effective
+ * capabilities
+ *
+ * FIXME - is fsuser used for all CAP_FS_MASK capabilities?
+ * if not, we might be a bit too harsh here.
+ */
+
+ if (!issecure(SECURE_NO_SETUID_FIXUP)) {
+ if (old_fsuid == 0 && current->fsuid != 0) {
+ current->cap_effective.cap &= ~CAP_FS_MASK;
+ }
+ if (old_fsuid != 0 && current->fsuid == 0) {
+ current->cap_effective.cap |=
+ (current->cap_permitted.cap & CAP_FS_MASK);
+ }
+ }
+
return old_fsuid;
}
old_fsgid = current->fsgid;
if (gid == current->gid || gid == current->egid ||
- gid == current->sgid || gid == current->fsgid || suser())
+ gid == current->sgid || gid == current->fsgid ||
+ capable(CAP_SETGID))
current->fsgid = gid;
if (current->fsgid != old_fsgid)
current->dumpable = 0;
asmlinkage int sys_setgroups(int gidsetsize, gid_t *grouplist)
{
- if (!suser())
+ if (!capable(CAP_SETGID))
return -EPERM;
if ((unsigned) gidsetsize > NGROUPS)
return -EINVAL;
asmlinkage int sys_sethostname(char *name, int len)
{
- if (!suser())
+ if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (len < 0 || len > __NEW_UTS_LEN)
return -EINVAL;
*/
asmlinkage int sys_setdomainname(char *name, int len)
{
- if (!suser())
+ if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (len < 0 || len > __NEW_UTS_LEN)
return -EINVAL;
old_rlim = current->rlim + resource;
if (((new_rlim.rlim_cur > old_rlim->rlim_max) ||
(new_rlim.rlim_max > old_rlim->rlim_max)) &&
- !suser())
+ !capable(CAP_SYS_RESOURCE))
return -EPERM;
if (resource == RLIMIT_NOFILE) {
if (new_rlim.rlim_cur > NR_OPEN || new_rlim.rlim_max > NR_OPEN)
}
return error;
}
+
{
int value;
- if (!suser())
+ if (!capable(CAP_SYS_TIME))
return -EPERM;
if (get_user(value, tptr))
return -EFAULT;
{
static int firsttime = 1;
- if (!suser())
+ if (!capable(CAP_SYS_TIME))
return -EPERM;
if (tz) {
long ltemp, mtemp, save_adjust;
/* In order to modify anything, you gotta be super-user! */
- if (txc->modes && !suser())
+ if (txc->modes && !capable(CAP_SYS_TIME))
return -EPERM;
/* Now we validate the data before disabling interrupts */
struct vm_area_struct * vma, * next;
int error;
- if (!suser())
+ if (!capable(CAP_IPC_LOCK))
return -EPERM;
len = (len + ~PAGE_MASK) & PAGE_MASK;
end = start + len;
unsigned int def_flags;
struct vm_area_struct * vma;
- if (!suser())
+ if (!capable(CAP_IPC_LOCK))
return -EPERM;
def_flags = 0;
*
* Hint: -mask = 1+~mask
*/
-#ifdef __SMP__
-static spinlock_t page_alloc_lock;
-#endif
+static spinlock_t page_alloc_lock = SPIN_LOCK_UNLOCKED;
/*
* This routine is used by the kernel swap deamon to determine
int err = -EPERM;
lock_kernel();
- if (!suser())
+ if (!capable(CAP_SYS_ADMIN))
goto out;
dentry = namei(specialfile);
static int least_priority = 0;
lock_kernel();
- if (!suser())
+ if (!capable(CAP_SYS_ADMIN))
goto out;
memset(&filp, 0, sizeof(filp));
p = swap_info;
switch(cmd)
{
case SIOCSIFADDR:
- if(!suser())
+ if(!capable(CAP_NET_ADMIN))
return (-EPERM);
if(sa->sat_family != AF_APPLETALK)
return (-EINVAL);
case SIOCATALKDIFADDR:
case SIOCDIFADDR:
- if(!suser())
+ if(!capable(CAP_NET_ADMIN))
return (-EPERM);
if(sa->sat_family != AF_APPLETALK)
return (-EINVAL);
*/
case SIOCADDRT:
case SIOCDELRT:
- if(!suser())
+ if(!capable(CAP_NET_ADMIN))
return -EPERM;
return (atrtr_ioctl(cmd,(void *)arg));
void dev_load(const char *name)
{
- if(!dev_get(name) && suser())
+ if(!dev_get(name) && capable(CAP_SYS_MODULE))
request_module(name);
}
case SIOCDELMULTI:
case SIOCSIFHWBROADCAST:
case SIOCSIFTXQLEN:
- if (!suser())
+ if (!capable(CAP_NET_ADMIN))
return -EPERM;
dev_load(ifr.ifr_name);
rtnl_lock();
static __inline__ int scm_check_creds(struct ucred *creds)
{
- /* N.B. The test for suser should follow the credential check */
- if (suser())
+ if ((creds->pid == current->pid || capable(CAP_SYS_ADMIN)) &&
+ ((creds->uid == current->uid || creds->uid == current->euid ||
+ creds->uid == current->suid) || capable(CAP_SETUID)) &&
+ ((creds->gid == current->gid || creds->gid == current->egid ||
+ creds->gid == current->sgid) || capable(CAP_SETGID))) {
return 0;
- if (creds->pid != current->pid ||
- (creds->uid != current->uid && creds->uid != current->euid &&
- creds->uid != current->suid) ||
- (creds->gid != current->gid && creds->gid != current->egid &&
- creds->gid != current->sgid))
- return -EPERM;
- return 0;
+ }
+ return -EPERM;
}
-
static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
{
int *fdp = (int*)CMSG_DATA(cmsg);
switch(optname)
{
case SO_DEBUG:
- if(val && !suser())
+ if(val && !capable(CAP_NET_ADMIN))
{
ret = -EACCES;
}
*/
if (current->pgrp != -arg &&
current->pid != arg &&
- !suser()) return(-EPERM);
+ !capable(CAP_NET_ADMIN)) return(-EPERM);
sk->proc = arg;
return(0);
case F_GETOWN:
sock->ops = &inet_dgram_ops;
break;
case SOCK_RAW:
- if (!suser())
+ if (!capable(CAP_NET_RAW))
goto free_and_badperm;
if (!protocol)
goto free_and_noproto;
#endif
if (snum == 0)
snum = sk->prot->good_socknum();
- if (snum < PROT_SOCK && !suser())
+ if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
return(-EACCES);
chk_addr_ret = inet_addr_type(addr->sin_addr.s_addr);
chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST) {
#ifdef CONFIG_IP_TRANSPARENT_PROXY
/* Superuser may bind to any address to allow transparent proxying. */
- if(chk_addr_ret != RTN_UNICAST || !suser())
+ if(chk_addr_ret != RTN_UNICAST || !capable(CAP_NET_ADMIN))
#endif
return -EADDRNOTAVAIL; /* Source address MUST be ours! */
}
err = get_user(pid, (int *) arg);
if (err)
return err;
- if (current->pid != pid && current->pgrp != -pid && !suser())
+ if (current->pid != pid && current->pgrp != -pid &&
+ !capable(CAP_NET_ADMIN))
return -EPERM;
sk->proc = pid;
return(0);
break;
case SIOCSIFFLAGS:
- if (!suser())
+ if (!capable(CAP_NET_ADMIN))
return -EACCES;
rtnl_lock();
exclusive = 1;
case SIOCSIFBRDADDR: /* Set the broadcast address */
case SIOCSIFDSTADDR: /* Set the destination address */
case SIOCSIFNETMASK: /* Set the netmask for the interface */
- if (!suser())
+ if (!capable(CAP_NET_ADMIN))
return -EACCES;
if (sin->sin_family != AF_INET)
return -EINVAL;
switch (cmd) {
case SIOCADDRT: /* Add a route */
case SIOCDELRT: /* Delete a route */
- if (!suser())
+ if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (copy_from_user(&r, arg, sizeof(struct rtentry)))
return -EFAULT;
/* Reject setting of unused bits */
if (val & ~(IPTOS_TOS_MASK|IPTOS_PREC_MASK))
return -EINVAL;
- if (IPTOS_PREC(val) >= IPTOS_PREC_CRITIC_ECP && !suser())
+ if (IPTOS_PREC(val) >= IPTOS_PREC_CRITIC_ECP &&
+ !capable(CAP_NET_ADMIN))
return -EPERM;
if (sk->ip_tos != val) {
sk->ip_tos=val;
case IP_FW_POLICY_OUT:
case IP_FW_POLICY_FWD:
case IP_FW_MASQ_TIMEOUTS:
- if(!suser())
+ if(!capable(CAP_NET_ADMIN))
return -EACCES;
if(optlen>sizeof(tmp_fw) || optlen<1)
return -EINVAL;
case IP_FW_MASQ_ADD:
case IP_FW_MASQ_DEL:
case IP_FW_MASQ_FLUSH:
- if(!suser())
+ if(!capable(CAP_NET_ADMIN))
return -EPERM;
if(optlen>sizeof(masq_ctl) || optlen<1)
return -EINVAL;
case IP_ACCT_DELETE:
case IP_ACCT_FLUSH:
case IP_ACCT_ZERO:
- if(!suser())
+ if(!capable(CAP_NET_ADMIN))
return -EACCES;
if(optlen>sizeof(tmp_fw) || optlen<1)
return -EINVAL;
chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST) {
#ifdef CONFIG_IP_TRANSPARENT_PROXY
/* Superuser may bind to any address to allow transparent proxying. */
- if(chk_addr_ret != RTN_UNICAST || !suser())
+ if(chk_addr_ret != RTN_UNICAST || !capable(CAP_NET_ADMIN))
#endif
return -EADDRNOTAVAIL;
}
#ifdef CONFIG_IP_TRANSPARENT_PROXY
if (msg->msg_flags&~(MSG_DONTROUTE|MSG_DONTWAIT|MSG_PROXY|MSG_NOSIGNAL))
return -EINVAL;
- if ((msg->msg_flags&MSG_PROXY) && !suser() )
+ if ((msg->msg_flags&MSG_PROXY) && !capable(CAP_NET_ADMIN))
return -EPERM;
#else
if (msg->msg_flags&~(MSG_DONTROUTE|MSG_DONTWAIT|MSG_NOSIGNAL))
struct in6_ifreq ireq;
int err;
- if (!suser())
+ if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
struct in6_ifreq ireq;
int err;
- if (!suser())
+ if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
prot=&udpv6_prot;
sock->ops = &inet6_dgram_ops;
} else if(sock->type == SOCK_RAW) {
- if (!suser())
+ if (!capable(CAP_NET_RAW))
goto free_and_badperm;
if (!protocol)
goto free_and_noproto;
snum = ntohs(addr->sin6_port);
if (snum == 0)
snum = sk->prot->good_socknum();
- if (snum < PROT_SOCK && !suser())
+ if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
return(-EACCES);
addr_type = ipv6_addr_type(&addr->sin6_addr);
return err;
/* see sock_no_fcntl */
- if (current->pid != pid && current->pgrp != -pid && !suser())
+ if (current->pid != pid && current->pgrp != -pid &&
+ !capable(CAP_NET_ADMIN))
return -EPERM;
sk->proc = pid;
return(0);
switch(cmd) {
case SIOCADDRT: /* Add a route */
case SIOCDELRT: /* Delete a route */
- if (!suser())
+ if (!capable(CAP_NET_ADMIN))
return -EPERM;
err = copy_from_user(&rtmsg, arg,
sizeof(struct in6_rtmsg));
struct sock *sk;
int err;
- if (!suser())
+ if (!capable(CAP_NET_RAW))
return -EPERM;
if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW
#ifdef CONFIG_SOCK_PACKET
err = get_user(pid, (int *) arg);
if (err)
return err;
- if (current->pid != pid && current->pgrp != -pid && !suser())
+ if (current->pid != pid && current->pgrp != -pid &&
+ !capable(CAP_NET_ADMIN))
return -EPERM;
sk->proc = pid;
return(0);
case SIOCADDRT:
case SIOCDELRT:
case SIOCRSCLRRT:
- if (!suser()) return -EPERM;
+ if (!capable(CAP_NET_ADMIN)) return -EPERM;
return rose_rt_ioctl(cmd, (void *)arg);
case SIOCRSGCAUSE: {
struct proc_dir_entry* dent;
wan_device_t* wandev;
- if (!suser())
+ if (!capable(CAP_NET_ADMIN))
return -EPERM;
if ((cmd >> 8) != ROUTER_IOCTL)
case SIOCADDRT:
case SIOCDELRT:
- if (!suser()) return -EPERM;
+ if (!capable(CAP_NET_ADMIN)) return -EPERM;
return x25_route_ioctl(cmd, (void *)arg);
case SIOCX25GSUBSCRIP: