From 6cc120a8e71a8d124bf6411fc6e730a884b82701 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Fri, 23 Nov 2007 15:30:52 -0500 Subject: [PATCH] Import 2.3.43pre7 --- CREDITS | 21 +- Documentation/Configure.help | 53 +- Documentation/i2c/i2c-protocol | 22 + Documentation/sound/CMI8330 | 80 + MAINTAINERS | 2 +- arch/alpha/kernel/alpha_ksyms.c | 3 - arch/alpha/kernel/entry.S | 24 +- arch/alpha/kernel/irq.c | 39 +- arch/alpha/kernel/smp.c | 2 +- arch/alpha/mm/init.c | 6 +- arch/arm/mm/init.c | 5 - arch/i386/defconfig | 41 +- arch/i386/kernel/entry.S | 36 +- arch/i386/kernel/i386_ksyms.c | 10 +- arch/i386/kernel/irq.c | 48 +- arch/i386/kernel/pm.c | 20 +- arch/i386/kernel/traps.c | 2 +- arch/i386/mm/init.c | 5 - arch/ia64/kernel/Makefile | 2 +- arch/ia64/kernel/irq.c | 4 + arch/ia64/kernel/pci-dma.c | 6 +- arch/ia64/kernel/smp.c | 7 +- arch/ia64/lib/strlen.S | 2 +- arch/ia64/mm/init.c | 4 - arch/m68k/mm/init.c | 5 - arch/mips/mm/init.c | 5 - arch/ppc/Makefile | 37 +- arch/ppc/amiga/amiints.c | 3 +- arch/ppc/amiga/cia.c | 4 +- arch/ppc/boot/Makefile | 10 +- arch/ppc/chrpboot/Makefile | 5 +- arch/ppc/chrpboot/main.c | 13 +- arch/ppc/coffboot/Makefile | 24 +- arch/ppc/coffboot/chrpmain.c | 13 +- arch/ppc/coffboot/dummy.c | 4 + arch/ppc/coffboot/main.c | 4 +- arch/ppc/config.in | 6 +- arch/ppc/configs/common_defconfig | 74 +- arch/ppc/configs/gemini_defconfig | 9 +- arch/ppc/configs/oak_defconfig | 7 +- arch/ppc/configs/walnut_defconfig | 11 +- arch/ppc/defconfig | 74 +- arch/ppc/kernel/Makefile | 17 +- arch/ppc/kernel/apus_setup.c | 216 +- arch/ppc/kernel/chrp_pci.c | 15 +- arch/ppc/kernel/chrp_setup.c | 8 +- arch/ppc/kernel/chrp_time.c | 7 +- arch/ppc/kernel/entry.S | 27 +- arch/ppc/kernel/feature.c | 281 ++- arch/ppc/kernel/galaxy_pci.c | 612 +++++ arch/ppc/kernel/gemini_setup.c | 21 +- arch/ppc/kernel/head.S | 81 +- arch/ppc/kernel/head_4xx.S | 32 +- arch/ppc/kernel/idle.c | 17 +- arch/ppc/kernel/irq.c | 4 +- arch/ppc/kernel/local_irq.h | 2 + arch/ppc/kernel/misc.S | 
30 +- arch/ppc/kernel/oak_setup.c | 57 +- arch/ppc/kernel/oak_setup.h | 6 +- arch/ppc/kernel/open_pic.c | 151 +- arch/ppc/kernel/pci-dma.c | 52 + arch/ppc/kernel/pci.c | 2 +- arch/ppc/kernel/pmac_nvram.c | 33 +- arch/ppc/kernel/pmac_pci.c | 253 ++- arch/ppc/kernel/pmac_pic.c | 226 +- arch/ppc/kernel/pmac_setup.c | 85 +- arch/ppc/kernel/pmac_time.c | 4 +- arch/ppc/kernel/ppc-stub.c | 2 +- arch/ppc/kernel/ppc_htab.c | 3 +- arch/ppc/kernel/ppc_ksyms.c | 2 + arch/ppc/kernel/prep_pci.c | 2 +- arch/ppc/kernel/process.c | 2 + arch/ppc/kernel/prom.c | 625 +++++- arch/ppc/kernel/semaphore.c | 41 + arch/ppc/kernel/setup.c | 54 +- arch/ppc/kernel/sleep.S | 5 + arch/ppc/kernel/syscalls.c | 13 +- arch/ppc/kernel/traps.c | 14 + arch/ppc/kernel/walnut_setup.c | 1475 ++++++++++++ arch/ppc/kernel/walnut_setup.h | 250 +++ arch/ppc/mm/4xx_tlb.c | 561 +++-- arch/ppc/mm/init.c | 86 +- arch/ppc/xmon/start.c | 154 +- arch/ppc/xmon/xmon.c | 28 + arch/sh/mm/init.c | 4 - arch/sparc/kernel/irq.c | 40 +- arch/sparc/kernel/rtrap.S | 16 +- arch/sparc/kernel/sparc_ksyms.c | 4 +- arch/sparc/kernel/time.c | 6 +- arch/sparc/mm/init.c | 7 +- arch/sparc64/kernel/irq.c | 29 +- arch/sparc64/kernel/rtrap.S | 20 +- arch/sparc64/kernel/sparc64_ksyms.c | 9 +- arch/sparc64/mm/init.c | 6 +- arch/sparc64/solaris/socksys.c | 2 +- drivers/block/Config.in | 4 +- drivers/block/floppy.c | 117 +- drivers/block/hpt366.c | 6 +- drivers/block/ide-floppy.c | 7 + drivers/block/ide-pci.c | 6 +- drivers/block/ide-pmac.c | 622 +++++- drivers/block/ide-probe.c | 10 +- drivers/block/ide-tape.c | 8 +- drivers/block/ll_rw_blk.c | 17 +- drivers/block/loop.c | 4 +- drivers/block/rd.c | 9 +- drivers/char/bttv.c | 20 +- drivers/char/efirtc.c | 363 +++ drivers/char/keyboard.c | 9 +- drivers/char/mem.c | 2 +- drivers/char/pc_keyb.c | 3 +- drivers/char/saa5249.c | 2 +- drivers/char/vt.c | 6 +- drivers/i2c/i2c-algo-bit.c | 117 +- drivers/i2c/i2c-algo-pcf.c | 420 ++-- drivers/i2c/i2c-core.c | 70 +- drivers/i2c/i2c-elv.c | 94 +- 
drivers/i2c/i2c-philips-par.c | 26 +- drivers/i2c/i2c-velleman.c | 27 +- drivers/macintosh/adb.c | 43 +- drivers/macintosh/mac_keyb.c | 102 +- drivers/macintosh/macserial.c | 208 +- drivers/macintosh/mediabay.c | 474 ++-- drivers/macintosh/via-pmu.c | 263 ++- drivers/net/3c501.c | 269 ++- drivers/net/3c527.c | 403 +++- drivers/net/3c59x.c | 74 +- drivers/net/8390.c | 62 +- drivers/net/Config.in | 1 + drivers/net/Makefile | 1 + drivers/net/acenic.c | 22 +- drivers/net/de4x5.c | 43 +- drivers/net/eepro100.c | 60 +- drivers/net/ethertap.c | 7 +- drivers/net/gmac.c | 614 +++++ drivers/net/gmac.h | 113 + drivers/net/loopback.c | 8 - drivers/net/myri_sbus.c | 41 +- drivers/net/ne2k-pci.c | 15 +- drivers/net/net_init.c | 4 +- drivers/net/oaknet.c | 332 ++- drivers/net/pcmcia/pcnet_cs.c | 35 +- drivers/net/ppp_generic.c | 17 +- drivers/net/rtl8139.c | 53 +- drivers/net/setup.c | 4 + drivers/net/shaper.c | 5 +- drivers/net/sk98lin/skge.c | 23 +- drivers/net/skeleton.c | 236 +- drivers/net/slip.c | 209 +- drivers/net/slip.h | 4 +- drivers/net/starfire.c | 60 +- drivers/net/sunbmac.c | 90 +- drivers/net/sunbmac.h | 4 +- drivers/net/sunhme.c | 114 +- drivers/net/sunhme.h | 4 +- drivers/net/sunlance.c | 119 +- drivers/net/sunqe.c | 89 +- drivers/net/sunqe.h | 3 +- drivers/net/tulip.c | 77 +- drivers/parport/ieee1284_ops.c | 68 +- drivers/parport/parport_pc.c | 24 +- drivers/parport/probe.c | 7 +- drivers/sbus/audio/audio.c | 2 +- drivers/sbus/char/envctrl.c | 2 +- drivers/sbus/char/flash.c | 2 +- drivers/sbus/char/pcikbd.c | 13 +- drivers/sbus/char/rtc.c | 2 +- drivers/sbus/char/su.c | 4 +- drivers/sbus/char/sunkbd.c | 30 +- drivers/sbus/char/sunkbd.h | 3 +- drivers/sbus/char/uctrl.c | 2 +- drivers/sbus/char/zs.c | 4 +- drivers/scsi/Config.in | 12 + drivers/scsi/advansys.c | 4 +- drivers/scsi/eata.c | 6 +- drivers/scsi/eata_dma.c | 27 +- drivers/scsi/hosts.c | 9 +- drivers/scsi/hosts.h | 31 +- drivers/scsi/ips.c | 1 - drivers/scsi/mesh.c | 4 +- drivers/scsi/pluto.c | 4 +- 
drivers/scsi/scsi.c | 147 +- drivers/scsi/scsi.h | 7 +- drivers/scsi/scsi_error.c | 27 +- drivers/scsi/scsi_ioctl.c | 2 +- drivers/scsi/scsi_lib.c | 24 +- drivers/scsi/scsi_scan.c | 113 +- drivers/scsi/scsi_syms.c | 5 +- drivers/scsi/sd.c | 27 +- drivers/scsi/sg.c | 1969 ++++++++++++----- drivers/scsi/sr.c | 22 +- drivers/scsi/u14-34f.c | 6 +- drivers/sound/Makefile | 2 +- drivers/sound/sb_card.c | 516 ++++- drivers/sound/trident.c | 6 +- .../sound/{via82cxxx.c => via82cxxx_audio.c} | 4 +- drivers/usb/usb-ohci.c | 6 + drivers/video/atyfb.c | 101 +- drivers/video/chipsfb.c | 13 +- drivers/video/controlfb.c | 28 +- drivers/video/offb.c | 38 +- fs/adfs/map.c | 20 +- fs/block_dev.c | 58 +- fs/buffer.c | 379 ++-- fs/coda/file.c | 1 + fs/coda/upcall.c | 9 +- fs/cramfs/inflate/zconf.h | 4 +- fs/hpfs/dir.c | 2 +- fs/hpfs/hpfs_fn.h | 2 +- fs/minix/fsync.c | 1 + fs/openpromfs/inode.c | 2 +- fs/partitions/mac.c | 54 +- fs/proc/proc_misc.c | 3 +- fs/proc/procfs_syms.c | 3 +- fs/qnx4/fsync.c | 1 + fs/super.c | 5 +- fs/sysv/fsync.c | 1 + fs/udf/fsync.c | 1 + include/asm-alpha/bitops.h | 16 +- include/asm-alpha/hardirq.h | 2 + include/asm-alpha/softirq.h | 125 +- include/asm-i386/hardirq.h | 2 + include/asm-i386/softirq.h | 126 +- include/asm-i386/spinlock.h | 1 + include/asm-ia64/atomic.h | 5 +- include/asm-ia64/bitops.h | 57 +- include/asm-ia64/fpswa.h | 8 +- include/asm-ia64/irq.h | 4 +- include/asm-ia64/signal.h | 30 +- include/asm-ia64/system.h | 4 +- include/asm-ppc/bitops.h | 6 +- include/asm-ppc/bootinfo.h | 8 + include/asm-ppc/feature.h | 23 +- include/asm-ppc/heathrow.h | 45 + include/asm-ppc/irq.h | 5 +- include/asm-ppc/machdep.h | 2 +- include/asm-ppc/mediabay.h | 1 + include/asm-ppc/mmu.h | 57 + include/asm-ppc/ohare.h | 17 +- include/asm-ppc/pci.h | 52 + include/asm-ppc/pgtable.h | 88 +- include/asm-ppc/processor.h | 5 + include/asm-ppc/prom.h | 5 +- include/asm-ppc/semaphore.h | 96 + include/asm-ppc/types.h | 4 + include/asm-ppc/vga.h | 38 +- 
include/asm-sparc/bitops.h | 12 +- include/asm-sparc/hardirq.h | 5 + include/asm-sparc/softirq.h | 155 +- include/asm-sparc64/bitops.h | 12 +- include/asm-sparc64/hardirq.h | 5 +- include/asm-sparc64/posix_types.h | 3 +- include/asm-sparc64/softirq.h | 113 +- include/linux/fs.h | 11 +- include/linux/i2c-id.h | 45 +- include/linux/i2c.h | 109 +- include/linux/interrupt.h | 235 +- include/linux/kbd_kern.h | 4 +- include/linux/miscdevice.h | 1 + include/linux/mm.h | 2 +- include/linux/mmzone.h | 2 +- include/linux/netdevice.h | 166 +- include/linux/openpic.h | 6 +- include/linux/pmu.h | 3 + include/linux/rtnetlink.h | 17 +- include/linux/spinlock.h | 3 + include/linux/sunrpc/svc.h | 1 + include/linux/sunrpc/svcsock.h | 1 + include/linux/sysctl.h | 3 +- include/linux/timer.h | 20 +- include/linux/udf_167.h | 3 + include/linux/vt_buffer.h | 2 +- include/net/neighbour.h | 1 + include/net/pkt_sched.h | 66 +- include/net/snmp.h | 2 +- include/net/sock.h | 10 +- include/net/tcp.h | 2 +- include/scsi/sg.h | 390 ++-- include/video/macmodes.h | 1 + init/main.c | 2 + kernel/exit.c | 4 +- kernel/itimer.c | 10 +- kernel/ksyms.c | 17 +- kernel/sched.c | 63 +- kernel/softirq.c | 291 ++- kernel/timer.c | 69 +- mm/page_alloc.c | 97 +- mm/slab.c | 11 +- mm/vmscan.c | 6 +- net/core/dev.c | 621 +++--- net/core/neighbour.c | 31 +- net/core/rtnetlink.c | 25 +- net/core/skbuff.c | 106 +- net/core/sysctl_net_core.c | 4 + net/decnet/dn_dev.c | 4 +- net/decnet/dn_nsp_in.c | 8 +- net/ipv4/icmp.c | 4 +- net/ipv4/igmp.c | 8 +- net/ipv4/ip_fragment.c | 4 +- net/ipv4/ip_output.c | 6 +- net/ipv4/route.c | 18 +- net/ipv4/tcp_timer.c | 13 +- net/khttpd/structure.h | 2 +- net/khttpd/userspace.c | 4 +- net/netsyms.c | 7 +- net/sched/sch_cbq.c | 7 +- net/sched/sch_generic.c | 216 +- net/sched/sch_tbf.c | 4 +- net/sched/sch_teql.c | 29 +- net/socket.c | 16 +- net/sunrpc/sched.c | 72 +- net/sunrpc/svc.c | 1 + net/sunrpc/svcsock.c | 36 +- net/sunrpc/xprt.c | 63 +- 314 files changed, 14352 insertions(+), 
6365 deletions(-) create mode 100644 arch/ppc/coffboot/dummy.c create mode 100644 arch/ppc/kernel/galaxy_pci.c create mode 100644 arch/ppc/kernel/pci-dma.c create mode 100644 arch/ppc/kernel/walnut_setup.c create mode 100644 arch/ppc/kernel/walnut_setup.h create mode 100644 drivers/char/efirtc.c create mode 100644 drivers/net/gmac.c create mode 100644 drivers/net/gmac.h rename drivers/sound/{via82cxxx.c => via82cxxx_audio.c} (99%) create mode 100644 include/asm-ppc/heathrow.h diff --git a/CREDITS b/CREDITS index 260adbbdd847..6aafceed18a9 100644 --- a/CREDITS +++ b/CREDITS @@ -542,11 +542,11 @@ S: 4850 Moresnet S: Belgium N: Cort Dougan -E: cort@ppc.kernel.org +E: cort@fsmlabs.com W: http://www.ppc.kernel.org/~cort/ D: PowerPC -S: Computer Science Department -S: New Mexico Tech +S: Finite State Machine Labs +S: P.O. 1829 S: Socorro, New Mexico 87801 S: USA @@ -932,6 +932,7 @@ E: bh40@calva.net E: benh@mipsys.com D: PowerMac booter (BootX) D: Additional PowerBook support +D: Apple "Core99" machines support (ibook,g4,...) S: 22, rue des Marguettes S: 75012 Paris S: France @@ -1981,11 +1982,10 @@ E: rubini@ipvvis.unipv.it D: the gpm mouse server and kernel support for it N: Philipp Rumpf -E: prumpf@jcsbs.lanobis.de -D: ipi_count for x86 +E: prumpf@tux.org D: random bugfixes -S: Rueting 4 -S: 23743 Groemitz +S: Drausnickstrasse 29 +S: 91052 Erlangen S: Germany N: Paul `Rusty' Russell @@ -2614,6 +2614,13 @@ S: 3078 Sulphur Spring Court S: San Jose, California 95148 S: USA +N: Alessandro Zummo +E: azummo@ita.flashnet.it +W: http://freepage.logicom.it/azummo/ +D: CMI8330 support is sb_card.c +D: ISAPnP fixes in sb_card.c +S: Italy + N: Marc Zyngier E: maz@wild-wind.fr.eu.org D: MD driver diff --git a/Documentation/Configure.help b/Documentation/Configure.help index 9e7540295b3d..765e248f8fd9 100644 --- a/Documentation/Configure.help +++ b/Documentation/Configure.help @@ -557,6 +557,13 @@ CONFIG_BLK_DEV_RZ1000 People with SCSI-only systems should say N here. 
If unsure, say Y. +Cyrix CS5530 MediaGX chipset support +CONFIG_BLK_DEV_CS5530 + Include support for UDMA on the Cyrix MediaGX 5530 chipset. This + will automatically be detected and configured if found. + + It is safe to say Y to this question. + Generic PCI IDE chipset support CONFIG_BLK_DEV_IDEPCI Say Y here for PCI systems which use IDE drive(s). @@ -583,7 +590,7 @@ CONFIG_BLK_DEV_IDEDMA_PCI It is safe to say Y to this question. Good-Bad DMA Model-Firmware (EXPERIMENTAL) -IDEDMA_NEW_DRIVE_LISTINGS +CONFIG_IDEDMA_NEW_DRIVE_LISTINGS If you say Y here, the model and firmware revision of your drive will be compared against a blacklist of buggy drives that claim to be (U)DMA capable but aren't. This is a blanket on/off test with no @@ -711,12 +718,12 @@ CONFIG_BLK_DEV_HPT366 Please read the comments at the top of drivers/block/hpt366.c HPT366 Fast Interrupt support (EXPERIMENTAL) (WIP) -HPT366_FAST_IRQ_PREDICTION +CONFIG_HPT366_FAST_IRQ_PREDICTION If unsure, say N. HPT366 mode three unsupported (EXPERIMENTAL) (WIP) -HPT366_MODE3 +CONFIG_HPT366_MODE3 This is an undocumented mode that the HA366 can default to in many cases. If unsure, say N. @@ -785,7 +792,7 @@ CONFIG_BLK_DEV_PDC202XX If unsure, say N. Special UDMA Feature -PDC202XX_FORCE_BURST_BIT +CONFIG_PDC202XX_FORCE_BURST_BIT For PDC20246 and PDC20262 Ultra DMA chipsets. Designed originally for PDC20246/Ultra33 that has BIOS setup failures when using 3 or more cards. @@ -795,7 +802,7 @@ PDC202XX_FORCE_BURST_BIT If unsure, say N. Special Mode Feature (EXPERIMENTAL) -PDC202XX_FORCE_MASTER_MODE +CONFIG_PDC202XX_FORCE_MASTER_MODE For PDC20246 and PDC20262 Ultra DMA chipsets. This is reserved for possible Hardware RAID 0,1 for the FastTrak Series. @@ -4121,6 +4128,18 @@ CONFIG_BLK_DEV_SD on a SCSI disk. In this case, do not compile the driver for your SCSI host adapter (below) as a module either. 
+Extra SCSI Disks +CONFIG_SD_EXTRA_DEVS + This controls the amount of additional space allocated in tables for + drivers that are loaded as modules after the kernel is booted. In + the event that the SCSI core itself was loaded as a module, this this + value is the number of additional disks that can be loaded after the + first host driver is loaded. + + Admittedly this isn't pretty, but there are tons of race conditions + involved with resizing the internal arrays on the fly. Someday this + flag will go away, and everything will work automatically. + SCSI tape support CONFIG_CHR_DEV_ST If you want to use a SCSI tape drive under Linux, say Y and read the @@ -4135,6 +4154,18 @@ CONFIG_CHR_DEV_ST module, say M here and read Documentation/modules.txt and Documentation/scsi.txt . +Extra SCSI Tapes +CONFIG_ST_EXTRA_DEVS + This controls the amount of additional space allocated in tables for + drivers that are loaded as modules after the kernel is booted. In the + event that the SCSI core itself was loaded as a module, this this value + is the number of additional tape devices that can be loaded after the + first host driver is loaded. + + Admittedly this isn't pretty, but there are tons of race conditions + involved with resizing the internal arrays on the fly. Someday this + flag will go away, and everything will work automatically. + SCSI CDROM support CONFIG_BLK_DEV_SR If you want to use a SCSI CDROM under Linux, say Y and read the @@ -4148,6 +4179,18 @@ CONFIG_BLK_DEV_SR module, say M here and read Documentation/modules.txt and Documentation/scsi.txt . +Extra SCSI CDROMs +CONFIG_SR_EXTRA_DEVS + This controls the amount of additional space allocated in tables for + drivers that are loaded as modules after the kernel is booted. In the + event that the SCSI core itself was loaded as a module, this this value + is the number of additional CDROMs that can be loaded after the first + host driver is loaded. 
+ + Admittedly this isn't pretty, but there are tons of race conditions + involved with resizing the internal arrays on the fly. Someday this + flag will go away, and everything will work automatically. + Enable vendor-specific extensions (for SCSI CDROM) CONFIG_BLK_DEV_SR_VENDOR This enables the usage of vendor specific SCSI commands. This is diff --git a/Documentation/i2c/i2c-protocol b/Documentation/i2c/i2c-protocol index 16ba77b7a20d..d8cfdc77b396 100644 --- a/Documentation/i2c/i2c-protocol +++ b/Documentation/i2c/i2c-protocol @@ -44,3 +44,25 @@ a start bit S is sent and the transaction continues. An example of a byte read, followed by a byte write: S Addr Rd [A] [Data] NA S Addr Wr [A] Data [A] P + + +Modified transactions +===================== + +We have found some I2C devices that needs the following modifications: + + Flag I2C_M_NOSTART: + In a combined transaction, no 'S Addr' is generated at some point. + For example, setting I2C_M_NOSTART on the second partial message + generateds something like: + S Addr Rd [A] [Data] NA Wr [A] Data [A] P + If you set the I2C_M_NOSTART variable for the first partial message, + we do not generate Addr, but we do generate the startbit S. This will + probably confuse all other clients on your bus, so don't try this. + + Flags I2C_M_REV_DIR_ADDR + This toggles the Rd/Wr flag. That is, if you want to do a write, but + need to emit an Rd instead of a Wr, or vice versa, you set this + flag. For example: + S Addr Rd [A] Data [A] Data [A] ... [A] Data [A] P + diff --git a/Documentation/sound/CMI8330 b/Documentation/sound/CMI8330 index a12bed1d2df9..0ca5af70fc90 100644 --- a/Documentation/sound/CMI8330 +++ b/Documentation/sound/CMI8330 @@ -1,3 +1,83 @@ +Documentation for CMI 8330 (SoundPRO) +------------------------------------- +Alessandro Zummo + +This adapter is now directly supported by the sb driver. 
+ + The only thing you have to do is to compile the kernel sound +support as a module and to enable kernel ISAPnP support, +as shown below. + + +CONFIG_SOUND=m +CONFIG_SOUND_SB=m + +CONFIG_PNP=y +CONFIG_ISAPNP=y + + +and optionally: + + +CONFIG_SOUND_MPU401=m + + for MPU401 support. + + +CONFIG_SOUND_YM3812=m + + for OPL3 support. Please note that there are better ways to play midi files, like + timidity or the softoss2 module. + + +CONFIG_JOYSTICK=y + + to activate the joystick port. + + +(I suggest you to use "make menuconfig" or "make xconfig" + for a more comfortable configuration editing) + + + +Then you can do + + modprobe sb + +and everything will be (hopefully) configured. + +You should get something similar in syslog: + +sb: CMI8330 detected. +sb: CMI8330 sb base located at 0x220 +sb: CMI8330 mpu base located at 0x330 +sb: CMI8330 gameport base located at 0x200 +sb: CMI8330 opl3 base located at 0x388 +sb: CMI8330 mail reports to Alessandro Zummo +sb: ISAPnP reports CMI 8330 SoundPRO at i/o 0x220, irq 7, dma 1,5 + + + + +To activate the OPL3 support, you need these lines in /etc/modules.conf +or in a file in /etc/modutils + +alias synth0 opl3 +options opl3 io=0x388 + +and then you can do: + + modprobe opl3 + + + + + + +The old documentation file follows for reference +purposes. 
+ + How to enable CMI 8330 (SOUNDPRO) soundchip on Linux ------------------------------------------ Stefan Laudat diff --git a/MAINTAINERS b/MAINTAINERS index 8a04fc92a4bd..be47cf0face3 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -561,7 +561,7 @@ S: Maintained LINUX FOR POWERPC P: Cort Dougan -M: cort@ppc.kernel.org +M: cort@fsmlabs.com W: http://www.ppc.kernel.org/ S: Maintained diff --git a/arch/alpha/kernel/alpha_ksyms.c b/arch/alpha/kernel/alpha_ksyms.c index cb658dbef321..15c7afd8c49b 100644 --- a/arch/alpha/kernel/alpha_ksyms.c +++ b/arch/alpha/kernel/alpha_ksyms.c @@ -165,9 +165,6 @@ EXPORT_SYMBOL(flush_tlb_page); EXPORT_SYMBOL(flush_tlb_range); EXPORT_SYMBOL(cpu_data); EXPORT_SYMBOL(__cpu_number_map); -EXPORT_SYMBOL(global_bh_lock); -EXPORT_SYMBOL(global_bh_count); -EXPORT_SYMBOL(synchronize_bh); EXPORT_SYMBOL(global_irq_holder); EXPORT_SYMBOL(__global_cli); EXPORT_SYMBOL(__global_sti); diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S index af1567fd1dd5..cd8ce5c670b6 100644 --- a/arch/alpha/kernel/entry.S +++ b/arch/alpha/kernel/entry.S @@ -32,6 +32,7 @@ #define TASK_ADDR_LIMIT 24 #define TASK_EXEC_DOMAIN 32 #define TASK_NEED_RESCHED 40 +#define TASK_PROCESSOR 100 /* * task flags (must match include/linux/sched.h): @@ -572,12 +573,15 @@ entSys: .align 3 ret_from_sys_call: cmovne $26,0,$19 /* $19 = 0 => non-restartable */ - /* check bottom half interrupts */ - ldq $3,bh_active - ldq $4,bh_mask - and $3,$4,$2 - bne $2,handle_bottom_half -ret_from_handle_bh: + ldq $3,TASK_PROCESSOR($8) + lda $4,softirq_state + sll $3,5,$3 + addq $3,$4,$4 + ldq $4,0($4) + sll $4,32,$3 + and $4,$3,$4 + bne $4,handle_softirq +ret_from_softirq: ldq $0,SP_OFF($30) and $0,8,$0 beq $0,restore_all @@ -656,16 +660,16 @@ strace_error: br ret_from_sys_call .align 3 -handle_bottom_half: +handle_softirq: subq $30,16,$30 stq $19,0($30) /* save syscall nr */ stq $20,8($30) /* and error indication (a3) */ - jsr $26,do_bottom_half + jsr $26,do_softirq ldq $19,0($30) ldq 
$20,8($30) addq $30,16,$30 - br ret_from_handle_bh - + br ret_from_softirq + .align 3 syscall_error: /* diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c index 4d3c2fdfbfd1..bae28a6a4bb4 100644 --- a/arch/alpha/kernel/irq.c +++ b/arch/alpha/kernel/irq.c @@ -377,10 +377,6 @@ spinlock_t global_irq_lock = SPIN_LOCK_UNLOCKED; /* Global IRQ locking depth. */ atomic_t global_irq_count = ATOMIC_INIT(0); -/* This protects BH software state (masks, things like that). */ -atomic_t global_bh_lock = ATOMIC_INIT(0); -atomic_t global_bh_count = ATOMIC_INIT(0); - static void *previous_irqholder = NULL; #define MAXCOUNT 100000000 @@ -401,7 +397,7 @@ wait_on_irq(int cpu, void *where) */ if (!atomic_read(&global_irq_count)) { if (local_bh_count(cpu) - || !atomic_read(&global_bh_count)) + || !spin_is_locked(&global_bh_lock)) break; } @@ -422,7 +418,7 @@ wait_on_irq(int cpu, void *where) if (spin_is_locked(&global_irq_lock)) continue; if (!local_bh_count(cpu) - && atomic_read(&global_bh_count)) + && spin_is_locked(&global_bh_lock)) continue; if (spin_trylock(&global_irq_lock)) break; @@ -552,7 +548,7 @@ show(char * str, void *where) cpu_data[1].irq_count); printk("bh: %d [%d %d]\n", - atomic_read(&global_bh_count), + spin_is_locked(&global_bh_lock) ? 1 : 0, cpu_data[0].bh_count, cpu_data[1].bh_count); #if 0 @@ -567,35 +563,6 @@ show(char * str, void *where) #endif } -static inline void -wait_on_bh(void) -{ - int count = MAXCOUNT; - do { - if (!--count) { - show("wait_on_bh", 0); - count = ~0; - } - /* nothing .. wait for the other bh's to go away */ - barrier(); - } while (atomic_read(&global_bh_count) != 0); -} - -/* - * This is called when we want to synchronize with - * bottom half handlers. We need to wait until - * no other CPU is executing any bottom half handler. - * - * Don't wait if we're already running in an interrupt - * context or are inside a bh handler. 
- */ -void -synchronize_bh(void) -{ - if (atomic_read(&global_bh_count) && !in_interrupt()) - wait_on_bh(); -} - /* * From its use, I infer that synchronize_irq() stalls a thread until * the effects of a command to an external device are known to have diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c index 1601dcb18865..30ed75ead9e7 100644 --- a/arch/alpha/kernel/smp.c +++ b/arch/alpha/kernel/smp.c @@ -722,7 +722,7 @@ again: return -EBUSY; while (*(void **)lock) - schedule(); + barrier(); goto again; } diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c index 97bb6df9d677..e2142b63c90f 100644 --- a/arch/alpha/mm/init.c +++ b/arch/alpha/mm/init.c @@ -34,7 +34,6 @@ static unsigned long totalram_pages; extern void die_if_kernel(char *,struct pt_regs *,long); -extern void show_net_buffers(void); struct thread_struct original_pcb; @@ -173,9 +172,6 @@ show_mem(void) printk("%ld pages swap cached\n",cached); printk("%ld pages in page table cache\n",pgtable_cache_size); show_buffers(); -#ifdef CONFIG_NET - show_net_buffers(); -#endif } static inline unsigned long @@ -195,7 +191,7 @@ paging_init(void) { unsigned long newptbr; unsigned long original_pcb_ptr; - unsigned int zones_size[MAX_NR_ZONES] = {0, 0, 0}; + unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0}; unsigned long dma_pfn, high_pfn; dma_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index bc4cf1ed1818..db3ac5a4d608 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -35,8 +35,6 @@ static unsigned long totalram_pages; pgd_t swapper_pg_dir[PTRS_PER_PGD]; -extern void show_net_buffers(void); - /* * empty_bad_page is the page that is used for page faults when * linux is out-of-memory. 
Older versions of linux just did a @@ -157,9 +155,6 @@ void show_mem(void) printk("%ld page tables cached\n", pgtable_cache_size); #endif show_buffers(); -#ifdef CONFIG_NET - show_net_buffers(); -#endif } /* diff --git a/arch/i386/defconfig b/arch/i386/defconfig index a8b2de3fb26a..4f000c9dd624 100644 --- a/arch/i386/defconfig +++ b/arch/i386/defconfig @@ -56,15 +56,7 @@ CONFIG_PCI_BIOS=y CONFIG_PCI_DIRECT=y CONFIG_PCI_NAMES=y # CONFIG_MCA is not set -CONFIG_HOTPLUG=y - -# -# PCMCIA/CardBus support -# -CONFIG_PCMCIA=y -CONFIG_CARDBUS=y -# CONFIG_I82365 is not set -CONFIG_TCIC=y +# CONFIG_HOTPLUG is not set CONFIG_SYSVIPC=y # CONFIG_BSD_PROCESS_ACCT is not set CONFIG_SYSCTL=y @@ -173,7 +165,9 @@ CONFIG_SCSI=y # SCSI support type (disk, tape, CD-ROM) # CONFIG_BLK_DEV_SD=y +CONFIG_SD_EXTRA_DEVS=40 # CONFIG_CHR_DEV_ST is not set +CONFIG_ST_EXTRA_DEVS=2 # CONFIG_BLK_DEV_SR is not set # CONFIG_CHR_DEV_SG is not set @@ -307,28 +301,6 @@ CONFIG_EEXPRESS_PRO100=y # # CONFIG_WAN is not set -# -# PCMCIA network device support -# -CONFIG_NET_PCMCIA=y -# CONFIG_PCMCIA_3C589 is not set -# CONFIG_PCMCIA_3C574 is not set -# CONFIG_PCMCIA_FMVJ18X is not set -CONFIG_PCMCIA_PCNET=y -# CONFIG_PCMCIA_NMCLAN is not set -# CONFIG_PCMCIA_SMC91C92 is not set -# CONFIG_PCMCIA_XIRC2PS is not set -# CONFIG_AIRONET4500_CS is not set -# CONFIG_ARCNET_COM20020_CS is not set -# CONFIG_PCMCIA_3C575 is not set -# CONFIG_PCMCIA_TULIP is not set -# CONFIG_PCMCIA_EPIC100 is not set -CONFIG_NET_PCMCIA_RADIO=y -CONFIG_PCMCIA_RAYCS=y -# CONFIG_PCMCIA_NETWAVE is not set -# CONFIG_PCMCIA_WAVELAN is not set -CONFIG_PCMCIA_NETCARD=y - # # Amateur Radio support # @@ -403,13 +375,6 @@ CONFIG_PSMOUSE=y CONFIG_DRM=y CONFIG_DRM_TDFX=y # CONFIG_DRM_GAMMA is not set -CONFIG_PCMCIA_SERIAL=y - -# -# PCMCIA character device support -# -# CONFIG_PCMCIA_SERIAL_CS is not set -# CONFIG_PCMCIA_SERIAL_CB is not set # # USB support diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S index 
f93765754d26..bcca244c1360 100644 --- a/arch/i386/kernel/entry.S +++ b/arch/i386/kernel/entry.S @@ -76,6 +76,7 @@ sigpending = 8 addr_limit = 12 exec_domain = 16 need_resched = 20 +processor = 56 ENOSYS = 38 @@ -203,9 +204,17 @@ ENTRY(system_call) .globl ret_from_sys_call .globl ret_from_intr ret_from_sys_call: - movl SYMBOL_NAME(bh_mask),%eax - andl SYMBOL_NAME(bh_active),%eax - jne handle_bottom_half +#ifdef __SMP__ + movl processor(%ebx),%eax + shll $5,%eax + movl SYMBOL_NAME(softirq_state)(,%eax),%ecx + testl SYMBOL_NAME(softirq_state)+4(,%eax),%ecx +#else + movl SYMBOL_NAME(softirq_state),%ecx + testl SYMBOL_NAME(softirq_state)+4,%ecx +#endif + jne handle_softirq + ret_with_reschedule: cmpl $0,need_resched(%ebx) jne reschedule @@ -250,9 +259,18 @@ badsys: ALIGN ret_from_exception: - movl SYMBOL_NAME(bh_mask),%eax - andl SYMBOL_NAME(bh_active),%eax - jne handle_bottom_half +#ifdef __SMP__ + GET_CURRENT(%ebx) + movl processor(%ebx),%eax + shll $5,%eax + movl SYMBOL_NAME(softirq_state)(,%eax),%ecx + testl SYMBOL_NAME(softirq_state)+4(,%eax),%ecx +#else + movl SYMBOL_NAME(softirq_state),%ecx + testl SYMBOL_NAME(softirq_state)+4,%ecx +#endif + jne handle_softirq + ALIGN ret_from_intr: GET_CURRENT(%ebx) @@ -263,10 +281,10 @@ ret_from_intr: jmp restore_all ALIGN -handle_bottom_half: - call SYMBOL_NAME(do_bottom_half) +handle_softirq: + call SYMBOL_NAME(do_softirq) jmp ret_from_intr - + ALIGN reschedule: call SYMBOL_NAME(schedule) # test diff --git a/arch/i386/kernel/i386_ksyms.c b/arch/i386/kernel/i386_ksyms.c index 42a06fcbb8ce..f58e7485f110 100644 --- a/arch/i386/kernel/i386_ksyms.c +++ b/arch/i386/kernel/i386_ksyms.c @@ -44,8 +44,6 @@ EXPORT_SYMBOL(dump_fpu); EXPORT_SYMBOL(__ioremap); EXPORT_SYMBOL(iounmap); EXPORT_SYMBOL(__io_virt_debug); -EXPORT_SYMBOL(local_bh_count); -EXPORT_SYMBOL(local_irq_count); EXPORT_SYMBOL(enable_irq); EXPORT_SYMBOL(disable_irq); EXPORT_SYMBOL(disable_irq_nosync); @@ -107,11 +105,7 @@ EXPORT_SYMBOL_NOVERS(__read_lock_failed); /* Global 
SMP irq stuff */ EXPORT_SYMBOL(synchronize_irq); -EXPORT_SYMBOL(synchronize_bh); -EXPORT_SYMBOL(global_bh_count); -EXPORT_SYMBOL(global_bh_lock); EXPORT_SYMBOL(global_irq_holder); -EXPORT_SYMBOL(i386_bh_lock); EXPORT_SYMBOL(__global_cli); EXPORT_SYMBOL(__global_sti); EXPORT_SYMBOL(__global_save_flags); @@ -142,3 +136,7 @@ EXPORT_SYMBOL(screen_info); #endif EXPORT_SYMBOL(get_wchan); + + +EXPORT_SYMBOL(local_bh_count); +EXPORT_SYMBOL(local_irq_count); diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c index 062a8422a38c..6112ac036c45 100644 --- a/arch/i386/kernel/irq.c +++ b/arch/i386/kernel/irq.c @@ -182,16 +182,12 @@ int get_irq_list(char *buf) * Global interrupt locks for SMP. Allow interrupts to come in on any * CPU, yet make cli/sti act globally to protect critical regions.. */ -spinlock_t i386_bh_lock = SPIN_LOCK_UNLOCKED; #ifdef CONFIG_SMP unsigned char global_irq_holder = NO_PROC_ID; unsigned volatile int global_irq_lock; atomic_t global_irq_count; -atomic_t global_bh_count; -atomic_t global_bh_lock; - static void show(char * str) { int i; @@ -202,7 +198,7 @@ static void show(char * str) printk("irq: %d [%d %d]\n", atomic_read(&global_irq_count), local_irq_count[0], local_irq_count[1]); printk("bh: %d [%d %d]\n", - atomic_read(&global_bh_count), local_bh_count[0], local_bh_count[1]); + spin_is_locked(&global_bh_lock) ? 1 : 0, local_bh_count[0], local_bh_count[1]); stack = (unsigned long *) &stack; for (i = 40; i ; i--) { unsigned long x = *++stack; @@ -214,18 +210,6 @@ static void show(char * str) #define MAXCOUNT 100000000 -static inline void wait_on_bh(void) -{ - int count = MAXCOUNT; - do { - if (!--count) { - show("wait_on_bh"); - count = ~0; - } - /* nothing .. wait for the other bh's to go away */ - } while (atomic_read(&global_bh_count) != 0); -} - /* * I had a lockup scenario where a tight loop doing * spin_unlock()/spin_lock() on CPU#1 was racing with @@ -265,7 +249,7 @@ static inline void wait_on_irq(int cpu) * already executing in one.. 
*/ if (!atomic_read(&global_irq_count)) { - if (local_bh_count[cpu] || !atomic_read(&global_bh_count)) + if (local_bh_count[cpu] || !spin_is_locked(&global_bh_lock)) break; } @@ -284,7 +268,7 @@ static inline void wait_on_irq(int cpu) continue; if (global_irq_lock) continue; - if (!local_bh_count[cpu] && atomic_read(&global_bh_count)) + if (!local_bh_count[cpu] && spin_is_locked(&global_bh_lock)) continue; if (!test_and_set_bit(0,&global_irq_lock)) break; @@ -292,20 +276,6 @@ static inline void wait_on_irq(int cpu) } } -/* - * This is called when we want to synchronize with - * bottom half handlers. We need to wait until - * no other CPU is executing any bottom half handler. - * - * Don't wait if we're already running in an interrupt - * context or are inside a bh handler. - */ -void synchronize_bh(void) -{ - if (atomic_read(&global_bh_count) && !in_interrupt()) - wait_on_bh(); -} - /* * This is called when we want to synchronize with * interrupts. We may for example tell a device to @@ -605,16 +575,8 @@ asmlinkage unsigned int do_IRQ(struct pt_regs regs) desc->handler->end(irq); spin_unlock(&irq_controller_lock); - /* - * This should be conditional: we should really get - * a return code from the irq handler to tell us - * whether the handler wants us to do software bottom - * half handling or not.. 
- */ - if (1) { - if (bh_active & bh_mask) - do_bottom_half(); - } + if (softirq_state[cpu].active&softirq_state[cpu].mask) + do_softirq(); return 1; } diff --git a/arch/i386/kernel/pm.c b/arch/i386/kernel/pm.c index d714ea591ec1..26811bff2908 100644 --- a/arch/i386/kernel/pm.c +++ b/arch/i386/kernel/pm.c @@ -112,6 +112,20 @@ static int pm_send(struct pm_dev *dev, pm_request_t rqst, void *data) return status; } +/* + * Undo incomplete request + */ +static void pm_undo_request(struct pm_dev *last, pm_request_t undo, void *data) +{ + struct list_head *entry = last->entry.prev; + while (entry != &pm_devs) { + struct pm_dev *dev = list_entry(entry, struct pm_dev, entry); + if (dev->callback) + pm_send(dev, undo, data); + entry = entry->prev; + } +} + /* * Send a request to all devices */ @@ -122,8 +136,12 @@ int pm_send_request(pm_request_t rqst, void *data) struct pm_dev *dev = list_entry(entry, struct pm_dev, entry); if (dev->callback) { int status = pm_send(dev, rqst, data); - if (status) + if (status) { + /* resume devices on failed suspend request */ + if (rqst == PM_SUSPEND) + pm_undo_request(dev, PM_RESUME, 0); return status; + } } entry = entry->next; } diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c index 327ccad52d70..07797e7609e3 100644 --- a/arch/i386/kernel/traps.c +++ b/arch/i386/kernel/traps.c @@ -153,7 +153,7 @@ void show_stack(unsigned long * esp) stack = esp; i = 1; module_start = VMALLOC_START; - module_end = module_start + MODULE_RANGE; + module_end = VMALLOC_END; while (((long) stack & (THREAD_SIZE-1)) != 0) { addr = *stack++; /* diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c index 333f45137893..4ef25674ff78 100644 --- a/arch/i386/mm/init.c +++ b/arch/i386/mm/init.c @@ -40,8 +40,6 @@ unsigned long highstart_pfn, highend_pfn; static unsigned long totalram_pages = 0; static unsigned long totalhigh_pages = 0; -extern void show_net_buffers(void); - /* * BAD_PAGE is the page that is used for page faults when linux * is 
out-of-memory. Older versions of linux just did a @@ -228,9 +226,6 @@ void show_mem(void) printk("%d pages swap cached\n",cached); printk("%ld pages in page table cache\n",pgtable_cache_size); show_buffers(); -#ifdef CONFIG_NET - show_net_buffers(); -#endif } /* References to section boundaries */ diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile index 0616eb0d92aa..7cb47da72f6b 100644 --- a/arch/ia64/kernel/Makefile +++ b/arch/ia64/kernel/Makefile @@ -16,7 +16,7 @@ all: kernel.o head.o init_task.o O_TARGET := kernel.o O_OBJS := acpi.o entry.o gate.o efi.o efi_stub.o irq.o irq_default.o irq_internal.o ivt.o \ - pal.o process.o perfmon.o ptrace.o sal.o sal_stub.o semaphore.o setup.o signal.o \ + pal.o pci-dma.o process.o perfmon.o ptrace.o sal.o sal_stub.o semaphore.o setup.o signal.o \ sys_ia64.o traps.o time.o unaligned.o unwind.o #O_OBJS := fpreg.o #OX_OBJS := ia64_ksyms.o diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c index 2c28f0243197..01c20113757f 100644 --- a/arch/ia64/kernel/irq.c +++ b/arch/ia64/kernel/irq.c @@ -252,6 +252,7 @@ ia64_handle_irq (unsigned long irq, struct pt_regs *regs) max_prio = prev_prio; # endif /* !CONFIG_SMP */ #endif /* CONFIG_ITANIUM_ASTEP_SPECIFIC */ + ia64_srlz_d(); ia64_set_tpr(saved_tpr); ia64_srlz_d(); @@ -632,6 +633,9 @@ init_IRQ (void) init_IRQ_SMP(); #endif + ia64_set_pmv(1 << 16); + ia64_set_cmcv(CMC_IRQ); /* XXX fix me */ + platform_irq_init(irq_desc); /* clear TPR to enable all interrupt classes: */ diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c index 94c20c5b6dd9..f86f45537587 100644 --- a/arch/ia64/kernel/pci-dma.c +++ b/arch/ia64/kernel/pci-dma.c @@ -25,8 +25,10 @@ get_order (unsigned long size) printk ("get_order: size=%lu, order=%lu\n", size, order); - if (log > PAGE_SHIFT) - order -= PAGE_SHIFT;; + if (order > PAGE_SHIFT) + order -= PAGE_SHIFT; + else + order = 0; return order; } diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c index 
1997a1ef7f29..48a3d68b4b85 100644 --- a/arch/ia64/kernel/smp.c +++ b/arch/ia64/kernel/smp.c @@ -221,12 +221,9 @@ send_IPI(int dest_cpu, unsigned char vector) /* * Disable IVR reads */ - save_flags(flags); - __cli(); - spin_lock(&ivr_read_lock); + spin_lock_irqsave(&ivr_read_lock, flags); writeq(ipi_data, ipi_addr); - spin_unlock(&ivr_read_lock); - restore_flags(flags); + spin_unlock_irqrestore(&ivr_read_lock, flags); #else writeq(ipi_data, ipi_addr); #endif /* CONFIG_ITANIUM_ASTEP_SPECIFIC */ diff --git a/arch/ia64/lib/strlen.S b/arch/ia64/lib/strlen.S index a96d6a048873..3062716b1e03 100644 --- a/arch/ia64/lib/strlen.S +++ b/arch/ia64/lib/strlen.S @@ -187,7 +187,7 @@ recover: cmp.eq p6,p0=8,val1 // val1==8 ? (p6) br.wtop.dptk.few 2b // loop until p6 == 0 sub ret0=base,orig // distance from base - sub tmp=7,val1 // 7=8-1 because this strlen returns strlen+1 + sub tmp=8,val1 mov pr=saved_pr,0xffffffffffff0000 ;; sub ret0=ret0,tmp // length=now - back -1 diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 619379a75d3e..388f1fe0cb1e 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -34,7 +34,6 @@ extern pmd_t empty_bad_pmd_table[PTRS_PER_PMD]; extern pte_t empty_bad_pte_table[PTRS_PER_PTE]; extern void ia64_tlb_init (void); -extern void show_net_buffers (void); static unsigned long totalram_pages; @@ -222,9 +221,6 @@ show_mem (void) printk("%d pages swap cached\n", cached); printk("%ld pages in page table cache\n", pgtable_cache_size); show_buffers(); -#ifdef CONFIG_NET - show_net_buffers(); -#endif } /* diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c index 5e7741ee194c..4e6b282659b5 100644 --- a/arch/m68k/mm/init.c +++ b/arch/m68k/mm/init.c @@ -38,8 +38,6 @@ static unsigned long totalram_pages = 0; void mmu_emu_reserve_pages(unsigned long max_page); #endif -extern void show_net_buffers(void); - int do_check_pgt_cache(int low, int high) { int freed = 0; @@ -116,9 +114,6 @@ void show_mem(void) printk("%d pages swap cached\n",cached); 
printk("%ld pages in page table cache\n",pgtable_cache_size); show_buffers(); -#ifdef CONFIG_NET - show_net_buffers(); -#endif } extern void init_pointer_table(unsigned long ptable); diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 45a8c87c6944..ad3ded0816f8 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -35,8 +35,6 @@ #endif #include -extern void show_net_buffers(void); - void __bad_pte_kernel(pmd_t *pmd) { printk("Bad pmd in pte_alloc_kernel: %08lx\n", pmd_val(*pmd)); @@ -266,9 +264,6 @@ void show_mem(void) printk("%ld pages in page table cache\n",pgtable_cache_size); printk("%d free pages\n", free); show_buffers(); -#ifdef CONFIG_NET - show_net_buffers(); -#endif } extern unsigned long free_area_init(unsigned long, unsigned long); diff --git a/arch/ppc/Makefile b/arch/ppc/Makefile index d62c076690a6..7c3f13822434 100644 --- a/arch/ppc/Makefile +++ b/arch/ppc/Makefile @@ -106,19 +106,24 @@ $(BOOT_TARGETS): $(CHECKS) vmlinux znetboot: $(CHECKS) vmlinux ifdef CONFIG_ALL_PPC ifdef CONFIG_SMP -ifdef CONFIG_PPC64 - cp -f vmlinux /tftpboot/vmlinux.smp.64 -else cp -f vmlinux /tftpboot/vmlinux.smp -endif -else -ifdef CONFIG_PPC64 - cp -f vmlinux /tftpboot/vmlinux.64 else cp -f vmlinux /tftpboot/vmlinux endif endif + @$(MAKECOFFBOOT) $@ + @$(MAKEBOOT) $@ + @$(MAKECHRPBOOT) $@ endif + +ifdef CONFIG_PPC64 +$(BOOT_TARGETS): $(CHECKS) vmlinux + @$(MAKECOFFBOOT) $@ + @$(MAKEBOOT) $@ + @$(MAKECHRPBOOT) $@ + +znetboot: $(CHECKS) vmlinux + cp -f vmlinux /tftpboot/vmlinux.64 @$(MAKECOFFBOOT) $@ @$(MAKEBOOT) $@ @$(MAKECHRPBOOT) $@ @@ -129,31 +134,31 @@ clean_config: rm -f .config arch/ppc/defconfig gemini_config: clean_config - ln -s configs/gemini_defconfig arch/ppc/defconfig + cp -f arch/ppc/configs/gemini_defconfig arch/ppc/defconfig pmac_config: clean_config - ln -s configs/pmac_defconfig arch/ppc/defconfig + cp -f arch/ppc/configs/pmac_defconfig arch/ppc/defconfig prep_config: clean_config - ln -s configs/prep_defconfig arch/ppc/defconfig + cp -f 
arch/ppc/configs/prep_defconfig arch/ppc/defconfig chrp_config: clean_config - ln -s configs/chrp_defconfig arch/ppc/defconfig + cp -f arch/ppc/configs/chrp_defconfig arch/ppc/defconfig common_config: clean_config - ln -s configs/common_defconfig arch/ppc/defconfig + cp -f arch/ppc/configs/common_defconfig arch/ppc/defconfig mbx_config: clean_config - ln -s configs/mbx_defconfig arch/ppc/defconfig + cp -f arch/ppc/configs/mbx_defconfig arch/ppc/defconfig apus_config: clean_config - ln -s configs/apus_defconfig arch/ppc/defconfig + cp -f arch/ppc/configs/apus_defconfig arch/ppc/defconfig oak_config: clean_config - ln -s configs/oak_defconfig arch/ppc/defconfig + cp -f arch/ppc/configs/oak_defconfig arch/ppc/defconfig walnut_config: clean_config - ln -s configs/walnut_defconfig arch/ppc/defconfig + cp -f arch/ppc/configs/walnut_defconfig arch/ppc/defconfig archclean: rm -f arch/ppc/kernel/{mk_defs,ppc_defs.h,find_name,checks} diff --git a/arch/ppc/amiga/amiints.c b/arch/ppc/amiga/amiints.c index 75d90312baa5..b895e6c02c2f 100644 --- a/arch/ppc/amiga/amiints.c +++ b/arch/ppc/amiga/amiints.c @@ -350,7 +350,8 @@ inline void amiga_do_irq(int irq, struct pt_regs *fp) void amiga_do_irq_list(int irq, struct pt_regs *fp, struct irq_server *server) { irq_node_t *node, *slow_nodes; - unsigned short flags, intena; + unsigned short intena; + unsigned long flags; kstat.irqs[0][SYS_IRQS + irq]++; if (server->count++) diff --git a/arch/ppc/amiga/cia.c b/arch/ppc/amiga/cia.c index 85133f8dace3..e420415a617f 100644 --- a/arch/ppc/amiga/cia.c +++ b/arch/ppc/amiga/cia.c @@ -45,10 +45,10 @@ struct ciabase { do { \ if (irq >= IRQ_AMIGA_CIAB) { \ base = &ciab_base; \ - irq =- IRQ_AMIGA_CIAB; \ + irq -= IRQ_AMIGA_CIAB; \ } else { \ base = &ciaa_base; \ - irq =- IRQ_AMIGA_CIAA; \ + irq -= IRQ_AMIGA_CIAA; \ } \ } while (0) diff --git a/arch/ppc/boot/Makefile b/arch/ppc/boot/Makefile index eb48313f4eb1..6b83788390ce 100644 --- a/arch/ppc/boot/Makefile +++ b/arch/ppc/boot/Makefile @@ -14,7 
+14,7 @@ .s.o: $(AS) -o $*.o $< .c.o: - $(CC) $(CFLAGS) -DINITRD_OFFSET=$(IOFF) -DINITRD_SIZE=$(ISZ) -DZIMAGE_OFFSET=$(ZOFF) -DZIMAGE_SIZE=$(ZSZ) -c -o $*.o $< + $(CC) $(CFLAGS) -DINITRD_OFFSET=$(IOFF) -DINITRD_SIZE=$(ISZ) -DZIMAGE_OFFSET=$(ZOFF) -DZIMAGE_SIZE=$(ZSZ) -D__BOOTER__ -c -o $*.o $< .S.s: $(CC) -D__ASSEMBLY__ $(AFLAGS) -traditional -E -o $*.o $< .S.o: @@ -49,7 +49,7 @@ endif ZLINKFLAGS = -T ../vmlinux.lds -Ttext 0x00800000 -GZIP_FLAGS = -v9 +GZIP_FLAGS = -v9f OBJECTS := head.o misc.o ../coffboot/zlib.o CFLAGS = $(CPPFLAGS) -O2 -DSTDC_HEADERS -fno-builtin @@ -73,6 +73,7 @@ zvmlinux.initrd: zvmlinux -DINITRD_SIZE=`sh size $(OBJDUMP) zvmlinux.initrd initrd` \ -DZIMAGE_OFFSET=`sh offset $(OBJDUMP) zvmlinux.initrd image` \ -DZIMAGE_SIZE=`sh size $(OBJDUMP) zvmlinux.initrd image` \ + -D__BOOTER__ \ -c -o misc.o misc.c $(LD) $(ZLINKFLAGS) -o zvmlinux.initrd.tmp $(OBJECTS) $(OBJCOPY) $(OBJCOPY_ARGS) -R .comment \ @@ -85,6 +86,10 @@ zImage: zvmlinux mkprep sImage ifdef CONFIG_PREP ./mkprep -pbp zvmlinux zImage endif +ifdef CONFIG_APUS + $(STRIP) ../../../vmlinux -o vmapus + gzip $(GZIP_FLAGS) vmapus +endif sImage: ../../../vmlinux ifdef CONFIG_GEMINI @@ -110,6 +115,7 @@ zvmlinux: $(OBJECTS) ../coffboot/vmlinux.gz $(CC) $(CFLAGS) -DINITRD_OFFSET=0 -DINITRD_SIZE=0 \ -DZIMAGE_OFFSET=`sh offset $(OBJDUMP) zvmlinux image` \ -DZIMAGE_SIZE=`sh size $(OBJDUMP) zvmlinux image` \ + -D__BOOTER__ \ -c -o misc.o misc.c $(LD) $(ZLINKFLAGS) -o zvmlinux.tmp $(OBJECTS) $(OBJCOPY) $(OBJCOPY_ARGS) -R .comment --add-section=image=../coffboot/vmlinux.gz \ diff --git a/arch/ppc/chrpboot/Makefile b/arch/ppc/chrpboot/Makefile index 70ae95eb8e7d..5a7f063fc118 100644 --- a/arch/ppc/chrpboot/Makefile +++ b/arch/ppc/chrpboot/Makefile @@ -20,7 +20,7 @@ CFLAGS = $(CPPFLAGS) -O -fno-builtin -DSTDC_HEADERS LD_ARGS = -Ttext 0x00400000 OBJCOPY = $(CROSS_COMPILE)objcopy -OBJS = crt0.o start.o main.o misc.o ../coffboot/string.o ../coffboot/zlib.o image.o # initrd.o +OBJS = crt0.o start.o main.o 
misc.o ../coffboot/string.o ../coffboot/zlib.o image.o sysmap.o LIBS = $(TOPDIR)/lib/lib.a ifeq ($(CONFIG_PPC64),y) @@ -65,6 +65,9 @@ mknote: mknote.c image.o: piggyback ../coffboot/vmlinux.gz ./piggyback image < ../coffboot/vmlinux.gz | $(AS) -o image.o +sysmap.o: piggyback ../../../System.map + ./piggyback sysmap < ../../../System.map | $(AS) -o sysmap.o + initrd.o: ramdisk.image.gz piggyback ./piggyback initrd < ramdisk.image.gz | $(AS) -o initrd.o diff --git a/arch/ppc/chrpboot/main.c b/arch/ppc/chrpboot/main.c index bf506552a5fa..d54a429a93fb 100644 --- a/arch/ppc/chrpboot/main.c +++ b/arch/ppc/chrpboot/main.c @@ -34,6 +34,8 @@ extern char image_data[]; extern int image_len; extern char initrd_data[]; extern int initrd_len; +extern char sysmap_data[]; +extern int sysmap_len; chrpboot(int a1, int a2, void *prom) @@ -78,12 +80,12 @@ chrpboot(int a1, int a2, void *prom) { struct bi_record *rec; - rec = (struct bi_record *)PAGE_ALIGN((unsigned long)dst+len); - + rec = (struct bi_record *)_ALIGN((unsigned long)dst+len+(1<<20)-1,(1<<20)); + rec->tag = BI_FIRST; rec->size = sizeof(struct bi_record); rec = (struct bi_record *)((unsigned long)rec + rec->size); - + rec->tag = BI_BOOTLOADER_ID; sprintf( (char *)rec->data, "chrpboot"); rec->size = sizeof(struct bi_record) + strlen("chrpboot") + 1; @@ -95,6 +97,11 @@ chrpboot(int a1, int a2, void *prom) rec->size = sizeof(struct bi_record) + sizeof(unsigned long); rec = (struct bi_record *)((unsigned long)rec + rec->size); + rec->tag = BI_SYSMAP; + rec->data[0] = sysmap_data; + rec->data[1] = sysmap_len; + rec->size = sizeof(struct bi_record) + sizeof(unsigned long); + rec = (struct bi_record *)((unsigned long)rec + rec->size); rec->tag = BI_LAST; rec->size = sizeof(struct bi_record); rec = (struct bi_record *)((unsigned long)rec + rec->size); diff --git a/arch/ppc/coffboot/Makefile b/arch/ppc/coffboot/Makefile index 12032930dbd1..29a4fdc355b0 100644 --- a/arch/ppc/coffboot/Makefile +++ b/arch/ppc/coffboot/Makefile @@ 
-36,6 +36,9 @@ TFTPIMAGE=/tftpboot/zImage.pmac$(MSIZE) endif ifeq ($(CONFIG_PMAC),y) +chrpmain.o: chrpmain.c + $(CC) $(CFLAGS) -DSYSMAP_OFFSET=0 -DSYSMAP_SIZE=0 -c chrpmain.c + hack-coff: hack-coff.c $(HOSTCC) $(HOSTCFLAGS) -o hack-coff hack-coff.c @@ -52,6 +55,12 @@ floppy: zImage # cp vmlinux.coff /mnt # umount /mnt +miboot.image: dummy.o vmlinux.gz + $(OBJCOPY) $(OBJCOPY_ARGS) --add-section=image=vmlinux.gz dummy.o $@ + +miboot.image.initrd: dummy.o vmlinux.gz + $(OBJCOPY) $(OBJCOPY_ARGS) --add-section=initrd=ramdisk.image.gz miboot.image $@ + coffboot: $(COFFOBJS) no_initrd.o ld.script $(LD) -o $@ $(COFF_LD_ARGS) $(COFFOBJS) no_initrd.o $(LIBS) @@ -82,16 +91,23 @@ vmlinux.coff.initrd: coffboot.initrd hack-coff vmlinux.elf: $(CHRPOBJS) no_initrd.o mknote $(LD) $(CHRP_LD_ARGS) -o $@ $(CHRPOBJS) no_initrd.o $(LIBS) ./mknote > note - $(OBJCOPY) $@ $@ --add-section=.note=note -R .comment + $(OBJCOPY) $@ $@ --add-section=.note=note \ + --add-section=sysmap=../../../System.map -R .comment + $(CC) $(CFLAGS) chrpmain.c -c -o chrpmain.o \ + -DSYSMAP_OFFSET=`sh ../boot/offset $(OBJDUMP) $@ sysmap` \ + -DSYSMAP_SIZE=`sh ../boot/size $(OBJDUMP) $@ sysmap` + $(LD) $(CHRP_LD_ARGS) -o $@ $(CHRPOBJS) no_initrd.o $(LIBS) + $(OBJCOPY) $@ $@ --add-section=.note=note \ + --add-section=sysmap=../../../System.map -R .comment vmlinux.elf.initrd: $(CHRPOBJS) initrd.o mknote $(LD) $(CHRP_LD_ARGS) -o $@ $(CHRPOBJS) initrd.o $(LIBS) ./mknote > note $(OBJCOPY) $@ $@ --add-section=.note=note -R .comment -zImage: vmlinux.coff vmlinux.elf +zImage: vmlinux.coff vmlinux.elf miboot.image -zImage.initrd: vmlinux.coff.initrd vmlinux.elf.initrd +zImage.initrd: vmlinux.coff.initrd vmlinux.elf.initrd miboot.image.initrd else znetboot: vmlinux.gz @@ -118,5 +134,7 @@ vmlinux.gz: $(TOPDIR)/vmlinux clean: rm -f hack-coff coffboot zImage vmlinux.coff vmlinux.gz + rm -f mknote piggyback vmlinux.elf note + rm -f miboot.image miboot.image.initrd fastdep: diff --git a/arch/ppc/coffboot/chrpmain.c 
b/arch/ppc/coffboot/chrpmain.c index bffb9d9eeb7a..4d994d17ee88 100644 --- a/arch/ppc/coffboot/chrpmain.c +++ b/arch/ppc/coffboot/chrpmain.c @@ -92,8 +92,7 @@ boot(int a1, int a2, void *prom) void make_bi_recs(unsigned long addr) { struct bi_record *rec; - - rec = (struct bi_record *)PAGE_ALIGN(addr); + rec = (struct bi_record *)_ALIGN((unsigned long)addr+(1<<20)-1,(1<<20)); rec->tag = BI_FIRST; rec->size = sizeof(struct bi_record); @@ -109,7 +108,15 @@ void make_bi_recs(unsigned long addr) rec->data[1] = 1; rec->size = sizeof(struct bi_record) + sizeof(unsigned long); rec = (struct bi_record *)((unsigned long)rec + rec->size); - + +#ifdef SYSMAP_OFFSET + rec->tag = BI_SYSMAP; + rec->data[0] = SYSMAP_OFFSET; + rec->data[1] = SYSMAP_SIZE; + rec->size = sizeof(struct bi_record) + sizeof(unsigned long); + rec = (struct bi_record *)((unsigned long)rec + rec->size); +#endif /* SYSMAP_OFFSET */ + rec->tag = BI_LAST; rec->size = sizeof(struct bi_record); rec = (struct bi_record *)((unsigned long)rec + rec->size); diff --git a/arch/ppc/coffboot/dummy.c b/arch/ppc/coffboot/dummy.c new file mode 100644 index 000000000000..31dbf45bf99c --- /dev/null +++ b/arch/ppc/coffboot/dummy.c @@ -0,0 +1,4 @@ +int main(void) +{ + return 0; +} diff --git a/arch/ppc/coffboot/main.c b/arch/ppc/coffboot/main.c index 56d29b84f108..e6049b4a22bc 100644 --- a/arch/ppc/coffboot/main.c +++ b/arch/ppc/coffboot/main.c @@ -112,8 +112,8 @@ coffboot(int a1, int a2, void *prom) #endif { struct bi_record *rec; - - rec = (struct bi_record *)PAGE_ALIGN((unsigned long)dst+len); + + rec = (struct bi_record *)_ALIGN((unsigned long)dst+len+(1<<20)-1,(1<<20)); rec->tag = BI_FIRST; rec->size = sizeof(struct bi_record); diff --git a/arch/ppc/config.in b/arch/ppc/config.in index 43a678c6d3bf..8ea7e9000e51 100644 --- a/arch/ppc/config.in +++ b/arch/ppc/config.in @@ -15,7 +15,7 @@ mainmenu_option next_comment comment 'Platform support' define_bool CONFIG_PPC y choice 'Processor Type' \ - "6xx/7xx CONFIG_6xx \ + 
"6xx/7xx/7400 CONFIG_6xx \ 4xx CONFIG_4xx \ 630/Power3(64-Bit) CONFIG_PPC64 \ 82xx CONFIG_82xx \ @@ -97,6 +97,9 @@ fi if [ "$CONFIG_6xx" = "y" -a "$CONFIG_APUS" != "y" ]; then define_bool CONFIG_PCI y fi +if [ "$CONFIG_PREP" = "y" -o "$CONFIG_PMAC" = "y" -o "$CONFIG_CHRP" = "y" -o "$CONFIG_ALL_PPC" = "y"]; then + define_bool CONFIG_PCI y +fi bool 'Networking support' CONFIG_NET bool 'Sysctl support' CONFIG_SYSCTL @@ -140,6 +143,7 @@ if [ "$CONFIG_4xx" != "y" -a "$CONFIG_8xx" != "y" ]; then bool ' Include MacIO ADB driver' CONFIG_ADB_MACIO bool ' Include PMU (Powerbook) ADB driver' CONFIG_ADB_PMU bool 'Support for ADB keyboard' CONFIG_ADB_KEYBOARD + bool 'Support for ADB mouse' CONFIG_ADBMOUSE fi bool 'Support for Open Firmware device tree in /proc' CONFIG_PROC_DEVICETREE bool 'Support for TotalImpact TotalMP' CONFIG_TOTALMP diff --git a/arch/ppc/configs/common_defconfig b/arch/ppc/configs/common_defconfig index 795aa5b833e1..ac258f16e74b 100644 --- a/arch/ppc/configs/common_defconfig +++ b/arch/ppc/configs/common_defconfig @@ -1,6 +1,7 @@ # # Automatically generated make config: don't edit # +# CONFIG_UID16 is not set # # Code maturity level options @@ -37,6 +38,7 @@ CONFIG_KMOD=y # # CONFIG_PCI is not set CONFIG_PCI=y +CONFIG_PCI=y CONFIG_NET=y CONFIG_SYSCTL=y CONFIG_SYSVIPC=y @@ -45,6 +47,7 @@ CONFIG_KCORE_ELF=y CONFIG_BINFMT_ELF=y CONFIG_KERNEL_ELF=y # CONFIG_BINFMT_MISC is not set +# CONFIG_PCI_NAMES is not set # CONFIG_HOTPLUG is not set # CONFIG_PARPORT is not set CONFIG_VGA_CONSOLE=y @@ -58,6 +61,7 @@ CONFIG_ADB_CUDA=y CONFIG_ADB_MACIO=y CONFIG_ADB_PMU=y CONFIG_ADB_KEYBOARD=y +CONFIG_ADBMOUSE=y CONFIG_PROC_DEVICETREE=y # CONFIG_TOTALMP is not set CONFIG_BOOTX_TEXT=y @@ -84,7 +88,7 @@ CONFIG_BLK_DEV_IDEDISK=y CONFIG_BLK_DEV_IDECD=y # CONFIG_BLK_DEV_IDETAPE is not set CONFIG_BLK_DEV_IDEFLOPPY=y -# CONFIG_BLK_DEV_IDESCSI is not set +CONFIG_BLK_DEV_IDESCSI=y # # IDE chipset support/bugfixes @@ -111,7 +115,6 @@ CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_INITRD=y # 
CONFIG_BLK_DEV_XD is not set # CONFIG_BLK_DEV_DAC960 is not set -CONFIG_PARIDE_PARPORT=y # CONFIG_PARIDE is not set CONFIG_BLK_DEV_IDE_MODES=y # CONFIG_BLK_DEV_HD is not set @@ -178,11 +181,12 @@ CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y CONFIG_BLK_DEV_SR=y CONFIG_BLK_DEV_SR_VENDOR=y -# CONFIG_CHR_DEV_SG is not set +CONFIG_CHR_DEV_SG=y # # Some SCSI devices (e.g. CD jukebox) support multiple LUNs # +# CONFIG_SCSI_DEBUG_QUEUES is not set # CONFIG_SCSI_MULTI_LUN is not set CONFIG_SCSI_CONSTANTS=y # CONFIG_SCSI_LOGGING is not set @@ -244,6 +248,12 @@ CONFIG_SCSI_NCR53C8XX_SYNC=20 CONFIG_SCSI_MESH=y CONFIG_SCSI_MESH_SYNC_RATE=5 CONFIG_SCSI_MAC53C94=y +# CONFIG_BLK_DEV_3W_XXXX_RAID is not set + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_IEEE1394 is not set # # Network device support @@ -266,6 +276,7 @@ CONFIG_NET_ETHERNET=y CONFIG_MACE=y CONFIG_BMAC=y # CONFIG_NCR885E is not set +# CONFIG_OAKNET is not set # CONFIG_NET_VENDOR_3COM is not set # CONFIG_LANCE is not set # CONFIG_NET_VENDOR_SMC is not set @@ -406,6 +417,11 @@ CONFIG_SERIAL=m CONFIG_UNIX98_PTYS=y CONFIG_UNIX98_PTY_COUNT=256 +# +# I2C support +# +# CONFIG_I2C is not set + # # Mice # @@ -413,11 +429,16 @@ CONFIG_BUSMOUSE=y # CONFIG_ATIXL_BUSMOUSE is not set # CONFIG_LOGIBUSMOUSE is not set # CONFIG_MS_BUSMOUSE is not set -# CONFIG_ADBMOUSE is not set +CONFIG_ADBMOUSE=y CONFIG_MOUSE=y CONFIG_PSMOUSE=y # CONFIG_82C710_MOUSE is not set # CONFIG_PC110_PAD is not set + +# +# Joysticks +# +# CONFIG_JOYSTICK is not set # CONFIG_QIC02_TAPE is not set # @@ -431,11 +452,6 @@ CONFIG_NVRAM=y # Video For Linux # # CONFIG_VIDEO_DEV is not set - -# -# Joystick support -# -# CONFIG_JOYSTICK is not set # CONFIG_DTLK is not set # CONFIG_R3964 is not set # CONFIG_APPLICOM is not set @@ -445,9 +461,10 @@ CONFIG_NVRAM=y # # CONFIG_FTAPE is not set # CONFIG_DRM is not set +# CONFIG_AGP is not set # -# Support for USB +# USB support # CONFIG_USB=y @@ -456,31 +473,40 @@ CONFIG_USB=y # # CONFIG_USB_UHCI is not set CONFIG_USB_OHCI=y 
-CONFIG_USB_OHCI_DEBUG=y -# CONFIG_USB_OHCI_HCD is not set # # Miscellaneous USB options # -CONFIG_USB_DEBUG_ISOC=y -CONFIG_USB_PROC=y -# CONFIG_USB_EZUSB is not set +# CONFIG_USB_DEVICEFS is not set # # USB Devices # -CONFIG_USB_HUB=y -CONFIG_USB_MOUSE=y -CONFIG_USB_HP_SCANNER=m -CONFIG_USB_KBD=y +# CONFIG_USB_PRINTER is not set +# CONFIG_USB_SCANNER is not set # CONFIG_USB_AUDIO is not set # CONFIG_USB_ACM is not set -# CONFIG_USB_PRINTER is not set # CONFIG_USB_SERIAL is not set # CONFIG_USB_CPIA is not set +# CONFIG_USB_IBMCAM is not set +# CONFIG_USB_OV511 is not set # CONFIG_USB_DC2XX is not set CONFIG_USB_SCSI=m CONFIG_USB_SCSI_DEBUG=y +# CONFIG_USB_DABUSB is not set + +# +# USB HID +# +# CONFIG_USB_HID is not set +CONFIG_USB_KBD=y +CONFIG_USB_MOUSE=y +# CONFIG_USB_GRAPHIRE is not set +# CONFIG_USB_WMFORCE is not set +# CONFIG_INPUT_KEYBDEV is not set +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_EVDEV is not set # # Filesystems @@ -493,9 +519,9 @@ CONFIG_AUTOFS_FS=y # CONFIG_BFS_FS is not set # CONFIG_FAT_FS is not set # CONFIG_EFS_FS is not set +# CONFIG_CRAMFS is not set CONFIG_ISO9660_FS=y # CONFIG_JOLIET is not set -# CONFIG_UDF_FS is not set # CONFIG_MINIX_FS is not set # CONFIG_NTFS_FS is not set # CONFIG_HPFS_FS is not set @@ -505,6 +531,7 @@ CONFIG_DEVPTS_FS=y # CONFIG_ROMFS_FS is not set CONFIG_EXT2_FS=y # CONFIG_SYSV_FS is not set +# CONFIG_UDF_FS is not set # CONFIG_UFS_FS is not set # @@ -513,7 +540,7 @@ CONFIG_EXT2_FS=y # CONFIG_CODA_FS is not set CONFIG_NFS_FS=y CONFIG_NFSD=y -# CONFIG_NFSD_SUN is not set +# CONFIG_NFSD_V3 is not set CONFIG_SUNRPC=y CONFIG_LOCKD=y # CONFIG_SMB_FS is not set @@ -525,8 +552,6 @@ CONFIG_LOCKD=y # CONFIG_PARTITION_ADVANCED is not set CONFIG_MAC_PARTITION=y CONFIG_MSDOS_PARTITION=y -# CONFIG_SGI_PARTITION is not set -# CONFIG_SUN_PARTITION is not set # CONFIG_NLS is not set # @@ -540,6 +565,7 @@ CONFIG_DMASOUND=y # CONFIG_SOUND_ESSSOLO1 is not set # CONFIG_SOUND_MAESTRO is 
not set # CONFIG_SOUND_SONICVIBES is not set +# CONFIG_SOUND_TRIDENT is not set # CONFIG_SOUND_MSNDCLAS is not set # CONFIG_SOUND_MSNDPIN is not set CONFIG_SOUND_OSS=y diff --git a/arch/ppc/configs/gemini_defconfig b/arch/ppc/configs/gemini_defconfig index 9850f61d839a..6fe267c9a184 100644 --- a/arch/ppc/configs/gemini_defconfig +++ b/arch/ppc/configs/gemini_defconfig @@ -1,6 +1,7 @@ # # Automatically generated make config: don't edit # +# CONFIG_UID16 is not set # # Code maturity level options @@ -216,6 +217,11 @@ CONFIG_SCSI_NCR53C8XX_SYNC=20 # CONFIG_SCSI_MESH is not set # CONFIG_SCSI_MAC53C94 is not set +# +# IEEE 1394 (FireWire) support +# +# CONFIG_IEEE1394 is not set + # # Network device support # @@ -237,6 +243,7 @@ CONFIG_NET_ETHERNET=y # CONFIG_MACE is not set # CONFIG_BMAC is not set CONFIG_NCR885E=y +# CONFIG_OAKNET is not set # CONFIG_NET_VENDOR_3COM is not set # CONFIG_LANCE is not set # CONFIG_NET_VENDOR_SMC is not set @@ -414,4 +421,4 @@ CONFIG_MSDOS_PARTITION=y # # CONFIG_MAGIC_SYSRQ is not set # CONFIG_KGDB is not set -CONFIG_XMON=y +# CONFIG_XMON is not set diff --git a/arch/ppc/configs/oak_defconfig b/arch/ppc/configs/oak_defconfig index 5fe9be3760f0..deaf75f3b255 100644 --- a/arch/ppc/configs/oak_defconfig +++ b/arch/ppc/configs/oak_defconfig @@ -18,7 +18,6 @@ CONFIG_4xx=y # CONFIG_8xx is not set CONFIG_OAK=y # CONFIG_WALNUT is not set -# CONFIG_PCI is not set # CONFIG_SMP is not set CONFIG_MACH_SPECIFIC=y # CONFIG_MATH_EMULATION is not set @@ -33,6 +32,7 @@ CONFIG_KMOD=y # # General setup # +# CONFIG_PCI is not set CONFIG_NET=y CONFIG_SYSCTL=y CONFIG_SYSVIPC=y @@ -234,6 +234,11 @@ CONFIG_SERIAL_CONSOLE=y # CONFIG_SERIAL_NONSTANDARD is not set # CONFIG_UNIX98_PTYS is not set +# +# I2C support +# +# CONFIG_I2C is not set + # # Mice # diff --git a/arch/ppc/configs/walnut_defconfig b/arch/ppc/configs/walnut_defconfig index 931e1a7a9036..ad36925f32c1 100644 --- a/arch/ppc/configs/walnut_defconfig +++ b/arch/ppc/configs/walnut_defconfig @@ -18,7 
+18,6 @@ CONFIG_4xx=y # CONFIG_8xx is not set # CONFIG_OAK is not set CONFIG_WALNUT=y -CONFIG_PCI=y # CONFIG_SMP is not set CONFIG_MACH_SPECIFIC=y # CONFIG_MATH_EMULATION is not set @@ -33,6 +32,7 @@ CONFIG_KMOD=y # # General setup # +CONFIG_PCI=y CONFIG_NET=y CONFIG_SYSCTL=y CONFIG_SYSVIPC=y @@ -234,6 +234,11 @@ CONFIG_SERIAL_CONSOLE=y # CONFIG_SERIAL_NONSTANDARD is not set # CONFIG_UNIX98_PTYS is not set +# +# I2C support +# +CONFIG_I2C=y + # # Mice # @@ -315,8 +320,8 @@ CONFIG_LOCKD=y # Partition Types # # CONFIG_PARTITION_ADVANCED is not set -CONFIG_MAC_PARTITION=y -CONFIG_MSDOS_PARTITION=y +# CONFIG_MAC_PARTITION is not set +# CONFIG_MSDOS_PARTITION is not set # CONFIG_SGI_PARTITION is not set # CONFIG_SUN_PARTITION is not set # CONFIG_NLS is not set diff --git a/arch/ppc/defconfig b/arch/ppc/defconfig index 795aa5b833e1..ac258f16e74b 100644 --- a/arch/ppc/defconfig +++ b/arch/ppc/defconfig @@ -1,6 +1,7 @@ # # Automatically generated make config: don't edit # +# CONFIG_UID16 is not set # # Code maturity level options @@ -37,6 +38,7 @@ CONFIG_KMOD=y # # CONFIG_PCI is not set CONFIG_PCI=y +CONFIG_PCI=y CONFIG_NET=y CONFIG_SYSCTL=y CONFIG_SYSVIPC=y @@ -45,6 +47,7 @@ CONFIG_KCORE_ELF=y CONFIG_BINFMT_ELF=y CONFIG_KERNEL_ELF=y # CONFIG_BINFMT_MISC is not set +# CONFIG_PCI_NAMES is not set # CONFIG_HOTPLUG is not set # CONFIG_PARPORT is not set CONFIG_VGA_CONSOLE=y @@ -58,6 +61,7 @@ CONFIG_ADB_CUDA=y CONFIG_ADB_MACIO=y CONFIG_ADB_PMU=y CONFIG_ADB_KEYBOARD=y +CONFIG_ADBMOUSE=y CONFIG_PROC_DEVICETREE=y # CONFIG_TOTALMP is not set CONFIG_BOOTX_TEXT=y @@ -84,7 +88,7 @@ CONFIG_BLK_DEV_IDEDISK=y CONFIG_BLK_DEV_IDECD=y # CONFIG_BLK_DEV_IDETAPE is not set CONFIG_BLK_DEV_IDEFLOPPY=y -# CONFIG_BLK_DEV_IDESCSI is not set +CONFIG_BLK_DEV_IDESCSI=y # # IDE chipset support/bugfixes @@ -111,7 +115,6 @@ CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_INITRD=y # CONFIG_BLK_DEV_XD is not set # CONFIG_BLK_DEV_DAC960 is not set -CONFIG_PARIDE_PARPORT=y # CONFIG_PARIDE is not set 
CONFIG_BLK_DEV_IDE_MODES=y # CONFIG_BLK_DEV_HD is not set @@ -178,11 +181,12 @@ CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y CONFIG_BLK_DEV_SR=y CONFIG_BLK_DEV_SR_VENDOR=y -# CONFIG_CHR_DEV_SG is not set +CONFIG_CHR_DEV_SG=y # # Some SCSI devices (e.g. CD jukebox) support multiple LUNs # +# CONFIG_SCSI_DEBUG_QUEUES is not set # CONFIG_SCSI_MULTI_LUN is not set CONFIG_SCSI_CONSTANTS=y # CONFIG_SCSI_LOGGING is not set @@ -244,6 +248,12 @@ CONFIG_SCSI_NCR53C8XX_SYNC=20 CONFIG_SCSI_MESH=y CONFIG_SCSI_MESH_SYNC_RATE=5 CONFIG_SCSI_MAC53C94=y +# CONFIG_BLK_DEV_3W_XXXX_RAID is not set + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_IEEE1394 is not set # # Network device support @@ -266,6 +276,7 @@ CONFIG_NET_ETHERNET=y CONFIG_MACE=y CONFIG_BMAC=y # CONFIG_NCR885E is not set +# CONFIG_OAKNET is not set # CONFIG_NET_VENDOR_3COM is not set # CONFIG_LANCE is not set # CONFIG_NET_VENDOR_SMC is not set @@ -406,6 +417,11 @@ CONFIG_SERIAL=m CONFIG_UNIX98_PTYS=y CONFIG_UNIX98_PTY_COUNT=256 +# +# I2C support +# +# CONFIG_I2C is not set + # # Mice # @@ -413,11 +429,16 @@ CONFIG_BUSMOUSE=y # CONFIG_ATIXL_BUSMOUSE is not set # CONFIG_LOGIBUSMOUSE is not set # CONFIG_MS_BUSMOUSE is not set -# CONFIG_ADBMOUSE is not set +CONFIG_ADBMOUSE=y CONFIG_MOUSE=y CONFIG_PSMOUSE=y # CONFIG_82C710_MOUSE is not set # CONFIG_PC110_PAD is not set + +# +# Joysticks +# +# CONFIG_JOYSTICK is not set # CONFIG_QIC02_TAPE is not set # @@ -431,11 +452,6 @@ CONFIG_NVRAM=y # Video For Linux # # CONFIG_VIDEO_DEV is not set - -# -# Joystick support -# -# CONFIG_JOYSTICK is not set # CONFIG_DTLK is not set # CONFIG_R3964 is not set # CONFIG_APPLICOM is not set @@ -445,9 +461,10 @@ CONFIG_NVRAM=y # # CONFIG_FTAPE is not set # CONFIG_DRM is not set +# CONFIG_AGP is not set # -# Support for USB +# USB support # CONFIG_USB=y @@ -456,31 +473,40 @@ CONFIG_USB=y # # CONFIG_USB_UHCI is not set CONFIG_USB_OHCI=y -CONFIG_USB_OHCI_DEBUG=y -# CONFIG_USB_OHCI_HCD is not set # # Miscellaneous USB options # -CONFIG_USB_DEBUG_ISOC=y 
-CONFIG_USB_PROC=y -# CONFIG_USB_EZUSB is not set +# CONFIG_USB_DEVICEFS is not set # # USB Devices # -CONFIG_USB_HUB=y -CONFIG_USB_MOUSE=y -CONFIG_USB_HP_SCANNER=m -CONFIG_USB_KBD=y +# CONFIG_USB_PRINTER is not set +# CONFIG_USB_SCANNER is not set # CONFIG_USB_AUDIO is not set # CONFIG_USB_ACM is not set -# CONFIG_USB_PRINTER is not set # CONFIG_USB_SERIAL is not set # CONFIG_USB_CPIA is not set +# CONFIG_USB_IBMCAM is not set +# CONFIG_USB_OV511 is not set # CONFIG_USB_DC2XX is not set CONFIG_USB_SCSI=m CONFIG_USB_SCSI_DEBUG=y +# CONFIG_USB_DABUSB is not set + +# +# USB HID +# +# CONFIG_USB_HID is not set +CONFIG_USB_KBD=y +CONFIG_USB_MOUSE=y +# CONFIG_USB_GRAPHIRE is not set +# CONFIG_USB_WMFORCE is not set +# CONFIG_INPUT_KEYBDEV is not set +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_EVDEV is not set # # Filesystems @@ -493,9 +519,9 @@ CONFIG_AUTOFS_FS=y # CONFIG_BFS_FS is not set # CONFIG_FAT_FS is not set # CONFIG_EFS_FS is not set +# CONFIG_CRAMFS is not set CONFIG_ISO9660_FS=y # CONFIG_JOLIET is not set -# CONFIG_UDF_FS is not set # CONFIG_MINIX_FS is not set # CONFIG_NTFS_FS is not set # CONFIG_HPFS_FS is not set @@ -505,6 +531,7 @@ CONFIG_DEVPTS_FS=y # CONFIG_ROMFS_FS is not set CONFIG_EXT2_FS=y # CONFIG_SYSV_FS is not set +# CONFIG_UDF_FS is not set # CONFIG_UFS_FS is not set # @@ -513,7 +540,7 @@ CONFIG_EXT2_FS=y # CONFIG_CODA_FS is not set CONFIG_NFS_FS=y CONFIG_NFSD=y -# CONFIG_NFSD_SUN is not set +# CONFIG_NFSD_V3 is not set CONFIG_SUNRPC=y CONFIG_LOCKD=y # CONFIG_SMB_FS is not set @@ -525,8 +552,6 @@ CONFIG_LOCKD=y # CONFIG_PARTITION_ADVANCED is not set CONFIG_MAC_PARTITION=y CONFIG_MSDOS_PARTITION=y -# CONFIG_SGI_PARTITION is not set -# CONFIG_SUN_PARTITION is not set # CONFIG_NLS is not set # @@ -540,6 +565,7 @@ CONFIG_DMASOUND=y # CONFIG_SOUND_ESSSOLO1 is not set # CONFIG_SOUND_MAESTRO is not set # CONFIG_SOUND_SONICVIBES is not set +# CONFIG_SOUND_TRIDENT is not set # CONFIG_SOUND_MSNDCLAS is not set # 
CONFIG_SOUND_MSNDPIN is not set CONFIG_SOUND_OSS=y diff --git a/arch/ppc/kernel/Makefile b/arch/ppc/kernel/Makefile index 7aaacfadb1b8..ea7c7c6e7c63 100644 --- a/arch/ppc/kernel/Makefile +++ b/arch/ppc/kernel/Makefile @@ -38,7 +38,7 @@ O_OBJS += hashtable.o endif ifdef CONFIG_PCI -O_OBJS += pci.o +O_OBJS += pci.o pci-dma.o endif ifdef CONFIG_KGDB @@ -59,8 +59,16 @@ endif ifeq ($(CONFIG_4xx),y) O_OBJS += ppc4xx_pic.o - ifeq ($(CONFIG_OAK),y) - O_OBJS += oak_setup.o +endif + +ifeq ($(CONFIG_OAK),y) + O_OBJS += oak_setup.o +endif + +ifeq ($(CONFIG_WALNUT),y) + O_OBJS += walnut_setup.o + ifeq ($(CONFIG_PCI),y) + O_OBJS += galaxy_pci.o endif endif @@ -83,6 +91,9 @@ endif ifeq ($(CONFIG_6xx),y) O_OBJS += open_pic.o indirect_pci.o endif +ifeq ($(CONFIG_PPC64),y) + O_OBJS += open_pic.o indirect_pci.o +endif ifeq ($(CONFIG_APUS),y) O_OBJS += apus_setup.o endif diff --git a/arch/ppc/kernel/apus_setup.c b/arch/ppc/kernel/apus_setup.c index a7b057fa164e..5f0c4b06e4ba 100644 --- a/arch/ppc/kernel/apus_setup.c +++ b/arch/ppc/kernel/apus_setup.c @@ -19,7 +19,6 @@ #include #include #include -#include #include #include #include @@ -28,8 +27,41 @@ #include #endif -/* Get the IDE stuff from the 68k file */ #include +#define T_CHAR (0x0000) /* char: don't touch */ +#define T_SHORT (0x4000) /* short: 12 -> 21 */ +#define T_INT (0x8000) /* int: 1234 -> 4321 */ +#define T_TEXT (0xc000) /* text: 12 -> 21 */ + +#define T_MASK_TYPE (0xc000) +#define T_MASK_COUNT (0x3fff) + +#define D_CHAR(cnt) (T_CHAR | (cnt)) +#define D_SHORT(cnt) (T_SHORT | (cnt)) +#define D_INT(cnt) (T_INT | (cnt)) +#define D_TEXT(cnt) (T_TEXT | (cnt)) + +static u_short driveid_types[] = { + D_SHORT(10), /* config - vendor2 */ + D_TEXT(20), /* serial_no */ + D_SHORT(3), /* buf_type, buf_size - ecc_bytes */ + D_TEXT(48), /* fw_rev - model */ + D_CHAR(2), /* max_multsect - vendor3 */ + D_SHORT(1), /* dword_io */ + D_CHAR(2), /* vendor4 - capability */ + D_SHORT(1), /* reserved50 */ + D_CHAR(4), /* vendor5 - tDMA */ + 
D_SHORT(4), /* field_valid - cur_sectors */ + D_INT(1), /* cur_capacity */ + D_CHAR(2), /* multsect - multsect_valid */ + D_INT(1), /* lba_capacity */ + D_SHORT(194) /* dma_1word - reservedyy */ +}; + +#define num_driveid_types (sizeof(driveid_types)/sizeof(*driveid_types)) + +#if 0 /* Get rid of this crud */ +/* Get the IDE stuff from the 68k file */ #define ide_init_hwif_ports m68k_ide_init_hwif_ports #define ide_default_irq m68k_ide_default_irq #undef ide_request_irq @@ -57,6 +89,7 @@ #undef ide_release_region #undef ide_fix_driveid /*-------------------------------------------*/ +#endif #include #include @@ -411,33 +444,6 @@ void kbd_reset_setup(char *str, int *ints) { } -#if defined(CONFIG_WHIPPET_SERIAL)||defined(CONFIG_MULTIFACE_III_TTY)||defined(CONFIG_GVPIOEXT)||defined(CONFIG_AMIGA_BUILTIN_SERIAL) - -long m68k_rs_init(void); -int m68k_register_serial(struct serial_struct *); -void m68k_unregister_serial(int); -long m68k_serial_console_init(long, long ); - -int rs_init(void) -{ - return m68k_rs_init(); -} -int register_serial(struct serial_struct *p) -{ - return m68k_register_serial(p); -} -void unregister_serial(int i) -{ - m68k_unregister_serial(i); -} -#ifdef CONFIG_SERIAL_CONSOLE -long serial_console_init(long kmem_start, long kmem_end) -{ - return m68k_serial_console_init(kmem_start, kmem_end); -} -#endif -#endif - /*********************************************************** FLOPPY */ #if defined(CONFIG_AMIGA_FLOPPY) __init @@ -673,7 +679,7 @@ apus_ide_outsw(ide_ioreg_t port, void *buf, int ns) int apus_ide_default_irq(ide_ioreg_t base) { - return m68k_ide_default_irq(base); + return 0; } ide_ioreg_t @@ -685,7 +691,7 @@ apus_ide_default_io_base(int index) int apus_ide_check_region(ide_ioreg_t from, unsigned int extent) { - return m68k_ide_check_region(from, extent); + return 0; } void @@ -693,27 +699,66 @@ apus_ide_request_region(ide_ioreg_t from, unsigned int extent, const char *name) { - m68k_ide_request_region(from, extent, name); } void 
apus_ide_release_region(ide_ioreg_t from, unsigned int extent) { - m68k_ide_release_region(from, extent); } void apus_ide_fix_driveid(struct hd_driveid *id) { - m68k_ide_fix_driveid(id); + u_char *p = (u_char *)id; + int i, j, cnt; + u_char t; + + if (!MACH_IS_AMIGA && !MACH_IS_MAC) + return; + for (i = 0; i < num_driveid_types; i++) { + cnt = driveid_types[i] & T_MASK_COUNT; + switch (driveid_types[i] & T_MASK_TYPE) { + case T_CHAR: + p += cnt; + break; + case T_SHORT: + for (j = 0; j < cnt; j++) { + t = p[0]; + p[0] = p[1]; + p[1] = t; + p += 2; + } + break; + case T_INT: + for (j = 0; j < cnt; j++) { + t = p[0]; + p[0] = p[3]; + p[3] = t; + t = p[1]; + p[1] = p[2]; + p[2] = t; + p += 4; + } + break; + case T_TEXT: + for (j = 0; j < cnt; j += 2) { + t = p[0]; + p[0] = p[1]; + p[1] = t; + p += 2; + } + break; + } + } } __init void apus_ide_init_hwif_ports (hw_regs_t *hw, ide_ioreg_t data_port, ide_ioreg_t ctrl_port, int *irq) { - m68k_ide_init_hwif_ports(hw, data_port, ctrl_port, irq); + if (data_port || ctrl_port) + printk("apus_ide_init_hwif_ports: must not be called\n"); } #endif /****************************************************** IRQ stuff */ @@ -732,7 +777,7 @@ int apus_get_irq_list(char *buf) /* IPL must be between 0 and 7 */ __apus -static inline void apus_set_IPL(int ipl) +static inline void apus_set_IPL(unsigned long ipl) { APUS_WRITE(APUS_IPL_EMU, IPLEMU_SETRESET | IPLEMU_DISABLEINT); APUS_WRITE(APUS_IPL_EMU, IPLEMU_IPLMASK); @@ -743,42 +788,22 @@ static inline void apus_set_IPL(int ipl) __apus static inline unsigned long apus_get_IPL(void) { - unsigned short __f; + /* This returns the present IPL emulation level. 
*/ + unsigned long __f; APUS_READ(APUS_IPL_EMU, __f); return ((~__f) & IPLEMU_IPLMASK); } __apus -static inline unsigned long apus_get_prev_IPL(void) -{ - unsigned short __f; - APUS_READ(APUS_IPL_EMU, __f); - return ((~__f >> 3) & IPLEMU_IPLMASK); -} - - -__apus -static void apus_save_flags(unsigned long* flags) +static inline unsigned long apus_get_prev_IPL(struct pt_regs* regs) { - *flags = apus_get_IPL(); -} - -__apus -static void apus_restore_flags(unsigned long flags) -{ - apus_set_IPL(flags); -} - -__apus -static void apus_sti(void) -{ - apus_set_IPL(0); -} - -__apus -static void apus_cli(void) -{ - apus_set_IPL(7); + /* The value saved in mq is the IPL_EMU value at the time of + interrupt. The lower bits are the current interrupt level, + the upper bits the requested level. Thus, to restore the + IPL level to the post-interrupt state, we will need to use + the lower bits. */ + unsigned long __f = regs->mq; + return ((~__f) & IPLEMU_IPLMASK); } @@ -802,6 +827,22 @@ int request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *) return amiga_request_irq (irq, handler, irqflags, devname, dev_id); } + +/* In Linux/m68k the sys_request_irq deals with vectors 0-7. That's what + callers expect - but on Linux/APUS we actually use the IRQ_AMIGA_AUTO + vectors (24-31), so we put this dummy function in between to adjust + the vector argument (rather have cruft here than in the generic irq.c). 
*/ +int sys_request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *), + unsigned long irqflags, const char * devname, void *dev_id) +{ + extern int request_sysirq(unsigned int irq, + void (*handler)(int, void *, + struct pt_regs *), + unsigned long irqflags, + const char * devname, void *dev_id); + return request_sysirq(irq+IRQ_AMIGA_AUTO, handler, irqflags, + devname, dev_id); +} #endif __apus @@ -809,14 +850,17 @@ int apus_get_irq(struct pt_regs* regs) { #ifdef CONFIG_APUS int level = apus_get_IPL(); - unsigned short ints = custom.intreqr & custom.intenar; + +#ifdef __INTERRUPT_DEBUG + printk("<%d:%d>", level, apus_get_prev_IPL(regs)); +#endif if (0 == level) - return -1; + return -8; if (7 == level) - return -2; + return -9; - return level; + return level + IRQ_AMIGA_AUTO; #else return 0; #endif @@ -824,10 +868,13 @@ int apus_get_irq(struct pt_regs* regs) __apus -void apus_post_irq(int level) +void apus_post_irq(struct pt_regs* regs, int level) { +#ifdef __INTERRUPT_DEBUG + printk("{%d}", apus_get_prev_IPL(regs)); +#endif /* Restore IPL to the previous value */ - apus_set_IPL(apus_get_IPL()); + apus_set_IPL(apus_get_prev_IPL(regs)); } @@ -903,11 +950,28 @@ irq_node_t *new_irq_node(void) return NULL; } +extern void amiga_enable_irq(unsigned int irq); +extern void amiga_disable_irq(unsigned int irq); + +struct hw_interrupt_type amiga_irqctrl = { + " Amiga ", + NULL, + NULL, + amiga_enable_irq, + amiga_disable_irq, + 0, + 0 +}; + + __init void apus_init_IRQ(void) { int i; + for ( i = 0 ; i < NR_IRQS ; i++ ) + irq_desc[i].ctl = &amiga_irqctrl; + for (i = 0; i < NUM_IRQ_NODES; i++) nodes[i].handler = NULL; @@ -919,10 +983,10 @@ void apus_init_IRQ(void) amiga_init_IRQ(); - int_control.int_sti = apus_sti; - int_control.int_cli = apus_cli; - int_control.int_save_flags = apus_save_flags; - int_control.int_restore_flags = apus_restore_flags; + int_control.int_sti = __no_use_sti; + int_control.int_cli = __no_use_cli; + int_control.int_save_flags = 
__no_use_save_flags; + int_control.int_restore_flags = __no_use_restore_flags; } __init diff --git a/arch/ppc/kernel/chrp_pci.c b/arch/ppc/kernel/chrp_pci.c index b93bc45f7950..a2fbe5f14f7b 100644 --- a/arch/ppc/kernel/chrp_pci.c +++ b/arch/ppc/kernel/chrp_pci.c @@ -273,14 +273,13 @@ void __init chrp_pcibios_fixup(void) { struct pci_dev *dev; - - /* some of IBM chrps have > 1 bus */ - if ( !strncmp("IBM", get_property(find_path_device("/"), - "name", NULL),3) ) - { - - } - + int i; + extern struct pci_ops generic_pci_ops; + + /* Some IBM's with the python have >1 bus, this finds them */ + for ( i = 0; i < python_busnr ; i++ ) + pci_scan_bus(i+1, &generic_pci_ops, NULL); + /* PCI interrupts are controlled by the OpenPIC */ pci_for_each_dev(dev) { if ( dev->irq ) diff --git a/arch/ppc/kernel/chrp_setup.c b/arch/ppc/kernel/chrp_setup.c index e76aa8dd97d6..21abfc84f3c6 100644 --- a/arch/ppc/kernel/chrp_setup.c +++ b/arch/ppc/kernel/chrp_setup.c @@ -249,7 +249,6 @@ chrp_setup_arch(void) else #endif ROOT_DEV = to_kdev_t(0x0802); /* sda2 (sda1 is for the kernel) */ -sprintf(cmd_line, "console=ttyS0,9600 console=tty0"); printk("Boot arguments: %s\n", cmd_line); request_region(0x20,0x20,"pic1"); @@ -384,7 +383,7 @@ int chrp_get_irq( struct pt_regs *regs ) return irq; } -void chrp_post_irq(int irq) +void chrp_post_irq(struct pt_regs* regs, int irq) { /* * If it's an i8259 irq then we've already done the @@ -394,7 +393,7 @@ void chrp_post_irq(int irq) * We do it this way since our irq_desc[irq].handler can change * with RTL and no longer be open_pic -- Cort */ - if ( irq >= open_pic.irq_offset) + if ( irq >= open_pic_irq_offset) openpic_eoi( smp_processor_id() ); } @@ -411,10 +410,11 @@ void __init chrp_init_IRQ(void) (*(unsigned long *)get_property(np, "8259-interrupt-acknowledge", NULL)); } - open_pic.irq_offset = 16; + open_pic_irq_offset = 16; for ( i = 16 ; i < NR_IRQS ; i++ ) irq_desc[i].handler = &open_pic; openpic_init(1); + enable_irq(IRQ_8259_CASCADE); for ( i = 0 ; 
i < 16 ; i++ ) irq_desc[i].handler = &i8259_pic; i8259_init(); diff --git a/arch/ppc/kernel/chrp_time.c b/arch/ppc/kernel/chrp_time.c index 50c7417fb7c3..d55fa24c0d5c 100644 --- a/arch/ppc/kernel/chrp_time.c +++ b/arch/ppc/kernel/chrp_time.c @@ -171,9 +171,10 @@ void __init chrp_calibrate_decr(void) if (fp != 0) freq = *fp; } - freq *= 60; /* try to make freq/1e6 an integer */ - divisor = 60; - printk("time_init: decrementer frequency = %lu/%d\n", freq, divisor); + freq *= 30; + divisor = 30; + printk("time_init: decrementer frequency = %lu/%d (%d MHz)\n", freq, + divisor, (freq/divisor)>>20); decrementer_count = freq / HZ / divisor; count_period_num = divisor; count_period_den = freq / 1000000; diff --git a/arch/ppc/kernel/entry.S b/arch/ppc/kernel/entry.S index 2c4ff55544a2..2d1238a6baf3 100644 --- a/arch/ppc/kernel/entry.S +++ b/arch/ppc/kernel/entry.S @@ -256,6 +256,15 @@ _GLOBAL(_switch) REST_8GPRS(23, r1) REST_GPR(31, r1) lwz r2,_NIP(r1) /* Restore environment */ + /* + * We need to hard disable here even if RTL is active since + * being interrupted after here trashes SRR{0,1} + * -- Cort + */ + mfmsr r0 /* Get current interrupt state */ + rlwinm r0,r0,0,17,15 /* clear MSR_EE in r0 */ + mtmsr r0 /* Update machine state */ + lwz r0,_MSR(r1) mtspr SRR0,r2 mtspr SRR1,r0 @@ -271,7 +280,7 @@ ret_from_smpfork: bl schedule_tail b ret_from_except #endif - + .globl ret_from_intercept ret_from_intercept: /* @@ -291,7 +300,7 @@ ret_from_intercept: .globl ret_from_except ret_from_except: -0: /* disable interrupts */ +0: /* disable interrupts */ lis r30,int_control@h ori r30,r30,int_control@l lwz r30,0(r30) @@ -342,16 +351,26 @@ do_bottom_half_ret: .globl do_signal_ret do_signal_ret: b 0b -8: addi r4,r1,INT_FRAME_SIZE /* size of frame */ +8: /* + * We need to hard disable here even if RTL is active since + * being interrupted after here trashes the SPRG2 + * -- Cort + */ + mfmsr r0 /* Get current interrupt state */ + rlwinm r0,r0,0,17,15 /* clear MSR_EE in r0 */ + mtmsr 
r0 /* Update machine state */ + + addi r4,r1,INT_FRAME_SIZE /* size of frame */ stw r4,THREAD+KSP(r2) /* save kernel stack pointer */ tophys(r3,r1) mtspr SPRG2,r3 /* phys exception stack pointer */ + b 11f 10: /* make sure we hard disable here, even if rtl is active -- Cort */ mfmsr r0 /* Get current interrupt state */ rlwinm r0,r0,0,17,15 /* clear MSR_EE in r0 */ sync /* Some chip revs have problems here... */ mtmsr r0 /* Update machine state */ - +11: lwz r2,_CTR(r1) lwz r0,_LINK(r1) mtctr r2 diff --git a/arch/ppc/kernel/feature.c b/arch/ppc/kernel/feature.c index a9a30396a0b2..1bd24a295c83 100644 --- a/arch/ppc/kernel/feature.c +++ b/arch/ppc/kernel/feature.c @@ -8,85 +8,143 @@ * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * + * BenH: Changed implementation to work on multiple registers + * polarity is also taken into account. Removed delay (now + * responsibility of the caller). Added spinlocks. + * */ #include #include #include #include #include +#include #include #include +#include #include #include #include -#define MAX_FEATURE_REGS 2 #undef DEBUG_FEATURE -static u32 feature_bits_pbook[] = { - 0, /* FEATURE_null */ - OH_SCC_RESET, /* FEATURE_Serial_reset */ - OH_SCC_ENABLE, /* FEATURE_Serial_enable */ - OH_SCCA_IO, /* FEATURE_Serial_IO_A */ - OH_SCCB_IO, /* FEATURE_Serial_IO_B */ - OH_FLOPPY_ENABLE, /* FEATURE_SWIM3_enable */ - OH_MESH_ENABLE, /* FEATURE_MESH_enable */ - OH_IDE_ENABLE, /* FEATURE_IDE_enable */ - OH_VIA_ENABLE, /* FEATURE_VIA_enable */ - OH_IDECD_POWER, /* FEATURE_CD_power */ - OH_BAY_RESET, /* FEATURE_Mediabay_reset */ - OH_BAY_ENABLE, /* FEATURE_Mediabay_enable */ - OH_BAY_PCI_ENABLE, /* FEATURE_Mediabay_PCI_enable */ - OH_BAY_IDE_ENABLE, /* FEATURE_Mediabay_IDE_enable */ - OH_BAY_FLOPPY_ENABLE, /* FEATURE_Mediabay_floppy_enable */ - 0, /* FEATURE_BMac_reset */ - 0, /* FEATURE_BMac_IO_enable */ - 0, /* FEATURE_Modem_Reset -> guess... 
*/ - OH_IDE_POWER, /* FEATURE_IDE_DiskPower -> guess... */ - OH_IDE_RESET /* FEATURE_IDE_Reset (0 based) -> guess... */ +#define MAX_FEATURE_CONTROLLERS 2 +#define MAX_FEATURE_OFFSET 0x50 +#define FREG(c,r) (&(((c)->reg)[(r)>>2])) + +typedef struct feature_bit { + int reg; /* reg. offset from mac-io base */ + unsigned int polarity; /* 0 = normal, 1 = inverse */ + unsigned int mask; /* bit mask */ +} fbit; + +/* I don't have an OHare machine to test with, so I left those as they + * were. Someone with such a machine chould check out what OF says and + * try too see if they match the heathrow ones and should be changed too + */ +static fbit feature_bits_ohare_pbook[] = { + {0x38,0,0}, /* FEATURE_null */ + {0x38,0,OH_SCC_RESET}, /* FEATURE_Serial_reset */ + {0x38,0,OH_SCC_ENABLE}, /* FEATURE_Serial_enable */ + {0x38,0,OH_SCCA_IO}, /* FEATURE_Serial_IO_A */ + {0x38,0,OH_SCCB_IO}, /* FEATURE_Serial_IO_B */ + {0x38,0,OH_FLOPPY_ENABLE}, /* FEATURE_SWIM3_enable */ + {0x38,0,OH_MESH_ENABLE}, /* FEATURE_MESH_enable */ + {0x38,0,OH_IDE0_ENABLE}, /* FEATURE_IDE0_enable */ + {0x38,1,OH_IDE0_RESET_N}, /* FEATURE_IDE0_reset */ + {0x38,0,OH_IOBUS_ENABLE}, /* FEATURE_IOBUS_enable */ + {0x38,1,OH_BAY_RESET_N}, /* FEATURE_Mediabay_reset */ + {0x38,1,OH_BAY_POWER_N}, /* FEATURE_Mediabay_power */ + {0x38,0,OH_BAY_PCI_ENABLE}, /* FEATURE_Mediabay_PCI_enable */ + {0x38,0,OH_BAY_IDE_ENABLE}, /* FEATURE_Mediabay_IDE_enable */ + {0x38,1,OH_IDE1_RESET_N}, /* FEATURE_Mediabay_IDE_reset */ + {0x38,0,OH_BAY_FLOPPY_ENABLE}, /* FEATURE_Mediabay_floppy_enable */ + {0x38,0,0}, /* FEATURE_BMac_reset */ + {0x38,0,0}, /* FEATURE_BMac_IO_enable */ + {0x38,0,0}, /* FEATURE_Modem_power */ + {0x38,0,0}, /* FEATURE_Slow_SCC_PCLK */ + {0x38,0,0}, /* FEATURE_Sound_Power */ + {0x38,0,0}, /* FEATURE_Sound_CLK_Enable */ + {0x38,0,0}, /* FEATURE_IDE2_enable */ + {0x38,0,0}, /* FEATURE_IDE2_reset */ }; -/* assume these are the same as the ohare until proven otherwise */ -static u32 feature_bits_heathrow[] = { - 
0, /* FEATURE_null */ - OH_SCC_RESET, /* FEATURE_Serial_reset */ - OH_SCC_ENABLE, /* FEATURE_Serial_enable */ - OH_SCCA_IO, /* FEATURE_Serial_IO_A */ - OH_SCCB_IO, /* FEATURE_Serial_IO_B */ - OH_FLOPPY_ENABLE, /* FEATURE_SWIM3_enable */ - OH_MESH_ENABLE, /* FEATURE_MESH_enable */ - OH_IDE_ENABLE, /* FEATURE_IDE_enable */ - OH_VIA_ENABLE, /* FEATURE_VIA_enable */ - OH_IDECD_POWER, /* FEATURE_CD_power */ - OH_BAY_RESET, /* FEATURE_Mediabay_reset */ - OH_BAY_ENABLE, /* FEATURE_Mediabay_enable */ - OH_BAY_PCI_ENABLE, /* FEATURE_Mediabay_PCI_enable */ - OH_BAY_IDE_ENABLE, /* FEATURE_Mediabay_IDE_enable */ - OH_BAY_FLOPPY_ENABLE, /* FEATURE_Mediabay_floppy_enable */ - 0x80000000, /* FEATURE_BMac_reset */ - 0x60000000, /* FEATURE_BMac_IO_enable */ - 0x02000000, /* FEATURE_Modem_Reset -> guess...*/ - OH_IDE_POWER, /* FEATURE_IDE_DiskPower -> guess... */ - OH_IDE_RESET /* FEATURE_IDE_Reset (0 based) -> guess... */ +/* Those bits are from a PowerBook. It's possible that desktop machines + * based on heathrow need a different definition or some bits removed + */ +static fbit feature_bits_heathrow[] = { + {0x38,0,0}, /* FEATURE_null */ + {0x38,0,HRW_RESET_SCC}, /* FEATURE_Serial_reset */ + {0x38,0,HRW_SCC_ENABLE}, /* FEATURE_Serial_enable */ + {0x38,0,HRW_SCCA_IO}, /* FEATURE_Serial_IO_A */ + {0x38,0,HRW_SCCB_IO}, /* FEATURE_Serial_IO_B */ + {0x38,0,HRW_SWIM_ENABLE}, /* FEATURE_SWIM3_enable */ + {0x38,0,HRW_MESH_ENABLE}, /* FEATURE_MESH_enable */ + {0x38,0,HRW_IDE0_ENABLE}, /* FEATURE_IDE0_enable */ + {0x38,1,HRW_IDE0_RESET_N}, /* FEATURE_IDE0_reset */ + {0x38,0,HRW_IOBUS_ENABLE}, /* FEATURE_IOBUS_enable */ + {0x38,1,HRW_BAY_RESET_N}, /* FEATURE_Mediabay_reset */ + {0x38,1,HRW_BAY_POWER_N}, /* FEATURE_Mediabay_power */ + {0x38,0,HRW_BAY_PCI_ENABLE}, /* FEATURE_Mediabay_PCI_enable */ + {0x38,0,HRW_BAY_IDE_ENABLE}, /* FEATURE_Mediabay_IDE_enable */ + {0x38,1,HRW_IDE1_RESET_N}, /* FEATURE_Mediabay_IDE_reset */ + {0x38,0,HRW_BAY_FLOPPY_ENABLE}, /* FEATURE_Mediabay_floppy_enable */ 
+ {0x38,0,HRW_BMAC_RESET}, /* FEATURE_BMac_reset */ + {0x38,0,HRW_BMAC_IO_ENABLE}, /* FEATURE_BMac_IO_enable */ + {0x38,1,HRW_MODEM_POWER_N}, /* FEATURE_Modem_power */ + {0x38,0,HRW_SLOW_SCC_PCLK}, /* FEATURE_Slow_SCC_PCLK */ + {0x38,1,HRW_SOUND_POWER_N}, /* FEATURE_Sound_Power */ + {0x38,0,HRW_SOUND_CLK_ENABLE}, /* FEATURE_Sound_CLK_Enable */ + {0x38,0,0}, /* FEATURE_IDE2_enable */ + {0x38,0,0}, /* FEATURE_IDE2_reset */ +}; + +/* Those bits are from an iBook. + */ +static fbit feature_bits_keylargo[] = { + {0x38,0,0}, /* FEATURE_null */ + {0x38,0,0}, /* FEATURE_Serial_reset */ + {0x38,0,0x00000054}, /* FEATURE_Serial_enable */ + {0x38,0,0}, /* FEATURE_Serial_IO_A */ + {0x38,0,0}, /* FEATURE_Serial_IO_B */ + {0x38,0,0}, /* FEATURE_SWIM3_enable */ + {0x38,0,0}, /* FEATURE_MESH_enable */ + {0x38,0,0}, /* FEATURE_IDE0_enable */ + {0x3c,1,0x01000000}, /* FEATURE_IDE0_reset */ + {0x38,0,0}, /* FEATURE_IOBUS_enable */ + {0x38,0,0}, /* FEATURE_Mediabay_reset */ + {0x38,0,0}, /* FEATURE_Mediabay_power */ + {0x38,0,0}, /* FEATURE_Mediabay_PCI_enable */ + {0x38,0,0}, /* FEATURE_Mediabay_IDE_enable */ + {0x3c,1,0x08000000}, /* FEATURE_Mediabay_IDE_reset */ + {0x38,0,0}, /* FEATURE_Mediabay_floppy_enable */ + {0x38,0,0}, /* FEATURE_BMac_reset */ + {0x38,0,0}, /* FEATURE_BMac_IO_enable */ + {0x40,1,0x02000000}, /* FEATURE_Modem_power */ + {0x38,0,0}, /* FEATURE_Slow_SCC_PCLK */ + {0x38,0,0}, /* FEATURE_Sound_Power */ + {0x38,0,0}, /* FEATURE_Sound_CLK_Enable */ + {0x38,0,0}, /* FEATURE_IDE2_enable */ + {0x3c,1,0x40000000}, /* FEATURE_IDE2_reset */ }; /* definition of a feature controller object */ -struct feature_controller -{ - u32* bits; +struct feature_controller { + fbit* bits; volatile u32* reg; struct device_node* device; + spinlock_t lock; }; /* static functions */ static void -feature_add_controller(struct device_node *controller_device, u32* bits); +feature_add_controller(struct device_node *controller_device, fbit* bits); -static int +static struct feature_controller* 
feature_lookup_controller(struct device_node *device); /* static varialbles */ -static struct feature_controller controllers[MAX_FEATURE_REGS]; +static struct feature_controller controllers[MAX_FEATURE_CONTROLLERS]; static int controller_count = 0; @@ -96,18 +154,23 @@ feature_init(void) struct device_node *np; np = find_devices("mac-io"); - while (np != NULL) - { - feature_add_controller(np, feature_bits_heathrow); + while (np != NULL) { + /* KeyLargo contains several (5 ?) FCR registers in mac-io, + * plus some gpio's which could eventually be handled here. + */ + if (device_is_compatible(np, "Keylargo")) { + feature_add_controller(np, feature_bits_keylargo); + } else { + feature_add_controller(np, feature_bits_heathrow); + } np = np->next; } if (controller_count == 0) { np = find_devices("ohare"); - if (np) - { + if (np) { if (find_devices("via-pmu") != NULL) - feature_add_controller(np, feature_bits_pbook); + feature_add_controller(np, feature_bits_ohare_pbook); else /* else not sure; maybe this is a Starmax? 
*/ feature_add_controller(np, NULL); @@ -116,17 +179,26 @@ feature_init(void) if (controller_count) printk(KERN_INFO "Registered %d feature controller(s)\n", controller_count); + +#ifdef CONFIG_PMAC_PBOOK +#ifdef CONFIG_DMASOUND_MODULE + /* On PowerBooks, we disable the sound chip when dmasound is a module */ + if (controller_count && find_devices("via-pmu") != NULL) { + feature_clear(controllers[0].device, FEATURE_Sound_power); + feature_clear(controllers[0].device, FEATURE_Sound_CLK_enable); + } +#endif +#endif } static void -feature_add_controller(struct device_node *controller_device, u32* bits) +feature_add_controller(struct device_node *controller_device, fbit* bits) { struct feature_controller* controller; - if (controller_count >= MAX_FEATURE_REGS) - { + if (controller_count >= MAX_FEATURE_CONTROLLERS) { printk(KERN_INFO "Feature controller %s skipped(MAX:%d)\n", - controller_device->full_name, MAX_FEATURE_REGS); + controller_device->full_name, MAX_FEATURE_CONTROLLERS); return; } controller = &controllers[controller_count]; @@ -140,30 +212,32 @@ feature_add_controller(struct device_node *controller_device, u32* bits) } controller->reg = (volatile u32 *)ioremap( - controller_device->addrs[0].address + OHARE_FEATURE_REG, 4); + controller_device->addrs[0].address, MAX_FEATURE_OFFSET); if (bits == NULL) { printk(KERN_INFO "Twiddling the magic ohare bits\n"); - out_le32(controller->reg, STARMAX_FEATURES); + out_le32(FREG(controller,OHARE_FEATURE_REG), STARMAX_FEATURES); return; } + spin_lock_init(&controller->lock); + controller_count++; } -static int +static struct feature_controller* feature_lookup_controller(struct device_node *device) { int i; if (device == NULL) - return -EINVAL; + return NULL; while(device) { for (i=0; iparent; } @@ -172,35 +246,36 @@ feature_lookup_controller(struct device_node *device) device->name); #endif - return -ENODEV; + return NULL; } int feature_set(struct device_node* device, enum system_feature f) { - int controller; - unsigned 
long flags; + struct feature_controller* controller; + unsigned long flags; + unsigned long value; + fbit* bit; if (f >= FEATURE_last) return -EINVAL; controller = feature_lookup_controller(device); - if (controller < 0) - return controller; + if (!controller) + return -ENODEV; + bit = &controller->bits[f]; #ifdef DEBUG_FEATURE printk("feature: <%s> setting feature %d in controller @0x%x\n", - device->name, (int)f, (unsigned int)controllers[controller].reg); + device->name, (int)f, (unsigned int)controller->reg); #endif - save_flags(flags); - cli(); - out_le32( controllers[controller].reg, - in_le32(controllers[controller].reg) | - controllers[controller].bits[f]); - (void)in_le32(controllers[controller].reg); - restore_flags(flags); - udelay(10); + spin_lock_irqsave(&controller->lock, flags); + value = in_le32(FREG(controller, bit->reg)); + value = bit->polarity ? (value & ~bit->mask) : (value | bit->mask); + out_le32(FREG(controller, bit->reg), value); + (void)in_le32(FREG(controller, bit->reg)); + spin_unlock_irqrestore(&controller->lock, flags); return 0; } @@ -208,29 +283,30 @@ feature_set(struct device_node* device, enum system_feature f) int feature_clear(struct device_node* device, enum system_feature f) { - int controller; - unsigned long flags; + struct feature_controller* controller; + unsigned long flags; + unsigned long value; + fbit* bit; if (f >= FEATURE_last) return -EINVAL; controller = feature_lookup_controller(device); - if (controller < 0) - return controller; + if (!controller) + return -ENODEV; + bit = &controller->bits[f]; #ifdef DEBUG_FEATURE printk("feature: <%s> clearing feature %d in controller @0x%x\n", - device->name, (int)f, (unsigned int)controllers[controller].reg); + device->name, (int)f, (unsigned int)controller->reg); #endif - save_flags(flags); - cli(); - out_le32( controllers[controller].reg, - in_le32(controllers[controller].reg) & - ~(controllers[controller].bits[f])); - (void)in_le32(controllers[controller].reg); - 
restore_flags(flags); - udelay(10); + spin_lock_irqsave(&controller->lock, flags); + value = in_le32(FREG(controller, bit->reg)); + value = bit->polarity ? (value | bit->mask) : (value & ~bit->mask); + out_le32(FREG(controller, bit->reg), value); + (void)in_le32(FREG(controller, bit->reg)); + spin_unlock_irqrestore(&controller->lock, flags); return 0; } @@ -238,16 +314,27 @@ feature_clear(struct device_node* device, enum system_feature f) int feature_test(struct device_node* device, enum system_feature f) { - int controller; + struct feature_controller* controller; + unsigned long value; + fbit* bit; if (f >= FEATURE_last) return -EINVAL; controller = feature_lookup_controller(device); - if (controller < 0) - return controller; + if (!controller) + return -ENODEV; + bit = &controller->bits[f]; - return (in_le32(controllers[controller].reg) & - controllers[controller].bits[f]) != 0; +#ifdef DEBUG_FEATURE + printk("feature: <%s> clearing feature %d in controller @0x%x\n", + device->name, (int)f, (unsigned int)controller->reg); +#endif + /* If one feature contains several bits, all of them must be set + * for value to be true, or all of them must be 0 if polarity is + * inverse + */ + value = (in_le32(FREG(controller, bit->reg)) & bit->mask); + return bit->polarity ? (value == 0) : (value == bit->mask); } diff --git a/arch/ppc/kernel/galaxy_pci.c b/arch/ppc/kernel/galaxy_pci.c new file mode 100644 index 000000000000..cd33a10caf85 --- /dev/null +++ b/arch/ppc/kernel/galaxy_pci.c @@ -0,0 +1,612 @@ +/* + * + * Copyright (c) 2000 Grant Erickson + * All rights reserved. + * + * Module name: galaxy_pci.c + * + * Description: + * PCI interface code for the IBM PowerPC 405GP on-chip PCI bus + * interface. + * + * Why is this file called "galaxy_pci"? Because on the original + * IBM "Walnut" evaluation board schematic I have, the 405GP is + * is labeled "GALAXY". 
+ * + */ + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "pci.h" + + +/* Preprocessor Defines */ + +#define PCICFGADDR (volatile unsigned int *)(0xEEC00000) +#define PCICFGDATA (volatile unsigned int *)(0xEEC00004) + + +/* Function Prototypes */ + +decl_config_access_method(galaxy); + + +void __init +galaxy_pcibios_fixup(void) +{ + +} + +void __init +galaxy_setup_pci_ptrs(void) +{ + set_config_access_method(galaxy); + + ppc_md.pcibios_fixup = galaxy_pcibios_fixup; +} + +int +galaxy_pcibios_read_config_byte(unsigned char bus, unsigned char dev_fn, + unsigned char offset, unsigned char *val) +{ + + return (PCIBIOS_SUCCESSFUL); +} + +int +galaxy_pcibios_read_config_word(unsigned char bus, unsigned char dev_fn, + unsigned char offset, unsigned short *val) +{ + + return (PCIBIOS_SUCCESSFUL); +} + +int +galaxy_pcibios_read_config_dword(unsigned char bus, unsigned char dev_fn, + unsigned char offset, unsigned int *val) +{ + + return (PCIBIOS_SUCCESSFUL); +} + +int +galaxy_pcibios_write_config_byte(unsigned char bus, unsigned char dev_fn, + unsigned char offset, unsigned char val) +{ + + return (PCIBIOS_SUCCESSFUL); +} + +int +galaxy_pcibios_write_config_word(unsigned char bus, unsigned char dev_fn, + unsigned char offset, unsigned short val) +{ + + return (PCIBIOS_SUCCESSFUL); +} + +int +galaxy_pcibios_write_config_dword(unsigned char bus, unsigned char dev_fn, + unsigned char offset, unsigned int val) +{ + + return (PCIBIOS_SUCCESSFUL); +} +/* + * + * Copyright (c) 2000 Grant Erickson + * All rights reserved. + * + * Module name: galaxy_pci.c + * + * Description: + * PCI interface code for the IBM PowerPC 405GP on-chip PCI bus + * interface. + * + * Why is this file called "galaxy_pci"? Because on the original + * IBM "Walnut" evaluation board schematic I have, the 405GP is + * is labeled "GALAXY". 
+ * + */ + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "pci.h" + + +/* Preprocessor Defines */ + +#define PCICFGADDR (volatile unsigned int *)(0xEEC00000) +#define PCICFGDATA (volatile unsigned int *)(0xEEC00004) + + +/* Function Prototypes */ + +decl_config_access_method(galaxy); + + +void __init +galaxy_pcibios_fixup(void) +{ + +} + +void __init +galaxy_setup_pci_ptrs(void) +{ + set_config_access_method(galaxy); + + ppc_md.pcibios_fixup = galaxy_pcibios_fixup; +} + +int +galaxy_pcibios_read_config_byte(unsigned char bus, unsigned char dev_fn, + unsigned char offset, unsigned char *val) +{ + + return (PCIBIOS_SUCCESSFUL); +} + +int +galaxy_pcibios_read_config_word(unsigned char bus, unsigned char dev_fn, + unsigned char offset, unsigned short *val) +{ + + return (PCIBIOS_SUCCESSFUL); +} + +int +galaxy_pcibios_read_config_dword(unsigned char bus, unsigned char dev_fn, + unsigned char offset, unsigned int *val) +{ + + return (PCIBIOS_SUCCESSFUL); +} + +int +galaxy_pcibios_write_config_byte(unsigned char bus, unsigned char dev_fn, + unsigned char offset, unsigned char val) +{ + + return (PCIBIOS_SUCCESSFUL); +} + +int +galaxy_pcibios_write_config_word(unsigned char bus, unsigned char dev_fn, + unsigned char offset, unsigned short val) +{ + + return (PCIBIOS_SUCCESSFUL); +} + +int +galaxy_pcibios_write_config_dword(unsigned char bus, unsigned char dev_fn, + unsigned char offset, unsigned int val) +{ + + return (PCIBIOS_SUCCESSFUL); +} +/* + * + * Copyright (c) 2000 Grant Erickson + * All rights reserved. + * + * Module name: galaxy_pci.c + * + * Description: + * PCI interface code for the IBM PowerPC 405GP on-chip PCI bus + * interface. + * + * Why is this file called "galaxy_pci"? Because on the original + * IBM "Walnut" evaluation board schematic I have, the 405GP is + * is labeled "GALAXY". 
+ * + */ + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "pci.h" + + +/* Preprocessor Defines */ + +#define PCICFGADDR (volatile unsigned int *)(0xEEC00000) +#define PCICFGDATA (volatile unsigned int *)(0xEEC00004) + + +/* Function Prototypes */ + +decl_config_access_method(galaxy); + + +void __init +galaxy_pcibios_fixup(void) +{ + +} + +void __init +galaxy_setup_pci_ptrs(void) +{ + set_config_access_method(galaxy); + + ppc_md.pcibios_fixup = galaxy_pcibios_fixup; +} + +int +galaxy_pcibios_read_config_byte(unsigned char bus, unsigned char dev_fn, + unsigned char offset, unsigned char *val) +{ + + return (PCIBIOS_SUCCESSFUL); +} + +int +galaxy_pcibios_read_config_word(unsigned char bus, unsigned char dev_fn, + unsigned char offset, unsigned short *val) +{ + + return (PCIBIOS_SUCCESSFUL); +} + +int +galaxy_pcibios_read_config_dword(unsigned char bus, unsigned char dev_fn, + unsigned char offset, unsigned int *val) +{ + + return (PCIBIOS_SUCCESSFUL); +} + +int +galaxy_pcibios_write_config_byte(unsigned char bus, unsigned char dev_fn, + unsigned char offset, unsigned char val) +{ + + return (PCIBIOS_SUCCESSFUL); +} + +int +galaxy_pcibios_write_config_word(unsigned char bus, unsigned char dev_fn, + unsigned char offset, unsigned short val) +{ + + return (PCIBIOS_SUCCESSFUL); +} + +int +galaxy_pcibios_write_config_dword(unsigned char bus, unsigned char dev_fn, + unsigned char offset, unsigned int val) +{ + + return (PCIBIOS_SUCCESSFUL); +} +/* + * + * Copyright (c) 2000 Grant Erickson + * All rights reserved. + * + * Module name: galaxy_pci.c + * + * Description: + * PCI interface code for the IBM PowerPC 405GP on-chip PCI bus + * interface. + * + * Why is this file called "galaxy_pci"? Because on the original + * IBM "Walnut" evaluation board schematic I have, the 405GP is + * is labeled "GALAXY". 
+ * + */ + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "pci.h" + + +/* Preprocessor Defines */ + +#define PCICFGADDR (volatile unsigned int *)(0xEEC00000) +#define PCICFGDATA (volatile unsigned int *)(0xEEC00004) + + +/* Function Prototypes */ + +decl_config_access_method(galaxy); + + +void __init +galaxy_pcibios_fixup(void) +{ + +} + +void __init +galaxy_setup_pci_ptrs(void) +{ + set_config_access_method(galaxy); + + ppc_md.pcibios_fixup = galaxy_pcibios_fixup; +} + +int +galaxy_pcibios_read_config_byte(unsigned char bus, unsigned char dev_fn, + unsigned char offset, unsigned char *val) +{ + + return (PCIBIOS_SUCCESSFUL); +} + +int +galaxy_pcibios_read_config_word(unsigned char bus, unsigned char dev_fn, + unsigned char offset, unsigned short *val) +{ + + return (PCIBIOS_SUCCESSFUL); +} + +int +galaxy_pcibios_read_config_dword(unsigned char bus, unsigned char dev_fn, + unsigned char offset, unsigned int *val) +{ + + return (PCIBIOS_SUCCESSFUL); +} + +int +galaxy_pcibios_write_config_byte(unsigned char bus, unsigned char dev_fn, + unsigned char offset, unsigned char val) +{ + + return (PCIBIOS_SUCCESSFUL); +} + +int +galaxy_pcibios_write_config_word(unsigned char bus, unsigned char dev_fn, + unsigned char offset, unsigned short val) +{ + + return (PCIBIOS_SUCCESSFUL); +} + +int +galaxy_pcibios_write_config_dword(unsigned char bus, unsigned char dev_fn, + unsigned char offset, unsigned int val) +{ + + return (PCIBIOS_SUCCESSFUL); +} +/* + * + * Copyright (c) 2000 Grant Erickson + * All rights reserved. + * + * Module name: galaxy_pci.c + * + * Description: + * PCI interface code for the IBM PowerPC 405GP on-chip PCI bus + * interface. + * + * Why is this file called "galaxy_pci"? Because on the original + * IBM "Walnut" evaluation board schematic I have, the 405GP is + * is labeled "GALAXY". 
+ * + */ + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "pci.h" + + +/* Preprocessor Defines */ + +#define PCICFGADDR (volatile unsigned int *)(0xEEC00000) +#define PCICFGDATA (volatile unsigned int *)(0xEEC00004) + + +/* Function Prototypes */ + +decl_config_access_method(galaxy); + + +void __init +galaxy_pcibios_fixup(void) +{ + +} + +void __init +galaxy_setup_pci_ptrs(void) +{ + set_config_access_method(galaxy); + + ppc_md.pcibios_fixup = galaxy_pcibios_fixup; +} + +int +galaxy_pcibios_read_config_byte(unsigned char bus, unsigned char dev_fn, + unsigned char offset, unsigned char *val) +{ + + return (PCIBIOS_SUCCESSFUL); +} + +int +galaxy_pcibios_read_config_word(unsigned char bus, unsigned char dev_fn, + unsigned char offset, unsigned short *val) +{ + + return (PCIBIOS_SUCCESSFUL); +} + +int +galaxy_pcibios_read_config_dword(unsigned char bus, unsigned char dev_fn, + unsigned char offset, unsigned int *val) +{ + + return (PCIBIOS_SUCCESSFUL); +} + +int +galaxy_pcibios_write_config_byte(unsigned char bus, unsigned char dev_fn, + unsigned char offset, unsigned char val) +{ + + return (PCIBIOS_SUCCESSFUL); +} + +int +galaxy_pcibios_write_config_word(unsigned char bus, unsigned char dev_fn, + unsigned char offset, unsigned short val) +{ + + return (PCIBIOS_SUCCESSFUL); +} + +int +galaxy_pcibios_write_config_dword(unsigned char bus, unsigned char dev_fn, + unsigned char offset, unsigned int val) +{ + + return (PCIBIOS_SUCCESSFUL); +} +/* + * + * Copyright (c) 2000 Grant Erickson + * All rights reserved. + * + * Module name: galaxy_pci.c + * + * Description: + * PCI interface code for the IBM PowerPC 405GP on-chip PCI bus + * interface. + * + * Why is this file called "galaxy_pci"? Because on the original + * IBM "Walnut" evaluation board schematic I have, the 405GP is + * is labeled "GALAXY". 
+ * + */ + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "pci.h" + + +/* Preprocessor Defines */ + +#define PCICFGADDR (volatile unsigned int *)(0xEEC00000) +#define PCICFGDATA (volatile unsigned int *)(0xEEC00004) + + +/* Function Prototypes */ + +decl_config_access_method(galaxy); + + +void __init +galaxy_pcibios_fixup(void) +{ + +} + +void __init +galaxy_setup_pci_ptrs(void) +{ + set_config_access_method(galaxy); + + ppc_md.pcibios_fixup = galaxy_pcibios_fixup; +} + +int +galaxy_pcibios_read_config_byte(unsigned char bus, unsigned char dev_fn, + unsigned char offset, unsigned char *val) +{ + + return (PCIBIOS_SUCCESSFUL); +} + +int +galaxy_pcibios_read_config_word(unsigned char bus, unsigned char dev_fn, + unsigned char offset, unsigned short *val) +{ + + return (PCIBIOS_SUCCESSFUL); +} + +int +galaxy_pcibios_read_config_dword(unsigned char bus, unsigned char dev_fn, + unsigned char offset, unsigned int *val) +{ + + return (PCIBIOS_SUCCESSFUL); +} + +int +galaxy_pcibios_write_config_byte(unsigned char bus, unsigned char dev_fn, + unsigned char offset, unsigned char val) +{ + + return (PCIBIOS_SUCCESSFUL); +} + +int +galaxy_pcibios_write_config_word(unsigned char bus, unsigned char dev_fn, + unsigned char offset, unsigned short val) +{ + + return (PCIBIOS_SUCCESSFUL); +} + +int +galaxy_pcibios_write_config_dword(unsigned char bus, unsigned char dev_fn, + unsigned char offset, unsigned int val) +{ + + return (PCIBIOS_SUCCESSFUL); +} diff --git a/arch/ppc/kernel/gemini_setup.c b/arch/ppc/kernel/gemini_setup.c index d7ca917803bb..fcf3a701c8ee 100644 --- a/arch/ppc/kernel/gemini_setup.c +++ b/arch/ppc/kernel/gemini_setup.c @@ -53,7 +53,7 @@ static unsigned int cpu_6xx[16] = { }; int chrp_get_irq(struct pt_regs *); -void chrp_post_irq(int); +void chrp_post_irq(struct pt_regs* regs, int); static inline unsigned long _get_HID1(void) { @@ -132,6 +132,19 @@ extern unsigned long loops_per_sec; extern int root_mountflags; 
extern char cmd_line[]; +void +gemini_heartbeat(void) +{ + static unsigned long led = GEMINI_LEDBASE+(4*8); + static char direction = 8; + *(char *)led = 0; + if ( (led + direction) > (GEMINI_LEDBASE+(7*8)) || + (led + direction) < (GEMINI_LEDBASE+(4*8)) ) + direction *= -1; + led += direction; + *(char *)led = 0xff; + ppc_md.heartbeat_count = ppc_md.heartbeat_reset; +} void __init gemini_setup_arch(void) { @@ -175,6 +188,10 @@ void __init gemini_setup_arch(void) printk("CPU manufacturer: %s [rev=%04x]\n", (cpu & (1<<15)) ? "IBM" : "Motorola", (cpu & 0xffff)); + ppc_md.heartbeat = gemini_heartbeat; + ppc_md.heartbeat_reset = HZ/8; + ppc_md.heartbeat_count = 1; + /* take special pains to map the MPIC, since it isn't mapped yet */ gemini_openpic_init(); /* start the L2 */ @@ -505,7 +522,7 @@ int gemini_get_irq( struct pt_regs *regs ) return irq; } -void gemini_post_irq(int irq) +void gemini_post_irq(struct pt_regs* regs, int irq) { /* * If it's an i8259 irq then we've already done the diff --git a/arch/ppc/kernel/head.S b/arch/ppc/kernel/head.S index a70ba8bfd75f..8b56c635cba3 100644 --- a/arch/ppc/kernel/head.S +++ b/arch/ppc/kernel/head.S @@ -156,6 +156,16 @@ __start: bl fix_mem_constants #endif /* CONFIG_APUS */ +#ifndef CONFIG_GEMINI +/* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains + * the physical address we are running at, returned by prom_init() + */ +__after_prom_start: + bl mmu_off + bl clear_bats + bl flush_tlbs +#endif + /* * Use the first pair of BAT registers to map the 1st 16MB * of RAM to KERNELBASE. 
From this point on we can't safely @@ -211,6 +221,11 @@ __start: mtspr DBAT0U,r11 /* bit in upper BAT register */ mtspr IBAT0L,r8 mtspr IBAT0U,r11 +#if 0 /* Useful debug code, please leave in for now so I don't have to + * look at docs when I need to setup a BAT ; + */ + bl setup_screen_bat +#endif 5: isync #ifndef CONFIG_APUS @@ -627,12 +642,8 @@ DataStoreTLBMiss: mtcrf 0x80,r3 rfi -/* Instruction address breakpoint exception (on 603/604) */ STD_EXCEPTION(0x1300, Trap_13, InstructionBreakpoint) - -/* System management exception (603?) */ - STD_EXCEPTION(0x1400, Trap_14, UnknownException) - + STD_EXCEPTION(0x1400, SMI, SMIException) STD_EXCEPTION(0x1500, Trap_15, UnknownException) STD_EXCEPTION(0x1600, Trap_16, UnknownException) STD_EXCEPTION(0x1700, Trap_17, TAUException) @@ -644,10 +655,7 @@ DataStoreTLBMiss: STD_EXCEPTION(0x1d00, Trap_1d, UnknownException) STD_EXCEPTION(0x1e00, Trap_1e, UnknownException) STD_EXCEPTION(0x1f00, Trap_1f, UnknownException) - - /* Run mode exception */ STD_EXCEPTION(0x2000, RunMode, RunModeException) - STD_EXCEPTION(0x2100, Trap_21, UnknownException) STD_EXCEPTION(0x2200, Trap_22, UnknownException) STD_EXCEPTION(0x2300, Trap_23, UnknownException) @@ -911,12 +919,16 @@ giveup_fpu: * the kernel image to physical address 0. */ relocate_kernel: +#if 0 /* Is this still needed ? I don't think so. 
It breaks new + * boot-with-mmu-off stuff + */ lis r9,0x426f /* if booted from BootX, don't */ addi r9,r9,0x6f58 /* translate source addr */ cmpw r31,r9 /* (we have to on chrp) */ beq 7f rlwinm r4,r4,0,8,31 /* translate source address */ add r4,r4,r3 /* to region mapped with BATs */ +#endif 7: addis r9,r26,klimit@ha /* fetch klimit */ lwz r25,klimit@l(r9) addis r25,r25,-KERNELBASE@h @@ -1194,14 +1206,26 @@ enable_caches: cmpi 0,r9,4 /* check for 604 */ cmpi 1,r9,9 /* or 604e */ cmpi 2,r9,10 /* or mach5 */ + cmpi 3,r9,8 /* check for 750 (G3) */ + cmpi 4,r9,12 /* or 7400 (G4) */ cror 2,2,6 cror 2,2,10 bne 4f ori r11,r11,HID0_SIED|HID0_BHTE /* for 604[e], enable */ bne 2,5f ori r11,r11,HID0_BTCD + b 5f +4: + cror 14,14,18 + bne 3,6f + /* We should add ABE here if we want to use Store Gathering + * and other nifty bridge features + */ + ori r11,r11,HID0_SGE|HID0_BHTE|HID0_BTIC /* for g3/g4, enable */ + li r3,0 + mtspr ICTC,r3 5: mtspr HID0,r11 /* superscalar exec & br history tbl */ -4: blr +6: blr /* * Load stuff into the MMU. Intended to be called with @@ -1388,6 +1412,45 @@ clear_bats: #endif /* !defined(CONFIG_GEMINI) */ blr +#ifndef CONFIG_GEMINI +flush_tlbs: + lis r20, 0x1000 +1: addic. r20, r20, -0x1000 + tlbie r20 + blt 1b + sync + blr + +mmu_off: + addi r4, r3, __after_prom_start - _start + mfmsr r3 + andi. r0,r3,MSR_DR|MSR_IR /* MMU enabled? */ + beq 1f + ori r3,r3,MSR_DR|MSR_IR + xori r3,r3,MSR_DR|MSR_IR + mtspr SRR0,r4 + mtspr SRR1,r3 + sync + rfi +1: blr +#endif + +#if 0 /* That's useful debug stuff */ +setup_screen_bat: + lis r3, 0x9100 +#ifdef __SMP__ + ori r3,r3,0x12 +#else + ori r3,r3,0x2 +#endif + mtspr DBAT1L, r3 + mtspr IBAT1L, r3 + ori r3,r3,(BL_8M<<2)|0x2 /* set up BAT registers for 604 */ + mtspr DBAT1U, r3 + mtspr IBAT1U, r3 + blr +#endif + /* * We put a few things here that have to be page-aligned. 
* This stuff goes at the beginning of the data segment, diff --git a/arch/ppc/kernel/head_4xx.S b/arch/ppc/kernel/head_4xx.S index ba3284ad8657..bec805b92e7b 100644 --- a/arch/ppc/kernel/head_4xx.S +++ b/arch/ppc/kernel/head_4xx.S @@ -78,13 +78,17 @@ _GLOBAL(_start) li r24,0 + ## Invalidate all TLB entries + + tlbia + ## We should still be executing code at physical address 0x0000xxxx ## at this point. However, start_here is at virtual address ## 0xC000xxxx. So, set up a TLB mapping to cover this once ## translation is enabled. lis r3,KERNELBASE@h # Load the kernel virtual address - addis r3,r3,KERNELBASE@l + ori r3,r3,KERNELBASE@l tophys(r4,r3) # Load the kernel physical address ## Save the existing PID and load the kernel PID. @@ -96,11 +100,7 @@ _GLOBAL(_start) ## Configure and load entry into TLB slot 0. clrrwi r4,r4,10 # Mask off the real page number - - ## XXX - Temporarily set the TLB_I bit because of cache issues that - ## seem to foul-up the exception handling code. - - ori r4,r4,(TLB_WR | TLB_EX | TLB_I) # Set the write and execute bits + ori r4,r4,(TLB_WR | TLB_EX) # Set the write and execute bits clrrwi r3,r3,10 # Mask off the effective page number ori r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_16M)) @@ -333,22 +333,12 @@ _GLOBAL(timer_interrupt_intercept) #endif ### 0x1100 - Data TLB Miss Exception - - START_EXCEPTION(0x1100, DTLBMiss) - STND_EXCEPTION_PROLOG - addi r3,r1,STACK_FRAME_OVERHEAD - li r7,STND_EXC - li r20,MSR_KERNEL - FINISH_EXCEPTION(UnknownException) + + STND_EXCEPTION(0x1100, DTLBMiss, PPC4xx_dtlb_miss) ### 0x1200 - Instruction TLB Miss Exception - - START_EXCEPTION(0x1200, ITLBMiss) - STND_EXCEPTION_PROLOG - addi r3,r1,STACK_FRAME_OVERHEAD - li r7,STND_EXC - li r20,MSR_KERNEL - FINISH_EXCEPTION(UnknownException) + + STND_EXCEPTION(0x1200, ITLBMiss, PPC4xx_itlb_miss) STND_EXCEPTION(0x1300, Trap_13, UnknownException) STND_EXCEPTION(0x1400, Trap_14, UnknownException) @@ -560,8 +550,6 @@ start_here: _GLOBAL(set_context) mtspr SPRN_PID,r3 - tlbia 
- SYNC blr ### diff --git a/arch/ppc/kernel/idle.c b/arch/ppc/kernel/idle.c index 8dda6ad6b6be..7cf97b873552 100644 --- a/arch/ppc/kernel/idle.c +++ b/arch/ppc/kernel/idle.c @@ -154,11 +154,12 @@ unsigned long get_zero_page_fast(void) if ( zero_quicklist ) { /* atomically remove this page from the list */ - asm ( "101:lwarx %1,0,%2\n" /* reserve zero_cache */ + register unsigned long tmp; + asm ( "101:lwarx %1,0,%3\n" /* reserve zero_cache */ " lwz %0,0(%1)\n" /* get next -- new zero_cache */ - " stwcx. %0,0,%2\n" /* update zero_cache */ + " stwcx. %0,0,%3\n" /* update zero_cache */ " bne- 101b\n" /* if lost reservation try again */ - : "=&r" (zero_quicklist), "=&r" (page) + : "=&r" (tmp), "=&r" (page), "+m" (zero_cache) : "r" (&zero_quicklist) : "cc" ); #ifdef __SMP__ @@ -193,6 +194,7 @@ void zero_paged(void) { unsigned long pageptr = 0; /* current page being zero'd */ unsigned long bytecount = 0; + register unsigned long tmp; pte_t *pte; if ( atomic_read(&zero_cache_sz) >= zero_cache_water[0] ) @@ -249,15 +251,14 @@ void zero_paged(void) pte_cache(*pte); flush_tlb_page(find_vma(&init_mm,pageptr),pageptr); /* atomically add this page to the list */ - asm ( "101:lwarx %0,0,%1\n" /* reserve zero_cache */ - " stw %0,0(%2)\n" /* update *pageptr */ + asm ( "101:lwarx %0,0,%2\n" /* reserve zero_cache */ + " stw %0,0(%3)\n" /* update *pageptr */ #ifdef __SMP__ " sync\n" /* let store settle */ #endif - " mr %0,%2\n" /* update zero_cache in reg */ - " stwcx. %2,0,%1\n" /* update zero_cache in mem */ + " stwcx. 
%3,0,%2\n" /* update zero_cache in mem */ " bne- 101b\n" /* if lost reservation try again */ - : "=&r" (zero_quicklist) + : "=&r" (tmp), "+m" (zero_quicklist) : "r" (&zero_quicklist), "r" (pageptr) : "cc" ); /* diff --git a/arch/ppc/kernel/irq.c b/arch/ppc/kernel/irq.c index c2f2d1c11504..8b5f590fb780 100644 --- a/arch/ppc/kernel/irq.c +++ b/arch/ppc/kernel/irq.c @@ -126,7 +126,7 @@ void irq_kfree(void *ptr) */ int request_8xxirq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *), #elif defined(CONFIG_APUS) -int sys_request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *), +int request_sysirq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *), #else int request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *), #endif @@ -315,7 +315,7 @@ asmlinkage int do_IRQ(struct pt_regs *regs, int isfake) } ppc_irq_dispatch_handler( regs, irq ); if ( ppc_md.post_irq ) - ppc_md.post_irq( irq ); + ppc_md.post_irq( regs, irq ); out: hardirq_exit( cpu ); diff --git a/arch/ppc/kernel/local_irq.h b/arch/ppc/kernel/local_irq.h index 60219201356c..840b14d6fbd0 100644 --- a/arch/ppc/kernel/local_irq.h +++ b/arch/ppc/kernel/local_irq.h @@ -4,6 +4,8 @@ #include #include +#include +#include #include void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq); diff --git a/arch/ppc/kernel/misc.S b/arch/ppc/kernel/misc.S index aa9f5ac83cf0..fde7112c7722 100644 --- a/arch/ppc/kernel/misc.S +++ b/arch/ppc/kernel/misc.S @@ -20,13 +20,13 @@ #include #include "ppc_asm.h" -#ifndef CONFIG_8xx -CACHE_LINE_SIZE = 32 -LG_CACHE_LINE_SIZE = 5 -#else +#if defined(CONFIG_4xx) || defined(CONFIG_8xx) CACHE_LINE_SIZE = 16 LG_CACHE_LINE_SIZE = 4 -#endif /* CONFIG_8xx */ +#else +CACHE_LINE_SIZE = 32 +LG_CACHE_LINE_SIZE = 5 +#endif /* CONFIG_4xx || CONFIG_8xx */ .text @@ -590,6 +590,20 @@ _GLOBAL(_set_THRM3) _GLOBAL(_get_PVR) mfspr r3,PVR blr + +_GLOBAL(_get_HID0) + mfspr r3,HID0 + blr + +_GLOBAL(_get_ICTC) + mfspr r3,ICTC + blr + 
+_GLOBAL(_set_ICTC) + mtspr ICTC,r3 + blr + + /* L2CR functions Copyright © 1997-1998 by PowerLogix R & D, Inc. @@ -656,6 +670,8 @@ _GLOBAL(_set_L2CR) rlwinm r4,r4,16,16,31 cmplwi r4,0x0008 beq thisIs750 + cmplwi r4,0x000c + beq thisIs750 li r3,-1 blr @@ -750,9 +766,11 @@ _GLOBAL(_get_L2CR) mfspr r3,PVR rlwinm r3,r3,16,16,31 cmplwi r3,0x0008 + beq 1f + cmplwi r3,0x000c li r3,0 bnelr - +1: /* Return the L2CR contents */ mfspr r3,L2CR blr diff --git a/arch/ppc/kernel/oak_setup.c b/arch/ppc/kernel/oak_setup.c index ad2c224bb660..f3c142e2bc30 100644 --- a/arch/ppc/kernel/oak_setup.c +++ b/arch/ppc/kernel/oak_setup.c @@ -1,6 +1,6 @@ /* * - * Copyright (c) 1999 Grant Erickson + * Copyright (c) 1999-2000 Grant Erickson * * Module name: oak_setup.c * @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -30,6 +31,12 @@ #include "time.h" #include "oak_setup.h" + + + + + + /* Function Prototypes */ extern void abort(void); @@ -95,32 +102,32 @@ oak_init(unsigned long r3, unsigned long r4, unsigned long r5, /* Initialize machine-dependency vectors */ - ppc_md.setup_arch = oak_setup_arch; - ppc_md.setup_residual = oak_setup_residual; - ppc_md.get_cpuinfo = NULL; - ppc_md.irq_cannonicalize = NULL; - ppc_md.init_IRQ = oak_init_IRQ; - ppc_md.get_irq = oak_get_irq; - ppc_md.init = NULL; - - ppc_md.restart = oak_restart; - ppc_md.power_off = oak_power_off; - ppc_md.halt = oak_halt; - - ppc_md.time_init = oak_time_init; - ppc_md.set_rtc_time = oak_set_rtc_time; - ppc_md.get_rtc_time = oak_get_rtc_time; - ppc_md.calibrate_decr = oak_calibrate_decr; - - ppc_md.kbd_setkeycode = NULL; - ppc_md.kbd_getkeycode = NULL; - ppc_md.kbd_translate = NULL; - ppc_md.kbd_unexpected_up = NULL; - ppc_md.kbd_leds = NULL; - ppc_md.kbd_init_hw = NULL; + ppc_md.setup_arch = oak_setup_arch; + ppc_md.setup_residual = oak_setup_residual; + ppc_md.get_cpuinfo = NULL; + ppc_md.irq_cannonicalize = NULL; + ppc_md.init_IRQ = oak_init_IRQ; + ppc_md.get_irq = oak_get_irq; + ppc_md.init = NULL; 
+ + ppc_md.restart = oak_restart; + ppc_md.power_off = oak_power_off; + ppc_md.halt = oak_halt; + + ppc_md.time_init = oak_time_init; + ppc_md.set_rtc_time = oak_set_rtc_time; + ppc_md.get_rtc_time = oak_get_rtc_time; + ppc_md.calibrate_decr = oak_calibrate_decr; + + ppc_md.kbd_setkeycode = NULL; + ppc_md.kbd_getkeycode = NULL; + ppc_md.kbd_translate = NULL; + ppc_md.kbd_unexpected_up = NULL; + ppc_md.kbd_leds = NULL; + ppc_md.kbd_init_hw = NULL; #if defined(CONFIG_MAGIC_SYSRQ) - ppc_md.kbd_sysrq_xlate = NULL; + ppc_md.ppc_kbd_sysrq_xlate = NULL; #endif return; diff --git a/arch/ppc/kernel/oak_setup.h b/arch/ppc/kernel/oak_setup.h index 62cfac9069c3..8648bd084df8 100644 --- a/arch/ppc/kernel/oak_setup.h +++ b/arch/ppc/kernel/oak_setup.h @@ -1,14 +1,14 @@ /* * - * Copyright (c) 1999 Grant Erickson + * Copyright (c) 1999-2000 Grant Erickson * - * Module name: oak_setup.c + * Module name: oak_setup.h * * Description: * Architecture- / platform-specific boot-time initialization code for * the IBM PowerPC 403GCX "Oak" evaluation board. Adapted from original * code by Gary Thomas, Cort Dougan , and Dan Malek - * . + * . * */ diff --git a/arch/ppc/kernel/open_pic.c b/arch/ppc/kernel/open_pic.c index 6857aa36f200..50046369bd91 100644 --- a/arch/ppc/kernel/open_pic.c +++ b/arch/ppc/kernel/open_pic.c @@ -17,17 +17,30 @@ #include #include #include +#include #include "local_irq.h" volatile struct OpenPIC *OpenPIC = NULL; u_int OpenPIC_NumInitSenses __initdata = 0; u_char *OpenPIC_InitSenses __initdata = NULL; +int open_pic_irq_offset; +extern int use_of_interrupt_tree; void chrp_mask_irq(unsigned int); void chrp_unmask_irq(unsigned int); +void find_ISUs(void); static u_int NumProcessors; static u_int NumSources; +OpenPIC_Source *ISU; +/* + * We should use this if we have > 1 ISU. + * We can just point each entry to the + * appropriate source regs but it wastes a lot of space + * so until we have >1 ISU I'll leave it unimplemented. 
+ * -- Cort +OpenPIC_Source ISU[128]; +*/ struct hw_interrupt_type open_pic = { " OpenPIC ", @@ -38,7 +51,6 @@ struct hw_interrupt_type open_pic = { 0, 0 }; -int open_pic_irq_offset; /* * Accesses to the current processor's registers @@ -96,7 +108,7 @@ void openpic_ipi_action(int cpl, void *dev_id, struct pt_regs *regs) #endif /* __SMP__ */ #ifdef __i386__ -static inline u_int ld_le32(volatile u_int *addr) +static inline u_int in_le32(volatile u_int *addr) { return *addr; } @@ -111,7 +123,7 @@ u_int openpic_read(volatile u_int *addr) { u_int val; - val = ld_le32(addr); + val = in_le32(addr); return val; } @@ -148,6 +160,9 @@ static void openpic_safe_writefield(volatile u_int *addr, u_int mask, { openpic_setfield(addr, OPENPIC_MASK); /* wait until it's not in use */ + /* BenH: Is this code really enough ? I would rather check the result + * and eventually retry ... + */ while (openpic_read(addr) & OPENPIC_ACTIVITY); openpic_writefield(addr, mask | OPENPIC_MASK, field | OPENPIC_MASK); } @@ -182,16 +197,18 @@ void __init openpic_init(int main_pic) OPENPIC_FEATURE_LAST_PROCESSOR_SHIFT) + 1; NumSources = ((t & OPENPIC_FEATURE_LAST_SOURCE_MASK) >> OPENPIC_FEATURE_LAST_SOURCE_SHIFT) + 1; - - printk("OpenPIC Version %s (%d CPUs and %d IRQ sources) at %p\n", version, - NumProcessors, NumSources, OpenPIC); - timerfreq = openpic_read(&OpenPIC->Global.Timer_Frequency); - printk("OpenPIC timer frequency is "); - if (timerfreq) - printk("%d Hz\n", timerfreq); - else - printk("not set\n"); - + if ( _machine != _MACH_Pmac ) + { + printk("OpenPIC Version %s (%d CPUs and %d IRQ sources) at %p\n", version, + NumProcessors, NumSources, OpenPIC); + timerfreq = openpic_read(&OpenPIC->Global.Timer_Frequency); + printk("OpenPIC timer frequency is "); + if (timerfreq) + printk("%d MHz\n", timerfreq>>20); + else + printk("not set\n"); + } + if ( main_pic ) { /* Initialize timer interrupts */ @@ -209,24 +226,59 @@ void __init openpic_init(int main_pic) /* Disabled, Priority 8 */ 
openpic_initipi(i, 8, OPENPIC_VEC_IPI+i); } - - /* Initialize external interrupts */ - if ( ppc_md.progress ) ppc_md.progress("openpic ext",0x3bc); - /* SIOint (8259 cascade) is special */ - openpic_initirq(0, 8, open_pic_irq_offset, 1, 1); - openpic_mapirq(0, 1<<0); - for (i = 1; i < NumSources; i++) { - /* Enabled, Priority 8 */ - openpic_initirq(i, 8, open_pic_irq_offset+i, 0, - i < OpenPIC_NumInitSenses ? OpenPIC_InitSenses[i] : 1); - /* Processor 0 */ - openpic_mapirq(i, 1<<0); + find_ISUs(); + if ( _machine != _MACH_Pmac ) + { + /* Initialize external interrupts */ + if ( ppc_md.progress ) ppc_md.progress("openpic ext",0x3bc); + /* SIOint (8259 cascade) is special */ + openpic_initirq(0, 8, open_pic_irq_offset, 1, 1); + openpic_mapirq(0, 1<<0); + for (i = 1; i < NumSources; i++) { + /* Enabled, Priority 8 */ + openpic_initirq(i, 8, open_pic_irq_offset+i, 0, + i < OpenPIC_NumInitSenses ? OpenPIC_InitSenses[i] : 1); + /* Processor 0 */ + openpic_mapirq(i, 1<<0); + } } - + else + { + /* Prevent any interrupt from occuring during initialisation. + * Hum... I believe this is not necessary, Apple does that in + * Darwin's PowerExpress code. + */ + openpic_set_priority(0, 0xf); + + /* First disable all interrupts and map them to CPU 0 */ + for (i = 0; i < NumSources; i++) { + openpic_disable_irq(i); + openpic_mapirq(i, 1<<0); + } + + /* If we use the device tree, then lookup all interrupts and + * initialize them according to sense infos found in the tree + */ + if (use_of_interrupt_tree) { + struct device_node* np = find_all_nodes(); + while(np) { + int j, pri; + pri = strcmp(np->name, "programmer-switch") ? 
2 : 7; + for (j=0;jn_intrs;j++) + openpic_initirq( np->intrs[j].line, + pri, + np->intrs[j].line, + np->intrs[j].sense, + np->intrs[j].sense); + np = np->next; + } + } + } + /* Initialize the spurious interrupt */ if ( ppc_md.progress ) ppc_md.progress("openpic spurious",0x3bd); openpic_set_spurious(OPENPIC_VEC_SPURIOUS); - if ( _machine != _MACH_gemini ) + if ( !(_machine && (_MACH_gemini|_MACH_Pmac)) ) { if (request_irq(IRQ_8259_CASCADE, no_action, SA_INTERRUPT, "82c59 cascade", NULL)) @@ -238,6 +290,20 @@ void __init openpic_init(int main_pic) if ( ppc_md.progress ) ppc_md.progress("openpic exit",0x222); } +void find_ISUs(void) +{ +#ifdef CONFIG_PPC64 + /* hardcode this for now since the IBM 260 is the only thing with + * a distributed openpic right now. -- Cort + */ + ISU = (OpenPIC_Source *)0xfeff7c00; + NumSources = 0x10; +#else + /* for non-distributed OpenPIC implementations it's in the IDU -- Cort */ + ISU = OpenPIC->Source; +#endif +} + void openpic_reset(void) { openpic_setfield(&OpenPIC->Global.Global_Configuration0, @@ -279,6 +345,8 @@ void openpic_eoi(u_int cpu) { check_arg_cpu(cpu); openpic_write(&OpenPIC->THIS_CPU.EOI, 0); + /* Handle PCI write posting */ + (void)openpic_read(&OpenPIC->THIS_CPU.EOI); } @@ -379,7 +447,7 @@ void do_openpic_setup_cpu(void) #if 0 /* let the openpic know we want intrs */ for ( i = 0; i < NumSources ; i++ ) - openpic_mapirq(i, openpic_read(&OpenPIC->Source[i].Destination) + openpic_mapirq(i, openpic_read(ISU[i].Destination) | (1<Source[irq - open_pic_irq_offset].Vector_Priority, OPENPIC_MASK); + openpic_clearfield(&ISU[irq - open_pic_irq_offset].Vector_Priority, OPENPIC_MASK); + /* make sure mask gets to controller before we return to user */ + do { + mb(); /* sync is probably useless here */ + } while(openpic_readfield(&OpenPIC->Source[irq].Vector_Priority, + OPENPIC_MASK)); } void openpic_disable_irq(u_int irq) { check_arg_irq(irq); - openpic_setfield(&OpenPIC->Source[irq - open_pic_irq_offset].Vector_Priority, 
OPENPIC_MASK); + openpic_setfield(&ISU[irq - open_pic_irq_offset].Vector_Priority, OPENPIC_MASK); + /* make sure mask gets to controller before we return to user */ + do { + mb(); /* sync is probably useless here */ + } while(!openpic_readfield(&OpenPIC->Source[irq].Vector_Priority, + OPENPIC_MASK)); } /* @@ -440,12 +518,13 @@ void openpic_initirq(u_int irq, u_int pri, u_int vec, int pol, int sense) check_arg_irq(irq); check_arg_pri(pri); check_arg_vec(vec); - openpic_safe_writefield(&OpenPIC->Source[irq].Vector_Priority, + openpic_safe_writefield(&ISU[irq].Vector_Priority, OPENPIC_PRIORITY_MASK | OPENPIC_VECTOR_MASK | - OPENPIC_SENSE_POLARITY | OPENPIC_SENSE_LEVEL, + OPENPIC_SENSE_MASK | OPENPIC_POLARITY_MASK, (pri << OPENPIC_PRIORITY_SHIFT) | vec | - (pol ? OPENPIC_SENSE_POLARITY : 0) | - (sense ? OPENPIC_SENSE_LEVEL : 0)); + (pol ? OPENPIC_POLARITY_POSITIVE : + OPENPIC_POLARITY_NEGATIVE) | + (sense ? OPENPIC_SENSE_LEVEL : OPENPIC_SENSE_EDGE)); } /* @@ -454,7 +533,7 @@ void openpic_initirq(u_int irq, u_int pri, u_int vec, int pol, int sense) void openpic_mapirq(u_int irq, u_int cpumask) { check_arg_irq(irq); - openpic_write(&OpenPIC->Source[irq].Destination, cpumask); + openpic_write(&ISU[irq].Destination, cpumask); } /* @@ -465,7 +544,7 @@ void openpic_mapirq(u_int irq, u_int cpumask) void openpic_set_sense(u_int irq, int sense) { check_arg_irq(irq); - openpic_safe_writefield(&OpenPIC->Source[irq].Vector_Priority, + openpic_safe_writefield(&ISU[irq].Vector_Priority, OPENPIC_SENSE_LEVEL, (sense ? OPENPIC_SENSE_LEVEL : 0)); } diff --git a/arch/ppc/kernel/pci-dma.c b/arch/ppc/kernel/pci-dma.c new file mode 100644 index 000000000000..089566908b8f --- /dev/null +++ b/arch/ppc/kernel/pci-dma.c @@ -0,0 +1,52 @@ +/* + * Copyright (C) 2000 Ani Joshi + * + * + * Dynamic DMA mapping support. 
+ * + * swiped from i386 + * + */ + +#include +#include +#include +#include +#include + +/* Pure 2^n version of get_order */ +extern __inline__ int __get_order(unsigned long size) +{ + int order; + + size = (size-1) >> (PAGE_SHIFT-1); + order = -1; + do { + size >>= 1; + order++; + } while (size); + return order; +} + +void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, + dma_addr_t *dma_handle) +{ + void *ret; + int gfp = GFP_ATOMIC; + + if (hwdev == NULL || hwdev->dma_mask != 0xffffffff) + gfp |= GFP_DMA; + ret = (void *)__get_free_pages(gfp, __get_order(size)); + + if (ret != NULL) { + memset(ret, 0, size); + *dma_handle = virt_to_bus(ret); + } + return ret; +} + +void pci_free_consistent(struct pci_dev *hwdev, size_t size, + void *vaddr, dma_addr_t dma_handle) +{ + free_pages((unsigned long)vaddr, __get_order(size)); +} diff --git a/arch/ppc/kernel/pci.c b/arch/ppc/kernel/pci.c index 6c98bbf2c4d7..8326bc369cfb 100644 --- a/arch/ppc/kernel/pci.c +++ b/arch/ppc/kernel/pci.c @@ -71,9 +71,9 @@ void __init pcibios_init(void) { printk("PCI: Probing PCI hardware\n"); pci_scan_bus(0, &generic_pci_ops, NULL); - pcibios_claim_resources(&pci_root_buses); if (ppc_md.pcibios_fixup) ppc_md.pcibios_fixup(); + pcibios_claim_resources(&pci_root_buses); } void __init diff --git a/arch/ppc/kernel/pmac_nvram.c b/arch/ppc/kernel/pmac_nvram.c index ea3338aefb3b..a3a9bae709e8 100644 --- a/arch/ppc/kernel/pmac_nvram.c +++ b/arch/ppc/kernel/pmac_nvram.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -20,23 +21,37 @@ static int nvram_naddrs; static volatile unsigned char *nvram_addr; static volatile unsigned char *nvram_data; -static int nvram_mult; - -#define NVRAM_SIZE 0x2000 /* 8kB of non-volatile RAM */ - +static int nvram_mult, is_core_99; +static char* nvram_image; + +#define NVRAM_SIZE 0x2000 /* 8kB of non-volatile RAM */ + __init void pmac_nvram_init(void) { struct device_node *dp; + nvram_naddrs = 0; + dp = find_devices("nvram"); 
if (dp == NULL) { printk(KERN_ERR "Can't find NVRAM device\n"); - nvram_naddrs = 0; return; } nvram_naddrs = dp->n_addrs; - if (_machine == _MACH_chrp && nvram_naddrs == 1) { + is_core_99 = device_is_compatible(dp, "nvram,flash"); + if (is_core_99) + { + int i; + if (nvram_naddrs < 1) + return; + nvram_image = kmalloc(dp->addrs[0].size, GFP_KERNEL); + if (!nvram_image) + return; + nvram_data = ioremap(dp->addrs[0].address, dp->addrs[0].size); + for (i=0; iaddrs[0].size; i++) + nvram_image[i] = in_8(nvram_data + i); + } else if (_machine == _MACH_chrp && nvram_naddrs == 1) { nvram_data = ioremap(dp->addrs[0].address, dp->addrs[0].size); nvram_mult = 1; } else if (nvram_naddrs == 1) { @@ -69,6 +84,8 @@ unsigned char nvram_read_byte(int addr) return req.reply[1]; #endif case 1: + if (is_core_99) + return nvram_image[addr]; return nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult]; case 2: *nvram_addr = addr >> 5; @@ -94,6 +111,10 @@ void nvram_write_byte(unsigned char val, int addr) break; #endif case 1: + if (is_core_99) { + nvram_image[addr] = val; + break; + } nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult] = val; break; case 2: diff --git a/arch/ppc/kernel/pmac_pci.c b/arch/ppc/kernel/pmac_pci.c index 75f8097fd7ab..b57d5aa281f1 100644 --- a/arch/ppc/kernel/pmac_pci.c +++ b/arch/ppc/kernel/pmac_pci.c @@ -30,6 +30,16 @@ struct bridge_data **bridges, *bridge_list; static int max_bus; +struct uninorth_data { + struct device_node* node; + volatile unsigned int* cfg_addr; + volatile unsigned int* cfg_data; +}; + +static struct uninorth_data uninorth_bridges[3]; +static int uninorth_count; +static int uninorth_default = -1; + static void add_bridges(struct device_node *dev); /* @@ -73,6 +83,159 @@ int pci_device_loc(struct device_node *dev, unsigned char *bus_ptr, return 0; } +/* This function only works for bus 0, uni-N uses a different mecanism for + * other busses (see below) + */ +#define UNI_N_CFA0(devfn, off) \ + ((1 << (unsigned long)PCI_SLOT(dev_fn)) \ + | 
(((unsigned long)PCI_FUNC(dev_fn)) << 8) \ + | (((unsigned long)(off)) & 0xFCUL)) + +/* This one is for type 1 config accesses */ +#define UNI_N_CFA1(bus, devfn, off) \ + ((((unsigned long)(bus)) << 16) \ + |(((unsigned long)(devfn)) << 8) \ + |(((unsigned long)(off)) & 0xFCUL) \ + |1UL) + +/* We should really use RTAS here, unfortunately, it's not available with BootX. + * (one more reason for writing a beautiful OF booter). I'll do the RTAS stuff + * later, once I have something that works enough with BootX. + */ +__pmac static +unsigned int +uni_north_access_data(unsigned char bus, unsigned char dev_fn, + unsigned char offset) +{ + struct device_node *node, *bridge_node; + int bridge = uninorth_default; + unsigned int caddr; + + if (bus == 0) { + if (PCI_SLOT(dev_fn) < 11) { + return 0; + } + /* We look for the OF device corresponding to this bus/devfn pair. If we + * don't find it, we default to the external PCI */ + bridge_node = NULL; + node = find_pci_device_OFnode(bus, dev_fn & 0xf8); + if (node) { + /* note: we don't stop on the first occurence since we need to go + * up to the root bridge */ + do { + if (!strcmp(node->type, "pci")) + bridge_node = node; + node=node->parent; + } while (node); + } + if (bridge_node) { + int i; + for (i=0;iio_base); } +#define GRACKLE_STG_ENABLE 0x00000040 + +/* N.B. this is called before bridges is initialized, so we can't + use grackle_pcibios_{read,write}_config_dword. */ +static inline void grackle_set_stg(struct bridge_data *bp, int enable) +{ + unsigned int val; + + out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8)); + val = in_le32((volatile unsigned int *)bp->cfg_data); + val = enable? 
(val | GRACKLE_STG_ENABLE): (val & ~GRACKLE_STG_ENABLE); + out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8)); + out_le32((volatile unsigned int *)bp->cfg_data, val); +} + void __init pmac_find_bridges(void) { int bus; @@ -411,20 +589,47 @@ static void __init add_bridges(struct device_node *dev) printk(KERN_INFO "PCI buses %d..%d", bus_range[0], bus_range[1]); printk(" controlled by %s at %x\n", dev->name, addr->address); + if (device_is_compatible(dev, "uni-north")) { + int i = uninorth_count++; + uninorth_bridges[i].cfg_addr = ioremap(addr->address + 0x800000, 0x1000); + uninorth_bridges[i].cfg_data = ioremap(addr->address + 0xc00000, 0x1000); + uninorth_bridges[i].node = dev; + /* XXX This is the bridge with the PCI expansion bus. This is also the + * address of the bus that will receive type 1 config accesses and io + * accesses. Appears to be correct for iMac DV and G4 Sawtooth too. + * That means that we cannot do io cycles on the AGP bus nor the internal + * ethernet/fw bus. Fortunately, they appear not to be needed on iMac DV + * and G4 neither. + */ + if (addr->address == 0xf2000000) + uninorth_default = i; + else + continue; + } + bp = (struct bridge_data *) alloc_bootmem(sizeof(*bp)); - if (strcmp(dev->name, "pci") != 0) { - bp->cfg_addr = (volatile unsigned int *) - ioremap(addr->address + 0x800000, 0x1000); - bp->cfg_data = (volatile unsigned char *) - ioremap(addr->address + 0xc00000, 0x1000); - bp->io_base = (void *) ioremap(addr->address, 0x10000); - } else { - /* XXX */ + if (device_is_compatible(dev, "uni-north")) { + bp->cfg_addr = 0; + bp->cfg_data = 0; + /* is 0x10000 enough for io space ? 
*/ + bp->io_base = (void *)ioremap(addr->address, 0x10000); + } else if (strcmp(dev->name, "pci") == 0) { + /* XXX assume this is a mpc106 (grackle) */ bp->cfg_addr = (volatile unsigned int *) ioremap(0xfec00000, 0x1000); bp->cfg_data = (volatile unsigned char *) ioremap(0xfee00000, 0x1000); bp->io_base = (void *) ioremap(0xfe000000, 0x20000); +#if 0 /* Disabled for now, HW problems */ + grackle_set_stg(bp, 1); +#endif + } else { + /* a `bandit' or `chaos' bridge */ + bp->cfg_addr = (volatile unsigned int *) + ioremap(addr->address + 0x800000, 0x1000); + bp->cfg_data = (volatile unsigned char *) + ioremap(addr->address + 0xc00000, 0x1000); + bp->io_base = (void *) ioremap(addr->address, 0x10000); } if (isa_io_base == 0) isa_io_base = (unsigned long) bp->io_base; @@ -453,7 +658,7 @@ fix_intr(struct device_node *node, struct pci_dev *dev) for (; node != 0;node = node->sibling) { class_code = (unsigned int *) get_property(node, "class-code", 0); - if((*class_code >> 8) == PCI_CLASS_BRIDGE_PCI) + if(class_code && (*class_code >> 8) == PCI_CLASS_BRIDGE_PCI) fix_intr(node->child, dev); reg = (unsigned int *) get_property(node, "reg", 0); if (reg == 0 || ((reg[0] >> 8) & 0xff) != dev->devfn) @@ -490,20 +695,38 @@ pmac_pcibios_fixup(void) if (pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin) || !pin) continue; /* No interrupt generated -> no fixup */ - fix_intr(bp->node->child, dev); + /* We iterate all instances of uninorth for now */ + if (uninorth_count && dev->bus->number == 0) { + int i; + for (i=0;ichild, dev); + } else + fix_intr(bp->node->child, dev); } } void __init pmac_setup_pci_ptrs(void) { - if (find_devices("pci") != 0) { - /* looks like a G3 powermac */ - set_config_access_method(grackle); - } else { + struct device_node* np; + + np = find_devices("pci"); + if (np != 0) + { + if (device_is_compatible(np, "uni-north")) + { + /* looks like an Core99 powermac */ + set_config_access_method(uni); + } else + { + /* looks like a G3 powermac */ + 
set_config_access_method(grackle); + } + } else + { set_config_access_method(pmac); } - + ppc_md.pcibios_fixup = pmac_pcibios_fixup; } diff --git a/arch/ppc/kernel/pmac_pic.c b/arch/ppc/kernel/pmac_pic.c index 385e233272ee..d2d5e6b25d59 100644 --- a/arch/ppc/kernel/pmac_pic.c +++ b/arch/ppc/kernel/pmac_pic.c @@ -3,6 +3,8 @@ #include #include #include +#include +#include #include #include @@ -27,12 +29,38 @@ static volatile struct pmac_irq_hw *pmac_irq_hw[4] = { static int max_irqs; static int max_real_irqs; +static int has_openpic = 0; #define MAXCOUNT 10000000 #define GATWICK_IRQ_POOL_SIZE 10 static struct interrupt_info gatwick_int_pool[GATWICK_IRQ_POOL_SIZE]; +extern int pmac_pcibios_read_config_word(unsigned char bus, unsigned char dev_fn, + unsigned char offset, unsigned short *val); +extern int pmac_pcibios_write_config_word(unsigned char bus, unsigned char dev_fn, + unsigned char offset, unsigned short val); + +static void pmac_openpic_mask_irq(unsigned int irq_nr) +{ + openpic_disable_irq(irq_nr); +} + +static void pmac_openpic_unmask_irq(unsigned int irq_nr) +{ + openpic_enable_irq(irq_nr); +} + +struct hw_interrupt_type pmac_open_pic = { + " OpenPIC ", + NULL, + NULL, + pmac_openpic_unmask_irq, + pmac_openpic_mask_irq, + pmac_openpic_mask_irq, + 0 +}; + static void __pmac pmac_mask_and_ack_irq(unsigned int irq_nr) { unsigned long bit = 1UL << (irq_nr & 0x1f); @@ -141,74 +169,6 @@ static void gatwick_action(int cpl, void *dev_id, struct pt_regs *regs) ppc_irq_dispatch_handler( regs, irq ); } -#if 0 -void -pmac_do_IRQ(struct pt_regs *regs, - int cpu, - int isfake) -{ - int irq; - unsigned long bits = 0; - -#ifdef __SMP__ - /* IPI's are a hack on the powersurge -- Cort */ - if ( cpu != 0 ) - { -#ifdef CONFIG_XMON - static int xmon_2nd; - if (xmon_2nd) - xmon(regs); -#endif - pmac_smp_message_recv(); - return -1; - } - - { - unsigned int loops = MAXCOUNT; - while (test_bit(0, &global_irq_lock)) { - if (smp_processor_id() == global_irq_holder) { - printk("uh 
oh, interrupt while we hold global irq lock!\n"); -#ifdef CONFIG_XMON - xmon(0); -#endif - break; - } - if (loops-- == 0) { - printk("do_IRQ waiting for irq lock (holder=%d)\n", global_irq_holder); -#ifdef CONFIG_XMON - xmon(0); -#endif - } - } - } -#endif /* __SMP__ */ - - for (irq = max_real_irqs - 1; irq > 0; irq -= 32) { - int i = irq >> 5; - bits = ld_le32(&pmac_irq_hw[i]->flag) - | ppc_lost_interrupts[i]; - if (bits == 0) - continue; - irq -= cntlzw(bits); - break; - } - - if (irq < 0) - { - printk(KERN_DEBUG "Bogus interrupt %d from PC = %lx\n", - irq, regs->nip); - ppc_spurious_interrupts++; - } - else - { - ppc_irq_dispatch_handler( regs, irq ); - } -#ifdef CONFIG_SMP -out: -#endif /* CONFIG_SMP */ -} -#endif - int pmac_get_irq(struct pt_regs *regs) { @@ -248,15 +208,30 @@ pmac_get_irq(struct pt_regs *regs) } #endif /* __SMP__ */ - for (irq = max_real_irqs - 1; irq > 0; irq -= 32) { - int i = irq >> 5; - bits = ld_le32(&pmac_irq_hw[i]->flag) - | ppc_lost_interrupts[i]; - if (bits == 0) - continue; - irq -= cntlzw(bits); - break; - } + /* Yeah, I know, this could be a separate do_IRQ function */ + if (has_openpic) + { + irq = openpic_irq(smp_processor_id()); + if (irq == OPENPIC_VEC_SPURIOUS) + /* We get those when doing polled ADB requests, + * using -2 is a temp hack to disable the printk + */ + irq = -2; /*-1; */ + else + openpic_eoi(smp_processor_id()); + } + else + { + for (irq = max_real_irqs - 1; irq > 0; irq -= 32) { + int i = irq >> 5; + bits = ld_le32(&pmac_irq_hw[i]->flag) + | ppc_lost_interrupts[i]; + if (bits == 0) + continue; + irq -= cntlzw(bits); + break; + } + } return irq; } @@ -339,6 +314,51 @@ pmac_fix_gatwick_interrupts(struct device_node *gw, int irq_base) } } +/* + * The PowerBook 3400/2400/3500 can have a combo ethernet/modem + * card which includes an ohare chip that acts as a second interrupt + * controller. If we find this second ohare, set it up and fix the + * interrupt value in the device tree for the ethernet chip. 
+ */ +static void __init enable_second_ohare(void) +{ + unsigned char bus, devfn; + unsigned short cmd; + unsigned long addr; + int second_irq; + struct device_node *irqctrler = find_devices("pci106b,7"); + struct device_node *ether; + + if (irqctrler == NULL || irqctrler->n_addrs <= 0) + return; + addr = (unsigned long) ioremap(irqctrler->addrs[0].address, 0x40); + pmac_irq_hw[1] = (volatile struct pmac_irq_hw *)(addr + 0x20); + max_irqs = 64; + if (pci_device_loc(irqctrler, &bus, &devfn) == 0) { + pmac_pcibios_read_config_word(bus, devfn, PCI_COMMAND, &cmd); + cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER; + cmd &= ~PCI_COMMAND_IO; + pmac_pcibios_write_config_word(bus, devfn, PCI_COMMAND, cmd); + } + + second_irq = irqctrler->intrs[0].line; + printk(KERN_INFO "irq: secondary controller on irq %d\n", second_irq); + request_irq(second_irq, gatwick_action, SA_INTERRUPT, + "interrupt cascade", 0 ); + + /* Fix interrupt for the modem/ethernet combo controller. The number + in the device tree (27) is bogus (correct for the ethernet-only + board but not the combo ethernet/modem board). + The real interrupt is 28 on the second controller -> 28+32 = 60. + */ + ether = find_devices("pci1011,14"); + if (ether && ether->n_intrs > 0) { + ether->intrs[0].line = 60; + printk(KERN_INFO "irq: Fixed ethernet IRQ to %d\n", + ether->intrs[0].line); + } +} + void __init pmac_pic_init(void) { @@ -347,9 +367,44 @@ pmac_pic_init(void) unsigned long addr; int second_irq = -999; + /* We first try to detect Apple's new Core99 chipset, since mac-io + * is quite different on those machines and contains an IBM MPIC2. 
+ */ + irqctrler = find_type_devices("open-pic"); + if (irqctrler != NULL) + { + printk("PowerMac using OpenPIC irq controller\n"); + if (irqctrler->n_addrs > 0) + { +#ifdef CONFIG_XMON + struct device_node* pswitch; +#endif /* CONFIG_XMON */ + OpenPIC = (volatile struct OpenPIC *) + ioremap(irqctrler->addrs[0].address, + irqctrler->addrs[0].size); + for ( i = 0 ; i < NR_IRQS ; i++ ) + irq_desc[i].handler = &pmac_open_pic; + openpic_init(1); + has_openpic = 1; +#ifdef CONFIG_XMON + pswitch = find_devices("programmer-switch"); + if (pswitch && pswitch->n_intrs) + request_irq(pswitch->intrs[0].line, xmon_irq, 0, + "NMI - XMON", 0); +#endif /* CONFIG_XMON */ + return; + } + irqctrler = NULL; + } - /* G3 powermacs have 64 interrupts, G3 Series PowerBook have 128, - others have 32 */ + /* + * G3 powermacs and 1999 G3 PowerBooks have 64 interrupts, + * 1998 G3 Series PowerBooks have 128, + * other powermacs have 32. + * The combo ethernet/modem card for the Powerstar powerbooks + * (2400/3400/3500, ohare based) has a second ohare chip + * effectively making a total of 64. 
+ */ max_irqs = max_real_irqs = 32; irqctrler = find_devices("mac-io"); if (irqctrler) @@ -389,6 +444,12 @@ pmac_pic_init(void) pmac_irq_hw[0] = (volatile struct pmac_irq_hw *) (addr + 0x20); } + /* PowerBooks 3400 and 3500 can have a second controller in a second + ohare chip, on the combo ethernet/modem card */ + if (machine_is_compatible("AAPL,3400/2400") + || machine_is_compatible("AAPL,3500")) + enable_second_ohare(); + /* disable all interrupts in all controllers */ for (i = 0; i * 32 < max_irqs; ++i) out_le32(&pmac_irq_hw[i]->enable, 0); @@ -435,7 +496,12 @@ sleep_save_intrs(int viaint) out_le32(&pmac_irq_hw[0]->enable, ppc_cached_irq_mask[0]); if (max_real_irqs > 32) out_le32(&pmac_irq_hw[1]->enable, ppc_cached_irq_mask[1]); - mb(); + (void)in_le32(&pmac_irq_hw[0]->flag); + do { + /* make sure mask gets to controller before we + return to user */ + mb(); + } while(in_le32(&pmac_irq_hw[0]->enable) != ppc_cached_irq_mask[0]); } void diff --git a/arch/ppc/kernel/pmac_setup.c b/arch/ppc/kernel/pmac_setup.c index 72d23a934e73..e1c1815acf2f 100644 --- a/arch/ppc/kernel/pmac_setup.c +++ b/arch/ppc/kernel/pmac_setup.c @@ -104,6 +104,10 @@ extern char saved_command_line[]; extern void zs_kgdb_hook(int tty_num); static void ohare_init(void); static void init_p2pbridge(void); +static void init_uninorth(void); +#ifdef CONFIG_BOOTX_TEXT +void pmac_progress(char *s, unsigned short hex); +#endif __pmac int @@ -232,8 +236,10 @@ pmac_setup_arch(void) if (fp != 0) { switch (_get_PVR() >> 16) { case 4: /* 604 */ + case 8: /* G3 */ case 9: /* 604e */ case 10: /* mach V (604ev5) */ + case 12: /* G4 */ case 20: /* 620 */ loops_per_sec = *fp; break; @@ -252,9 +258,10 @@ pmac_setup_arch(void) pmac_find_bridges(); init_p2pbridge(); - + init_uninorth(); + /* Checks "l2cr-value" property in the registry */ - if ( (_get_PVR() >> 16) == 8) { + if ( (_get_PVR() >> 16) == 8 || (_get_PVR() >> 16) == 12 ) { struct device_node *np = find_devices("cpus"); if (np == 0) np = 
find_type_devices("cpu"); @@ -346,6 +353,33 @@ static void __init ohare_init(void) } } +static void __init +init_uninorth(void) +{ + /* + * Turns on the gmac clock so that it responds to PCI cycles + * later, the driver may want to turn it off again to save + * power when interface is down + */ + struct device_node* uni_n = find_devices("uni-n"); + struct device_node* gmac = find_devices("ethernet"); + unsigned long* addr; + + if (!uni_n || uni_n->n_addrs < 1) + return; + addr = ioremap(uni_n->addrs[0].address, 0x300); + + while(gmac) { + if (device_is_compatible(gmac, "gmac")) + break; + gmac = gmac->next; + } + if (gmac) { + *(addr + 8) |= 2; + eieio(); + } +} + extern char *bootpath; extern char *bootdevice; void *boot_host; @@ -401,14 +435,11 @@ note_scsi_host(struct device_node *node, void *host) #endif #if defined(CONFIG_BLK_DEV_IDE) && defined(CONFIG_BLK_DEV_IDE_PMAC) -extern int pmac_ide_count; -extern struct device_node *pmac_ide_node[]; -static int ide_majors[] = { 3, 22, 33, 34, 56, 57, 88, 89, 90, 91 }; kdev_t __init find_ide_boot(void) { char *p; - int i, n; + int n; if (bootdevice == NULL) return 0; @@ -417,18 +448,7 @@ kdev_t __init find_ide_boot(void) return 0; n = p - bootdevice; - /* - * Look through the list of IDE interfaces for this one. - */ - for (i = 0; i < pmac_ide_count; ++i) { - char *name = pmac_ide_node[i]->full_name; - if (memcmp(name, bootdevice, n) == 0 && name[n] == 0) { - /* XXX should cope with the 2nd drive as well... 
*/ - return MKDEV(ide_majors[i], 0); - } - } - - return 0; + return pmac_find_ide_boot(bootdevice, n); } #endif /* CONFIG_BLK_DEV_IDE && CONFIG_BLK_DEV_IDE_PMAC */ @@ -464,10 +484,10 @@ void note_bootable_part(kdev_t dev, int part) find_boot_device(); found_boot = 1; } - if (dev == boot_dev) { + if (boot_dev == 0 || dev == boot_dev) { ROOT_DEV = MKDEV(MAJOR(dev), MINOR(dev) + part); boot_dev = NODEV; - printk(" (root)"); + printk(" (root on %d)", part); } } @@ -550,11 +570,15 @@ pmac_ide_default_irq(ide_ioreg_t base) return 0; } +#if defined(CONFIG_BLK_DEV_IDE) && defined(CONFIG_BLK_DEV_IDE_PMAC) +extern ide_ioreg_t pmac_ide_get_base(int index); +#endif + ide_ioreg_t pmac_ide_default_io_base(int index) { #if defined(CONFIG_BLK_DEV_IDE) && defined(CONFIG_BLK_DEV_IDE_PMAC) - return pmac_ide_regbase[index]; + return pmac_ide_get_base(index); #else return 0; #endif @@ -660,5 +684,22 @@ pmac_init(unsigned long r3, unsigned long r4, unsigned long r5, ppc_ide_md.ide_init_hwif = pmac_ide_init_hwif_ports; ppc_ide_md.io_base = _IO_BASE; /* actually too early for this :-( */ -#endif +#endif +#ifdef CONFIG_BOOTX_TEXT + ppc_md.progress = pmac_progress; +#endif + if (ppc_md.progress) ppc_md.progress("pmac_init(): exit", 0); + } + +#ifdef CONFIG_BOOTX_TEXT +extern void drawchar(char c); +extern void drawstring(const char *c); +void +pmac_progress(char *s, unsigned short hex) +{ + drawstring(s); + drawchar('\n'); +} +#endif CONFIG_BOOTX_TEXT + diff --git a/arch/ppc/kernel/pmac_time.c b/arch/ppc/kernel/pmac_time.c index 3b7dd283f9d5..1c935a625655 100644 --- a/arch/ppc/kernel/pmac_time.c +++ b/arch/ppc/kernel/pmac_time.c @@ -71,8 +71,8 @@ unsigned long pmac_get_rtc_time(void) if (req.reply_len != 7) printk(KERN_ERR "pmac_get_rtc_time: got %d byte reply\n", req.reply_len); - return (req.reply[3] << 24) + (req.reply[4] << 16) - + (req.reply[5] << 8) + req.reply[6] - RTC_OFFSET; + return (unsigned long)(req.reply[1] << 24) + (req.reply[2] << 16) + + (req.reply[3] << 8) + (unsigned 
long)req.reply[4] - RTC_OFFSET; #endif /* CONFIG_ADB_CUDA */ #ifdef CONFIG_ADB_PMU case SYS_CTRLER_PMU: diff --git a/arch/ppc/kernel/ppc-stub.c b/arch/ppc/kernel/ppc-stub.c index b6397daacd2e..42ca7eadcea6 100644 --- a/arch/ppc/kernel/ppc-stub.c +++ b/arch/ppc/kernel/ppc-stub.c @@ -351,7 +351,7 @@ static inline int get_msr() static inline void set_msr(int msr) { - asm volatile("mfmsr %0" : : "r" (msr)); + asm volatile("mtmsr %0" : : "r" (msr)); } /* Set up exception handlers for tracing and breakpoints diff --git a/arch/ppc/kernel/ppc_htab.c b/arch/ppc/kernel/ppc_htab.c index 41bef4c26819..264a24d48cde 100644 --- a/arch/ppc/kernel/ppc_htab.c +++ b/arch/ppc/kernel/ppc_htab.c @@ -523,7 +523,8 @@ int proc_dol2crvec(ctl_table *table, int write, struct file *filp, "0.5", "1.0", "(reserved2)", "(reserved3)" }; - if ( (_get_PVR() >> 16) != 8) return -EFAULT; + if ( ((_get_PVR() >> 16) != 8) && ((_get_PVR() >> 16) != 12)) + return -EFAULT; if ( /*!table->maxlen ||*/ (filp->f_pos && !write)) { *lenp = 0; diff --git a/arch/ppc/kernel/ppc_ksyms.c b/arch/ppc/kernel/ppc_ksyms.c index 65e9250343a3..9a5444a51fac 100644 --- a/arch/ppc/kernel/ppc_ksyms.c +++ b/arch/ppc/kernel/ppc_ksyms.c @@ -229,6 +229,8 @@ EXPORT_SYMBOL(find_path_device); EXPORT_SYMBOL(find_phandle); EXPORT_SYMBOL(device_is_compatible); EXPORT_SYMBOL(machine_is_compatible); +EXPORT_SYMBOL(find_pci_device_OFnode); +EXPORT_SYMBOL(find_all_nodes); EXPORT_SYMBOL(get_property); EXPORT_SYMBOL(pci_io_base); EXPORT_SYMBOL(pci_device_loc); diff --git a/arch/ppc/kernel/prep_pci.c b/arch/ppc/kernel/prep_pci.c index 63375787599e..fd14fc483f64 100644 --- a/arch/ppc/kernel/prep_pci.c +++ b/arch/ppc/kernel/prep_pci.c @@ -40,7 +40,7 @@ unsigned char *Motherboard_routes; static unsigned long *ProcInfo; extern int chrp_get_irq(struct pt_regs *); -extern void chrp_post_irq(int); +extern void chrp_post_irq(struct pt_regs* regs, int); /* Tables for known hardware */ diff --git a/arch/ppc/kernel/process.c b/arch/ppc/kernel/process.c 
index 241b7c33c2f7..ed98ba6f0486 100644 --- a/arch/ppc/kernel/process.c +++ b/arch/ppc/kernel/process.c @@ -57,6 +57,8 @@ union task_union __attribute((aligned(16))) init_task_union = { }; /* only used to get secondary processor up */ struct task_struct *current_set[NR_CPUS] = {&init_task, }; +char *sysmap = NULL; +unsigned long sysmap_size = 0; #undef SHOW_TASK_SWITCHES 1 #undef CHECK_STACK 1 diff --git a/arch/ppc/kernel/prom.c b/arch/ppc/kernel/prom.c index 049cde10f5bf..a52bdd804a98 100644 --- a/arch/ppc/kernel/prom.c +++ b/arch/ppc/kernel/prom.c @@ -30,6 +30,10 @@ #include #include +#ifdef CONFIG_FB +#include +#endif + /* * Properties whose value is longer than this get excluded from our * copy of the device tree. This way we don't waste space storing @@ -91,7 +95,7 @@ unsigned int prom_num_displays = 0; char *of_stdout_device = 0; prom_entry prom = 0; -ihandle prom_chosen = 0, prom_stdout = 0; +ihandle prom_chosen = 0, prom_stdout = 0, prom_disp_node = 0; extern char *klimit; char *bootpath = 0; @@ -102,33 +106,35 @@ unsigned int rtas_entry = 0; /* physical pointer */ unsigned int rtas_size = 0; unsigned int old_rtas = 0; +int use_of_interrupt_tree = 0; static struct device_node *allnodes = 0; +#ifdef CONFIG_BOOTX_TEXT + static void clearscreen(void); static void flushscreen(void); -#ifdef CONFIG_BOOTX_TEXT - void drawchar(char c); void drawstring(const char *c); static void drawhex(unsigned long v); static void scrollscreen(void); static void draw_byte(unsigned char c, long locX, long locY); -static void draw_byte_32(unsigned char *bits, unsigned long *base); -static void draw_byte_16(unsigned char *bits, unsigned long *base); -static void draw_byte_8(unsigned char *bits, unsigned long *base); +static void draw_byte_32(unsigned char *bits, unsigned long *base, int rb); +static void draw_byte_16(unsigned char *bits, unsigned long *base, int rb); +static void draw_byte_8(unsigned char *bits, unsigned long *base, int rb); -static long g_loc_X; -static long 
g_loc_Y; -static long g_max_loc_X; -static long g_max_loc_Y; +/* We want those in data, not BSS */ +static long g_loc_X = 0; +static long g_loc_Y = 0; +static long g_max_loc_X = 0; +static long g_max_loc_Y = 0; #define cmapsz (16*256) static unsigned char vga_font[cmapsz]; -#endif +#endif /* CONFIG_BOOTX_TEXT */ static void *call_prom(const char *service, int nargs, int nret, ...); @@ -138,15 +144,25 @@ static unsigned long inspect_node(phandle, struct device_node *, unsigned long, unsigned long, struct device_node ***); static unsigned long finish_node(struct device_node *, unsigned long, interpret_func *); +static unsigned long finish_node_interrupts(struct device_node *, unsigned long); static unsigned long check_display(unsigned long); static int prom_next_node(phandle *); static void *early_get_property(unsigned long, unsigned long, char *); +#ifdef CONFIG_BOOTX_TEXT +static void setup_disp_fake_bi(ihandle dp); +static void prom_welcome(boot_infos_t* bi, unsigned long phys); +#endif + extern void enter_rtas(void *); extern unsigned long reloc_offset(void); extern char cmd_line[512]; /* XXX */ boot_infos_t *boot_infos = 0; /* init it so it's in data segment not bss */ +#ifdef CONFIG_BOOTX_TEXT +boot_infos_t *disp_bi = 0; +boot_infos_t fake_bi = {0,}; +#endif unsigned long dev_tree_size; /* @@ -240,7 +256,7 @@ prom_print(const char *msg) if (RELOC(prom_stdout) == 0) { #ifdef CONFIG_BOOTX_TEXT - if (RELOC(boot_infos) != 0) + if (RELOC(disp_bi) != 0) drawstring(msg); #endif return; @@ -261,7 +277,6 @@ prom_print(const char *msg) } } -unsigned long smp_ibm_chrp_hack __initdata = 0; unsigned long smp_chrp_cpu_nr __initdata = 1; /* @@ -269,23 +284,29 @@ unsigned long smp_chrp_cpu_nr __initdata = 1; * handling exceptions and the MMU hash table for us. 
*/ __init -void +unsigned long prom_init(int r3, int r4, prom_entry pp) { #ifdef CONFIG_SMP int i; phandle node; char type[16], *path; -#endif +#endif + int chrp = 0; unsigned long mem; - ihandle prom_rtas; + ihandle prom_rtas, prom_mmu, prom_op; unsigned long offset = reloc_offset(); int l; char *p, *d; + int prom_version = 0; + unsigned long phys; + + /* Default */ + phys = offset + KERNELBASE; /* check if we're apus, return if we are */ if ( r3 == 0x61707573 ) - return; + return phys; /* If we came here from BootX, clear the screen, * set up some pointers and return. */ @@ -294,22 +315,20 @@ prom_init(int r3, int r4, prom_entry pp) unsigned long space; unsigned long ptr, x; char *model; -#ifdef CONFIG_BOOTX_TEXT - unsigned long flags; -#endif RELOC(boot_infos) = PTRUNRELOC(bi); if (!BOOT_INFO_IS_V2_COMPATIBLE(bi)) bi->logicalDisplayBase = 0; - clearscreen(); - #ifdef CONFIG_BOOTX_TEXT RELOC(g_loc_X) = 0; RELOC(g_loc_Y) = 0; RELOC(g_max_loc_X) = (bi->dispDeviceRect[2] - bi->dispDeviceRect[0]) / 8; RELOC(g_max_loc_Y) = (bi->dispDeviceRect[3] - bi->dispDeviceRect[1]) / 16; + RELOC(disp_bi) = PTRUNRELOC(bi); + clearscreen(); + /* Test if boot-info is compatible. Done only in config CONFIG_BOOTX_TEXT since there is nothing much we can do with an incompatible version, except display a message and eventually hang the processor... @@ -320,23 +339,9 @@ prom_init(int r3, int r4, prom_entry pp) if (!BOOT_INFO_IS_COMPATIBLE(bi)) prom_print(RELOC(" !!! 
WARNING - Incompatible version of BootX !!!\n\n\n")); - prom_print(RELOC("Welcome to Linux, kernel " UTS_RELEASE "\n")); - prom_print(RELOC("\nstarted at : 0x")); - drawhex(reloc_offset() + KERNELBASE); - prom_print(RELOC("\nlinked at : 0x")); - drawhex(KERNELBASE); - prom_print(RELOC("\nframe buffer at : 0x")); - drawhex((unsigned long)bi->dispDeviceBase); - prom_print(RELOC(" (phys), 0x")); - drawhex((unsigned long)bi->logicalDisplayBase); - prom_print(RELOC(" (log)")); - prom_print(RELOC("\nMSR : 0x")); - __asm__ __volatile__ ("mfmsr %0" : "=r" ((flags)) : : "memory"); - drawhex(flags); - prom_print(RELOC("\n\n")); -#endif - /* Out of the #if/#endif since it flushes the clearscreen too */ + prom_welcome(bi, phys); flushscreen(); +#endif /* CONFIG_BOOTX_TEXT */ /* New BootX enters kernel with MMU off, i/os are not allowed here. This hack will have been done by the boostrap anyway. @@ -381,12 +386,12 @@ prom_init(int r3, int r4, prom_entry pp) prom_print(RELOC("booting...\n")); flushscreen(); #endif - return; + return phys; } /* check if we're prep, return if we are */ if ( *(unsigned long *)(0) == 0xdeadc0de ) - return; + return phys; /* First get a handle for the stdout device */ RELOC(prom) = pp; @@ -407,6 +412,30 @@ prom_init(int r3, int r4, prom_entry pp) RELOC(of_stdout_device) = PTRUNRELOC(p); mem += strlen(p) + 1; + /* Find the OF version */ + prom_op = call_prom(RELOC("finddevice"), 1, 1, RELOC("/openprom")); + prom_version = 0; + if (prom_op != (void*)-1) { + char model[64]; + int sz; + sz = (int)call_prom(RELOC("getprop"), 4, 1, prom_op, RELOC("model"), model, 64); + if (sz > 0) { + char *c; + /* hack to skip the ibm chrp firmware # */ + if ( strncmp(model,RELOC("IBM"),3) ) { + for (c = model; *c; c++) + if (*c >= '0' && *c <= '9') { + prom_version = *c - '0'; + break; + } + } + else + chrp = 1; + } + } + if (prom_version >= 3) + prom_print(RELOC("OF Version 3 detected.\n")); + /* Get the boot device and translate it to a full OF pathname. 
*/ p = (char *) mem; l = (int) call_prom(RELOC("getprop"), 4, 1, RELOC(prom_chosen), @@ -478,6 +507,42 @@ prom_init(int r3, int r4, prom_entry pp) prom_print(RELOC(" done\n")); } + /* If we are already running at 0xc0000000, we assume we were loaded by + * an OF bootloader which did set a BAT for us. This breaks OF translate + * so we force phys to be 0 + */ + if (offset == 0) + phys = 0; + else { + if ((int) call_prom(RELOC("getprop"), 4, 1, RELOC(prom_chosen), + RELOC("mmu"), &prom_mmu, sizeof(prom_mmu)) <= 0) { + prom_print(RELOC(" no MMU found\n")); + } else { + int nargs; + struct prom_args prom_args; + nargs = 4; + prom_args.service = RELOC("call-method"); + prom_args.nargs = nargs; + prom_args.nret = 4; + prom_args.args[0] = RELOC("translate"); + prom_args.args[1] = prom_mmu; + prom_args.args[2] = (void *)(offset + KERNELBASE); + prom_args.args[3] = (void *)1; + RELOC(prom)(&prom_args); + + /* We assume the phys. address size is 3 cells */ + if (prom_args.args[nargs] != 0) + prom_print(RELOC(" (translate failed) ")); + else + phys = (unsigned long)prom_args.args[nargs+3]; + } + } + +#ifdef CONFIG_BOOTX_TEXT + if (!chrp && RELOC(prom_disp_node) != 0) + setup_disp_fake_bi(RELOC(prom_disp_node)); +#endif + #ifdef CONFIG_SMP /* * With CHRP SMP we need to use the OF to start the other @@ -512,7 +577,7 @@ prom_init(int r3, int r4, prom_entry pp) node = call_prom(RELOC("finddevice"), 1, 1, RELOC("/")); if ( (int)call_prom(RELOC("getprop"), 4, 1, node, RELOC("device_type"),type, sizeof(type)) <= 0) - return; + return phys; /* copy the holding pattern code to someplace safe (8M) */ memcpy( (void *)(8<<20), RELOC(__secondary_hold), 0x100 ); @@ -554,6 +619,79 @@ prom_init(int r3, int r4, prom_entry pp) prom_print(RELOC("...failed\n")); } #endif + /* If OpenFirmware version >= 3, then use quiesce call */ + if (prom_version >= 3) { + prom_print(RELOC("Calling quiesce ...\n")); + call_prom(RELOC("quiesce"), 0, 0); + offset = reloc_offset(); + phys = offset + KERNELBASE; + 
} + +#ifdef CONFIG_BOOTX_TEXT + if (!chrp && RELOC(disp_bi)) { + RELOC(prom_stdout) = 0; + clearscreen(); + prom_welcome(PTRRELOC(RELOC(disp_bi)), phys); + prom_print(RELOC("booting...\n")); + } +#endif + + return phys; +} + +#ifdef CONFIG_BOOTX_TEXT +__init static void +prom_welcome(boot_infos_t* bi, unsigned long phys) +{ + unsigned long offset = reloc_offset(); + unsigned long flags; + unsigned long pvr; + + prom_print(RELOC("Welcome to Linux, kernel " UTS_RELEASE "\n")); + prom_print(RELOC("\nstarted at : 0x")); + drawhex(phys); + prom_print(RELOC("\nlinked at : 0x")); + drawhex(KERNELBASE); + prom_print(RELOC("\nframe buffer at : 0x")); + drawhex((unsigned long)bi->dispDeviceBase); + prom_print(RELOC(" (phys), 0x")); + drawhex((unsigned long)bi->logicalDisplayBase); + prom_print(RELOC(" (log)")); + prom_print(RELOC("\nMSR : 0x")); + __asm__ __volatile__ ("mfmsr %0" : "=r" (flags)); + drawhex(flags); + __asm__ __volatile__ ("mfspr %0, 287" : "=r" (pvr)); + pvr >>= 16; + if (pvr > 1) { + prom_print(RELOC("\nHID0 : 0x")); + __asm__ __volatile__ ("mfspr %0, 1008" : "=r" (flags)); + drawhex(flags); + } + if (pvr == 8 || pvr == 12) { + prom_print(RELOC("\nICTC : 0x")); + __asm__ __volatile__ ("mfspr %0, 1019" : "=r" (flags)); + drawhex(flags); + } + prom_print(RELOC("\n\n")); +} +#endif + +static int prom_set_color(ihandle ih, int i, int r, int g, int b) +{ + struct prom_args prom_args; + unsigned long offset = reloc_offset(); + + prom_args.service = RELOC("call-method"); + prom_args.nargs = 6; + prom_args.nret = 1; + prom_args.args[0] = RELOC("color!"); + prom_args.args[1] = ih; + prom_args.args[2] = (void *) i; + prom_args.args[3] = (void *) b; + prom_args.args[4] = (void *) g; + prom_args.args[5] = (void *) r; + RELOC(prom)(&prom_args); + return (int) prom_args.args[6]; } /* @@ -573,6 +711,26 @@ check_display(unsigned long mem) int i; unsigned long offset = reloc_offset(); char type[16], *path; + static unsigned char default_colors[] = { + 0x00, 0x00, 0x00, + 
0x00, 0x00, 0xaa, + 0x00, 0xaa, 0x00, + 0x00, 0xaa, 0xaa, + 0xaa, 0x00, 0x00, + 0xaa, 0x00, 0xaa, + 0xaa, 0xaa, 0x00, + 0xaa, 0xaa, 0xaa, + 0x55, 0x55, 0x55, + 0x55, 0x55, 0xff, + 0x55, 0xff, 0x55, + 0x55, 0xff, 0xff, + 0xff, 0x55, 0x55, + 0xff, 0x55, 0xff, + 0xff, 0xff, 0x55, + 0xff, 0xff, 0xff + }; + + RELOC(prom_disp_node) = 0; for (node = 0; prom_next_node(&node); ) { type[0] = 0; @@ -595,6 +753,26 @@ check_display(unsigned long mem) } prom_print(RELOC("... ok\n")); + if (RELOC(prom_disp_node) == 0) + RELOC(prom_disp_node) = node; + + /* Setup a useable color table when the appropriate + * method is available. Should update this to set-colors */ + for (i = 0; i < 32; i++) + if (prom_set_color(ih, i, RELOC(default_colors)[i*3], + RELOC(default_colors)[i*3+1], + RELOC(default_colors)[i*3+2]) != 0) + break; + +#ifdef CONFIG_FB + for (i = 0; i < LINUX_LOGO_COLORS; i++) + if (prom_set_color(ih, i + 32, + RELOC(linux_logo_red)[i], + RELOC(linux_logo_green)[i], + RELOC(linux_logo_blue)[i]) != 0) + break; +#endif /* CONFIG_FB */ + /* * If this display is the device that OF is using for stdout, * move it to the front of the list. @@ -614,6 +792,79 @@ check_display(unsigned long mem) return ALIGN(mem); } +/* This function will enable the early boot text when doing OF booting. 
This + * way, xmon output should work too + */ +#ifdef CONFIG_BOOTX_TEXT +__init +static void +setup_disp_fake_bi(ihandle dp) +{ + unsigned int len; + int width = 640, height = 480, depth = 8, pitch; + unsigned address; + boot_infos_t* bi; + unsigned long offset = reloc_offset(); + + prom_print(RELOC("Initing fake screen\n")); + + len = 0; + call_prom(RELOC("getprop"), 4, 1, dp, RELOC("depth"), &len, sizeof(len)); + if (len == 0) + prom_print(RELOC("Warning: assuming display depth = 8\n")); + else + depth = len; + width = len = 0; + call_prom(RELOC("getprop"), 4, 1, dp, RELOC("width"), &len, sizeof(len)); + width = len; + if (width == 0) { + prom_print(RELOC("Failed to get width\n")); + return; + } + height = len = 0; + call_prom(RELOC("getprop"), 4, 1, dp, RELOC("height"), &len, sizeof(len)); + height = len; + if (height == 0) { + prom_print(RELOC("Failed to get height\n")); + return; + } + pitch = len = 0; + call_prom(RELOC("getprop"), 4, 1, dp, RELOC("linebytes"), &len, sizeof(len)); + pitch = len; + if (pitch == 0) { + prom_print(RELOC("Failed to get pitch\n")); + return; + } + address = len = 0; + call_prom(RELOC("getprop"), 4, 1, dp, RELOC("address"), &len, sizeof(len)); + address = len; + if (address == 0) { + prom_print(RELOC("Failed to get address\n")); + return; + } +#if 0 + /* kludge for valkyrie */ + if (strcmp(dp->name, "valkyrie") == 0) + address += 0x1000; + } +#endif + + RELOC(disp_bi) = &fake_bi; + bi = PTRRELOC((&fake_bi)); + RELOC(g_loc_X) = 0; + RELOC(g_loc_Y) = 0; + RELOC(g_max_loc_X) = width / 8; + RELOC(g_max_loc_Y) = height / 16; + bi->logicalDisplayBase = (unsigned char *)address; + bi->dispDeviceBase = (unsigned char *)address; + bi->dispDeviceRowBytes = pitch; + bi->dispDeviceDepth = depth; + bi->dispDeviceRect[0] = bi->dispDeviceRect[1] = 0; + bi->dispDeviceRect[2] = width; + bi->dispDeviceRect[3] = height; +} +#endif + __init static int prom_next_node(phandle *nodep) @@ -748,6 +999,16 @@ void finish_device_tree(void) { unsigned long mem 
= (unsigned long) klimit; + char* model; + + /* Here, we decide if we'll use the interrupt-tree (new Core99 code) or not. + * This code was only tested with Core99 machines so far, but should be easily + * adapted to older newworld machines (iMac, B&W G3, Lombard). + */ + model = get_property(allnodes, "model", 0); + if ((boot_infos == 0) && model && (strcmp(model, "PowerBook2,1") == 0 + || strcmp(model, "PowerMac2,1") == 0 || strcmp(model, "PowerMac3,1") == 0)) + use_of_interrupt_tree = 1; mem = finish_node(allnodes, mem, NULL); dev_tree_size = mem - (unsigned long) allnodes; @@ -788,6 +1049,9 @@ finish_node(struct device_node *np, unsigned long mem_start, if (ifunc != NULL) { mem_start = ifunc(np, mem_start); } + if (use_of_interrupt_tree) { + mem_start = finish_node_interrupts(np, mem_start); + } /* the f50 sets the name to 'display' and 'compatible' to what we * expect for the name -- Cort @@ -834,6 +1098,133 @@ finish_node(struct device_node *np, unsigned long mem_start, return mem_start; } +/* This routine walks the interrupt tree for a given device node and gather + * all necessary informations according to the draft interrupt mapping + * for CHRP. The current version was only tested on Apple "Core99" machines + * and may not handle cascaded controllers correctly. + */ +__init +static unsigned long +finish_node_interrupts(struct device_node *np, unsigned long mem_start) +{ + /* Finish this node */ + unsigned int *isizep, *asizep, *interrupts, *map, *map_mask, *reg; + phandle *parent; + struct device_node *node, *parent_node; + int l, isize, ipsize, asize, map_size, regpsize; + + /* Currently, we don't look at all nodes with no "interrupts" property */ + interrupts = (unsigned int *)get_property(np, "interrupts", &l); + if (interrupts == NULL) + return mem_start; + ipsize = l>>2; + + reg = (unsigned int *)get_property(np, "reg", &l); + regpsize = l>>2; + + /* We assume default interrupt cell size is 1 (bugus ?) 
*/ + isize = 1; + node = np; + + do { + /* We adjust the cell size if the current parent contains an #interrupt-cells + * property */ + isizep = (unsigned int *)get_property(node, "#interrupt-cells", &l); + if (isizep) + isize = *isizep; + + /* We don't do interrupt cascade (ISA) for now, we stop on the first + * controller found + */ + if (get_property(node, "interrupt-controller", &l)) { + int i,j; + np->intrs = (struct interrupt_info *) mem_start; + np->n_intrs = ipsize / isize; + mem_start += np->n_intrs * sizeof(struct interrupt_info); + for (i = 0; i < np->n_intrs; ++i) { + np->intrs[i].line = *interrupts++; + np->intrs[i].sense = 0; + if (isize > 1) + np->intrs[i].sense = *interrupts++; + for (j=2; j>2; + map_mask = (unsigned int *)get_property(node, "interrupt-map-mask", &l); + asizep = (unsigned int *)get_property(node, "#address-cells", &l); + if (asizep && l == sizeof(unsigned int)) + asize = *asizep; + else + asize = 0; + found = 0; + while(map_size>0 && !found) { + found = 1; + for (i=0; i=regpsize) || ((mask & *map) != (mask & reg[i]))) + found = 0; + map++; + map_size--; + } + for (i=0; iparent; + } while(node); + + return mem_start; +} + + /* * When BootX makes a copy of the device tree from the MacOS * Name Registry, it is in the format we use but all of the pointers @@ -892,6 +1283,9 @@ interpret_pci_props(struct device_node *np, unsigned long mem_start) mem_start += i * sizeof(struct address_range); } + if (use_of_interrupt_tree) + return mem_start; + /* * If the pci host bridge has an interrupt-map property, * look for our node in it. 
@@ -901,14 +1295,28 @@ interpret_pci_props(struct device_node *np, unsigned long mem_start) get_property(np->parent, "interrupt-map", &ml)) != 0 && (ip = (int *) get_property(np, "interrupts", &l)) != 0) { unsigned int devfn = pci_addrs[0].addr.a_hi & 0xff00; + unsigned int cell_size; + struct device_node* np2; + /* This is hackish, but is only used for BootX booting */ + cell_size = sizeof(struct pci_intr_map); + np2 = np->parent; + while(np2) { + if (device_is_compatible(np2, "uni-north")) { + cell_size += 4; + break; + } + np2 = np2->parent; + } np->n_intrs = 0; np->intrs = (struct interrupt_info *) mem_start; - for (i = 0; (ml -= sizeof(struct pci_intr_map)) >= 0; ++i) { - if (imp[i].addr.a_hi == devfn) { - np->intrs[np->n_intrs].line = imp[i].intr; - np->intrs[np->n_intrs].sense = 0; + for (i = 0; (ml -= cell_size) >= 0; ++i) { + if (imp->addr.a_hi == devfn) { + np->intrs[np->n_intrs].line = imp->intr; + np->intrs[np->n_intrs].sense = 0; /* FIXME */ ++np->n_intrs; } + imp = (struct pci_intr_map *)(((unsigned int)imp) + + cell_size); } if (np->n_intrs == 0) np->intrs = 0; @@ -965,6 +1373,9 @@ interpret_dbdma_props(struct device_node *np, unsigned long mem_start) mem_start += i * sizeof(struct address_range); } + if (use_of_interrupt_tree) + return mem_start; + ip = (int *) get_property(np, "AAPL,interrupts", &l); if (ip == 0) ip = (int *) get_property(np, "interrupts", &l); @@ -988,13 +1399,14 @@ interpret_macio_props(struct device_node *np, unsigned long mem_start) struct reg_property *rp; struct address_range *adr; unsigned long base_address; - int i, l, *ip; + int i, l, keylargo, *ip; struct device_node *db; base_address = 0; for (db = np->parent; db != NULL; db = db->parent) { if (!strcmp(db->type, "mac-io") && db->n_addrs != 0) { base_address = db->addrs[0].address; + keylargo = device_is_compatible(db, "Keylargo"); break; } } @@ -1014,6 +1426,9 @@ interpret_macio_props(struct device_node *np, unsigned long mem_start) mem_start += i * sizeof(struct 
address_range); } + if (use_of_interrupt_tree) + return mem_start; + ip = (int *) get_property(np, "interrupts", &l); if (ip == 0) ip = (int *) get_property(np, "AAPL,interrupts", &l); @@ -1022,9 +1437,15 @@ interpret_macio_props(struct device_node *np, unsigned long mem_start) if (_machine == _MACH_Pmac) { /* for the iMac */ np->n_intrs = l / sizeof(int); + /* Hack for BootX on Core99 */ + if (keylargo) + np->n_intrs = np->n_intrs/2; for (i = 0; i < np->n_intrs; ++i) { np->intrs[i].line = *ip++; - np->intrs[i].sense = 0; + if (keylargo) + np->intrs[i].sense = *ip++; + else + np->intrs[i].sense = 0; } } else { /* CHRP machines */ @@ -1064,6 +1485,9 @@ interpret_isa_props(struct device_node *np, unsigned long mem_start) mem_start += i * sizeof(struct address_range); } + if (use_of_interrupt_tree) + return mem_start; + ip = (int *) get_property(np, "interrupts", &l); if (ip != 0) { np->intrs = (struct interrupt_info *) mem_start; @@ -1101,6 +1525,9 @@ interpret_root_props(struct device_node *np, unsigned long mem_start) mem_start += i * sizeof(struct address_range); } + if (use_of_interrupt_tree) + return mem_start; + ip = (int *) get_property(np, "AAPL,interrupts", &l); if (ip == 0) ip = (int *) get_property(np, "interrupts", &l); @@ -1157,6 +1584,49 @@ find_type_devices(const char *type) return head; } +/* Finds a device node given its PCI bus number, device number + * and function number + */ +__openfirmware +struct device_node * +find_pci_device_OFnode(unsigned char bus, unsigned char dev_fn) +{ + struct device_node* np; + unsigned int *reg; + int l; + + for (np = allnodes; np != 0; np = np->allnext) { + char *pname = np->parent ? 
+ (char *)get_property(np->parent, "name", &l) : 0; + if (pname && strcmp(pname, "mac-io") == 0) + continue; + reg = (unsigned int *) get_property(np, "reg", &l); + if (reg == 0 || l < sizeof(struct reg_property)) + continue; + if (((reg[0] >> 8) & 0xff) == dev_fn && ((reg[0] >> 16) & 0xff) == bus) + break; + } + return np; +} + +/* + * Returns all nodes linked together + */ +__openfirmware +struct device_node * +find_all_nodes(void) +{ + struct device_node *head, **prevp, *np; + + prevp = &head; + for (np = allnodes; np != 0; np = np->allnext) { + *prevp = np; + prevp = &np->next; + } + *prevp = 0; + return head; +} + /* Checks if the given "compat" string matches one of the strings in * the device's "compatible" property */ @@ -1377,18 +1847,28 @@ abort() prom_exit(); } -#ifdef CONFIG_XMON +#ifdef CONFIG_BOOTX_TEXT + +/* Here's a small text engine to use during early boot or for debugging purposes + * + * todo: + * + * - build some kind of vgacon with it to enable early printk + * - move to a separate file + * - add a few video driver hooks to keep in sync with display + * changes. 
+ */ + __init void map_bootx_text(void) { - if (boot_infos == 0) + if (disp_bi == 0) return; - boot_infos->logicalDisplayBase = - ioremap((unsigned long) boot_infos->dispDeviceBase, - boot_infos->dispDeviceRowBytes * boot_infos->dispDeviceRect[3]); + disp_bi->logicalDisplayBase = + ioremap((unsigned long) disp_bi->dispDeviceBase, + disp_bi->dispDeviceRowBytes * disp_bi->dispDeviceRect[3]); } -#endif /* CONFIG_XMON */ /* Calc the base address of a given point (x,y) */ __pmac @@ -1410,7 +1890,7 @@ static void clearscreen(void) { unsigned long offset = reloc_offset(); - boot_infos_t* bi = PTRRELOC(RELOC(boot_infos)); + boot_infos_t* bi = PTRRELOC(RELOC(disp_bi)); unsigned long *base = (unsigned long *)calc_base(bi, 0, 0); unsigned long width = ((bi->dispDeviceRect[2] - bi->dispDeviceRect[0]) * (bi->dispDeviceDepth >> 3)) >> 2; @@ -1435,7 +1915,7 @@ static void flushscreen(void) { unsigned long offset = reloc_offset(); - boot_infos_t* bi = PTRRELOC(RELOC(boot_infos)); + boot_infos_t* bi = PTRRELOC(RELOC(disp_bi)); unsigned long *base = (unsigned long *)calc_base(bi, 0, 0); unsigned long width = ((bi->dispDeviceRect[2] - bi->dispDeviceRect[0]) * (bi->dispDeviceDepth >> 3)) >> 2; @@ -1452,14 +1932,12 @@ flushscreen(void) } } -#ifdef CONFIG_BOOTX_TEXT - __pmac static void scrollscreen(void) { unsigned long offset = reloc_offset(); - boot_infos_t* bi = PTRRELOC(RELOC(boot_infos)); + boot_infos_t* bi = PTRRELOC(RELOC(disp_bi)); unsigned long *src = (unsigned long *)calc_base(bi,0,16); unsigned long *dst = (unsigned long *)calc_base(bi,0,0); unsigned long width = ((bi->dispDeviceRect[2] - bi->dispDeviceRect[0]) * @@ -1563,19 +2041,20 @@ static void draw_byte(unsigned char c, long locX, long locY) { unsigned long offset = reloc_offset(); - boot_infos_t* bi = PTRRELOC(RELOC(boot_infos)); + boot_infos_t* bi = PTRRELOC(RELOC(disp_bi)); unsigned char *base = calc_base(bi, locX << 3, locY << 4); unsigned char *font = &RELOC(vga_font)[((unsigned long)c) * 16]; + int rb = 
bi->dispDeviceRowBytes; switch(bi->dispDeviceDepth) { case 32: - draw_byte_32(font, (unsigned long *)base); + draw_byte_32(font, (unsigned long *)base, rb); break; case 16: - draw_byte_16(font, (unsigned long *)base); + draw_byte_16(font, (unsigned long *)base, rb); break; case 8: - draw_byte_8(font, (unsigned long *)base); + draw_byte_8(font, (unsigned long *)base, rb); break; default: break; @@ -1613,15 +2092,12 @@ static unsigned long expand_bits_16[4] = { __pmac static void -draw_byte_32(unsigned char *font, unsigned long *base) +draw_byte_32(unsigned char *font, unsigned long *base, int rb) { - unsigned long offset = reloc_offset(); - boot_infos_t* bi = PTRRELOC(RELOC(boot_infos)); int l, bits; int fg = 0xFFFFFFFFUL; int bg = 0x00000000UL; - for (l = 0; l < 16; ++l) { bits = *font++; @@ -1633,19 +2109,18 @@ draw_byte_32(unsigned char *font, unsigned long *base) base[5] = (-((bits >> 2) & 1) & fg) ^ bg; base[6] = (-((bits >> 1) & 1) & fg) ^ bg; base[7] = (-(bits & 1) & fg) ^ bg; - base = (unsigned long *) ((char *)base + bi->dispDeviceRowBytes); + base = (unsigned long *) ((char *)base + rb); } } __pmac static void -draw_byte_16(unsigned char *font, unsigned long *base) +draw_byte_16(unsigned char *font, unsigned long *base, int rb) { - unsigned long offset = reloc_offset(); - boot_infos_t* bi = PTRRELOC(RELOC(boot_infos)); int l, bits; int fg = 0xFFFFFFFFUL; int bg = 0x00000000UL; + unsigned long offset = reloc_offset(); unsigned long *eb = RELOC(expand_bits_16); for (l = 0; l < 16; ++l) @@ -1655,19 +2130,18 @@ draw_byte_16(unsigned char *font, unsigned long *base) base[1] = (eb[(bits >> 4) & 3] & fg) ^ bg; base[2] = (eb[(bits >> 2) & 3] & fg) ^ bg; base[3] = (eb[bits & 3] & fg) ^ bg; - base = (unsigned long *) ((char *)base + bi->dispDeviceRowBytes); + base = (unsigned long *) ((char *)base + rb); } } __pmac static void -draw_byte_8(unsigned char *font, unsigned long *base) +draw_byte_8(unsigned char *font, unsigned long *base, int rb) { - unsigned long 
offset = reloc_offset(); - boot_infos_t* bi = PTRRELOC(RELOC(boot_infos)); int l, bits; int fg = 0x0F0F0F0FUL; int bg = 0x00000000UL; + unsigned long offset = reloc_offset(); unsigned long *eb = RELOC(expand_bits_8); for (l = 0; l < 16; ++l) @@ -1675,7 +2149,7 @@ draw_byte_8(unsigned char *font, unsigned long *base) bits = *font++; base[0] = (eb[bits >> 4] & fg) ^ bg; base[1] = (eb[bits & 0xf] & fg) ^ bg; - base = (unsigned long *) ((char *)base + bi->dispDeviceRowBytes); + base = (unsigned long *) ((char *)base + rb); } } @@ -2026,3 +2500,4 @@ static unsigned char vga_font[cmapsz] = { }; #endif /* CONFIG_BOOTX_TEXT */ + diff --git a/arch/ppc/kernel/semaphore.c b/arch/ppc/kernel/semaphore.c index d630c80dca31..f17bc16ce4e1 100644 --- a/arch/ppc/kernel/semaphore.c +++ b/arch/ppc/kernel/semaphore.c @@ -137,3 +137,44 @@ int __down_trylock(struct semaphore * sem) { return waking_non_zero_trylock(sem); } + + +/* + * rw semaphores Ani Joshi + * based on alpha port by Andrea Arcangeli + */ + +void down_read_failed(struct rw_semaphore *sem) +{ + struct task_struct *tsk = current; + DECLARE_WAITQUEUE(wait, tsk); + + add_wait_queue_exclusive(&sem->wait, &wait); + + do { + __set_task_state(tsk, TASK_UNINTERRUPTIBLE | TASK_EXCLUSIVE); + spin_unlock_irq(&sem->lock); + schedule(); + spin_lock_irq(&sem->lock); + } while(sem->wr); + + remove_wait_queue(&sem->wait, &wait); +} + +void down_write_failed(struct rw_semaphore *sem) +{ + struct task_struct *tsk = current; + DECLARE_WAITQUEUE(wait, tsk); + + add_wait_queue_exclusive(&sem->wait, &wait); + + do { + __set_task_state(tsk, TASK_UNINTERRUPTIBLE | TASK_EXCLUSIVE); + spin_unlock_irq(&sem->lock); + schedule(); + spin_lock_irq(&sem->lock); + } while(sem->rd || sem->wr); + + remove_wait_queue(&sem->wait, &wait); +} + diff --git a/arch/ppc/kernel/setup.c b/arch/ppc/kernel/setup.c index dccb066ffb89..19ce0a25e185 100644 --- a/arch/ppc/kernel/setup.c +++ b/arch/ppc/kernel/setup.c @@ -69,6 +69,13 @@ extern void gemini_init(unsigned long 
r3, unsigned long r6, unsigned long r7); +#ifdef CONFIG_BOOTX_TEXT +extern void map_bootx_text(void); +#endif +#ifdef CONFIG_XMON +extern void xmon_map_scc(void); +#endif + extern boot_infos_t *boot_infos; char saved_command_line[256]; unsigned char aux_device_present; @@ -261,7 +268,7 @@ int get_cpuinfo(char *buffer) } break; case 0x000C: - len += sprintf(len+buffer, "7400\n"); + len += sprintf(len+buffer, "7400 (G4)\n"); break; case 0x0020: len += sprintf(len+buffer, "403G"); @@ -292,7 +299,7 @@ int get_cpuinfo(char *buffer) * Assume here that all clock rates are the same in a * smp system. -- Cort */ -#ifndef CONFIG_8xx +#if !defined(CONFIG_4xx) && !defined(CONFIG_8xx) if ( have_of ) { struct device_node *cpu_node; @@ -316,7 +323,7 @@ int get_cpuinfo(char *buffer) len += sprintf(len+buffer, "clock\t\t: %dMHz\n", *fp / 1000000); } -#endif +#endif /* !CONFIG_4xx && !CONFIG_8xx */ if (ppc_md.setup_residual != NULL) { @@ -410,8 +417,9 @@ identify_machine(unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7) { parse_bootinfo(); - + if ( ppc_md.progress ) ppc_md.progress("id mach(): start", 0x100); + #if !defined(CONFIG_4xx) && !defined(CONFIG_8xx) #ifndef CONFIG_MACH_SPECIFIC /* if we didn't get any bootinfo telling us what we are... */ @@ -477,11 +485,12 @@ identify_machine(unsigned long r3, unsigned long r4, unsigned long r5, char *p; #ifdef CONFIG_BLK_DEV_INITRD - if (r3 - KERNELBASE < 0x800000 - && r4 != 0 && r4 != 0xdeadbeef) { + if (r3 && r4 && r4 != 0xdeadbeef) + { initrd_start = r3; initrd_end = r3 + r4; ROOT_DEV = MKDEV(RAMDISK_MAJOR, 0); + initrd_below_start_ok = 1; } #endif cmd_line[0] = 0; @@ -519,6 +528,7 @@ identify_machine(unsigned long r3, unsigned long r4, unsigned long r5, default: printk("Unknown machine type in identify_machine!\n"); } + /* Check for nobats option (used in mapin_ram). 
*/ if (strstr(cmd_line, "nobats")) { extern int __map_without_bats; @@ -567,9 +577,11 @@ identify_machine(unsigned long r3, unsigned long r4, unsigned long r5, int parse_bootinfo(void) { struct bi_record *rec; - extern char _end[]; + extern char __bss_start[]; + extern char *sysmap; + extern unsigned long sysmap_size; - rec = (struct bi_record *)PAGE_ALIGN((ulong)_end); + rec = (struct bi_record *)_ALIGN((ulong)__bss_start+(1<<20)-1,(1<<20)); if ( rec->tag != BI_FIRST ) { /* @@ -577,11 +589,10 @@ int parse_bootinfo(void) * we have the bootloader handle all the relocation and * prom calls -- Cort */ - rec = (struct bi_record *)PAGE_ALIGN((ulong)_end+0x10000); + rec = (struct bi_record *)_ALIGN((ulong)__bss_start+0x10000+(1<<20)-1,(1<<20)); if ( rec->tag != BI_FIRST ) return -1; } - for ( ; rec->tag != BI_LAST ; rec = (struct bi_record *)((ulong)rec + rec->size) ) { @@ -591,6 +602,11 @@ int parse_bootinfo(void) case BI_CMD_LINE: memcpy(cmd_line, (void *)data, rec->size); break; + case BI_SYSMAP: + sysmap = (char *)((data[0] >= (KERNELBASE)) ? 
data[0] : + (data[0]+KERNELBASE)); + sysmap_size = data[1]; + break; #ifdef CONFIG_BLK_DEV_INITRD case BI_INITRD: initrd_start = data[0]; @@ -603,7 +619,6 @@ int parse_bootinfo(void) have_of = data[1]; break; #endif /* CONFIG_MACH_SPECIFIC */ - } } @@ -613,7 +628,7 @@ int parse_bootinfo(void) /* Checks "l2cr=xxxx" command-line option */ void ppc_setup_l2cr(char *str, int *ints) { - if ( (_get_PVR() >> 16) == 8) + if ( ((_get_PVR() >> 16) == 8) || ((_get_PVR() >> 16) == 12) ) { unsigned long val = simple_strtoul(str, NULL, 0); printk(KERN_INFO "l2cr set to %lx\n", val); @@ -639,12 +654,21 @@ void __init setup_arch(char **cmdline_p) extern char *klimit; extern void do_init_bootmem(void); +#ifdef CONFIG_BOOTX_TEXT + map_bootx_text(); + prom_print("identify machine\n"); +#endif + #ifdef CONFIG_XMON - extern void xmon_map_scc(void); xmon_map_scc(); if (strstr(cmd_line, "xmon")) xmon(0); #endif /* CONFIG_XMON */ + if ( ppc_md.progress ) ppc_md.progress("setup_arch: enter", 0x3eab); +#if defined(CONFIG_KGDB) + set_debug_traps(); + breakpoint(); +#endif /* reboot on panic */ panic_timeout = 180; @@ -653,16 +677,16 @@ void __init setup_arch(char **cmdline_p) init_mm.end_code = (unsigned long) _etext; init_mm.end_data = (unsigned long) _edata; init_mm.brk = (unsigned long) klimit; - + /* Save unparsed command line copy for /proc/cmdline */ strcpy(saved_command_line, cmd_line); *cmdline_p = cmd_line; /* set up the bootmem stuff with available memory */ do_init_bootmem(); + if ( ppc_md.progress ) ppc_md.progress("setup_arch: bootmem", 0x3eab); ppc_md.setup_arch(); - /* clear the progress line */ if ( ppc_md.progress ) ppc_md.progress("arch: exit", 0x3eab); } diff --git a/arch/ppc/kernel/sleep.S b/arch/ppc/kernel/sleep.S index 3ead7bd28eb6..b73acd6ce156 100644 --- a/arch/ppc/kernel/sleep.S +++ b/arch/ppc/kernel/sleep.S @@ -171,6 +171,11 @@ _GLOBAL(low_sleep_handler) */ wake_up: + /* Flash inval the instruction cache */ + mfspr r3,HID0 + ori r3,r3, HID0_ICFI + mtspr HID0,r3 + 
isync /* Restore the HID0 register. This turns on the L1 caches. */ subi r1,r1,SL_PC lwz r3,SL_HID0(r1) diff --git a/arch/ppc/kernel/syscalls.c b/arch/ppc/kernel/syscalls.c index 30bed889beab..e1a3fdcbbe14 100644 --- a/arch/ppc/kernel/syscalls.c +++ b/arch/ppc/kernel/syscalls.c @@ -252,9 +252,14 @@ asmlinkage int sys_pause(void) asmlinkage int sys_uname(struct old_utsname * name) { - if (name && !copy_to_user(name, &system_utsname, sizeof (*name))) - return 0; - return -EFAULT; + int err; + + if (!name) + return -EFAULT; + down_read(&uts_sem); + err = copy_to_user(name, &system_utsname, sizeof (*name)); + up(&uts_sem); + return err ? -EFAULT : 0; } asmlinkage int sys_olduname(struct oldold_utsname * name) @@ -266,6 +271,7 @@ asmlinkage int sys_olduname(struct oldold_utsname * name) if (!access_ok(VERIFY_WRITE,name,sizeof(struct oldold_utsname))) return -EFAULT; + down_read(&uts_sem); error = __copy_to_user(&name->sysname,&system_utsname.sysname,__OLD_UTS_LEN); error -= __put_user(0,name->sysname+__OLD_UTS_LEN); error -= __copy_to_user(&name->nodename,&system_utsname.nodename,__OLD_UTS_LEN); @@ -277,6 +283,7 @@ asmlinkage int sys_olduname(struct oldold_utsname * name) error -= __copy_to_user(&name->machine,&system_utsname.machine,__OLD_UTS_LEN); error = __put_user(0,name->machine+__OLD_UTS_LEN); error = error ? 
-EFAULT : 0; + up(&uts_sem); return error; } diff --git a/arch/ppc/kernel/traps.c b/arch/ppc/kernel/traps.c index 04b4e2d36aa5..5cc34c5a5a51 100644 --- a/arch/ppc/kernel/traps.c +++ b/arch/ppc/kernel/traps.c @@ -128,6 +128,20 @@ MachineCheckException(struct pt_regs *regs) _exception(SIGSEGV, regs); } +void +SMIException(struct pt_regs *regs) +{ +#if defined(CONFIG_XMON) || defined(CONFIG_KGDB) + { + debugger(regs); + return; + } +#endif + show_regs(regs); + print_backtrace((unsigned long *)regs->gpr[1]); + panic("System Management Interrupt"); +} + #if defined(CONFIG_ALTIVEC) void AltiVecUnavailable(struct pt_regs *regs) diff --git a/arch/ppc/kernel/walnut_setup.c b/arch/ppc/kernel/walnut_setup.c new file mode 100644 index 000000000000..d01cb27fdbe4 --- /dev/null +++ b/arch/ppc/kernel/walnut_setup.c @@ -0,0 +1,1475 @@ +/* + * + * Copyright (c) 1999-2000 Grant Erickson + * + * Module name: walnut_setup.c + * + * Description: + * Architecture- / platform-specific boot-time initialization code for + * the IBM PowerPC 403GP "Walnut" evaluation board. Adapted from original + * code by Gary Thomas, Cort Dougan , and Dan Malek + * . + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "local_irq.h" +#include "ppc4xx_pic.h" +#include "time.h" +#include "walnut_setup.h" + + +/* Function Prototypes */ + +extern void abort(void); + +/* Global Variables */ + +unsigned char __res[sizeof(bd_t)]; + + +/* + * void __init walnut_init() + * + * Description: + * This routine... + * + * Input(s): + * r3 - Optional pointer to a board information structure. + * r4 - Optional pointer to the physical starting address of the init RAM + * disk. + * r5 - Optional pointer to the physical ending address of the init RAM + * disk. + * r6 - Optional pointer to the physical starting address of any kernel + * command-line parameters. 
+ * r7 - Optional pointer to the physical ending address of any kernel + * command-line parameters. + * + * Output(s): + * N/A + * + * Returns: + * N/A + * + */ +void __init +walnut_init(unsigned long r3, unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7) +{ + /* + * If we were passed in a board information, copy it into the + * residual data area. + */ + if (r3) { + memcpy((void *)__res, (void *)(r3 + KERNELBASE), sizeof(bd_t)); + } + +#if defined(CONFIG_BLK_DEV_INITRD) + /* + * If the init RAM disk has been configured in, and there's a valid + * starting address for it, set it up. + */ + if (r4) { + initrd_start = r4 + KERNELBASE; + initrd_end = r5 + KERNELBASE; + } +#endif /* CONFIG_BLK_DEV_INITRD */ + + /* Copy the kernel command line arguments to a safe place. */ + + if (r6) { + *(char *)(r7 + KERNELBASE) = 0; + strcpy(cmd_line, (char *)(r6 + KERNELBASE)); + } + + /* Initialize machine-dependency vectors */ + + ppc_md.setup_arch = walnut_setup_arch; + ppc_md.setup_residual = walnut_setup_residual; + ppc_md.get_cpuinfo = NULL; + ppc_md.irq_cannonicalize = NULL; + ppc_md.init_IRQ = walnut_init_IRQ; + ppc_md.get_irq = walnut_get_irq; + ppc_md.init = NULL; + + ppc_md.restart = walnut_restart; + ppc_md.power_off = walnut_power_off; + ppc_md.halt = walnut_halt; + + ppc_md.time_init = walnut_time_init; + ppc_md.set_rtc_time = walnut_set_rtc_time; + ppc_md.get_rtc_time = walnut_get_rtc_time; + ppc_md.calibrate_decr = walnut_calibrate_decr; + + ppc_md.kbd_setkeycode = NULL; + ppc_md.kbd_getkeycode = NULL; + ppc_md.kbd_translate = NULL; + ppc_md.kbd_unexpected_up = NULL; + ppc_md.kbd_leds = NULL; + ppc_md.kbd_init_hw = NULL; + +#if defined(CONFIG_MAGIC_SYSRQ) + ppc_md.ppc_kbd_sysrq_xlate = NULL; +#endif + + return; +} + +/* + * Document me. 
+ */ +void __init +walnut_setup_arch(void) +{ + /* XXX - Implement me */ +} + +/* + * int walnut_setup_residual() + * + * Description: + * This routine pretty-prints the platform's internal CPU and bus clock + * frequencies into the buffer for usage in /proc/cpuinfo. + * + * Input(s): + * *buffer - Buffer into which CPU and bus clock frequencies are to be + * printed. + * + * Output(s): + * *buffer - Buffer with the CPU and bus clock frequencies. + * + * Returns: + * The number of bytes copied into 'buffer' if OK, otherwise zero or less + * on error. + */ +int +walnut_setup_residual(char *buffer) +{ + int len = 0; + bd_t *bp = (bd_t *)__res; + + len += sprintf(len + buffer, + "clock\t\t: %dMHz\n" + "bus clock\t\t: %dMHz\n", + bp->bi_intfreq / 1000000, + bp->bi_busfreq / 1000000); + + return (len); +} + +/* + * Document me. + */ +void __init +walnut_init_IRQ(void) +{ + int i; + + ppc4xx_pic_init(); + + for (i = 0; i < NR_IRQS; i++) { + irq_desc[i].handler = ppc4xx_pic; + } + + return; +} + +/* + * Document me. + */ +int +walnut_get_irq(struct pt_regs *regs) +{ + return (ppc4xx_pic_get_irq(regs)); +} + +/* + * Document me. + */ +void +walnut_restart(char *cmd) +{ + abort(); +} + +/* + * Document me. + */ +void +walnut_power_off(void) +{ + walnut_restart(NULL); +} + +/* + * Document me. + */ +void +walnut_halt(void) +{ + walnut_restart(NULL); +} + +/* + * Document me. + */ +void __init +walnut_time_init(void) +{ + /* XXX - Implement me */ +} + +/* + * Document me. + */ +int __init +walnut_set_rtc_time(unsigned long time) +{ + /* XXX - Implement me */ + + return (0); +} + +/* + * Document me. 
+ */ +unsigned long __init +walnut_get_rtc_time(void) +{ + /* XXX - Implement me */ + + return (0); +} + +/* + * void __init walnut_calibrate_decr() + * + * Description: + * This routine retrieves the internal processor frequency from the board + * information structure, sets up the kernel timer decrementer based on + * that value, enables the 403 programmable interval timer (PIT) and sets + * it up for auto-reload. + * + * Input(s): + * N/A + * + * Output(s): + * N/A + * + * Returns: + * N/A + * + */ +void __init +walnut_calibrate_decr(void) +{ + unsigned int freq; + bd_t *bip = (bd_t *)__res; + + freq = bip->bi_intfreq; + + decrementer_count = freq / HZ; + count_period_num = 1; + count_period_den = freq; + + /* Enable the PIT and set auto-reload of its value */ + + mtspr(SPRN_TCR, TCR_PIE | TCR_ARE); + + /* Clear any pending timer interrupts */ + + mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_PIS | TSR_FIS); +} +/* + * + * Copyright (c) 1999-2000 Grant Erickson + * + * Module name: walnut_setup.c + * + * Description: + * Architecture- / platform-specific boot-time initialization code for + * the IBM PowerPC 403GP "Walnut" evaluation board. Adapted from original + * code by Gary Thomas, Cort Dougan , and Dan Malek + * . + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "local_irq.h" +#include "ppc4xx_pic.h" +#include "time.h" +#include "walnut_setup.h" + + +/* Function Prototypes */ + +extern void abort(void); + +/* Global Variables */ + +unsigned char __res[sizeof(bd_t)]; + + +/* + * void __init walnut_init() + * + * Description: + * This routine... + * + * Input(s): + * r3 - Optional pointer to a board information structure. + * r4 - Optional pointer to the physical starting address of the init RAM + * disk. + * r5 - Optional pointer to the physical ending address of the init RAM + * disk. 
+ * r6 - Optional pointer to the physical starting address of any kernel + * command-line parameters. + * r7 - Optional pointer to the physical ending address of any kernel + * command-line parameters. + * + * Output(s): + * N/A + * + * Returns: + * N/A + * + */ +void __init +walnut_init(unsigned long r3, unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7) +{ + /* + * If we were passed in a board information, copy it into the + * residual data area. + */ + if (r3) { + memcpy((void *)__res, (void *)(r3 + KERNELBASE), sizeof(bd_t)); + } + +#if defined(CONFIG_BLK_DEV_INITRD) + /* + * If the init RAM disk has been configured in, and there's a valid + * starting address for it, set it up. + */ + if (r4) { + initrd_start = r4 + KERNELBASE; + initrd_end = r5 + KERNELBASE; + } +#endif /* CONFIG_BLK_DEV_INITRD */ + + /* Copy the kernel command line arguments to a safe place. */ + + if (r6) { + *(char *)(r7 + KERNELBASE) = 0; + strcpy(cmd_line, (char *)(r6 + KERNELBASE)); + } + + /* Initialize machine-dependency vectors */ + + ppc_md.setup_arch = walnut_setup_arch; + ppc_md.setup_residual = walnut_setup_residual; + ppc_md.get_cpuinfo = NULL; + ppc_md.irq_cannonicalize = NULL; + ppc_md.init_IRQ = walnut_init_IRQ; + ppc_md.get_irq = walnut_get_irq; + ppc_md.init = NULL; + + ppc_md.restart = walnut_restart; + ppc_md.power_off = walnut_power_off; + ppc_md.halt = walnut_halt; + + ppc_md.time_init = walnut_time_init; + ppc_md.set_rtc_time = walnut_set_rtc_time; + ppc_md.get_rtc_time = walnut_get_rtc_time; + ppc_md.calibrate_decr = walnut_calibrate_decr; + + ppc_md.kbd_setkeycode = NULL; + ppc_md.kbd_getkeycode = NULL; + ppc_md.kbd_translate = NULL; + ppc_md.kbd_unexpected_up = NULL; + ppc_md.kbd_leds = NULL; + ppc_md.kbd_init_hw = NULL; + +#if defined(CONFIG_MAGIC_SYSRQ) + ppc_md.ppc_kbd_sysrq_xlate = NULL; +#endif + + return; +} + +/* + * Document me. 
+ */ +void __init +walnut_setup_arch(void) +{ + /* XXX - Implement me */ +} + +/* + * int walnut_setup_residual() + * + * Description: + * This routine pretty-prints the platform's internal CPU and bus clock + * frequencies into the buffer for usage in /proc/cpuinfo. + * + * Input(s): + * *buffer - Buffer into which CPU and bus clock frequencies are to be + * printed. + * + * Output(s): + * *buffer - Buffer with the CPU and bus clock frequencies. + * + * Returns: + * The number of bytes copied into 'buffer' if OK, otherwise zero or less + * on error. + */ +int +walnut_setup_residual(char *buffer) +{ + int len = 0; + bd_t *bp = (bd_t *)__res; + + len += sprintf(len + buffer, + "clock\t\t: %dMHz\n" + "bus clock\t\t: %dMHz\n", + bp->bi_intfreq / 1000000, + bp->bi_busfreq / 1000000); + + return (len); +} + +/* + * Document me. + */ +void __init +walnut_init_IRQ(void) +{ + int i; + + ppc4xx_pic_init(); + + for (i = 0; i < NR_IRQS; i++) { + irq_desc[i].handler = ppc4xx_pic; + } + + return; +} + +/* + * Document me. + */ +int +walnut_get_irq(struct pt_regs *regs) +{ + return (ppc4xx_pic_get_irq(regs)); +} + +/* + * Document me. + */ +void +walnut_restart(char *cmd) +{ + abort(); +} + +/* + * Document me. + */ +void +walnut_power_off(void) +{ + walnut_restart(NULL); +} + +/* + * Document me. + */ +void +walnut_halt(void) +{ + walnut_restart(NULL); +} + +/* + * Document me. + */ +void __init +walnut_time_init(void) +{ + /* XXX - Implement me */ +} + +/* + * Document me. + */ +int __init +walnut_set_rtc_time(unsigned long time) +{ + /* XXX - Implement me */ + + return (0); +} + +/* + * Document me. 
+ */ +unsigned long __init +walnut_get_rtc_time(void) +{ + /* XXX - Implement me */ + + return (0); +} + +/* + * void __init walnut_calibrate_decr() + * + * Description: + * This routine retrieves the internal processor frequency from the board + * information structure, sets up the kernel timer decrementer based on + * that value, enables the 403 programmable interval timer (PIT) and sets + * it up for auto-reload. + * + * Input(s): + * N/A + * + * Output(s): + * N/A + * + * Returns: + * N/A + * + */ +void __init +walnut_calibrate_decr(void) +{ + unsigned int freq; + bd_t *bip = (bd_t *)__res; + + freq = bip->bi_intfreq; + + decrementer_count = freq / HZ; + count_period_num = 1; + count_period_den = freq; + + /* Enable the PIT and set auto-reload of its value */ + + mtspr(SPRN_TCR, TCR_PIE | TCR_ARE); + + /* Clear any pending timer interrupts */ + + mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_PIS | TSR_FIS); +} +/* + * + * Copyright (c) 1999-2000 Grant Erickson + * + * Module name: walnut_setup.c + * + * Description: + * Architecture- / platform-specific boot-time initialization code for + * the IBM PowerPC 403GP "Walnut" evaluation board. Adapted from original + * code by Gary Thomas, Cort Dougan , and Dan Malek + * . + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "local_irq.h" +#include "ppc4xx_pic.h" +#include "time.h" +#include "walnut_setup.h" + + +/* Function Prototypes */ + +extern void abort(void); + +/* Global Variables */ + +unsigned char __res[sizeof(bd_t)]; + + +/* + * void __init walnut_init() + * + * Description: + * This routine... + * + * Input(s): + * r3 - Optional pointer to a board information structure. + * r4 - Optional pointer to the physical starting address of the init RAM + * disk. + * r5 - Optional pointer to the physical ending address of the init RAM + * disk. 
+ * r6 - Optional pointer to the physical starting address of any kernel + * command-line parameters. + * r7 - Optional pointer to the physical ending address of any kernel + * command-line parameters. + * + * Output(s): + * N/A + * + * Returns: + * N/A + * + */ +void __init +walnut_init(unsigned long r3, unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7) +{ + /* + * If we were passed in a board information, copy it into the + * residual data area. + */ + if (r3) { + memcpy((void *)__res, (void *)(r3 + KERNELBASE), sizeof(bd_t)); + } + +#if defined(CONFIG_BLK_DEV_INITRD) + /* + * If the init RAM disk has been configured in, and there's a valid + * starting address for it, set it up. + */ + if (r4) { + initrd_start = r4 + KERNELBASE; + initrd_end = r5 + KERNELBASE; + } +#endif /* CONFIG_BLK_DEV_INITRD */ + + /* Copy the kernel command line arguments to a safe place. */ + + if (r6) { + *(char *)(r7 + KERNELBASE) = 0; + strcpy(cmd_line, (char *)(r6 + KERNELBASE)); + } + + /* Initialize machine-dependency vectors */ + + ppc_md.setup_arch = walnut_setup_arch; + ppc_md.setup_residual = walnut_setup_residual; + ppc_md.get_cpuinfo = NULL; + ppc_md.irq_cannonicalize = NULL; + ppc_md.init_IRQ = walnut_init_IRQ; + ppc_md.get_irq = walnut_get_irq; + ppc_md.init = NULL; + + ppc_md.restart = walnut_restart; + ppc_md.power_off = walnut_power_off; + ppc_md.halt = walnut_halt; + + ppc_md.time_init = walnut_time_init; + ppc_md.set_rtc_time = walnut_set_rtc_time; + ppc_md.get_rtc_time = walnut_get_rtc_time; + ppc_md.calibrate_decr = walnut_calibrate_decr; + + ppc_md.kbd_setkeycode = NULL; + ppc_md.kbd_getkeycode = NULL; + ppc_md.kbd_translate = NULL; + ppc_md.kbd_unexpected_up = NULL; + ppc_md.kbd_leds = NULL; + ppc_md.kbd_init_hw = NULL; + +#if defined(CONFIG_MAGIC_SYSRQ) + ppc_md.ppc_kbd_sysrq_xlate = NULL; +#endif + + return; +} + +/* + * Document me. 
+ */ +void __init +walnut_setup_arch(void) +{ + /* XXX - Implement me */ +} + +/* + * int walnut_setup_residual() + * + * Description: + * This routine pretty-prints the platform's internal CPU and bus clock + * frequencies into the buffer for usage in /proc/cpuinfo. + * + * Input(s): + * *buffer - Buffer into which CPU and bus clock frequencies are to be + * printed. + * + * Output(s): + * *buffer - Buffer with the CPU and bus clock frequencies. + * + * Returns: + * The number of bytes copied into 'buffer' if OK, otherwise zero or less + * on error. + */ +int +walnut_setup_residual(char *buffer) +{ + int len = 0; + bd_t *bp = (bd_t *)__res; + + len += sprintf(len + buffer, + "clock\t\t: %dMHz\n" + "bus clock\t\t: %dMHz\n", + bp->bi_intfreq / 1000000, + bp->bi_busfreq / 1000000); + + return (len); +} + +/* + * Document me. + */ +void __init +walnut_init_IRQ(void) +{ + int i; + + ppc4xx_pic_init(); + + for (i = 0; i < NR_IRQS; i++) { + irq_desc[i].handler = ppc4xx_pic; + } + + return; +} + +/* + * Document me. + */ +int +walnut_get_irq(struct pt_regs *regs) +{ + return (ppc4xx_pic_get_irq(regs)); +} + +/* + * Document me. + */ +void +walnut_restart(char *cmd) +{ + abort(); +} + +/* + * Document me. + */ +void +walnut_power_off(void) +{ + walnut_restart(NULL); +} + +/* + * Document me. + */ +void +walnut_halt(void) +{ + walnut_restart(NULL); +} + +/* + * Document me. + */ +void __init +walnut_time_init(void) +{ + /* XXX - Implement me */ +} + +/* + * Document me. + */ +int __init +walnut_set_rtc_time(unsigned long time) +{ + /* XXX - Implement me */ + + return (0); +} + +/* + * Document me. 
+ */ +unsigned long __init +walnut_get_rtc_time(void) +{ + /* XXX - Implement me */ + + return (0); +} + +/* + * void __init walnut_calibrate_decr() + * + * Description: + * This routine retrieves the internal processor frequency from the board + * information structure, sets up the kernel timer decrementer based on + * that value, enables the 403 programmable interval timer (PIT) and sets + * it up for auto-reload. + * + * Input(s): + * N/A + * + * Output(s): + * N/A + * + * Returns: + * N/A + * + */ +void __init +walnut_calibrate_decr(void) +{ + unsigned int freq; + bd_t *bip = (bd_t *)__res; + + freq = bip->bi_intfreq; + + decrementer_count = freq / HZ; + count_period_num = 1; + count_period_den = freq; + + /* Enable the PIT and set auto-reload of its value */ + + mtspr(SPRN_TCR, TCR_PIE | TCR_ARE); + + /* Clear any pending timer interrupts */ + + mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_PIS | TSR_FIS); +} +/* + * + * Copyright (c) 1999-2000 Grant Erickson + * + * Module name: walnut_setup.c + * + * Description: + * Architecture- / platform-specific boot-time initialization code for + * the IBM PowerPC 403GP "Walnut" evaluation board. Adapted from original + * code by Gary Thomas, Cort Dougan , and Dan Malek + * . + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "local_irq.h" +#include "ppc4xx_pic.h" +#include "time.h" +#include "walnut_setup.h" + + +/* Function Prototypes */ + +extern void abort(void); + +/* Global Variables */ + +unsigned char __res[sizeof(bd_t)]; + + +/* + * void __init walnut_init() + * + * Description: + * This routine... + * + * Input(s): + * r3 - Optional pointer to a board information structure. + * r4 - Optional pointer to the physical starting address of the init RAM + * disk. + * r5 - Optional pointer to the physical ending address of the init RAM + * disk. 
+ * r6 - Optional pointer to the physical starting address of any kernel + * command-line parameters. + * r7 - Optional pointer to the physical ending address of any kernel + * command-line parameters. + * + * Output(s): + * N/A + * + * Returns: + * N/A + * + */ +void __init +walnut_init(unsigned long r3, unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7) +{ + /* + * If we were passed in a board information, copy it into the + * residual data area. + */ + if (r3) { + memcpy((void *)__res, (void *)(r3 + KERNELBASE), sizeof(bd_t)); + } + +#if defined(CONFIG_BLK_DEV_INITRD) + /* + * If the init RAM disk has been configured in, and there's a valid + * starting address for it, set it up. + */ + if (r4) { + initrd_start = r4 + KERNELBASE; + initrd_end = r5 + KERNELBASE; + } +#endif /* CONFIG_BLK_DEV_INITRD */ + + /* Copy the kernel command line arguments to a safe place. */ + + if (r6) { + *(char *)(r7 + KERNELBASE) = 0; + strcpy(cmd_line, (char *)(r6 + KERNELBASE)); + } + + /* Initialize machine-dependency vectors */ + + ppc_md.setup_arch = walnut_setup_arch; + ppc_md.setup_residual = walnut_setup_residual; + ppc_md.get_cpuinfo = NULL; + ppc_md.irq_cannonicalize = NULL; + ppc_md.init_IRQ = walnut_init_IRQ; + ppc_md.get_irq = walnut_get_irq; + ppc_md.init = NULL; + + ppc_md.restart = walnut_restart; + ppc_md.power_off = walnut_power_off; + ppc_md.halt = walnut_halt; + + ppc_md.time_init = walnut_time_init; + ppc_md.set_rtc_time = walnut_set_rtc_time; + ppc_md.get_rtc_time = walnut_get_rtc_time; + ppc_md.calibrate_decr = walnut_calibrate_decr; + + ppc_md.kbd_setkeycode = NULL; + ppc_md.kbd_getkeycode = NULL; + ppc_md.kbd_translate = NULL; + ppc_md.kbd_unexpected_up = NULL; + ppc_md.kbd_leds = NULL; + ppc_md.kbd_init_hw = NULL; + +#if defined(CONFIG_MAGIC_SYSRQ) + ppc_md.ppc_kbd_sysrq_xlate = NULL; +#endif + + return; +} + +/* + * Document me. 
+ */ +void __init +walnut_setup_arch(void) +{ + /* XXX - Implement me */ +} + +/* + * int walnut_setup_residual() + * + * Description: + * This routine pretty-prints the platform's internal CPU and bus clock + * frequencies into the buffer for usage in /proc/cpuinfo. + * + * Input(s): + * *buffer - Buffer into which CPU and bus clock frequencies are to be + * printed. + * + * Output(s): + * *buffer - Buffer with the CPU and bus clock frequencies. + * + * Returns: + * The number of bytes copied into 'buffer' if OK, otherwise zero or less + * on error. + */ +int +walnut_setup_residual(char *buffer) +{ + int len = 0; + bd_t *bp = (bd_t *)__res; + + len += sprintf(len + buffer, + "clock\t\t: %dMHz\n" + "bus clock\t\t: %dMHz\n", + bp->bi_intfreq / 1000000, + bp->bi_busfreq / 1000000); + + return (len); +} + +/* + * Document me. + */ +void __init +walnut_init_IRQ(void) +{ + int i; + + ppc4xx_pic_init(); + + for (i = 0; i < NR_IRQS; i++) { + irq_desc[i].handler = ppc4xx_pic; + } + + return; +} + +/* + * Document me. + */ +int +walnut_get_irq(struct pt_regs *regs) +{ + return (ppc4xx_pic_get_irq(regs)); +} + +/* + * Document me. + */ +void +walnut_restart(char *cmd) +{ + abort(); +} + +/* + * Document me. + */ +void +walnut_power_off(void) +{ + walnut_restart(NULL); +} + +/* + * Document me. + */ +void +walnut_halt(void) +{ + walnut_restart(NULL); +} + +/* + * Document me. + */ +void __init +walnut_time_init(void) +{ + /* XXX - Implement me */ +} + +/* + * Document me. + */ +int __init +walnut_set_rtc_time(unsigned long time) +{ + /* XXX - Implement me */ + + return (0); +} + +/* + * Document me. 
+ */ +unsigned long __init +walnut_get_rtc_time(void) +{ + /* XXX - Implement me */ + + return (0); +} + +/* + * void __init walnut_calibrate_decr() + * + * Description: + * This routine retrieves the internal processor frequency from the board + * information structure, sets up the kernel timer decrementer based on + * that value, enables the 403 programmable interval timer (PIT) and sets + * it up for auto-reload. + * + * Input(s): + * N/A + * + * Output(s): + * N/A + * + * Returns: + * N/A + * + */ +void __init +walnut_calibrate_decr(void) +{ + unsigned int freq; + bd_t *bip = (bd_t *)__res; + + freq = bip->bi_intfreq; + + decrementer_count = freq / HZ; + count_period_num = 1; + count_period_den = freq; + + /* Enable the PIT and set auto-reload of its value */ + + mtspr(SPRN_TCR, TCR_PIE | TCR_ARE); + + /* Clear any pending timer interrupts */ + + mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_PIS | TSR_FIS); +} +/* + * + * Copyright (c) 1999-2000 Grant Erickson + * + * Module name: walnut_setup.c + * + * Description: + * Architecture- / platform-specific boot-time initialization code for + * the IBM PowerPC 403GP "Walnut" evaluation board. Adapted from original + * code by Gary Thomas, Cort Dougan , and Dan Malek + * . + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "local_irq.h" +#include "ppc4xx_pic.h" +#include "time.h" +#include "walnut_setup.h" + + +/* Function Prototypes */ + +extern void abort(void); + +/* Global Variables */ + +unsigned char __res[sizeof(bd_t)]; + + +/* + * void __init walnut_init() + * + * Description: + * This routine... + * + * Input(s): + * r3 - Optional pointer to a board information structure. + * r4 - Optional pointer to the physical starting address of the init RAM + * disk. + * r5 - Optional pointer to the physical ending address of the init RAM + * disk. 
+ * r6 - Optional pointer to the physical starting address of any kernel + * command-line parameters. + * r7 - Optional pointer to the physical ending address of any kernel + * command-line parameters. + * + * Output(s): + * N/A + * + * Returns: + * N/A + * + */ +void __init +walnut_init(unsigned long r3, unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7) +{ + /* + * If we were passed in a board information, copy it into the + * residual data area. + */ + if (r3) { + memcpy((void *)__res, (void *)(r3 + KERNELBASE), sizeof(bd_t)); + } + +#if defined(CONFIG_BLK_DEV_INITRD) + /* + * If the init RAM disk has been configured in, and there's a valid + * starting address for it, set it up. + */ + if (r4) { + initrd_start = r4 + KERNELBASE; + initrd_end = r5 + KERNELBASE; + } +#endif /* CONFIG_BLK_DEV_INITRD */ + + /* Copy the kernel command line arguments to a safe place. */ + + if (r6) { + *(char *)(r7 + KERNELBASE) = 0; + strcpy(cmd_line, (char *)(r6 + KERNELBASE)); + } + + /* Initialize machine-dependency vectors */ + + ppc_md.setup_arch = walnut_setup_arch; + ppc_md.setup_residual = walnut_setup_residual; + ppc_md.get_cpuinfo = NULL; + ppc_md.irq_cannonicalize = NULL; + ppc_md.init_IRQ = walnut_init_IRQ; + ppc_md.get_irq = walnut_get_irq; + ppc_md.init = NULL; + + ppc_md.restart = walnut_restart; + ppc_md.power_off = walnut_power_off; + ppc_md.halt = walnut_halt; + + ppc_md.time_init = walnut_time_init; + ppc_md.set_rtc_time = walnut_set_rtc_time; + ppc_md.get_rtc_time = walnut_get_rtc_time; + ppc_md.calibrate_decr = walnut_calibrate_decr; + + ppc_md.kbd_setkeycode = NULL; + ppc_md.kbd_getkeycode = NULL; + ppc_md.kbd_translate = NULL; + ppc_md.kbd_unexpected_up = NULL; + ppc_md.kbd_leds = NULL; + ppc_md.kbd_init_hw = NULL; + +#if defined(CONFIG_MAGIC_SYSRQ) + ppc_md.ppc_kbd_sysrq_xlate = NULL; +#endif + + return; +} + +/* + * Document me. 
+ */ +void __init +walnut_setup_arch(void) +{ + /* XXX - Implement me */ +} + +/* + * int walnut_setup_residual() + * + * Description: + * This routine pretty-prints the platform's internal CPU and bus clock + * frequencies into the buffer for usage in /proc/cpuinfo. + * + * Input(s): + * *buffer - Buffer into which CPU and bus clock frequencies are to be + * printed. + * + * Output(s): + * *buffer - Buffer with the CPU and bus clock frequencies. + * + * Returns: + * The number of bytes copied into 'buffer' if OK, otherwise zero or less + * on error. + */ +int +walnut_setup_residual(char *buffer) +{ + int len = 0; + bd_t *bp = (bd_t *)__res; + + len += sprintf(len + buffer, + "clock\t\t: %dMHz\n" + "bus clock\t\t: %dMHz\n", + bp->bi_intfreq / 1000000, + bp->bi_busfreq / 1000000); + + return (len); +} + +/* + * Document me. + */ +void __init +walnut_init_IRQ(void) +{ + int i; + + ppc4xx_pic_init(); + + for (i = 0; i < NR_IRQS; i++) { + irq_desc[i].handler = ppc4xx_pic; + } + + return; +} + +/* + * Document me. + */ +int +walnut_get_irq(struct pt_regs *regs) +{ + return (ppc4xx_pic_get_irq(regs)); +} + +/* + * Document me. + */ +void +walnut_restart(char *cmd) +{ + abort(); +} + +/* + * Document me. + */ +void +walnut_power_off(void) +{ + walnut_restart(NULL); +} + +/* + * Document me. + */ +void +walnut_halt(void) +{ + walnut_restart(NULL); +} + +/* + * Document me. + */ +void __init +walnut_time_init(void) +{ + /* XXX - Implement me */ +} + +/* + * Document me. + */ +int __init +walnut_set_rtc_time(unsigned long time) +{ + /* XXX - Implement me */ + + return (0); +} + +/* + * Document me. 
+ */ +unsigned long __init +walnut_get_rtc_time(void) +{ + /* XXX - Implement me */ + + return (0); +} + +/* + * void __init walnut_calibrate_decr() + * + * Description: + * This routine retrieves the internal processor frequency from the board + * information structure, sets up the kernel timer decrementer based on + * that value, enables the 403 programmable interval timer (PIT) and sets + * it up for auto-reload. + * + * Input(s): + * N/A + * + * Output(s): + * N/A + * + * Returns: + * N/A + * + */ +void __init +walnut_calibrate_decr(void) +{ + unsigned int freq; + bd_t *bip = (bd_t *)__res; + + freq = bip->bi_intfreq; + + decrementer_count = freq / HZ; + count_period_num = 1; + count_period_den = freq; + + /* Enable the PIT and set auto-reload of its value */ + + mtspr(SPRN_TCR, TCR_PIE | TCR_ARE); + + /* Clear any pending timer interrupts */ + + mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_PIS | TSR_FIS); +} diff --git a/arch/ppc/kernel/walnut_setup.h b/arch/ppc/kernel/walnut_setup.h new file mode 100644 index 000000000000..f11755c6d8b7 --- /dev/null +++ b/arch/ppc/kernel/walnut_setup.h @@ -0,0 +1,250 @@ +/* + * + * Copyright (c) 1999-2000 Grant Erickson + * + * Module name: walnut_setup.c + * + * Description: + * Architecture- / platform-specific boot-time initialization code for + * the IBM PowerPC 405GP "Walnut" evaluation board. Adapted from original + * code by Gary Thomas, Cort Dougan , and Dan Malek + * . 
+ * + */ + +#ifndef __WALNUT_SETUP_H__ +#define __WALNUT_SETUP_H__ + +#include +#include + + +#ifdef __cplusplus +extern "C" { +#endif + +extern unsigned char __res[sizeof(bd_t)]; + +extern void walnut_init(unsigned long r3, + unsigned long ird_start, + unsigned long ird_end, + unsigned long cline_start, + unsigned long cline_end); +extern void walnut_setup_arch(void); +extern int walnut_setup_residual(char *buffer); +extern void walnut_init_IRQ(void); +extern int walnut_get_irq(struct pt_regs *regs); +extern void walnut_restart(char *cmd); +extern void walnut_power_off(void); +extern void walnut_halt(void); +extern void walnut_time_init(void); +extern int walnut_set_rtc_time(unsigned long now); +extern unsigned long walnut_get_rtc_time(void); +extern void walnut_calibrate_decr(void); + + +#ifdef __cplusplus +} +#endif + +#endif /* __WALNUT_SETUP_H__ */ +/* + * + * Copyright (c) 1999-2000 Grant Erickson + * + * Module name: walnut_setup.c + * + * Description: + * Architecture- / platform-specific boot-time initialization code for + * the IBM PowerPC 405GP "Walnut" evaluation board. Adapted from original + * code by Gary Thomas, Cort Dougan , and Dan Malek + * . 
+ * + */ + +#ifndef __WALNUT_SETUP_H__ +#define __WALNUT_SETUP_H__ + +#include +#include + + +#ifdef __cplusplus +extern "C" { +#endif + +extern unsigned char __res[sizeof(bd_t)]; + +extern void walnut_init(unsigned long r3, + unsigned long ird_start, + unsigned long ird_end, + unsigned long cline_start, + unsigned long cline_end); +extern void walnut_setup_arch(void); +extern int walnut_setup_residual(char *buffer); +extern void walnut_init_IRQ(void); +extern int walnut_get_irq(struct pt_regs *regs); +extern void walnut_restart(char *cmd); +extern void walnut_power_off(void); +extern void walnut_halt(void); +extern void walnut_time_init(void); +extern int walnut_set_rtc_time(unsigned long now); +extern unsigned long walnut_get_rtc_time(void); +extern void walnut_calibrate_decr(void); + + +#ifdef __cplusplus +} +#endif + +#endif /* __WALNUT_SETUP_H__ */ +/* + * + * Copyright (c) 1999-2000 Grant Erickson + * + * Module name: walnut_setup.c + * + * Description: + * Architecture- / platform-specific boot-time initialization code for + * the IBM PowerPC 405GP "Walnut" evaluation board. Adapted from original + * code by Gary Thomas, Cort Dougan , and Dan Malek + * . 
+ * + */ + +#ifndef __WALNUT_SETUP_H__ +#define __WALNUT_SETUP_H__ + +#include +#include + + +#ifdef __cplusplus +extern "C" { +#endif + +extern unsigned char __res[sizeof(bd_t)]; + +extern void walnut_init(unsigned long r3, + unsigned long ird_start, + unsigned long ird_end, + unsigned long cline_start, + unsigned long cline_end); +extern void walnut_setup_arch(void); +extern int walnut_setup_residual(char *buffer); +extern void walnut_init_IRQ(void); +extern int walnut_get_irq(struct pt_regs *regs); +extern void walnut_restart(char *cmd); +extern void walnut_power_off(void); +extern void walnut_halt(void); +extern void walnut_time_init(void); +extern int walnut_set_rtc_time(unsigned long now); +extern unsigned long walnut_get_rtc_time(void); +extern void walnut_calibrate_decr(void); + + +#ifdef __cplusplus +} +#endif + +#endif /* __WALNUT_SETUP_H__ */ +/* + * + * Copyright (c) 1999-2000 Grant Erickson + * + * Module name: walnut_setup.c + * + * Description: + * Architecture- / platform-specific boot-time initialization code for + * the IBM PowerPC 405GP "Walnut" evaluation board. Adapted from original + * code by Gary Thomas, Cort Dougan , and Dan Malek + * . 
+ * + */ + +#ifndef __WALNUT_SETUP_H__ +#define __WALNUT_SETUP_H__ + +#include +#include + + +#ifdef __cplusplus +extern "C" { +#endif + +extern unsigned char __res[sizeof(bd_t)]; + +extern void walnut_init(unsigned long r3, + unsigned long ird_start, + unsigned long ird_end, + unsigned long cline_start, + unsigned long cline_end); +extern void walnut_setup_arch(void); +extern int walnut_setup_residual(char *buffer); +extern void walnut_init_IRQ(void); +extern int walnut_get_irq(struct pt_regs *regs); +extern void walnut_restart(char *cmd); +extern void walnut_power_off(void); +extern void walnut_halt(void); +extern void walnut_time_init(void); +extern int walnut_set_rtc_time(unsigned long now); +extern unsigned long walnut_get_rtc_time(void); +extern void walnut_calibrate_decr(void); + + +#ifdef __cplusplus +} +#endif + +#endif /* __WALNUT_SETUP_H__ */ +/* + * + * Copyright (c) 1999-2000 Grant Erickson + * + * Module name: walnut_setup.c + * + * Description: + * Architecture- / platform-specific boot-time initialization code for + * the IBM PowerPC 405GP "Walnut" evaluation board. Adapted from original + * code by Gary Thomas, Cort Dougan , and Dan Malek + * . 
+ * + */ + +#ifndef __WALNUT_SETUP_H__ +#define __WALNUT_SETUP_H__ + +#include +#include + + +#ifdef __cplusplus +extern "C" { +#endif + +extern unsigned char __res[sizeof(bd_t)]; + +extern void walnut_init(unsigned long r3, + unsigned long ird_start, + unsigned long ird_end, + unsigned long cline_start, + unsigned long cline_end); +extern void walnut_setup_arch(void); +extern int walnut_setup_residual(char *buffer); +extern void walnut_init_IRQ(void); +extern int walnut_get_irq(struct pt_regs *regs); +extern void walnut_restart(char *cmd); +extern void walnut_power_off(void); +extern void walnut_halt(void); +extern void walnut_time_init(void); +extern int walnut_set_rtc_time(unsigned long now); +extern unsigned long walnut_get_rtc_time(void); +extern void walnut_calibrate_decr(void); + + +#ifdef __cplusplus +} +#endif + +#endif /* __WALNUT_SETUP_H__ */ diff --git a/arch/ppc/mm/4xx_tlb.c b/arch/ppc/mm/4xx_tlb.c index b9d9d21193f7..69bf88320d03 100644 --- a/arch/ppc/mm/4xx_tlb.c +++ b/arch/ppc/mm/4xx_tlb.c @@ -1,6 +1,9 @@ /* * - * Copyright (c) 1999 Grant Erickson + * Copyright (c) 1998-1999 TiVo, Inc. + * Original implementation. + * Copyright (c) 1999-2000 Grant Erickson + * Minor rework. * * Module name: 4xx_tlb.c * @@ -9,7 +12,10 @@ * */ +#include + #include +#include #include #include #include @@ -26,372 +32,327 @@ #endif -/* Function Macros */ - - -/* Type Definitios */ - -typedef struct pin_entry_s { - unsigned int e_pinned: 1, /* This TLB entry is pinned down. */ - e_used: 23; /* Number of users for this mapping. 
*/ -} pin_entry_t; - - /* Global Variables */ -static pin_entry_t pin_table[PPC4XX_TLB_SIZE]; +static int pinned = 0; /* Function Prototypes */ +static int PPC4xx_tlb_miss(struct pt_regs *, unsigned long, int); -void -PPC4xx_tlb_pin(unsigned long va, unsigned long pa, int pagesz, int cache) -{ - int i, found = FALSE; - unsigned long tag, data; - unsigned long opid; - - opid = mfspr(SPRN_PID); - mtspr(SPRN_PID, 0); - - data = (pa & TLB_RPN_MASK) | TLB_WR; - - if (cache) - data |= (TLB_EX | TLB_I); - else - data |= (TLB_G | TLB_I); - - tag = (va & TLB_EPN_MASK) | TLB_VALID | pagesz; - - for (i = 0; i < PPC4XX_TLB_SIZE; i++) { - if (pin_table[i].e_pinned == FALSE) { - found = TRUE; - break; - } - } +extern void do_page_fault(struct pt_regs *, unsigned long, unsigned long); - if (found) { - /* printk("Pinning %#x -> %#x in entry %d...\n", va, pa, i); */ - asm("tlbwe %0,%1,1" : : "r" (data), "r" (i)); - asm("tlbwe %0,%1,0" : : "r" (tag), "r" (i)); - asm("isync"); - pin_table[i].e_pinned = found; - } - mtspr(SPRN_PID, opid); - return; -} - -void -PPC4xx_tlb_unpin(unsigned long va, unsigned long pa, int size) +/* + * () + * + * Description: + * This routine... + * + * Input(s): + * + * + * Output(s): + * + * + * Returns: + * + * + */ +static inline void +PPC4xx_tlb_write(unsigned long tag, unsigned long data, unsigned int index) { - /* XXX - To beimplemented. */ + asm("tlbwe %0,%1,1" : : "r" (data), "r" (index)); + asm("tlbwe %0,%1,0" : : "r" (tag), "r" (index)); } +/* + * () + * + * Description: + * This routine... 
+ * + * Input(s): + * + * + * Output(s): + * + * + * Returns: + * + * + */ void -PPC4xx_tlb_flush_all(void) +PPC4xx_flush_tlb_all(void) { int i; - unsigned long flags, opid; + unsigned long flags, pid; save_flags(flags); cli(); - opid = mfspr(SPRN_PID); + pid = mfspr(SPRN_PID); mtspr(SPRN_PID, 0); - for (i = 0; i < PPC4XX_TLB_SIZE; i++) { - unsigned long ov = 0; - - if (pin_table[i].e_pinned) - continue; - - asm("tlbwe %0,%1,0" : : "r" (ov), "r" (i)); - asm("tlbwe %0,%1,1" : : "r" (ov), "r" (i)); + for (i = pinned; i < PPC4XX_TLB_SIZE; i++) { + PPC4xx_tlb_write(0, 0, i); } - asm("sync;isync"); - mtspr(SPRN_PID, opid); + mtspr(SPRN_PID, pid); restore_flags(flags); } +/* + * () + * + * Description: + * This routine... + * + * Input(s): + * + * + * Output(s): + * + * + * Returns: + * + * + */ void -PPC4xx_tlb_flush(unsigned long va, int pid) +PPC4xx_dtlb_miss(struct pt_regs *regs) { - unsigned long i, tag, flags, found = 1, opid; - - save_flags(flags); - cli(); + unsigned long addr = mfspr(SPRN_DEAR); + int write = mfspr(SPRN_ESR) & ESR_DST; - opid = mfspr(SPRN_PID); - mtspr(SPRN_PID, pid); - - asm("tlbsx. %0,0,%2;beq 1f;li %1,0;1:" : "=r" (i), "=r" (found) : "r" (va)); - - if (found && pin_table[i].e_pinned == 0) { - asm("tlbre %0,%1,0" : "=r" (tag) : "r" (i)); - tag &= ~ TLB_VALID; - asm("tlbwe %0,%1,0" : : "r" (tag), "r" (i)); + if (PPC4xx_tlb_miss(regs, addr, write) < 0) { + sti(); + do_page_fault(regs, addr, write); + cli(); } - - mtspr(SPRN_PID, opid); - - restore_flags(flags); + } -#if 0 /* - * TLB miss handling code. + * () + * + * Description: + * This routine... + * + * Input(s): + * + * + * Output(s): + * + * + * Returns: + * + * */ +void +PPC4xx_itlb_miss(struct pt_regs *regs) +{ + unsigned long addr = regs->nip; + + if (PPC4xx_tlb_miss(regs, addr, 0) < 0) { + sti(); + do_page_fault(regs, addr, 0); + cli(); + } +} /* - * Handle TLB faults. We should push this back to assembly code eventually. - * Caller is responsible for turning off interrupts ... 
+ * () + * + * Description: + * This routine... + * + * Input(s): + * + * + * Output(s): + * + * + * Returns: + * + * */ -static inline void -tlbDropin(unsigned long tlbhi, unsigned long tlblo) { - /* - * Avoid the divide at the slight cost of a little too - * much emphasis on the last few entries. - */ - unsigned long rand = mfspr(SPRN_TBLO); - rand &= 0x3f; - rand += NTLB_WIRED; - if (rand >= NTLB) - rand -= NTLB_WIRED; - - asm("tlbwe %0,%1,1" : : "r" (tlblo), "r" (rand)); - asm("tlbwe %0,%1,0" : : "r" (tlbhi), "r" (rand)); - asm("isync;sync"); -} +void +PPC4xx_tlb_pin(unsigned long va, unsigned long pa, int pagesz, int cache) +{ + unsigned long tag, data; + unsigned long opid; -static inline void -mkTlbEntry(unsigned long addr, pte_t *pte) { - unsigned long tlbhi; - unsigned long tlblo; - int found = 1; - int idx; + if (pinned >= PPC4XX_TLB_SIZE) + return; - /* - * Construct the TLB entry. - */ - tlbhi = addr & ~(PAGE_SIZE-1); - tlblo = virt_to_phys(pte_page(*pte)) & TLBLO_RPN; - if (pte_val(*pte) & _PAGE_HWWRITE) - tlblo |= TLBLO_WR; - if (pte_val(*pte) & _PAGE_NO_CACHE) - tlblo |= TLBLO_I; - tlblo |= TLBLO_EX; - if (addr < KERNELBASE) - tlblo |= TLBLO_Z_USER; - tlbhi |= TLBHI_PGSZ_4K; - tlbhi |= TLBHI_VALID; + opid = mfspr(SPRN_PID); + mtspr(SPRN_PID, 0); - /* - * See if a match already exists in the TLB. - */ - asm("tlbsx. %0,0,%2;beq 1f;li %1,0;1:" : "=r" (idx), "=r" (found) : "r" (tlbhi)); - if (found) { - /* - * Found an existing entry. Just reuse the index. - */ - asm("tlbwe %0,%1,0" : : "r" (tlbhi), "r" (idx)); - asm("tlbwe %0,%1,1" : : "r" (tlblo), "r" (idx)); - } - else { - /* - * Do the more expensive operation - */ - tlbDropin(tlbhi, tlblo); - } + data = (pa & TLB_RPN_MASK) | TLB_WR; + + if (cache) + data |= (TLB_EX); + else + data |= (TLB_G | TLB_I); + + tag = (va & TLB_EPN_MASK) | TLB_VALID | pagesz; + + PPC4xx_tlb_write(tag, data, pinned++); + + mtspr(SPRN_PID, opid); + return; } /* - * Mainline of the TLB miss handler. 
The above inline routines should fold into - * this one, eliminating most function call overhead. + * () + * + * Description: + * This routine... + * + * Input(s): + * + * + * Output(s): + * + * + * Returns: + * + * */ -#ifdef TLBMISS_DEBUG -volatile unsigned long miss_start; -volatile unsigned long miss_end; -#endif +void +PPC4xx_tlb_unpin(unsigned long va, unsigned long pa, int size) +{ + /* XXX - To be implemented. */ +} -static inline int tlbMiss(struct pt_regs *regs, unsigned long badaddr, int wasWrite) +/* + * () + * + * Description: + * This routine... + * + * Input(s): + * + * + * Output(s): + * + * + * Returns: + * + * + */ +static inline void +PPC4xx_tlb_update(unsigned long addr, pte_t *pte) { - int spid, ospid; - struct mm_struct *mm; - pgd_t *pgd; - pmd_t *pmd; - pte_t *pte; - - if (!user_mode(regs) && (badaddr >= KERNELBASE)) { - mm = task[0]->mm; - spid = 0; -#ifdef TLBMISS_DEBUG - miss_start = 0; -#endif - } - else { - mm = current->mm; - spid = mfspr(SPRN_PID); -#ifdef TLBMISS_DEBUG - miss_start = 1; -#endif - } -#ifdef TLBMISS_DEBUG - store_cache_range((unsigned long)&miss_start, sizeof(miss_start)); -#endif + unsigned long data, tag, rand; + int i, found = 1; - pgd = pgd_offset(mm, badaddr); - if (pgd_none(*pgd)) - goto NOGOOD; - - pmd = pmd_offset(pgd, badaddr); - if (pmd_none(*pmd)) - goto NOGOOD; - - pte = pte_offset(pmd, badaddr); - if (pte_none(*pte)) - goto NOGOOD; - if (!pte_present(*pte)) - goto NOGOOD; -#if 1 - prohibit_if_guarded(badaddr, sizeof(int)); -#endif - if (wasWrite) { - if (!pte_write(*pte)) { - goto NOGOOD; - } - set_pte(pte, pte_mkdirty(*pte)); - } - set_pte(pte, pte_mkyoung(*pte)); + /* Construct the hardware TLB entry from the Linux-style PTE */ - ospid = mfspr(SPRN_PID); - mtspr(SPRN_PID, spid); - mkTlbEntry(badaddr, pte); - mtspr(SPRN_PID, ospid); + tag = tag = (addr & PAGE_MASK) | TLB_VALID | TLB_PAGESZ(PAGESZ_4K); + data = data = (pte_val(*pte) & PAGE_MASK) | TLB_EX | TLB_WR; -#ifdef TLBMISS_DEBUG - miss_end = 0; - 
store_cache_range((unsigned long)&miss_end, sizeof(miss_end)); +#if 0 + if (pte_val(*pte) & _PAGE_HWWRITE) + data |= TLB_WR; #endif - return 0; -NOGOOD: -#ifdef TLBMISS_DEBUG - miss_end = 1; - store_cache_range((unsigned long)&miss_end, sizeof(miss_end)); -#endif - return 1; -} + if (pte_val(*pte) & _PAGE_NO_CACHE) + data |= TLB_I; -/* - * End TLB miss handling code. - */ -/* ---------- */ + if (pte_val(*pte) & _PAGE_GUARDED) + data |= TLB_G; -/* - * Used to flush the TLB if the page fault handler decides to change - * something. - */ -void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte) { - int spid; - unsigned long flags; + if (addr < KERNELBASE) + data |= TLB_ZSEL(1); - save_flags(flags); - cli(); + /* Attempt to match the new tag to an existing entry in the TLB. */ - if (addr >= KERNELBASE) - spid = 0; - else - spid = vma->vm_mm->context; - tlbFlush1(addr, spid); + asm("tlbsx. %0,0,%2;" + "beq 1f;" + "li %1,0;1:" : "=r" (i), "=r" (found) : "r" (tag)); - restore_flags(flags); + /* + * If we found a match for the tag, reuse the entry index and update + * the tag and data portions. Otherwise, we did not find a match. Use + * the lower 5 bits of the lower time base register as a pseudo-random + * index into the TLB and replace the entry at that index. + */ + + if (found) { + PPC4xx_tlb_write(tag, data, i); + } else { + rand = mfspr(SPRN_TBLO) & (PPC4XX_TLB_SIZE - 1); + rand += pinned; + if (rand >= PPC4XX_TLB_SIZE) + rand -= pinned; + + PPC4xx_tlb_write(tag, data, rand); + asm("isync;sync"); + } } /* - * Given a virtual address in the current address space, make - * sure the associated physical page is present in memory, - * and if the data is to be modified, that any copy-on-write - * actions have taken place. + * () + * + * Description: + * This routine... 
+ * + * Input(s): + * + * + * Output(s): + * + * + * Returns: + * + * */ -unsigned long make_page_present(unsigned long p, int rw) { +static int +PPC4xx_tlb_miss(struct pt_regs *regs, unsigned long addr, int write) +{ + unsigned long spid, ospid; + struct mm_struct *mm; + pgd_t *pgd; + pmd_t *pmd; pte_t *pte; - char c; - get_user(c, (char *) p); + if (!user_mode(regs) && (addr >= KERNELBASE)) { + mm = &init_mm; + spid = 0; + } else { + mm = current->mm; + spid = mfspr(SPRN_PID); + } + + pgd = pgd_offset(mm, addr); + if (pgd_none(*pgd)) + goto bad; + + pmd = pmd_offset(pgd, addr); + if (pmd_none(*pmd)) + goto bad; - pte = findPTE(current->mm, p); + pte = pte_offset(pmd, addr); if (pte_none(*pte) || !pte_present(*pte)) - debug("make_page_present didn't load page", 0); - - if (rw) { - /* - * You have to write-touch the page, so that - * zero-filled pages are forced to be copied - * rather than still pointing at the zero - * page. - */ - extern void tlbFlush1(unsigned long, int); - tlbFlush1(p, get_context()); - put_user(c, (char *) p); - if (!pte_write(*pte)) - debug("make_page_present didn't make page writable", 0); - - tlbFlush1(p, get_context()); - } - return pte_page(*pte); -} + goto bad; -void DataTLBMissException(struct pt_regs *regs) -{ - unsigned long badaddr = mfspr(SPRN_DEAR); - int wasWrite = mfspr(SPRN_ESR) & 0x800000; - if (tlbMiss(regs, badaddr, wasWrite)) { - sti(); - do_page_fault(regs, badaddr, wasWrite); - cli(); - } -} + if (write) { + if (!pte_write(*pte)) + goto bad; -void InstructionTLBMissException(struct pt_regs *regs) -{ - if (!current) { - debug("ITLB Miss with no current task", regs); - sti(); - bad_page_fault(regs, regs->nip); - cli(); - return; - } - if (tlbMiss(regs, regs->nip, 0)) { - sti(); - do_page_fault(regs, regs->nip, 0); - cli(); - } -} + set_pte(pte, pte_mkdirty(*pte)); + } + set_pte(pte, pte_mkyoung(*pte)); -void DataPageFault(struct pt_regs *regs) -{ - unsigned long badaddr = mfspr(SPRN_DEAR); - int wasWrite = mfspr(SPRN_ESR) & 
0x800000; - sti(); - do_page_fault(regs, badaddr, wasWrite); - cli(); -} + ospid = mfspr(SPRN_PID); + mtspr(SPRN_PID, spid); + PPC4xx_tlb_update(addr, pte); + mtspr(SPRN_PID, ospid); -void InstructionPageFault(struct pt_regs *regs) -{ - if (!current) { - debug("ITLB fault with no current task", regs); - sti(); - bad_page_fault(regs, regs->nip); - cli(); - return; - } - sti(); - do_page_fault(regs, regs->nip, 0); - cli(); + return (0); +bad: + return (-1); } -#endif diff --git a/arch/ppc/mm/init.c b/arch/ppc/mm/init.c index c558ef05168b..216527e34344 100644 --- a/arch/ppc/mm/init.c +++ b/arch/ppc/mm/init.c @@ -107,7 +107,6 @@ unsigned long *oak_find_end_of_memory(void); static void mapin_ram(void); void map_page(unsigned long va, unsigned long pa, int flags); extern void die_if_kernel(char *,struct pt_regs *,long); -extern void show_net_buffers(void); struct mem_pieces phys_mem; @@ -281,9 +280,6 @@ void show_mem(void) printk("%d pages swap cached\n",cached); printk("%d pages in page table cache\n",(int)pgtable_cache_size); show_buffers(); -#ifdef CONFIG_NET - show_net_buffers(); -#endif printk("%-8s %3s %8s %8s %8s %9s %8s", "Process", "Pid", "Ctx", "Ctx<<4", "Last Sys", "pc", "task"); #ifdef __SMP__ @@ -643,7 +639,9 @@ void __init setbat(int index, unsigned long virt, unsigned long phys, wimgxpp |= (flags & _PAGE_RW)? BPP_RW: BPP_RX; bat[1].word[0] = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */ bat[1].word[1] = phys | wimgxpp; +#ifndef CONFIG_KGDB /* want user access for breakpoints */ if (flags & _PAGE_USER) +#endif bat[1].bat.batu.vp = 1; if (flags & _PAGE_GUARDED) { /* G bit must be zero in IBATs */ @@ -732,6 +730,10 @@ static void __init mapin_ram(void) * don't get ASID compares on kernel space. 
*/ f = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_SHARED; +#ifdef CONFIG_KGDB + /* Allows stub to set breakpoints everywhere */ + f |= _PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE; +#else if ((char *) v < _stext || (char *) v >= etext) f |= _PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE; #ifndef CONFIG_8xx @@ -740,6 +742,7 @@ static void __init mapin_ram(void) forces R/W kernel access */ f |= _PAGE_USER; #endif /* CONFIG_8xx */ +#endif /* CONFIG_KGDB */ map_page(v, p, f); v += PAGE_SIZE; p += PAGE_SIZE; @@ -844,6 +847,8 @@ void free_initrd_mem(unsigned long start, unsigned long end) } #endif +extern boot_infos_t *disp_bi; + /* * Do very early mm setup such as finding the size of memory * and setting up the hash table. @@ -855,25 +860,45 @@ void free_initrd_mem(unsigned long start, unsigned long end) void __init MMU_init(void) { + /* + * The Zone Protection Register (ZPR) defines how protection will + * be applied to every page which is a member of a given zone. At + * present, we utilize only two of the 4xx's zones. The first, zone + * 0, is set at '00b and only allows access in supervisor-mode based + * on the EX and WR bits. No user-mode access is allowed. The second, + * zone 1, is set at '10b and in supervisor-mode allows access + * without regard to the EX and WR bits. In user-mode, access is + * allowed based on the EX and WR bits. + */ + + mtspr(SPRN_ZPR, 0x2aaaaaaa); + + /* Hardwire any TLB entries necessary here. */ + PPC4xx_tlb_pin(KERNELBASE, 0, TLB_PAGESZ(PAGESZ_16M), 1); - PPC4xx_tlb_pin(OAKNET_IO_BASE, OAKNET_IO_BASE, TLB_PAGESZ(PAGESZ_4K), 0); - end_of_DRAM = oak_find_end_of_memory(); - /* Map in all of RAM starting at KERNELBASE */ + /* + * Find the top of physical memory and map all of it in starting + * at KERNELBASE. + */ + end_of_DRAM = oak_find_end_of_memory(); mapin_ram(); - /* Zone 0 - kernel (above 0x80000000), zone 1 - user */ + /* + * Set up the real-mode cache parameters for the exception vector + * handlers (which are run in real-mode). 
+ */ - mtspr(SPRN_ZPR, 0x2aaaaaaa); - mtspr(SPRN_DCWR, 0x00000000); /* all caching is write-back */ + mtspr(SPRN_DCWR, 0x00000000); /* All caching is write-back */ - /* Cache 128MB of space starting at KERNELBASE. */ + /* + * Cache instruction and data space where the exception + * vectors and the kernel live in real-mode. + */ - mtspr(SPRN_DCCR, 0x00000000); - /* flush_instruction_cache(); XXX */ - mtspr(SPRN_ICCR, 0x00000000); - + mtspr(SPRN_DCCR, 0x80000000); /* 128 MB of data space at 0x0. */ + mtspr(SPRN_ICCR, 0x80000000); /* 128 MB of instr. space at 0x0. */ } #else void __init MMU_init(void) @@ -895,7 +920,11 @@ void __init MMU_init(void) if ( ppc_md.progress ) ppc_md.progress("MMU:hash init", 0x300); hash_init(); +#ifdef CONFIG_PPC64 + _SDR1 = 0; /* temporary hack to just use bats -- Cort */ +#else _SDR1 = __pa(Hash) | (Hash_mask >> 10); +#endif ioremap_base = 0xf8000000; if ( ppc_md.progress ) ppc_md.progress("MMU:mapin", 0x301); @@ -916,8 +945,14 @@ void __init MMU_init(void) break; case _MACH_chrp: setbat(0, 0xf8000000, 0xf8000000, 0x08000000, IO_PAGE); +#ifdef CONFIG_PPC64 + /* temporary hack to get working until page tables are stable -- Cort*/ + setbat(1, 0x80000000, 0xc0000000, 0x10000000, IO_PAGE); + setbat(3, 0xd0000000, 0xd0000000, 0x10000000, IO_PAGE); +#else setbat(1, 0x80000000, 0x80000000, 0x10000000, IO_PAGE); setbat(3, 0x90000000, 0x90000000, 0x10000000, IO_PAGE); +#endif break; case _MACH_Pmac: #if 0 @@ -929,6 +964,10 @@ void __init MMU_init(void) setbat(0, base, base, 0x100000, IO_PAGE); } #endif +#if 0 + setbat(0, disp_bi->dispDeviceBase, disp_bi->dispDeviceBase, 0x100000, IO_PAGE); + disp_bi->logicalDisplayBase = disp_bi->dispDeviceBase; +#endif ioremap_base = 0xf0000000; break; case _MACH_apus: @@ -1087,6 +1126,8 @@ void __init paging_init(void) void __init mem_init(void) { + extern char *sysmap; + extern unsigned long sysmap_size; unsigned long addr; int codepages = 0; int datapages = 0; @@ -1116,6 +1157,11 @@ void __init 
mem_init(void) addr += PAGE_SIZE) SetPageReserved(mem_map + MAP_NR(addr)); #endif /* defined(CONFIG_CHRP) || defined(CONFIG_ALL_PPC) */ + if ( sysmap_size ) + for (addr = (unsigned long)sysmap; + addr < PAGE_ALIGN((unsigned long)sysmap+sysmap_size) ; + addr += PAGE_SIZE) + SetPageReserved(mem_map + MAP_NR(addr)); for (addr = PAGE_OFFSET; addr < (unsigned long)end_of_DRAM; addr += PAGE_SIZE) { @@ -1131,10 +1177,8 @@ void __init mem_init(void) } printk("Memory: %luk available (%dk kernel code, %dk data, %dk init) [%08x,%08lx]\n", - (unsigned long) nr_free_pages << (PAGE_SHIFT-10), - codepages << (PAGE_SHIFT-10), - datapages << (PAGE_SHIFT-10), - initpages << (PAGE_SHIFT-10), + (unsigned long)nr_free_pages()<< (PAGE_SHIFT-10), + codepages, datapages, initpages, PAGE_OFFSET, (unsigned long) end_of_DRAM); mem_init_done = 1; } @@ -1153,7 +1197,7 @@ unsigned long __init *pmac_find_end_of_memory(void) unsigned long a, total; /* max amount of RAM we allow -- Cort */ -#define RAM_LIMIT (768<<20) +#define RAM_LIMIT (64<<20) memory_node = find_devices("memory"); if (memory_node == NULL) { @@ -1384,7 +1428,7 @@ static void __init hash_init(void) { if ( ppc_md.progress ) ppc_md.progress("hash:patch", 0x345); Hash_end = (PTE *) ((unsigned long)Hash + Hash_size); - __clear_user(Hash, Hash_size); + /*__clear_user(Hash, Hash_size);*/ /* * Patch up the instructions in head.S:hash_page diff --git a/arch/ppc/xmon/start.c b/arch/ppc/xmon/start.c index 9ed5f3805143..f25060c942db 100644 --- a/arch/ppc/xmon/start.c +++ b/arch/ppc/xmon/start.c @@ -8,19 +8,34 @@ #include #include #include +#include #include #include +#include #include static volatile unsigned char *sccc, *sccd; unsigned long TXRDY, RXRDY; extern void xmon_printf(const char *fmt, ...); -extern void map_bootx_text(void); extern void drawchar(char); extern void drawstring(const char *str); +static int xmon_expect(const char *str, unsigned int timeout); static int console = 0; static int use_screen = 0; +static int via_modem = 
0; +static int xmon_use_sccb = 0; +static struct device_node *macio_node; + +#define TB_SPEED 25000000 + +static inline unsigned int readtb(void) +{ + unsigned int ret; + + asm volatile("mftb %0" : "=r" (ret) :); + return ret; +} void buf_access(void) { @@ -36,17 +51,19 @@ xmon_map_scc(void) if ( _machine == _MACH_Pmac ) { struct device_node *np; - extern boot_infos_t *boot_infos; unsigned long addr; - #ifdef CONFIG_BOOTX_TEXT - if (boot_infos != 0 && find_via_pmu()) { - printk("xmon uses screen and keyboard\n"); + extern boot_infos_t *disp_bi; + + /* needs to be hacked if xmon_printk is to be used + from within find_via_pmu() */ + if (!via_modem && disp_bi && find_via_pmu()) { + drawstring("xmon uses screen and keyboard\n"); use_screen = 1; - map_bootx_text(); return; } #endif + #ifdef CHRP_ESCC addr = 0xc1013020; #else @@ -57,9 +74,10 @@ xmon_map_scc(void) np = find_devices("mac-io"); if (np && np->n_addrs) { + macio_node = np; addr = np->addrs[0].address + 0x13000; - /* use the B channel on the iMac, A channel on others */ - if (addr >= 0xf0000000) + /* use the B channel on the iMac */ + if (!xmon_use_sccb) addr += 0x20; /* use A channel */ } base = (volatile unsigned char *) ioremap(addr & PAGE_MASK, PAGE_SIZE); @@ -70,22 +88,22 @@ xmon_map_scc(void) sccd = sccc + (0xf3013030 - 0xf3013020); #endif } - else if ( _machine & _MACH_chrp ) + else if ( _machine & _MACH_gemini ) { /* should already be mapped by the kernel boot */ - sccc = (volatile unsigned char *) (isa_io_base + 0x3fd); - sccd = (volatile unsigned char *) (isa_io_base + 0x3f8); + sccc = (volatile unsigned char *) 0xffeffb0d; + sccd = (volatile unsigned char *) 0xffeffb08; TXRDY = 0x20; RXRDY = 1; + console = 1; } - else if ( _machine & _MACH_gemini ) + else { /* should already be mapped by the kernel boot */ - sccc = (volatile unsigned char *) 0xffeffb0d; - sccd = (volatile unsigned char *) 0xffeffb08; + sccc = (volatile unsigned char *) (isa_io_base + 0x3fd); + sccd = (volatile unsigned char *) 
(isa_io_base + 0x3f8); TXRDY = 0x20; RXRDY = 1; - console = 1; } } @@ -98,7 +116,7 @@ int xmon_write(void *handle, void *ptr, int nb) { char *p = ptr; - int i, ct; + int i, c, ct; #ifdef CONFIG_BOOTX_TEXT if (use_screen) { @@ -111,20 +129,26 @@ xmon_write(void *handle, void *ptr, int nb) if (!scc_initialized) xmon_init_scc(); for (i = 0; i < nb; ++i) { -#ifdef CONFIG_ADB + ct = 0; while ((*sccc & TXRDY) == 0) +#ifdef CONFIG_ADB if (sys_ctrler == SYS_CTRLER_PMU) pmu_poll(); +#else + ; #endif /* CONFIG_ADB */ - buf_access(); - if ( console && (*p != '\r')) - printk("%c", *p); - ct = 0; - if ( *p == '\n') + c = p[i]; + if (c == '\n' && !ct) { + c = '\r'; ct = 1; - *sccd = *p++; - if ( ct ) - xmon_write(handle, "\r", 1); + --i; + } else { + if (console) + printk("%c", c); + ct = 0; + } + buf_access(); + *sccd = c; } return i; } @@ -206,36 +230,49 @@ xmon_read(void *handle, void *ptr, int nb) if (!scc_initialized) xmon_init_scc(); for (i = 0; i < nb; ++i) { -#ifdef CONFIG_ADB while ((*sccc & RXRDY) == 0) +#ifdef CONFIG_ADB if (sys_ctrler == SYS_CTRLER_PMU) pmu_poll(); +#else + ; #endif /* CONFIG_ADB */ buf_access(); -#if 0 - if ( 0/*console*/ ) - *p++ = ppc_md.kbd_getkeycode(); - else -#endif *p++ = *sccd; } return i; } +int +xmon_read_poll(void) +{ + if ((*sccc & RXRDY) == 0) { +#ifdef CONFIG_ADB + if (sys_ctrler == SYS_CTRLER_PMU) + pmu_poll(); +#else + ; +#endif + return -1; + } + buf_access(); + return *sccd; +} + static unsigned char scc_inittab[] = { 13, 0, /* set baud rate divisor */ 12, 1, 14, 1, /* baud rate gen enable, src=rtxc */ 11, 0x50, /* clocks = br gen */ - 5, 0x6a, /* tx 8 bits, assert RTS */ - 4, 0x44, /* x16 clock, 1 stop */ + 5, 0xea, /* tx 8 bits, assert DTR & RTS */ + 4, 0x46, /* x16 clock, 1 stop */ 3, 0xc1, /* rx enable, 8 bits */ }; void xmon_init_scc() { - if ( _machine & (_MACH_chrp|_MACH_gemini) ) + if ( _machine == _MACH_chrp ) { sccd[3] = 0x83; eieio(); /* LCR = 8N1 + DLAB */ sccd[0] = 3; eieio(); /* DLL = 38400 baud */ @@ -248,6 +285,14 
@@ xmon_init_scc() { int i, x; + if (macio_node != 0) { + unsigned int t0; + + feature_set(macio_node, FEATURE_Modem_power); + t0 = readtb(); + while (readtb() - t0 < 3*TB_SPEED) + eieio(); + } for (i = 20000; i != 0; --i) { x = *sccc; eieio(); } @@ -259,6 +304,18 @@ xmon_init_scc() } } scc_initialized = 1; + if (via_modem) { + for (;;) { + xmon_write(0, "ATE1V1\r", 7); + if (xmon_expect("OK", 5)) { + xmon_write(0, "ATA\r", 4); + if (xmon_expect("CONNECT", 40)) + break; + } + xmon_write(0, "+++", 3); + xmon_expect("OK", 3); + } + } } #if 0 @@ -331,6 +388,35 @@ static char line[256]; static char *lineptr; static int lineleft; +int xmon_expect(const char *str, unsigned int timeout) +{ + int c; + unsigned int t0; + + timeout *= TB_SPEED; + t0 = readtb(); + do { + lineptr = line; + for (;;) { + c = xmon_read_poll(); + if (c == -1) { + if (readtb() - t0 > timeout) { + printk("timeout\n"); + return 0; + } + continue; + } + if (c == '\n') + break; + printk("%c", c); + if (c != '\r' && lineptr < &line[sizeof(line) - 1]) + *lineptr++ = c; + } + *lineptr = 0; + } while (strstr(line, str) == NULL); + return 1; +} + int xmon_getchar(void) { diff --git a/arch/ppc/xmon/xmon.c b/arch/ppc/xmon/xmon.c index 1c6a812d26af..a0da2f1b4e6e 100644 --- a/arch/ppc/xmon/xmon.c +++ b/arch/ppc/xmon/xmon.c @@ -1344,3 +1344,31 @@ char *str; { lineptr = str; } + +char last[64]; +char * +lookup_addr(unsigned long addr) +{ + extern char *sysmap; + extern unsigned long sysmap_size; + char *c = sysmap; + unsigned long cmp; + + if ( !sysmap || !sysmap_size ) + return NULL; + + /* adjust if addr is relative to kernelbase */ + if ( addr < PAGE_OFFSET ) + addr += PAGE_OFFSET; + + cmp = simple_strtoul(c, &c, 8); + strcpy( last, strsep( &c, "\n")); + while ( c < (sysmap+sysmap_size) ) + { + cmp = simple_strtoul(c, &c, 8); + if ( cmp < addr ) + break; + strcpy( last, strsep( &c, "\n")); + } + return last; +} diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index 83e2f3e9ea14..458685f4afa9 100644 --- 
a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -41,7 +41,6 @@ unsigned long mmu_context_cache; static unsigned long totalram_pages = 0; static unsigned long totalhigh_pages = 0; -extern void show_net_buffers(void); extern unsigned long init_smp_mappings(unsigned long); /* @@ -173,9 +172,6 @@ void show_mem(void) printk("%d pages swap cached\n",cached); printk("%ld pages in page table cache\n",pgtable_cache_size); show_buffers(); -#ifdef CONFIG_NET - show_net_buffers(); -#endif } /* References to section boundaries */ diff --git a/arch/sparc/kernel/irq.c b/arch/sparc/kernel/irq.c index 8e8fab320715..471929a01691 100644 --- a/arch/sparc/kernel/irq.c +++ b/arch/sparc/kernel/irq.c @@ -1,4 +1,4 @@ -/* $Id: irq.c,v 1.100 2000/01/29 01:38:04 anton Exp $ +/* $Id: irq.c,v 1.101 2000/02/09 11:15:03 davem Exp $ * arch/sparc/kernel/irq.c: Interrupt request handling routines. On the * Sparc the IRQ's are basically 'cast in stone' * and you are supposed to probe the prom's device @@ -205,9 +205,6 @@ unsigned int local_irq_count; unsigned int local_bh_count[NR_CPUS]; unsigned int local_irq_count[NR_CPUS]; -atomic_t global_bh_lock = ATOMIC_INIT(0); -spinlock_t global_bh_count = SPIN_LOCK_UNLOCKED; - /* Who has global_irq_lock. */ unsigned char global_irq_holder = NO_PROC_ID; @@ -217,9 +214,6 @@ spinlock_t global_irq_lock = SPIN_LOCK_UNLOCKED; /* Global IRQ locking depth. */ atomic_t global_irq_count = ATOMIC_INIT(0); -/* This protects BH software state (masks, things like that). */ -spinlock_t sparc_bh_lock = SPIN_LOCK_UNLOCKED; - void smp_show_backtrace_all_cpus(void); void show_backtrace(void); @@ -239,7 +233,7 @@ static void show(char * str) } printk("]\n"); - printk("bh: %d [ ", (spin_is_locked(&global_bh_count) ? 1 : 0)); + printk("bh: %d [ ", (spin_is_locked(&global_bh_lock) ? 
1 : 0)); for (i = 0; i < NR_CPUS; i++) { printk("%d ", local_bh_count[cpu]); @@ -253,18 +247,6 @@ static void show(char * str) #endif } -static inline void wait_on_bh(void) -{ - int count = MAXCOUNT; - do { - if(!--count) { - show("wait_on_bh"); - count = 0; - } - barrier(); - } while(spin_is_locked(&global_bh_count)); -} - /* * We have to allow irqs to arrive between __sti and __cli */ @@ -281,7 +263,7 @@ static inline void wait_on_irq(int cpu) * already executing in one.. */ if (!atomic_read(&global_irq_count)) { - if (local_bh_count[cpu] || !spin_is_locked(&global_bh_count)) + if (local_bh_count[cpu] || !spin_is_locked(&global_bh_lock)) break; } @@ -300,7 +282,7 @@ static inline void wait_on_irq(int cpu) continue; if (spin_is_locked (&global_irq_lock)) continue; - if (!local_bh_count[cpu] && spin_is_locked(&global_bh_count)) + if (!local_bh_count[cpu] && spin_is_locked(&global_bh_lock)) continue; if (spin_trylock(&global_irq_lock)) break; @@ -308,20 +290,6 @@ static inline void wait_on_irq(int cpu) } } -/* - * This is called when we want to synchronize with - * bottom half handlers. We need to wait until - * no other CPU is executing any bottom half handler. - * - * Don't wait if we're already running in an interrupt - * context or are inside a bh handler. - */ -void synchronize_bh(void) -{ - if (spin_is_locked (&global_bh_count) && !in_interrupt()) - wait_on_bh(); -} - /* * This is called when we want to synchronize with * interrupts. We may for example tell a device to diff --git a/arch/sparc/kernel/rtrap.S b/arch/sparc/kernel/rtrap.S index 7b056b47aa7a..dcfc2dc785cd 100644 --- a/arch/sparc/kernel/rtrap.S +++ b/arch/sparc/kernel/rtrap.S @@ -1,4 +1,4 @@ -/* $Id: rtrap.S,v 1.53 2000/01/08 16:38:18 anton Exp $ +/* $Id: rtrap.S,v 1.54 2000/02/09 11:15:03 davem Exp $ * rtrap.S: Return from Sparc trap low-level code. * * Copyright (C) 1995 David S. 
Miller (davem@caip.rutgers.edu) @@ -48,16 +48,18 @@ rtrap_7win_patch5: and %g1, 0x7f, %g1 .globl rtrap_patch3, rtrap_patch4, rtrap_patch5 .globl C_LABEL(ret_trap_lockless_ipi) ret_trap_entry: - sethi %hi(C_LABEL(bh_active)), %l3 - sethi %hi(C_LABEL(bh_mask)), %l4 - ld [%l4 + %lo(C_LABEL(bh_mask))], %g5 - ld [%l3 + %lo(C_LABEL(bh_active))], %g4 + ld [%curptr + AOFF_task_processor], %l3 + sll %l3, 5, %l3 + sethi %hi(C_LABEL(softirq_state)), %l4 + add %l4, %l3, %l4 + ld [%l4 + %lo(C_LABEL(softirq_state))], %g5 + ld [%l4 + %lo(C_LABEL(softirq_state) + 4)], %g4 andcc %g4, %g5, %g0 be C_LABEL(ret_trap_lockless_ipi) nop - call C_LABEL(do_bottom_half) + call C_LABEL(do_softirq) nop - + C_LABEL(ret_trap_lockless_ipi): andcc %t_psr, PSR_PS, %g0 be 1f diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c index 8df494f2d603..d1b3eca6373e 100644 --- a/arch/sparc/kernel/sparc_ksyms.c +++ b/arch/sparc/kernel/sparc_ksyms.c @@ -1,4 +1,4 @@ -/* $Id: sparc_ksyms.c,v 1.88 2000/01/28 13:41:55 jj Exp $ +/* $Id: sparc_ksyms.c,v 1.89 2000/02/09 11:15:03 davem Exp $ * arch/sparc/kernel/ksyms.c: Sparc specific ksyms support. * * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) @@ -11,8 +11,10 @@ #include #include +#include #include #include +#include #include #include #include diff --git a/arch/sparc/kernel/time.c b/arch/sparc/kernel/time.c index 3e52fef952f5..5a03e646b032 100644 --- a/arch/sparc/kernel/time.c +++ b/arch/sparc/kernel/time.c @@ -1,4 +1,4 @@ -/* $Id: time.c,v 1.51 2000/01/29 01:08:59 anton Exp $ +/* $Id: time.c,v 1.53 2000/02/09 21:11:04 davem Exp $ * linux/arch/sparc/kernel/time.c * * Copyright (C) 1995 David S. 
Miller (davem@caip.rutgers.edu) @@ -429,12 +429,14 @@ void __init time_init(void) extern __inline__ unsigned long do_gettimeoffset(void) { + struct tasklet_struct *t; unsigned long offset = 0; unsigned int count; count = (*master_l10_counter >> 10) & 0x1fffff; - if(test_bit(TIMER_BH, &bh_active)) + t = &bh_task_vec[TIMER_BH]; + if (test_bit(TASKLET_STATE_SCHED, &t->state)) offset = 1000000; return offset + count; diff --git a/arch/sparc/mm/init.c b/arch/sparc/mm/init.c index 9e9a225a54a5..6736dc9d3e84 100644 --- a/arch/sparc/mm/init.c +++ b/arch/sparc/mm/init.c @@ -1,4 +1,4 @@ -/* $Id: init.c,v 1.79 2000/01/29 01:09:06 anton Exp $ +/* $Id: init.c,v 1.80 2000/02/09 21:11:06 davem Exp $ * linux/arch/sparc/mm/init.c * * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) @@ -33,8 +33,6 @@ #include #include -extern void show_net_buffers(void); - unsigned long *sparc_valid_addr_bitmap; unsigned long phys_base; @@ -89,9 +87,6 @@ void show_mem(void) printk("%ld entries in page dir cache\n",pgd_cache_size); #endif show_buffers(); -#ifdef CONFIG_NET - show_net_buffers(); -#endif } extern pgprot_t protection_map[16]; diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c index 820b93bb4c7b..a09303971065 100644 --- a/arch/sparc64/kernel/irq.c +++ b/arch/sparc64/kernel/irq.c @@ -1,4 +1,4 @@ -/* $Id: irq.c,v 1.81 2000/01/21 06:33:59 davem Exp $ +/* $Id: irq.c,v 1.82 2000/02/09 11:15:07 davem Exp $ * irq.c: UltraSparc IRQ handling/init/registry. * * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include /* XXX ADD add_foo_randomness() calls... -DaveM */ @@ -546,8 +547,6 @@ unsigned int local_bh_count; #define irq_enter(cpu, irq) (local_irq_count++) #define irq_exit(cpu, irq) (local_irq_count--) #else -atomic_t global_bh_lock = ATOMIC_INIT(0); -spinlock_t global_bh_count = SPIN_LOCK_UNLOCKED; /* Who has global_irq_lock. 
*/ unsigned char global_irq_holder = NO_PROC_ID; @@ -573,24 +572,12 @@ static void show(char * str) atomic_read(&global_irq_count), cpu_data[0].irq_count, cpu_data[1].irq_count); printk("bh: %d [%u %u]\n", - (spin_is_locked(&global_bh_count) ? 1 : 0), + (spin_is_locked(&global_bh_lock) ? 1 : 0), cpu_data[0].bh_count, cpu_data[1].bh_count); } #define MAXCOUNT 100000000 -static inline void wait_on_bh(void) -{ - int count = MAXCOUNT; - do { - if(!--count) { - show("wait_on_bh"); - count = 0; - } - membar("#LoadLoad"); - } while(spin_is_locked(&global_bh_count)); -} - #define SYNC_OTHER_ULTRAS(x) udelay(x+1) static inline void wait_on_irq(int cpu) @@ -599,7 +586,7 @@ static inline void wait_on_irq(int cpu) for(;;) { membar("#LoadLoad"); if (!atomic_read (&global_irq_count)) { - if (local_bh_count || ! spin_is_locked(&global_bh_count)) + if (local_bh_count || ! spin_is_locked(&global_bh_lock)) break; } spin_unlock (&global_irq_lock); @@ -616,7 +603,7 @@ static inline void wait_on_irq(int cpu) continue; if (spin_is_locked (&global_irq_lock)) continue; - if (!local_bh_count && spin_is_locked (&global_bh_count)) + if (!local_bh_count && spin_is_locked (&global_bh_lock)) continue; if (spin_trylock(&global_irq_lock)) break; @@ -624,12 +611,6 @@ static inline void wait_on_irq(int cpu) } } -void synchronize_bh(void) -{ - if (spin_is_locked (&global_bh_count) && !in_interrupt()) - wait_on_bh(); -} - void synchronize_irq(void) { if (atomic_read(&global_irq_count)) { diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S index bb6d7398e5fb..d059a5a282b7 100644 --- a/arch/sparc64/kernel/rtrap.S +++ b/arch/sparc64/kernel/rtrap.S @@ -1,4 +1,4 @@ -/* $Id: rtrap.S,v 1.47 1999/07/30 09:35:23 davem Exp $ +/* $Id: rtrap.S,v 1.48 2000/02/09 11:15:07 davem Exp $ * rtrap.S: Preparing for return from trap on Sparc V9. 
* * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) @@ -20,15 +20,17 @@ .globl rtrap_clr_l6, rtrap rtrap_clr_l6: clr %l6 /* Fall through */ -rtrap: sethi %hi(bh_active), %l2 - sethi %hi(bh_mask), %l1 - ldx [%l2 + %lo(bh_active)], %l4 - ldx [%l1 + %lo(bh_mask)], %l7 - - andcc %l4, %l7, %g0 - be,pt %xcc, 2f +rtrap: lduw [%g6 + AOFF_task_processor], %l0 + sethi %hi(softirq_state), %l2 + or %l2, %lo(softirq_state), %l2 + sllx %l0, 6, %l0 + ldx [%l2 + %l0], %l1 + srlx %l1, 32, %l2 + + andcc %l1, %l2, %g0 + be,pt %icc, 2f nop - call do_bottom_half + call do_softirq nop 2: ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1 sethi %hi(0xf << 20), %l4 diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c index f8cb011455b3..ff384365117d 100644 --- a/arch/sparc64/kernel/sparc64_ksyms.c +++ b/arch/sparc64/kernel/sparc64_ksyms.c @@ -1,4 +1,4 @@ -/* $Id: sparc64_ksyms.c,v 1.73 2000/02/08 05:11:32 jj Exp $ +/* $Id: sparc64_ksyms.c,v 1.74 2000/02/09 11:15:07 davem Exp $ * arch/sparc64/kernel/sparc64_ksyms.c: Sparc64 specific ksyms support. * * Copyright (C) 1996 David S. 
Miller (davem@caip.rutgers.edu) @@ -127,11 +127,6 @@ EXPORT_SYMBOL_PRIVATE(write_unlock); /* Kernel wide locking */ EXPORT_SYMBOL(kernel_flag); -/* Software-IRQ BH locking */ -EXPORT_SYMBOL(global_bh_lock); -EXPORT_SYMBOL(global_bh_count); -EXPORT_SYMBOL(synchronize_bh); - /* Hard IRQ locking */ EXPORT_SYMBOL(global_irq_holder); EXPORT_SYMBOL(global_irq_lock); @@ -160,8 +155,8 @@ EXPORT_SYMBOL(_do_write_unlock); #endif #else -EXPORT_SYMBOL(local_irq_count); EXPORT_SYMBOL(local_bh_count); +EXPORT_SYMBOL(local_irq_count); #endif /* rw semaphores */ diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 953fba539cd3..58dc224f9245 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -1,4 +1,4 @@ -/* $Id: init.c,v 1.145 2000/02/08 07:46:11 davem Exp $ +/* $Id: init.c,v 1.146 2000/02/09 21:11:09 davem Exp $ * arch/sparc64/mm/init.c * * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu) @@ -30,7 +30,6 @@ #include #include -extern void show_net_buffers(void); extern void device_scan(void); struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS]; @@ -130,9 +129,6 @@ void show_mem(void) printk("%d entries in page dir cache\n",pgd_cache_size); #endif show_buffers(); -#ifdef CONFIG_NET - show_net_buffers(); -#endif } int mmu_info(char *buf) diff --git a/arch/sparc64/solaris/socksys.c b/arch/sparc64/solaris/socksys.c index 99067a359d48..b5f76d400ff3 100644 --- a/arch/sparc64/solaris/socksys.c +++ b/arch/sparc64/solaris/socksys.c @@ -1,4 +1,4 @@ -/* $Id: socksys.c,v 1.10 1999/08/31 06:55:08 davem Exp $ +/* $Id: socksys.c,v 1.11 2000/02/09 22:32:17 davem Exp $ * socksys.c: /dev/inet/ stuff for Solaris emulation. 
* * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) diff --git a/drivers/block/Config.in b/drivers/block/Config.in index a562fd0f72bd..79bd8078e800 100644 --- a/drivers/block/Config.in +++ b/drivers/block/Config.in @@ -79,8 +79,8 @@ else fi bool ' HPT366 chipset support' CONFIG_BLK_DEV_HPT366 if [ "$CONFIG_IDEDMA_PCI_EXPERIMENTAL" = "y" -a "$CONFIG_BLK_DEV_HPT366" = "y" ]; then - bool ' HPT366 Fast Interrupt support (EXPERIMENTAL) (WIP)' HPT366_FAST_IRQ_PREDICTION - bool ' HPT366 mode three unsupported (EXPERIMENTAL) (WIP)' HPT366_MODE3 + bool ' HPT366 Fast Interrupt support (EXPERIMENTAL) (WIP)' CONFIG_HPT366_FAST_IRQ_PREDICTION + bool ' HPT366 mode three unsupported (EXPERIMENTAL) (WIP)' CONFIG_HPT366_MODE3 fi if [ "$CONFIG_X86" = "y" ]; then bool ' Intel PIIXn chipsets support' CONFIG_BLK_DEV_PIIX diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 17e329072148..3e5c9a6e6af3 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -289,9 +289,6 @@ static inline int DRIVE(kdev_t x) { #define CLEARSTRUCT(x) memset((x), 0, sizeof(*(x))) -#define INT_OFF save_flags(flags); cli() -#define INT_ON restore_flags(flags) - /* read/write */ #define COMMAND raw_cmd->cmd[0] #define DR_SELECT raw_cmd->cmd[1] @@ -471,7 +468,8 @@ static int probing = 0; #define FD_COMMAND_ERROR 2 #define FD_COMMAND_OKAY 3 -static volatile int command_status = FD_COMMAND_NONE, fdc_busy = 0; +static volatile int command_status = FD_COMMAND_NONE; +static unsigned long fdc_busy = 0; static DECLARE_WAIT_QUEUE_HEAD(fdc_wait); static DECLARE_WAIT_QUEUE_HEAD(command_done); @@ -846,24 +844,36 @@ static void set_fdc(int drive) /* locks the driver */ static int lock_fdc(int drive, int interruptible) { - unsigned long flags; - if (!usage_count){ printk(KERN_ERR "Trying to lock fdc while usage count=0\n"); return -1; } if(floppy_grab_irq_and_dma()==-1) return -EBUSY; - INT_OFF; - while (fdc_busy && NO_SIGNAL) - interruptible_sleep_on(&fdc_wait); - if (fdc_busy){ - INT_ON; 
- return -EINTR; + + if (test_and_set_bit(0, &fdc_busy)) { + DECLARE_WAITQUEUE(wait, current); + add_wait_queue(&fdc_wait, &wait); + + for (;;) { + set_current_state(TASK_INTERRUPTIBLE); + + if (!test_and_set_bit(0, &fdc_busy)) + break; + + schedule(); + + if (!NO_SIGNAL) { + remove_wait_queue(&fdc_wait, &wait); + return -EINTR; + } + } + + set_current_state(TASK_RUNNING); + remove_wait_queue(&fdc_wait, &wait); } - fdc_busy = 1; - INT_ON; command_status = FD_COMMAND_NONE; + reschedule_timeout(drive, "lock fdc", 0); set_fdc(drive); return 0; @@ -886,7 +896,7 @@ static inline void unlock_fdc(void) command_status = FD_COMMAND_NONE; del_timer(&fd_timeout); cont = NULL; - fdc_busy = 0; + clear_bit(0, &fdc_busy); floppy_release_irq_and_dma(); wake_up(&fdc_wait); } @@ -1031,39 +1041,39 @@ static int wait_for_completion(unsigned long delay, timeout_fn function) return 0; } +static spinlock_t floppy_hlt_lock = SPIN_LOCK_UNLOCKED; static int hlt_disabled=0; static void floppy_disable_hlt(void) { unsigned long flags; - INT_OFF; - if (!hlt_disabled){ + spin_lock_irqsave(&floppy_hlt_lock, flags); + if (!hlt_disabled) { hlt_disabled=1; #ifdef HAVE_DISABLE_HLT disable_hlt(); #endif } - INT_ON; + spin_unlock_irqrestore(&floppy_hlt_lock, flags); } static void floppy_enable_hlt(void) { unsigned long flags; - INT_OFF; + spin_lock_irqsave(&floppy_hlt_lock, flags); if (hlt_disabled){ hlt_disabled=0; #ifdef HAVE_DISABLE_HLT enable_hlt(); #endif } - INT_ON; + spin_unlock_irqrestore(&floppy_hlt_lock, flags); } static void setup_DMA(void) { - unsigned long flags; unsigned long f; #ifdef FLOPPY_SANITY_CHECK @@ -1085,7 +1095,6 @@ static void setup_DMA(void) return; } #endif - INT_OFF; f=claim_dma_lock(); fd_disable_dma(); #ifdef fd_dma_setup @@ -1094,7 +1103,6 @@ static void setup_DMA(void) DMA_MODE_READ : DMA_MODE_WRITE, FDCS->address) < 0) { release_dma_lock(f); - INT_ON; cont->done(0); FDCS->reset=1; return; @@ -1111,7 +1119,6 @@ static void setup_DMA(void) fd_enable_dma(); 
release_dma_lock(f); #endif - INT_ON; floppy_disable_hlt(); } @@ -1759,14 +1766,7 @@ void floppy_interrupt(int irq, void *dev_id, struct pt_regs * regs) } while ((ST0 & 0x83) != UNIT(current_drive) && inr == 2 && max_sensei); } if (handler) { - int cpu = smp_processor_id(); - if(softirq_trylock(cpu)) { - /* got the lock, call the handler immediately */ - handler(); - softirq_endlock(cpu); - } else - /* we interrupted a bottom half. Defer handler */ - schedule_bh( (void *)(void *) handler); + schedule_bh( (void *)(void *) handler); } else FDCS->reset = 1; is_alive("normal interrupt end"); @@ -1854,7 +1854,7 @@ static void show_floppy(void) #endif printk("status=%x\n", fd_inb(FD_STATUS)); - printk("fdc_busy=%d\n", fdc_busy); + printk("fdc_busy=%lu\n", fdc_busy); if (DEVICE_INTR) printk("DEVICE_INTR=%p\n", DEVICE_INTR); if (floppy_tq.sync) @@ -2025,25 +2025,36 @@ static struct cont_t intr_cont={ static int wait_til_done(void (*handler)(void), int interruptible) { int ret; - unsigned long flags; schedule_bh((void *)(void *)handler); - INT_OFF; - while(command_status < 2 && NO_SIGNAL){ - is_alive("wait_til_done"); - if (interruptible) - interruptible_sleep_on(&command_done); - else - sleep_on(&command_done); + + if (command_status < 2 && NO_SIGNAL) { + DECLARE_WAITQUEUE(wait, current); + + add_wait_queue(&command_done, &wait); + for (;;) { + set_current_state(interruptible? 
+ TASK_INTERRUPTIBLE: + TASK_UNINTERRUPTIBLE); + + if (command_status >= 2 || !NO_SIGNAL) + break; + + is_alive("wait_til_done"); + + schedule(); + } + + set_current_state(TASK_RUNNING); + remove_wait_queue(&command_done, &wait); } + if (command_status < 2){ cancel_activity(); cont = &intr_cont; reset_fdc(); - INT_ON; return -EINTR; } - INT_ON; if (FDCS->reset) command_status = FD_COMMAND_ERROR; @@ -4177,22 +4188,26 @@ int __init floppy_init(void) return have_no_fdc; } +static spinlock_t floppy_usage_lock = SPIN_LOCK_UNLOCKED; + static int floppy_grab_irq_and_dma(void) { unsigned long flags; - INT_OFF; + spin_lock_irqsave(&floppy_usage_lock, flags); if (usage_count++){ - INT_ON; + spin_unlock_irqrestore(&floppy_usage_lock, flags); return 0; } - INT_ON; + spin_unlock_irqrestore(&floppy_usage_lock, flags); MOD_INC_USE_COUNT; if (fd_request_irq()) { DPRINT("Unable to grab IRQ%d for the floppy driver\n", FLOPPY_IRQ); MOD_DEC_USE_COUNT; + spin_lock_irqsave(&floppy_usage_lock, flags); usage_count--; + spin_unlock_irqrestore(&floppy_usage_lock, flags); return -1; } if (fd_request_dma()) { @@ -4200,7 +4215,9 @@ static int floppy_grab_irq_and_dma(void) FLOPPY_DMA); fd_free_irq(); MOD_DEC_USE_COUNT; + spin_lock_irqsave(&floppy_usage_lock, flags); usage_count--; + spin_unlock_irqrestore(&floppy_usage_lock, flags); return -1; } @@ -4216,7 +4233,9 @@ static int floppy_grab_irq_and_dma(void) release_region(FDCS->address+7, 1); } MOD_DEC_USE_COUNT; + spin_lock_irqsave(&floppy_usage_lock, flags); usage_count--; + spin_unlock_irqrestore(&floppy_usage_lock, flags); return -1; } request_region(FDCS->address, 6, "floppy"); @@ -4258,12 +4277,12 @@ static void floppy_release_irq_and_dma(void) unsigned long tmpaddr; unsigned long flags; - INT_OFF; + spin_lock_irqsave(&floppy_usage_lock, flags); if (--usage_count){ - INT_ON; + spin_unlock_irqrestore(&floppy_usage_lock, flags); return; } - INT_ON; + spin_unlock_irqrestore(&floppy_usage_lock, flags); if(irqdma_allocated) { fd_disable_dma(); 
diff --git a/drivers/block/hpt366.c b/drivers/block/hpt366.c index 65c695183445..91ef9d0b4fbc 100644 --- a/drivers/block/hpt366.c +++ b/drivers/block/hpt366.c @@ -262,20 +262,20 @@ static int config_chipset_for_dma (ide_drive_t *drive) pci_read_config_byte(HWIF(drive)->pci_dev, 0x51, ®51h); -#ifdef HPT366_FAST_IRQ_PREDICTION +#ifdef CONFIG_HPT366_FAST_IRQ_PREDICTION /* * Some drives prefer/allow for the method of handling interrupts. */ if (!(reg51h & 0x80)) pci_write_config_byte(HWIF(drive)->pci_dev, 0x51, reg51h|0x80); -#else /* ! HPT366_FAST_IRQ_PREDICTION */ +#else /* ! CONFIG_HPT366_FAST_IRQ_PREDICTION */ /* * Disable the "fast interrupt" prediction. * Instead, always wait for the real interrupt from the drive! */ if (reg51h & 0x80) pci_write_config_byte(HWIF(drive)->pci_dev, 0x51, reg51h & ~0x80); -#endif /* HPT366_FAST_IRQ_PREDICTION */ +#endif /* CONFIG_HPT366_FAST_IRQ_PREDICTION */ /* * Preserve existing PIO settings: diff --git a/drivers/block/ide-floppy.c b/drivers/block/ide-floppy.c index 4d6ec68b6190..b24933637f4a 100644 --- a/drivers/block/ide-floppy.c +++ b/drivers/block/ide-floppy.c @@ -1397,6 +1397,13 @@ static int idefloppy_identify_device (ide_drive_t *drive,struct hd_driveid *id) *((unsigned short *) &gcw) = id->config; +#ifdef CONFIG_PPC + /* kludge for Apple PowerBook internal zip */ + if ((gcw.device_type == 5) && !strstr(id->model, "CD-ROM") + && strstr(id->model, "ZIP")) + gcw.device_type = 0; +#endif + #if IDEFLOPPY_DEBUG_INFO printk (KERN_INFO "Dumping ATAPI Identify Device floppy parameters\n"); switch (gcw.protocol) { diff --git a/drivers/block/ide-pci.c b/drivers/block/ide-pci.c index bb7b4f59a14a..a5593dffbee8 100644 --- a/drivers/block/ide-pci.c +++ b/drivers/block/ide-pci.c @@ -691,12 +691,12 @@ static void __init hpt366_device_order_fixup (struct pci_dev *dev, ide_pci_devic printk("%s: IDE controller on PCI bus %02x dev %02x\n", d2->name, dev2->bus->number, dev2->devfn); if (hpt363_shared_pin && !hpt363_shared_irq) { printk("%s: 
IDE controller run unsupported mode three!!!\n", d2->name); -#ifndef HPT366_MODE3 +#ifndef CONFIG_HPT366_MODE3 printk("%s: IDE controller report to \n", d->name); return; -#else /* HPT366_MODE3 */ +#else /* CONFIG_HPT366_MODE3 */ printk("%s: OVERRIDE IDE controller not advisable this mode!!!\n", d2->name); -#endif /* HPT366_MODE3 */ +#endif /* CONFIG_HPT366_MODE3 */ } ide_setup_pci_device(dev2, d2); } diff --git a/drivers/block/ide-pmac.c b/drivers/block/ide-pmac.c index e6947e560ccd..28eb789c2aeb 100644 --- a/drivers/block/ide-pmac.c +++ b/drivers/block/ide-pmac.c @@ -14,6 +14,12 @@ * * Copyright (c) 1995-1998 Mark Lord * + * BenH: I began adding more complete timing setup code, mostly because DMA + * won't work on new machines unless timings are setup correctly. This + * code was mainly stolen from Cmd646 driver and should be completed to + * include real timing calc. instead of hard coded values. The format of + * the timing register can be found in Darwin's source code, except for + * Keylargo ATA-4 controller. 
*/ #include #include @@ -36,29 +42,92 @@ #endif #include "ide_modes.h" -int pmac_ide_ports_known; -ide_ioreg_t pmac_ide_regbase[MAX_HWIFS]; -int pmac_ide_irq[MAX_HWIFS]; -int pmac_ide_count; -struct device_node *pmac_ide_node[MAX_HWIFS]; +#undef IDE_PMAC_DEBUG +#define IDE_SYSCLK_NS 30 + +struct pmac_ide_hwif { + ide_ioreg_t regbase; + int irq; + int kind; + struct device_node* node; + u32 timings[2]; #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC + volatile struct dbdma_regs* dma_regs; + struct dbdma_cmd* dma_table; +#endif + +} pmac_ide[MAX_HWIFS]; + +static int pmac_ide_count; + +enum { + controller_ohare, /* OHare based */ + controller_heathrow, /* Heathrow/Paddington */ + controller_kl_ata3, /* KeyLargo ATA-3 */ + controller_kl_ata4 /* KeyLargo ATA-4 */ +}; + + +#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC + +typedef struct { + int accessTime; + int cycleTime; +} pmac_ide_timing; + +/* Multiword DMA timings */ +static pmac_ide_timing mdma_timings[] = +{ + { 215, 480 }, /* Mode 0 */ + { 80, 150 }, /* 1 */ + { 70, 120 } /* 2 */ +}; + +/* Ultra DMA timings (for use when I know how to calculate them */ +static pmac_ide_timing udma_timings[] = +{ + { 0, 114 }, /* Mode 0 */ + { 0, 73 }, /* 1 */ + { 0, 54 }, /* 2 */ + { 0, 39 }, /* 3 */ + { 0, 25 } /* 4 */ +}; + #define MAX_DCMDS 256 /* allow up to 256 DBDMA commands per xfer */ -static void pmac_ide_setup_dma(struct device_node *np, ide_hwif_t *hwif); +static void pmac_ide_setup_dma(struct device_node *np, int ix); static int pmac_ide_dmaproc(ide_dma_action_t func, ide_drive_t *drive); -static int pmac_ide_build_dmatable(ide_drive_t *drive, int wr); +static int pmac_ide_build_dmatable(ide_drive_t *drive, int ix, int wr); +static void pmac_ide_tuneproc(ide_drive_t *drive, byte pio); +static void pmac_ide_selectproc(ide_drive_t *drive); + #endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */ #ifdef CONFIG_PMAC_PBOOK -static int idepmac_notify(struct pmu_sleep_notifier *self, int when); +static int idepmac_notify_sleep(struct pmu_sleep_notifier *self, int 
when); struct pmu_sleep_notifier idepmac_sleep_notifier = { - idepmac_notify, SLEEP_LEVEL_BLOCK, + idepmac_notify_sleep, SLEEP_LEVEL_BLOCK, }; #endif /* CONFIG_PMAC_PBOOK */ +static int +pmac_ide_find(ide_drive_t *drive) +{ + ide_hwif_t *hwif = HWIF(drive); + ide_ioreg_t base; + int i; + + for (i=0; iio_ports[0]) + return i; + } + return -1; +} + /* - * N.B. this can't be an __init, because the media-bay task can + * N.B. this can't be an initfunc, because the media-bay task can * call ide_[un]register at any time. */ void pmac_ide_init_hwif_ports(hw_regs_t *hw, @@ -71,7 +140,7 @@ void pmac_ide_init_hwif_ports(hw_regs_t *hw, return; for (ix = 0; ix < MAX_HWIFS; ++ix) - if (data_port == pmac_ide_regbase[ix]) + if (data_port == pmac_ide[ix].regbase) break; if (ix >= MAX_HWIFS) { @@ -98,27 +167,125 @@ void pmac_ide_init_hwif_ports(hw_regs_t *hw, hw->io_ports[8] = data_port + 0x160; if (irq != NULL) - *irq = pmac_ide_irq[ix]; + *irq = pmac_ide[ix].irq; + + ide_hwifs[ix].tuneproc = pmac_ide_tuneproc; + ide_hwifs[ix].selectproc = pmac_ide_selectproc; + if (pmac_ide[ix].dma_regs && pmac_ide[ix].dma_table) { + ide_hwifs[ix].dmaproc = &pmac_ide_dmaproc; +#ifdef CONFIG_PMAC_IDEDMA_AUTO + ide_hwifs[ix].autodma = 1; +#endif + } } -void pmac_ide_tuneproc(ide_drive_t *drive, byte pio) +#if 0 +/* This one could be later extended to handle CMD IDE and be used by some kind + * of /proc interface. I want to be able to get the devicetree path of a block + * device for yaboot configuration + */ +struct device_node* +pmac_ide_get_devnode(ide_drive_t *drive) { - ide_pio_data_t d; + int i = pmac_ide_find(drive); + if (i < 0) + return NULL; + return pmac_ide[i].node; +} +#endif - if (_machine != _MACH_Pmac) +/* Setup timings for the selected drive (master/slave). I still need to verify if this + * is enough, I beleive selectproc will be called whenever an IDE command is started, + * but... 
*/ +static void +pmac_ide_selectproc(ide_drive_t *drive) +{ + int i = pmac_ide_find(drive); + if (i < 0) return; + + if (drive->select.all & 0x10) + out_le32((unsigned *)(IDE_DATA_REG + 0x200 + _IO_BASE), pmac_ide[i].timings[1]); + else + out_le32((unsigned *)(IDE_DATA_REG + 0x200 + _IO_BASE), pmac_ide[i].timings[0]); +} + +/* Number of IDE_SYSCLK_NS ticks, argument is in nanoseconds */ +#define SYSCLK_TICKS(t) (((t) + IDE_SYSCLK_NS - 1) / IDE_SYSCLK_NS) + +static void +pmac_ide_tuneproc(ide_drive_t *drive, byte pio) +{ + ide_pio_data_t d; + int i; + u32 *timings; + int accessTicks, recTicks; + + i = pmac_ide_find(drive); + if (i < 0) + return; + + /* The "ata-4" IDE controller of UMA machines is a bit different. + * We don't do anything for PIO modes until we know how to do the + * calculation. + */ + if (pmac_ide[i].kind == controller_kl_ata4) + return; + pio = ide_get_best_pio_mode(drive, pio, 4, &d); - switch (pio) { - case 4: - out_le32((unsigned *)(IDE_DATA_REG + 0x200 + _IO_BASE), 0x211025); - break; - default: - out_le32((unsigned *)(IDE_DATA_REG + 0x200 + _IO_BASE), 0x2f8526); - break; + accessTicks = SYSCLK_TICKS(ide_pio_timings[pio].active_time); + if (accessTicks < 4) + accessTicks = 4; + recTicks = SYSCLK_TICKS(d.cycle_time) - accessTicks - 4; + if (recTicks < 1) + recTicks = 1; + if (drive->select.all & 0x10) + timings = &pmac_ide[i].timings[1]; + else + timings = &pmac_ide[i].timings[0]; + + *timings = ((*timings) & 0xFFFFFF800) | accessTicks | (recTicks << 5); +#ifdef IDE_PMAC_DEBUG + printk("ide_pmac: Set PIO timing for mode %d, reg: 0x%08x\n", + pio, *timings); +#endif + + if (drive->select.all == IN_BYTE(IDE_SELECT_REG)) + pmac_ide_selectproc(drive); +} + +ide_ioreg_t +pmac_ide_get_base(int index) +{ + return pmac_ide[index].regbase; +} + +static int ide_majors[] = { 3, 22, 33, 34, 56, 57 }; + +kdev_t __init +pmac_find_ide_boot(char *bootdevice, int n) +{ + int i; + + /* + * Look through the list of IDE interfaces for this one. 
+ */ + for (i = 0; i < pmac_ide_count; ++i) { + char *name; + if (!pmac_ide[i].node || !pmac_ide[i].node->full_name) + continue; + name = pmac_ide[i].node->full_name; + if (memcmp(name, bootdevice, n) == 0 && name[n] == 0) { + /* XXX should cope with the 2nd drive as well... */ + return MKDEV(ide_majors[i], 0); + } } + + return 0; } -void __init pmac_ide_probe(void) +void __init +pmac_ide_probe(void) { struct device_node *np; int i; @@ -196,27 +363,70 @@ void __init pmac_ide_probe(void) } else { irq = np->intrs[0].line; } - pmac_ide_regbase[i] = base; - pmac_ide_irq[i] = irq; - pmac_ide_node[i] = np; + pmac_ide[i].regbase = base; + pmac_ide[i].irq = irq; + pmac_ide[i].node = np; + if (device_is_compatible(np, "keylargo-ata")) { + if (strcmp(np->name, "ata-4") == 0) + pmac_ide[i].kind = controller_kl_ata4; + else + pmac_ide[i].kind = controller_kl_ata3; + } else if (device_is_compatible(np, "heathrow-ata")) + pmac_ide[i].kind = controller_heathrow; + else + pmac_ide[i].kind = controller_ohare; if (np->parent && np->parent->name && strcasecmp(np->parent->name, "media-bay") == 0) { media_bay_set_ide_infos(np->parent,base,irq,i); - } else - feature_set(np, FEATURE_IDE_enable); + } else if (pmac_ide[i].kind == controller_ohare) { + /* The code below is having trouble on some ohare machines + * (timing related ?). Until I can put my hand on one of these + * units, I keep the old way + */ + feature_set(np, FEATURE_IDE0_enable); + } else { + /* This is necessary to enable IDE when net-booting */ + int *bidp = (int *)get_property(np, "AAPL,bus-id", NULL); + int bid = bidp ? 
*bidp : 0; + printk("pmac_ide: enabling IDE bus ID %d\n", bid); + switch(bid) { + case 0: + feature_set(np, FEATURE_IDE0_reset); + feature_set(np, FEATURE_IOBUS_enable); + mdelay(10); + feature_set(np, FEATURE_IDE0_enable); + mdelay(10); + feature_clear(np, FEATURE_IDE0_reset); + break; + case 1: + feature_set(np, FEATURE_Mediabay_IDE_reset); + mdelay(10); + feature_set(np, FEATURE_Mediabay_IDE_enable); + mdelay(10); + feature_clear(np, FEATURE_Mediabay_IDE_reset); + break; + case 2: + /* This one exists only for KL, I don't know about any + enable bit */ + feature_set(np, FEATURE_IDE2_reset); + mdelay(10); + feature_clear(np, FEATURE_IDE2_reset); + break; + } + mdelay(1000); + } hwif = &ide_hwifs[i]; pmac_ide_init_hwif_ports(&hwif->hw, base, 0, &hwif->irq); memcpy(hwif->io_ports, hwif->hw.io_ports, sizeof(hwif->io_ports)); hwif->chipset = ide_generic; hwif->noprobe = !hwif->io_ports[IDE_DATA_OFFSET]; - hwif->tuneproc = pmac_ide_tuneproc; #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC if (np->n_addrs >= 2) { /* has a DBDMA controller channel */ - pmac_ide_setup_dma(np, hwif); + pmac_ide_setup_dma(np, i); } #endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */ @@ -232,27 +442,28 @@ void __init pmac_ide_probe(void) #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC static void __init -pmac_ide_setup_dma(struct device_node *np, ide_hwif_t *hwif) +pmac_ide_setup_dma(struct device_node *np, int ix) { - hwif->dma_base = (unsigned long) ioremap(np->addrs[1].address, 0x200); + pmac_ide[ix].dma_regs = + (volatile struct dbdma_regs*)ioremap(np->addrs[1].address, 0x200); /* * Allocate space for the DBDMA commands. * The +2 is +1 for the stop command and +1 to allow for * aligning the start address to a multiple of 16 bytes. 
*/ - hwif->dmatable_cpu = (unsigned long *) + pmac_ide[ix].dma_table = (struct dbdma_cmd*) kmalloc((MAX_DCMDS + 2) * sizeof(struct dbdma_cmd), GFP_KERNEL); - if (hwif->dmatable_cpu == 0) { + if (pmac_ide[ix].dma_table == 0) { printk(KERN_ERR "%s: unable to allocate DMA command list\n", - hwif->name); + ide_hwifs[ix].name); return; } - hwif->dmaproc = &pmac_ide_dmaproc; -#ifdef CONFIG_IDEDMA_PMAC_AUTO - hwif->autodma = 1; -#endif /* CONFIG_IDEDMA_PMAC_AUTO */ + ide_hwifs[ix].dmaproc = &pmac_ide_dmaproc; +#ifdef CONFIG_PMAC_IDEDMA_AUTO + ide_hwifs[ix].autodma = 1; +#endif } /* @@ -260,19 +471,19 @@ pmac_ide_setup_dma(struct device_node *np, ide_hwif_t *hwif) * for a transfer and sets the DBDMA channel to point to it. */ static int -pmac_ide_build_dmatable(ide_drive_t *drive, int wr) +pmac_ide_build_dmatable(ide_drive_t *drive, int ix, int wr) { - ide_hwif_t *hwif = HWIF(drive); struct dbdma_cmd *table, *tstart; int count = 0; struct request *rq = HWGROUP(drive)->rq; struct buffer_head *bh = rq->bh; unsigned int size, addr; - volatile struct dbdma_regs *dma - = (volatile struct dbdma_regs *) hwif->dma_base; + volatile struct dbdma_regs *dma = pmac_ide[ix].dma_regs; - table = tstart = (struct dbdma_cmd *) DBDMA_ALIGN(hwif->dmatable_cpu); + table = tstart = (struct dbdma_cmd *) DBDMA_ALIGN(pmac_ide[ix].dma_table); out_le32(&dma->control, (RUN|PAUSE|FLUSH|WAKE|DEAD) << 16); + while (in_le32(&dma->status) & RUN) + udelay(1); do { /* @@ -335,30 +546,277 @@ pmac_ide_build_dmatable(ide_drive_t *drive, int wr) return 1; } + +/* This is fun. 
-DaveM */ +#define IDE_SETXFER 0x03 +#define IDE_SETFEATURE 0xef +#define IDE_DMA2_ENABLE 0x22 +#define IDE_DMA1_ENABLE 0x21 +#define IDE_DMA0_ENABLE 0x20 +#define IDE_UDMA4_ENABLE 0x44 +#define IDE_UDMA3_ENABLE 0x43 +#define IDE_UDMA2_ENABLE 0x42 +#define IDE_UDMA1_ENABLE 0x41 +#define IDE_UDMA0_ENABLE 0x40 + +static __inline__ unsigned char +dma_bits_to_command(unsigned char bits) +{ + if(bits & 0x04) + return IDE_DMA2_ENABLE; + if(bits & 0x02) + return IDE_DMA1_ENABLE; + return IDE_DMA0_ENABLE; +} + +static __inline__ unsigned char +udma_bits_to_command(unsigned char bits) +{ + if(bits & 0x10) + return IDE_UDMA4_ENABLE; + if(bits & 0x08) + return IDE_UDMA3_ENABLE; + if(bits & 0x04) + return IDE_UDMA2_ENABLE; + if(bits & 0x02) + return IDE_UDMA1_ENABLE; + if(bits & 0x01) + return IDE_UDMA0_ENABLE; + return 0; +} + +static __inline__ int +wait_for_ready(ide_drive_t *drive) +{ + /* Timeout bumped for some powerbooks */ + int timeout = 2000; + byte stat; + + while(--timeout) { + stat = GET_STAT(); + if(!(stat & BUSY_STAT)) { + if (drive->ready_stat == 0) + break; + else if((stat & drive->ready_stat) || (stat & ERR_STAT)) + break; + } + mdelay(1); + } + if((stat & ERR_STAT) || timeout <= 0) { + if (stat & ERR_STAT) { + printk("ide_pmace: wait_for_ready, error status: %x\n", stat); + } + return 1; + } + return 0; +} + +static int +pmac_ide_do_setfeature(ide_drive_t *drive, byte command) +{ + unsigned long flags; + byte old_select; + int result = 1; + + save_flags(flags); + cli(); + old_select = IN_BYTE(IDE_SELECT_REG); + OUT_BYTE(drive->select.all, IDE_SELECT_REG); + udelay(10); + OUT_BYTE(IDE_SETXFER, IDE_FEATURE_REG); + OUT_BYTE(command, IDE_NSECTOR_REG); + if(wait_for_ready(drive)) { + printk("pmac_ide_do_setfeature disk not ready before SET_FEATURE!\n"); + goto out; + } + OUT_BYTE(IDE_SETFEATURE, IDE_COMMAND_REG); + result = wait_for_ready(drive); + if (result) + printk("pmac_ide_do_setfeature disk not ready after SET_FEATURE !\n"); +out: + OUT_BYTE(old_select, 
IDE_SELECT_REG); + restore_flags(flags); + + return result; +} + +static int +pmac_ide_mdma_enable(ide_drive_t *drive, int idx) +{ + byte bits = drive->id->dma_mword & 0x07; + byte feature = dma_bits_to_command(bits); + u32 *timings; + int cycleTime, accessTime; + int accessTicks, recTicks; + struct hd_driveid *id = drive->id; + + /* For now, we don't know these values */ + if (pmac_ide[idx].kind == controller_kl_ata4 && feature != IDE_DMA2_ENABLE) + return 0; + if (pmac_ide[idx].kind != controller_kl_ata4 && feature == IDE_DMA0_ENABLE) + return 0; + + /* Set feature on drive */ + printk("%s: Enabling MultiWord DMA %d\n", drive->name, feature & 0xf); + if (pmac_ide_do_setfeature(drive, feature)) { + printk("%s: Failed !\n", drive->name); + return 0; + } + + /* which drive is it ? */ + if (drive->select.all & 0x10) + timings = &pmac_ide[idx].timings[1]; + else + timings = &pmac_ide[idx].timings[0]; + + /* Calculate accesstime and cycle time */ + cycleTime = mdma_timings[feature & 0xf].cycleTime; + accessTime = mdma_timings[feature & 0xf].accessTime; + if ((id->field_valid & 2) && (id->eide_dma_time)) + cycleTime = id->eide_dma_time; + if ((pmac_ide[idx].kind == controller_ohare) && (cycleTime < 150)) + cycleTime = 150; + + /* For ata-4 controller, we don't know the calculation */ + if (pmac_ide[idx].kind == controller_kl_ata4) { + *timings = 0x00019465; /* MDMA2 */ + } else { + int halfTick = 0; + int origAccessTime = accessTime; + int origCycleTime = cycleTime; + + accessTicks = SYSCLK_TICKS(accessTime); + if (accessTicks < 1) + accessTicks = 1; + accessTime = accessTicks * IDE_SYSCLK_NS; + recTicks = SYSCLK_TICKS(cycleTime - accessTime) - 1; + if (recTicks < 1) + recTicks = 1; + cycleTime = (recTicks + 1 + accessTicks) * IDE_SYSCLK_NS; + + if ((accessTicks > 1) && + ((accessTime - IDE_SYSCLK_NS/2) >= origAccessTime) && + ((cycleTime - IDE_SYSCLK_NS) >= origCycleTime)) { + halfTick = 1; + accessTicks--; + } + *timings = ((*timings) & 0x7FF) | + (accessTicks | 
(recTicks << 5) | (halfTick << 10)) << 11; + } +#ifdef IDE_PMAC_DEBUG + printk("ide_pmac: Set MDMA timing for mode %d, reg: 0x%08x\n", + feature & 0xf, *timings); +#endif + return 1; +} + +static int +pmac_ide_udma_enable(ide_drive_t *drive, int idx) +{ + byte bits = drive->id->dma_ultra & 0x1f; + byte feature = udma_bits_to_command(bits); + u32 timings; + + /* We support only those values */ + if (feature != IDE_UDMA4_ENABLE && feature != IDE_UDMA2_ENABLE) + return 0; + + /* Set feature on drive */ + printk("%s: Enabling Ultra DMA %d\n", drive->name, feature & 0xf); + if (pmac_ide_do_setfeature(drive, feature)) { + printk("%s: Failed !\n", drive->name); + return 0; + } + + /* Put this channel into UDMA mode. + * This value is set by MacOS on the iBook for U/DMA2 + */ + switch(feature) { + case IDE_UDMA4_ENABLE: + timings = 0x0cd00065; + break; + case IDE_UDMA2_ENABLE: + timings = 0x11100065; + break; + } + + if (drive->select.all & 0x10) + pmac_ide[idx].timings[1] = timings; + else + pmac_ide[idx].timings[0] = timings; + + return 1; +} + +static int +pmac_ide_dma_onoff(ide_drive_t *drive, int enable) +{ + int ata4, udma, idx; + struct hd_driveid *id = drive->id; + + drive->using_dma = 0; + + idx = pmac_ide_find(drive); + if (idx < 0) + return 0; + + if (drive->media == ide_floppy) + enable = 0; + if (((id->capability & 1) == 0) && !check_drive_lists(drive, GOOD_DMA_DRIVE)) + enable = 0; + if (check_drive_lists(drive, BAD_DMA_DRIVE)) + enable = 0; + + udma = 0; + ata4 = (pmac_ide[idx].kind == controller_kl_ata4); + + if(enable) { + if (ata4 && (drive->media == ide_disk) && + (id->field_valid & 0x0004) && (id->dma_ultra & 0x17)) { + /* UltraDMA modes. */ + drive->using_dma = pmac_ide_udma_enable(drive, idx); + } + if (!drive->using_dma && (id->dma_mword & 0x0007)) { + /* Normal MultiWord DMA modes. 
*/ + drive->using_dma = pmac_ide_mdma_enable(drive, idx); + } + /* Without this, strange things will happen on Keylargo-based + * machines + */ + OUT_BYTE(0, IDE_CONTROL_REG); + if (drive->select.all == IN_BYTE(IDE_SELECT_REG)) + pmac_ide_selectproc(drive); + } + return 0; +} + int pmac_ide_dmaproc(ide_dma_action_t func, ide_drive_t *drive) { ide_hwif_t *hwif = HWIF(drive); - volatile struct dbdma_regs *dma - = (volatile struct dbdma_regs *) hwif->dma_base; - int dstat; + int ix, dstat; + volatile struct dbdma_regs *dma; + + /* Can we stuff a pointer to our intf structure in config_data + * or select_data in hwif ? + */ + ix = pmac_ide_find(drive); + if (ix < 0) + return 0; + dma = pmac_ide[ix].dma_regs; switch (func) { case ide_dma_on: - /* ide-floppy DMA doesn't work yet... */ - drive->using_dma = drive->media != ide_floppy; - break; case ide_dma_off: - printk(KERN_INFO "%s: DMA disabled\n", drive->name); case ide_dma_off_quietly: - drive->using_dma = 0; + pmac_ide_dma_onoff(drive, (func == ide_dma_on)); break; case ide_dma_check: - /* ide-floppy DMA doesn't work yet... 
*/ - drive->using_dma = hwif->autodma && drive->media != ide_floppy; + if (hwif->autodma) + pmac_ide_dma_onoff(drive, 1); break; case ide_dma_read: case ide_dma_write: - if (!pmac_ide_build_dmatable(drive, func==ide_dma_write)) + if (!pmac_ide_build_dmatable(drive, ix, func==ide_dma_write)) return 1; drive->waiting_for_dma = 1; if (drive->media != ide_disk) @@ -387,11 +845,9 @@ int pmac_ide_dmaproc(ide_dma_action_t func, ide_drive_t *drive) #ifdef CONFIG_PMAC_PBOOK static void idepmac_sleep_disk(int i, unsigned long base) { + struct device_node* np = pmac_ide[i].node; int j; - /* Reset to PIO 0 */ - out_le32((unsigned *)(base + 0x200 + _IO_BASE), 0x2f8526); - /* FIXME: We only handle the master IDE */ if (ide_hwifs[i].drives[0].media == ide_disk) { /* Spin down the drive */ @@ -410,23 +866,30 @@ static void idepmac_sleep_disk(int i, unsigned long base) break; } } + feature_set(np, FEATURE_IDE0_reset); + feature_clear(np, FEATURE_IOBUS_enable); + feature_clear(np, FEATURE_IDE0_enable); + pmac_ide[i].timings[0] = 0; + pmac_ide[i].timings[1] = 0; } static void idepmac_wake_disk(int i, unsigned long base) { + struct device_node* np = pmac_ide[i].node; int j; /* Revive IDE disk and controller */ - feature_set(pmac_ide_node[i], FEATURE_IDE_enable); - mdelay(1); - feature_set(pmac_ide_node[i], FEATURE_IDE_DiskPower); - mdelay(100); - feature_set(pmac_ide_node[i], FEATURE_IDE_Reset); - mdelay(1); - /* Make sure we are still PIO0 */ - out_le32((unsigned *)(base + 0x200 + _IO_BASE), 0x2f8526); + feature_set(np, FEATURE_IOBUS_enable); + mdelay(10); + feature_set(np, FEATURE_IDE0_enable); + mdelay(10); + feature_clear(np, FEATURE_IDE0_reset); mdelay(100); + /* Reset timings */ + pmac_ide_selectproc(&ide_hwifs[i].drives[0]); + mdelay(10); + /* Wait up to 10 seconds (enough for recent drives) */ for (j = 0; j < 100; j++) { int status; @@ -443,14 +906,22 @@ idepmac_wake_bay(int i, unsigned long base) { int timeout; - timeout = 5000; + /* Reset timings */ + 
pmac_ide_selectproc(&ide_hwifs[i].drives[0]); + mdelay(10); + + timeout = 10000; while ((inb(base + 0x70) & BUSY_STAT) && timeout) { mdelay(1); --timeout; } } -static int idepmac_notify(struct pmu_sleep_notifier *self, int when) +/* Note: We support only master drives for now. This will have to be + * improved if we want to handle sleep on the iMacDV where the CD-ROM + * is a slave + */ +static int idepmac_notify_sleep(struct pmu_sleep_notifier *self, int when) { int i, ret; unsigned long base; @@ -462,10 +933,10 @@ static int idepmac_notify(struct pmu_sleep_notifier *self, int when) break; case PBOOK_SLEEP_NOW: for (i = 0; i < pmac_ide_count; ++i) { - if ((base = pmac_ide_regbase[i]) == 0) + if ((base = pmac_ide[i].regbase) == 0) continue; /* Disable irq during sleep */ - disable_irq(pmac_ide_irq[i]); + disable_irq(pmac_ide[i].irq); ret = check_media_bay_by_base(base, MB_CD); if (ret == -ENODEV) /* not media bay - put the disk to sleep */ @@ -474,15 +945,22 @@ static int idepmac_notify(struct pmu_sleep_notifier *self, int when) break; case PBOOK_WAKE: for (i = 0; i < pmac_ide_count; ++i) { - if ((base = pmac_ide_regbase[i]) == 0) + ide_hwif_t *hwif; + if ((base = pmac_ide[i].regbase) == 0) continue; + hwif = &ide_hwifs[i]; /* We don't handle media bay devices this way */ ret = check_media_bay_by_base(base, MB_CD); if (ret == -ENODEV) idepmac_wake_disk(i, base); else if (ret == 0) idepmac_wake_bay(i, base); - enable_irq(pmac_ide_irq[i]); + enable_irq(pmac_ide[i].irq); + +#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC + if (hwif->drives[0].present && hwif->drives[0].using_dma) + pmac_ide_dma_onoff(&hwif->drives[0], 1); +#endif } break; } diff --git a/drivers/block/ide-probe.c b/drivers/block/ide-probe.c index 1c6f19eba24d..33ca2900b2c8 100644 --- a/drivers/block/ide-probe.c +++ b/drivers/block/ide-probe.c @@ -117,8 +117,16 @@ static inline void do_identify (ide_drive_t *drive, byte cmd) } type = ide_cdrom; /* Early cdrom models used zero */ case ide_cdrom: - printk ("CDROM"); 
drive->removable = 1; +#ifdef CONFIG_PPC + /* kludge for Apple PowerBook internal zip */ + if (!strstr(id->model, "CD-ROM") && strstr(id->model, "ZIP")) { + printk ("FLOPPY"); + type = ide_floppy; + break; + } +#endif + printk ("CDROM"); break; case ide_tape: printk ("TAPE"); diff --git a/drivers/block/ide-tape.c b/drivers/block/ide-tape.c index 4825ae22f6c4..9d2bc216ffa2 100644 --- a/drivers/block/ide-tape.c +++ b/drivers/block/ide-tape.c @@ -4372,12 +4372,12 @@ static ssize_t idetape_chrdev_read (struct file *file, char *buf, return -ENXIO; } if (tape->onstream && (count != tape->tape_block_size)) { - printk(KERN_ERR "ide-tape: %s: use %d bytes as block size (%d used)\n", tape->name, tape->tape_block_size, count); + printk(KERN_ERR "ide-tape: %s: use %d bytes as block size (%Zd used)\n", tape->name, tape->tape_block_size, count); return -EINVAL; } #if IDETAPE_DEBUG_LOG if (tape->debug_level >= 3) - printk (KERN_INFO "ide-tape: Reached idetape_chrdev_read, count %d\n", count); + printk (KERN_INFO "ide-tape: Reached idetape_chrdev_read, count %Zd\n", count); #endif /* IDETAPE_DEBUG_LOG */ if (tape->chrdev_direction != idetape_direction_read) { @@ -4552,12 +4552,12 @@ static ssize_t idetape_chrdev_write (struct file *file, const char *buf, return -ENXIO; } if (tape->onstream && (count != tape->tape_block_size)) { - printk(KERN_ERR "ide-tape: %s: use %d bytes as block size (%d used)\n", tape->name, tape->tape_block_size, count); + printk(KERN_ERR "ide-tape: %s: use %d bytes as block size (%Zd used)\n", tape->name, tape->tape_block_size, count); return -EINVAL; } #if IDETAPE_DEBUG_LOG if (tape->debug_level >= 3) - printk (KERN_INFO "ide-tape: Reached idetape_chrdev_write, count %d\n", count); + printk (KERN_INFO "ide-tape: Reached idetape_chrdev_write, count %Zd\n", count); #endif /* IDETAPE_DEBUG_LOG */ if (tape->chrdev_direction != idetape_direction_write) { /* Initialize write operation */ diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c index 
457758546ce5..783d644c4eb9 100644 --- a/drivers/block/ll_rw_blk.c +++ b/drivers/block/ll_rw_blk.c @@ -68,7 +68,7 @@ DECLARE_WAIT_QUEUE_HEAD(wait_for_request); /* This specifies how many sectors to read ahead on the disk. */ -int read_ahead[MAX_BLKDEV] = {0, }; +int read_ahead[MAX_BLKDEV]; /* blk_dev_struct is: * *request_fn @@ -84,7 +84,7 @@ struct blk_dev_struct blk_dev[MAX_BLKDEV]; /* initialized by blk_dev_init() */ * * if (!blk_size[MAJOR]) then no minor size checking is done. */ -int * blk_size[MAX_BLKDEV] = { NULL, NULL, }; +int * blk_size[MAX_BLKDEV]; /* * blksize_size contains the size of all block-devices: @@ -93,7 +93,7 @@ int * blk_size[MAX_BLKDEV] = { NULL, NULL, }; * * if (!blksize_size[MAJOR]) then 1024 bytes is assumed. */ -int * blksize_size[MAX_BLKDEV] = { NULL, NULL, }; +int * blksize_size[MAX_BLKDEV]; /* * hardsect_size contains the size of the hardware sector of a device. @@ -107,17 +107,17 @@ int * blksize_size[MAX_BLKDEV] = { NULL, NULL, }; * This is currently set by some scsi devices and read by the msdos fs driver. * Other uses may appear later. 
*/ -int * hardsect_size[MAX_BLKDEV] = { NULL, NULL, }; +int * hardsect_size[MAX_BLKDEV]; /* * The following tunes the read-ahead algorithm in mm/filemap.c */ -int * max_readahead[MAX_BLKDEV] = { NULL, NULL, }; +int * max_readahead[MAX_BLKDEV]; /* * Max number of sectors per request */ -int * max_sectors[MAX_BLKDEV] = { NULL, NULL, }; +int * max_sectors[MAX_BLKDEV]; static inline int get_max_sectors(kdev_t dev) { @@ -487,10 +487,6 @@ static inline void __make_request(request_queue_t * q, int rw, count = bh->b_size >> 9; sector = bh->b_rsector; - /* It had better not be a new buffer by the time we see it */ - if (buffer_new(bh)) - BUG(); - if (blk_size[major]) { unsigned long maxsector = (blk_size[major][MINOR(bh->b_rdev)] << 1) + 1; @@ -1002,4 +998,5 @@ EXPORT_SYMBOL(end_that_request_last); EXPORT_SYMBOL(blk_init_queue); EXPORT_SYMBOL(blk_cleanup_queue); EXPORT_SYMBOL(blk_queue_headactive); +EXPORT_SYMBOL(blk_queue_pluggable); EXPORT_SYMBOL(generic_make_request); diff --git a/drivers/block/loop.c b/drivers/block/loop.c index fc6024fdc17e..e5a65e1598cd 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -342,8 +342,8 @@ static int create_missing_block(struct loop_device *lo, int block, int blksize) set_fs(old_fs); if (retval < 0) { - printk(KERN_WARNING "loop: cannot create block - FS write failed: code %d\n", - retval); + printk(KERN_WARNING "loop: cannot create block - FS write failed: code %Zi\n", + retval); return FALSE; } else { return TRUE; diff --git a/drivers/block/rd.c b/drivers/block/rd.c index 46bda510064b..17a745d5b248 100644 --- a/drivers/block/rd.c +++ b/drivers/block/rd.c @@ -270,7 +270,7 @@ repeat: } } if (rbh) { - set_bit(BH_Protected, &rbh->b_state); + mark_buffer_protected(rbh); brelse(rbh); } @@ -290,7 +290,10 @@ static int rd_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un switch (cmd) { case BLKFLSBUF: if (!capable(CAP_SYS_ADMIN)) return -EACCES; - invalidate_buffers(inode->i_rdev); + /* special: we want to release 
the ramdisk memory, + it's not like with the other blockdevices where + this ioctl only flushes away the buffer cache. */ + destroy_buffers(inode->i_rdev); break; case BLKGETSIZE: /* Return device size */ @@ -382,7 +385,7 @@ static void __exit rd_cleanup (void) int i; for (i = 0 ; i < NUM_RAMDISKS; i++) - invalidate_buffers(MKDEV(MAJOR_NR, i)); + destroy_buffers(MKDEV(MAJOR_NR, i)); unregister_blkdev( MAJOR_NR, "ramdisk" ); blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR)); diff --git a/drivers/char/bttv.c b/drivers/char/bttv.c index 3f75d721a594..c21198356048 100644 --- a/drivers/char/bttv.c +++ b/drivers/char/bttv.c @@ -1765,6 +1765,14 @@ static long bttv_read(struct video_device *v, char *buf, unsigned long count, in return count; } +static inline void burst(int on) +{ + tvnorms[0].scaledtwidth = 1135 - (on?BURSTOFFSET-2:0); + tvnorms[0].hdelayx1 = 186 - (on?BURSTOFFSET :0); + tvnorms[2].scaledtwidth = 1135 - (on?BURSTOFFSET-2:0); + tvnorms[2].hdelayx1 = 186 - (on?BURSTOFFSET :0); +} + /* * Open a bttv card. 
Right now the flags stuff is just playing */ @@ -1775,6 +1783,7 @@ static int bttv_open(struct video_device *dev, int flags) int i,ret; ret = -EBUSY; + down(&btv->lock); if (btv->user) goto out_unlock; @@ -1789,6 +1798,7 @@ static int bttv_open(struct video_device *dev, int flags) for (i = 0; i < MAX_GBUFFERS; i++) btv->frame_stat[i] = GBUFFER_UNUSED; + burst(0); btv->user++; up(&btv->lock); MOD_INC_USE_COUNT; @@ -2454,19 +2464,13 @@ static int bttv_ioctl(struct video_device *dev, unsigned int cmd, void *arg) case BTTV_BURST_ON: { - tvnorms[0].scaledtwidth=1135-BURSTOFFSET-2; - tvnorms[0].hdelayx1=186-BURSTOFFSET; - tvnorms[2].scaledtwidth=1135-BURSTOFFSET-2; - tvnorms[2].hdelayx1=186-BURSTOFFSET; + burst(1); return 0; } case BTTV_BURST_OFF: { - tvnorms[0].scaledtwidth=1135; - tvnorms[0].hdelayx1=186; - tvnorms[2].scaledtwidth=1135; - tvnorms[2].hdelayx1=186; + burst(0); return 0; } diff --git a/drivers/char/efirtc.c b/drivers/char/efirtc.c new file mode 100644 index 000000000000..8bc4f4b5f20d --- /dev/null +++ b/drivers/char/efirtc.c @@ -0,0 +1,363 @@ +/* + * EFI Time Services Driver for Linux + * + * Copyright (C) 1999 Hewlett-Packard Co + * Copyright (C) 1999 Stephane Eranian + * + * Based on skeleton from the drivers/char/rtc.c driver by P. Gortmaker + * + * This code provides a architected & portable interface to the real time + * clock by using EFI instead of direct bit fiddling. The functionalities are + * quite different from the rtc.c driver. The only way to talk to the device + * is by using ioctl(). There is a /proc interface which provides the raw + * information. + * + * Please note that we have kept the API as close as possible from the + * legacy RTC. The standard /sbin/hwclock program should work normally + * when used to get/set the time. + * + * NOTES: + * - Locking is required for safe execution of EFI calls with regards + * to interrrupts and SMP. 
+ * + * TODO (December 1999): + * - provide the API to set/get the WakeUp Alarm (different from the + * rtc.c alarm). + * - SMP testing + * - Add module support + */ + + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#define EFI_RTC_VERSION "0.1" + +#define EFI_ISDST (EFI_TIME_ADJUST_DAYLIGHT|EFI_TIME_IN_DAYLIGHT) +/* + * EFI Epoch is 1/1/1998 + */ +#define EFI_RTC_EPOCH 1998 + +static spinlock_t efi_rtc_lock; + +static int efi_rtc_ioctl(struct inode *inode, struct file *file, + unsigned int cmd, unsigned long arg); + +#define is_leap(year) \ + ((year) % 4 == 0 && ((year) % 100 != 0 || (year) % 400 == 0)) + +static const unsigned short int __mon_yday[2][13] = +{ + /* Normal years. */ + { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 }, + /* Leap years. */ + { 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 } +}; + +/* + * returns day of the year [0-365] + */ +static inline int +compute_yday(efi_time_t *eft) +{ + /* efi_time_t.month is in the [1-12] so, we need -1 */ + return __mon_yday[is_leap(eft->year)][eft->month-1]+ eft->day -1; +} +/* + * returns day of the week [0-6] 0=Sunday + * + * Don't try to provide a year that's before 1998, please ! + */ +static int +compute_wday(efi_time_t *eft) +{ + int y; + int ndays = 0; + + if ( eft->year < 1998 ) { + printk(KERN_ERR "efirtc: EFI year < 1998, invalid date\n"); + return -1; + } + + for(y=EFI_RTC_EPOCH; y < eft->year; y++ ) { + ndays += 365 + (is_leap(y) ? 1 : 0); + } + ndays += compute_yday(eft); + + /* + * 4=1/1/1998 was a Thursday + */ + return (ndays + 4) % 7; +} + +static void +convert_to_efi_time(struct rtc_time *wtime, efi_time_t *eft) +{ + + eft->year = wtime->tm_year + 1900; + eft->month = wtime->tm_mon + 1; + eft->day = wtime->tm_mday; + eft->hour = wtime->tm_hour; + eft->minute = wtime->tm_min; + eft->second = wtime->tm_sec; + eft->nanosecond = 0; + eft->daylight = wtime->tm_isdst ? 
EFI_ISDST: 0; + eft->timezone = EFI_UNSPECIFIED_TIMEZONE; +} + +static void +convert_from_efi_time(efi_time_t *eft, struct rtc_time *wtime) +{ + wtime->tm_sec = eft->second; + wtime->tm_min = eft->minute; + wtime->tm_hour = eft->hour; + wtime->tm_mday = eft->day; + wtime->tm_mon = eft->month - 1; + wtime->tm_year = eft->year - 1900; + + /* day of the week [0-6], Sunday=0 */ + wtime->tm_wday = compute_wday(eft); + + /* day in the year [1-365]*/ + wtime->tm_yday = compute_yday(eft); + + + switch (eft->daylight & EFI_ISDST) { + case EFI_ISDST: + wtime->tm_isdst = 1; + break; + case EFI_TIME_ADJUST_DAYLIGHT: + wtime->tm_isdst = 0; + break; + default: + wtime->tm_isdst = -1; + } +} + +static int +efi_rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd, + unsigned long arg) +{ + + efi_status_t status; + unsigned long flags; + efi_time_t eft; + efi_time_cap_t cap; + struct rtc_time wtime; + + switch (cmd) { + case RTC_UIE_ON: + case RTC_UIE_OFF: + case RTC_PIE_ON: + case RTC_PIE_OFF: + case RTC_AIE_ON: + case RTC_AIE_OFF: + case RTC_ALM_SET: + case RTC_ALM_READ: + case RTC_IRQP_READ: + case RTC_IRQP_SET: + case RTC_EPOCH_READ: + case RTC_EPOCH_SET: + return -EINVAL; + + case RTC_RD_TIME: + + spin_lock_irqsave(&efi_rtc_lock, flags); + + status = efi.get_time(&eft, &cap); + + spin_unlock_irqrestore(&efi_rtc_lock,flags); + + if (status != EFI_SUCCESS) { + /* should never happen */ + printk(KERN_ERR "efitime: can't read time\n"); + return -EINVAL; + } + + convert_from_efi_time(&eft, &wtime); + + return copy_to_user((void *)arg, &wtime, sizeof (struct rtc_time)) ? - EFAULT : 0; + + case RTC_SET_TIME: + + if (!capable(CAP_SYS_TIME)) return -EACCES; + + if (copy_from_user(&wtime, (struct rtc_time *)arg, sizeof(struct rtc_time)) ) + return -EFAULT; + + convert_to_efi_time(&wtime, &eft); + + spin_lock_irqsave(&efi_rtc_lock, flags); + + status = efi.set_time(&eft); + + spin_unlock_irqrestore(&efi_rtc_lock,flags); + + return status == EFI_SUCCESS ? 
0 : -EINVAL; + } + return -EINVAL; +} + +/* + * We enforce only one user at a time here with the open/close. + * Also clear the previous interrupt data on an open, and clean + * up things on a close. + */ + +static int +efi_rtc_open(struct inode *inode, struct file *file) +{ + /* + * nothing special to do here + * We do accept multiple open files at the same time as we + * synchronize on the per call operation. + */ + return 0; +} + +static int +efi_rtc_close(struct inode *inode, struct file *file) +{ + return 0; +} + +/* + * The various file operations we support. + */ + +static struct file_operations efi_rtc_fops = { + NULL, + NULL, /* no read */ + NULL, /* No write */ + NULL, /* No readdir */ + NULL, + efi_rtc_ioctl, + NULL, /* No mmap */ + efi_rtc_open, + NULL, /* flush */ + efi_rtc_close +}; + +static struct miscdevice efi_rtc_dev= +{ + EFI_RTC_MINOR, + "efirtc", + &efi_rtc_fops +}; + +/* + * We export RAW EFI information to /proc/efirtc + */ +static int +efi_rtc_get_status(char *buf) +{ + efi_time_t eft, alm; + efi_time_cap_t cap; + char *p = buf; + efi_bool_t enabled, pending; + unsigned long flags; + + spin_lock_irqsave(&efi_rtc_lock, flags); + + efi.get_time(&eft, &cap); + efi.get_wakeup_time(&enabled, &pending, &alm); + + spin_unlock_irqrestore(&efi_rtc_lock,flags); + + p += sprintf(p, + "Time :\n" + "Year : %u\n" + "Month : %u\n" + "Day : %u\n" + "Hour : %u\n" + "Minute : %u\n" + "Second : %u\n" + "Nanosecond: %u\n" + "Daylight : %u\n", + eft.year, eft.month, eft.day, eft.hour, eft.minute, + eft.second, eft.nanosecond, eft.daylight); + + if ( eft.timezone == EFI_UNSPECIFIED_TIMEZONE) + p += sprintf(p, "Timezone : unspecified\n"); + else + /* XXX fixme: convert to string? 
*/ + p += sprintf(p, "Timezone : %u\n", eft.timezone); + + + p += sprintf(p, + "\nWakeup Alm:\n" + "Enabled : %s\n" + "Pending : %s\n" + "Year : %u\n" + "Month : %u\n" + "Day : %u\n" + "Hour : %u\n" + "Minute : %u\n" + "Second : %u\n" + "Nanosecond: %u\n" + "Daylight : %u\n", + enabled == 1 ? "Yes" : "No", + pending == 1 ? "Yes" : "No", + alm.year, alm.month, alm.day, alm.hour, alm.minute, + alm.second, alm.nanosecond, alm.daylight); + + if ( eft.timezone == EFI_UNSPECIFIED_TIMEZONE) + p += sprintf(p, "Timezone : unspecified\n"); + else + /* XXX fixme: convert to string? */ + p += sprintf(p, "Timezone : %u\n", eft.timezone); + + /* + * now prints the capabilities + */ + p += sprintf(p, + "\nClock Cap :\n" + "Resolution: %u\n" + "Accuracy : %u\n" + "SetstoZero: %u\n", + cap.resolution, cap.accuracy, cap.sets_to_zero); + + return p - buf; +} + +static int +efi_rtc_read_proc(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + int len = efi_rtc_get_status(page); + if (len <= off+count) *eof = 1; + *start = page + off; + len -= off; + if (len>count) len = count; + if (len<0) len = 0; + return len; +} +static int __init +efi_rtc_init(void) +{ + printk(KERN_INFO "EFI Time Services Driver v%s\n", EFI_RTC_VERSION); + + misc_register(&efi_rtc_dev); + + create_proc_read_entry ("efirtc", 0, NULL, efi_rtc_read_proc, NULL); + + return 0; +} +static int __exit +efi_rtc_exit(void) +{ + /* not yet used */ + return 0; +} + +module_init(efi_rtc_init); +module_exit(efi_rtc_exit); diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c index b2af9a1242be..1688fe181dee 100644 --- a/drivers/char/keyboard.c +++ b/drivers/char/keyboard.c @@ -898,7 +898,7 @@ static inline unsigned char getleds(void){ * used, but this allows for easy and efficient race-condition * prevention later on. 
*/ -static void kbd_bh(void) +static void kbd_bh(unsigned long dummy) { unsigned char leds = getleds(); @@ -909,6 +909,8 @@ static void kbd_bh(void) } } +DECLARE_TASKLET_DISABLED(keyboard_tasklet, kbd_bh, 0); + int __init kbd_init(void) { int i; @@ -928,8 +930,9 @@ int __init kbd_init(void) ttytab = console_driver.table; kbd_init_hw(); - init_bh(KEYBOARD_BH, kbd_bh); - mark_bh(KEYBOARD_BH); + + tasklet_enable(&keyboard_tasklet); + tasklet_schedule(&keyboard_tasklet); pm_kbd = pm_register(PM_SYS_DEV, PM_SYS_KBC, NULL); diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 0c0c3c2bdc35..eff2ab8c0e62 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -585,7 +585,7 @@ int __init chr_dev_init(void) usb_init(); #endif #ifdef CONFIG_I2C - i2c_init_all(); + i2c_init_all(); #endif #if defined (CONFIG_FB) fbmem_init(); diff --git a/drivers/char/pc_keyb.c b/drivers/char/pc_keyb.c index 0a6a771bbc65..725b41462b86 100644 --- a/drivers/char/pc_keyb.c +++ b/drivers/char/pc_keyb.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include @@ -418,7 +419,7 @@ static inline void handle_keyboard_event(unsigned char scancode) if (do_acknowledge(scancode)) handle_scancode(scancode, !(scancode & 0x80)); #endif - mark_bh(KEYBOARD_BH); + tasklet_schedule(&keyboard_tasklet); } /* diff --git a/drivers/char/saa5249.c b/drivers/char/saa5249.c index 784ae208a287..1213e2ee559d 100644 --- a/drivers/char/saa5249.c +++ b/drivers/char/saa5249.c @@ -256,7 +256,7 @@ static int saa5249_command(struct i2c_client *device, static struct i2c_driver i2c_driver_videotext = { IF_NAME, /* name */ - I2C_DRIVERID_VIDEOTEXT, /* in i2c.h */ + I2C_DRIVERID_SAA5249, /* in i2c.h */ I2C_DF_NOTIFY, saa5249_probe, saa5249_detach, diff --git a/drivers/char/vt.c b/drivers/char/vt.c index 3480c1bf041e..e52eec625fdb 100644 --- a/drivers/char/vt.c +++ b/drivers/char/vt.c @@ -804,10 +804,12 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, * When we actually do the console switch, * make 
sure we are atomic with respect to * other console switches.. + * + * Damn! Was it difficult to make this clean? */ - start_bh_atomic(); + disable_bh(CONSOLE_BH); complete_change_console(newvt); - end_bh_atomic(); + enable_bh(CONSOLE_BH); } } diff --git a/drivers/i2c/i2c-algo-bit.c b/drivers/i2c/i2c-algo-bit.c index 32a8514ee2a0..7d7a3bd4ec06 100644 --- a/drivers/i2c/i2c-algo-bit.c +++ b/drivers/i2c/i2c-algo-bit.c @@ -1,7 +1,7 @@ /* ------------------------------------------------------------------------- */ /* i2c-algo-bit.c i2c driver algorithms for bit-shift adapters */ /* ------------------------------------------------------------------------- */ -/* Copyright (C) 1995-99 Simon G. Vogl +/* Copyright (C) 1995-2000 Simon G. Vogl This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -21,35 +21,15 @@ /* With some changes from Kyösti Mälkki and even Frodo Looijaard */ -/* $Id: i2c-algo-bit.c,v 1.21 1999/12/21 23:45:58 frodo Exp $ */ +/* $Id: i2c-algo-bit.c,v 1.26 2000/01/24 02:06:33 mds Exp $ */ #include #include #include #include #include -#if LINUX_VERSION_CODE >= 0x020135 #include -#else -#define __init -#endif - -#if LINUX_VERSION_CODE >= 0x020100 -# include -#else -# include -#endif - -/* 2.0.0 kernel compatibility */ -#if LINUX_VERSION_CODE < 0x020100 -#define MODULE_AUTHOR(noone) -#define MODULE_DESCRIPTION(none) -#define MODULE_PARM(no,param) -#define MODULE_PARM_DESC(no,description) -#define EXPORT_SYMBOL(noexport) -#define EXPORT_NO_SYMBOLS -#endif - +#include #include #include #include @@ -69,13 +49,8 @@ /* respectively. This makes sure that the algorithm works. 
Some chips */ /* might not like this, as they have an internal timeout of some mils */ /* -#if LINUX_VERSION_CODE >= 0x02016e #define SLO_IO jif=jiffies;while(jiffies<=jif+i2c_table[minor].veryslow)\ if (need_resched) schedule(); -#else -#define SLO_IO jif=jiffies;while(jiffies<=jif+i2c_table[minor].veryslow)\ - if (need_resched) schedule(); -#endif */ @@ -100,22 +75,22 @@ static int bit_scan=0; /* have a look at what's hanging 'round */ static inline void sdalo(struct i2c_algo_bit_data *adap) { - setsda(adap,0); - udelay(adap->udelay); + setsda(adap,0); + udelay(adap->udelay); } static inline void sdahi(struct i2c_algo_bit_data *adap) { - setsda(adap,1); - udelay(adap->udelay); + setsda(adap,1); + udelay(adap->udelay); } static inline void scllo(struct i2c_algo_bit_data *adap) { - setscl(adap,0); - udelay(adap->udelay); + setscl(adap,0); + udelay(adap->udelay); #ifdef SLO_IO - SLO_IO + SLO_IO #endif } @@ -145,13 +120,8 @@ static inline int sclhi(struct i2c_algo_bit_data *adap) if (start+adap->timeout <= jiffies) { return -ETIMEDOUT; } -#if LINUX_VERSION_CODE >= 0x02016e if (current->need_resched) schedule(); -#else - if (need_resched) - schedule(); -#endif } DEBSTAT(printk("needed %ld jiffies\n", jiffies-start)); #ifdef SLO_IO @@ -259,7 +229,7 @@ static int i2c_inb(struct i2c_adapter *i2c_adap) }; indata *= 2; if ( getsda(adap) ) - indata |= 0x01; + indata |= 0x01; scllo(adap); } /* assert: scl is low */ @@ -280,13 +250,14 @@ static int test_bus(struct i2c_algo_bit_data *adap, char* name) { } scl=getscl(adap); printk("i2c-algo-bit.o: Adapter: %s scl: %d sda: %d -- testing...\n", - name,getscl(adap),getsda(adap)); + name,getscl(adap),getsda(adap)); if (!scl || !sda ) { printk("i2c-algo-bit.o: %s seems to be busy.\n",name); goto bailout; } sdalo(adap); - printk("i2c-algo-bit.o:1 scl: %d sda: %d \n",getscl(adap),getsda(adap)); + printk("i2c-algo-bit.o:1 scl: %d sda: %d \n",getscl(adap), + getsda(adap)); if ( 0 != getsda(adap) ) { printk("i2c-algo-bit.o: %s SDA stuck 
high!\n",name); sdahi(adap); @@ -298,18 +269,21 @@ static int test_bus(struct i2c_algo_bit_data *adap, char* name) { goto bailout; } sdahi(adap); - printk("i2c-algo-bit.o:2 scl: %d sda: %d \n",getscl(adap),getsda(adap)); + printk("i2c-algo-bit.o:2 scl: %d sda: %d \n",getscl(adap), + getsda(adap)); if ( 0 == getsda(adap) ) { printk("i2c-algo-bit.o: %s SDA stuck low!\n",name); sdahi(adap); goto bailout; } if ( 0 == getscl(adap) ) { - printk("i2c-algo-bit.o: %s SCL unexpected low while SDA high!\n",name); + printk("i2c-algo-bit.o: %s SCL unexpected low while SDA high!\n", + name); goto bailout; } scllo(adap); - printk("i2c-algo-bit.o:3 scl: %d sda: %d \n",getscl(adap),getsda(adap)); + printk("i2c-algo-bit.o:3 scl: %d sda: %d \n",getscl(adap), + getsda(adap)); if ( 0 != getscl(adap) ) { printk("i2c-algo-bit.o: %s SCL stuck high!\n",name); sclhi(adap); @@ -321,7 +295,8 @@ static int test_bus(struct i2c_algo_bit_data *adap, char* name) { goto bailout; } sclhi(adap); - printk("i2c-algo-bit.o:4 scl: %d sda: %d \n",getscl(adap),getsda(adap)); + printk("i2c-algo-bit.o:4 scl: %d sda: %d \n",getscl(adap), + getsda(adap)); if ( 0 == getscl(adap) ) { printk("i2c-algo-bit.o: %s SCL stuck low!\n",name); sclhi(adap); @@ -366,7 +341,8 @@ static inline int try_address(struct i2c_adapter *i2c_adap, i2c_start(adap); udelay(adap->udelay); } - DEB2(if (i) printk("i2c-algo-bit.o: needed %d retries for %d\n",i,addr)); + DEB2(if (i) printk("i2c-algo-bit.o: needed %d retries for %d\n", + i,addr)); return ret; } @@ -391,7 +367,8 @@ static int sendbytes(struct i2c_adapter *i2c_adap,const char *buf, int count) printk("i2c-algo-bit.o: %s i2c_write: error - bailout.\n", i2c_adap->name); i2c_stop(adap); - return (retval<0)? retval : -EFAULT; /* got a better one ?? */ + return (retval<0)? retval : -EFAULT; + /* got a better one ?? 
*/ } #if 0 /* from asm/delay.h */ @@ -447,8 +424,8 @@ static inline int readbytes(struct i2c_adapter *i2c_adap,char *buf,int count) * -x an error occured (like: -EREMOTEIO if the device did not answer, or * -ETIMEDOUT, for example if the lines are stuck...) */ -static inline int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg, - int retries) +static inline int bit_doAddress(struct i2c_adapter *i2c_adap, + struct i2c_msg *msg, int retries) { unsigned short flags = msg->flags; struct i2c_algo_bit_data *adap = i2c_adap->algo_data; @@ -486,6 +463,8 @@ static inline int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *ms addr = ( msg->addr << 1 ); if (flags & I2C_M_RD ) addr |= 1; + if (flags & I2C_M_REV_DIR_ADDR ) + addr ^= 1; ret = try_address(i2c_adap, addr, retries); if (ret!=1) { return -EREMOTEIO; @@ -505,11 +484,16 @@ static int bit_xfer(struct i2c_adapter *i2c_adap, i2c_start(adap); for (i=0;iretries); - if (ret != 0) { - DEB2(printk("i2c-algo-bit.o: NAK from device adr %#2x msg #%d\n" - ,msgs[i].addr,i)); - return (ret<0) ? ret : -EREMOTEIO; + if (!(pmsg->flags & I2C_M_NOSTART)) { + if (i) { + i2c_repstart(adap); + } + ret = bit_doAddress(i2c_adap,pmsg,i2c_adap->retries); + if (ret != 0) { + DEB2(printk("i2c-algo-bit.o: NAK from device adr %#2x msg #%d\n" + ,msgs[i].addr,i)); + return (ret<0) ? ret : -EREMOTEIO; + } } if (pmsg->flags & I2C_M_RD ) { /* read bytes into buffer*/ @@ -526,9 +510,6 @@ static int bit_xfer(struct i2c_adapter *i2c_adap, return (ret<0) ? ret : -EREMOTEIO; } } - if (ialgo_data; + struct i2c_algo_bit_data *bit_adap = adap->algo_data; if (bit_test) { int ret = test_bus(bit_adap, adap->name); @@ -573,7 +555,8 @@ int i2c_bit_add_bus(struct i2c_adapter *adap) return -ENODEV; } - DEB2(printk("i2c-algo-bit.o: hw routines for %s registered.\n",adap->name)); + DEB2(printk("i2c-algo-bit.o: hw routines for %s registered.\n", + adap->name)); /* register new adapter to i2c module... 
*/ @@ -585,8 +568,9 @@ int i2c_bit_add_bus(struct i2c_adapter *adap) /* scan bus */ if (bit_scan) { - int ack; - printk(KERN_INFO " i2c-algo-bit.o: scanning bus %s.\n", adap->name); + int ack; + printk(KERN_INFO " i2c-algo-bit.o: scanning bus %s.\n", + adap->name); for (i = 0x00; i < 0xff; i+=2) { i2c_start(bit_adap); ack = i2c_outb(adap,i); @@ -642,7 +626,8 @@ MODULE_PARM(i2c_debug,"i"); MODULE_PARM_DESC(bit_test, "Test the lines of the bus to see if it is stuck"); MODULE_PARM_DESC(bit_scan, "Scan for active chips on the bus"); -MODULE_PARM_DESC(i2c_debug,"debug level - 0 off; 1 normal; 2,3 more verbose; 9 bit-protocol"); +MODULE_PARM_DESC(i2c_debug, + "debug level - 0 off; 1 normal; 2,3 more verbose; 9 bit-protocol"); int init_module(void) { diff --git a/drivers/i2c/i2c-algo-pcf.c b/drivers/i2c/i2c-algo-pcf.c index 2edf21f37547..14401bacc4ae 100644 --- a/drivers/i2c/i2c-algo-pcf.c +++ b/drivers/i2c/i2c-algo-pcf.c @@ -2,8 +2,8 @@ /* ------------------------------------------------------------------------- */ /* i2c-algo-pcf.c i2c driver algorithms for PCF8584 adapters */ /* ------------------------------------------------------------------------- */ -/* Copyright (C) 1995-97 Simon G. Vogl - 1998-99 Hans Berglund +/* Copyright (C) 1995-1997 Simon G. Vogl + 1998-2000 Hans Berglund This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -20,43 +20,22 @@ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ /* ------------------------------------------------------------------------- */ -/* With some changes from Kyösti Mälkki and even +/* With some changes from Kyösti Mälkki and Frodo Looijaard */ -/* $Id: i2c-algo-pcf.c,v 1.15 1999/12/21 23:45:58 frodo Exp $ */ +/* $Id: i2c-algo-pcf.c,v 1.20 2000/01/24 02:06:33 mds Exp $ */ #include #include #include #include #include -#if LINUX_VERSION_CODE >= 0x020135 #include -#else -#define __init -#endif - -#if LINUX_VERSION_CODE >= 0x020100 -# include -#else -# include -#endif - - +#include #include #include #include -/* 2.0.0 kernel compatibility */ -#if LINUX_VERSION_CODE < 0x020100 -#define MODULE_AUTHOR(noone) -#define MODULE_DESCRIPTION(none) -#define MODULE_PARM(no,param) -#define MODULE_PARM_DESC(no,description) -#define EXPORT_SYMBOL(noexport) -#define EXPORT_NO_SYMBOLS -#endif - #include #include #include "i2c-pcf8584.h" @@ -74,13 +53,8 @@ /* respectively. This makes sure that the algorithm works. Some chips */ /* might not like this, as they have an internal timeout of some mils */ /* -#if LINUX_VERSION_CODE >= 0x02016e #define SLO_IO jif=jiffies;while(jiffies<=jif+i2c_table[minor].veryslow)\ if (need_resched) schedule(); -#else -#define SLO_IO jif=jiffies;while(jiffies<=jif+i2c_table[minor].veryslow)\ - if (need_resched) schedule(); -#endif */ @@ -108,52 +82,42 @@ static int pcf_scan=0; /* have a look at what's hanging 'round */ /* --- other auxiliary functions -------------------------------------- */ -#if LINUX_VERSION_CODE < 0x02017f -static void schedule_timeout(int j) -{ - current->state = TASK_INTERRUPTIBLE; - current->timeout = jiffies + j; - schedule(); -} -#endif - - static void i2c_start(struct i2c_algo_pcf_data *adap) { - DEBPROTO(printk("S ")); - set_pcf(adap, 1, I2C_PCF_START); + DEBPROTO(printk("S ")); + set_pcf(adap, 1, I2C_PCF_START); } static void i2c_repstart(struct i2c_algo_pcf_data *adap) { - DEBPROTO(printk(" Sr ")); - set_pcf(adap, 1, I2C_PCF_REPSTART); + DEBPROTO(printk(" Sr ")); + 
set_pcf(adap, 1, I2C_PCF_REPSTART); } static void i2c_stop(struct i2c_algo_pcf_data *adap) { - DEBPROTO(printk("P\n")); - set_pcf(adap, 1, I2C_PCF_STOP); + DEBPROTO(printk("P\n")); + set_pcf(adap, 1, I2C_PCF_STOP); } static int wait_for_bb(struct i2c_algo_pcf_data *adap) { - int timeout = DEF_TIMEOUT; - int status; - - status = get_pcf(adap, 1); - while (timeout-- && !(status & I2C_PCF_BB)) { - udelay(1000); /* How much is this? */ - status = get_pcf(adap, 1); - } - if (timeout<=0) - printk("Timeout waiting for Bus Busy\n"); - /* - set_pcf(adap, 1, I2C_PCF_STOP); - */ - return(timeout<=0); + int timeout = DEF_TIMEOUT; + int status; + + status = get_pcf(adap, 1); + while (timeout-- && !(status & I2C_PCF_BB)) { + udelay(1000); /* How much is this? */ + status = get_pcf(adap, 1); + } + if (timeout<=0) + printk("Timeout waiting for Bus Busy\n"); + /* + set_pcf(adap, 1, I2C_PCF_STOP); + */ + return(timeout<=0); } @@ -165,17 +129,17 @@ static inline void pcf_sleep(unsigned long timeout) static int wait_for_pin(struct i2c_algo_pcf_data *adap, int *status) { - int timeout = DEF_TIMEOUT; - - *status = get_pcf(adap, 1); - while (timeout-- && (*status & I2C_PCF_PIN)) { - adap->waitforpin(); - *status = get_pcf(adap, 1); - } - if (timeout <= 0) - return(-1); - else - return(0); + int timeout = DEF_TIMEOUT; + + *status = get_pcf(adap, 1); + while (timeout-- && (*status & I2C_PCF_PIN)) { + adap->waitforpin(); + *status = get_pcf(adap, 1); + } + if (timeout <= 0) + return(-1); + else + return(0); } @@ -231,7 +195,8 @@ static int test_bus(struct i2c_algo_pcf_data *adap, char *name) { goto bailout; } sdalo(adap); - printk("i2c-algo-pcf.o:1 scl: %d sda: %d \n",getscl(adap),getsda(adap)); + printk("i2c-algo-pcf.o:1 scl: %d sda: %d \n",getscl(adap), + getsda(adap)); if ( 0 != getsda(adap) ) { printk("i2c-algo-pcf.o: %s SDA stuck high!\n",name); sdahi(adap); @@ -243,18 +208,21 @@ static int test_bus(struct i2c_algo_pcf_data *adap, char *name) { goto bailout; } sdahi(adap); - 
printk("i2c-algo-pcf.o:2 scl: %d sda: %d \n",getscl(adap),getsda(adap)); + printk("i2c-algo-pcf.o:2 scl: %d sda: %d \n",getscl(adap), + getsda(adap)); if ( 0 == getsda(adap) ) { printk("i2c-algo-pcf.o: %s SDA stuck low!\n",name); sdahi(adap); goto bailout; } if ( 0 == getscl(adap) ) { - printk("i2c-algo-pcf.o: %s SCL unexpected low while SDA high!\n",adap->name); + printk("i2c-algo-pcf.o: %s SCL unexpected low while SDA high!\n", + adap->name); goto bailout; } scllo(adap); - printk("i2c-algo-pcf.o:3 scl: %d sda: %d \n",getscl(adap),getsda(adap)); + printk("i2c-algo-pcf.o:3 scl: %d sda: %d \n",getscl(adap), + getsda(adap)); if ( 0 != getscl(adap) ) { printk("i2c-algo-pcf.o: %s SCL stuck high!\n",name); sclhi(adap); @@ -266,7 +234,8 @@ static int test_bus(struct i2c_algo_pcf_data *adap, char *name) { goto bailout; } sclhi(adap); - printk("i2c-algo-pcf.o:4 scl: %d sda: %d \n",getscl(adap),getsda(adap)); + printk("i2c-algo-pcf.o:4 scl: %d sda: %d \n",getscl(adap), + getsda(adap)); if ( 0 == getscl(adap) ) { printk("i2c-algo-pcf.o: %s SCL stuck low!\n",name); sclhi(adap); @@ -293,99 +262,99 @@ bailout: static inline int try_address(struct i2c_algo_pcf_data *adap, unsigned char addr, int retries) { - int i, status, ret = -1; - for (i=0;i= 0) { - if ((status && I2C_PCF_LRB) == 0) { - i2c_stop(adap); - break; /* success! */ - } - } - i2c_stop(adap); - udelay(adap->udelay); - } - DEB2(if (i) printk("i2c-algo-pcf.o: needed %d retries for %d\n",i,addr)); - return ret; + int i, status, ret = -1; + for (i=0;i= 0) { + if ((status && I2C_PCF_LRB) == 0) { + i2c_stop(adap); + break; /* success! 
*/ + } + } + i2c_stop(adap); + udelay(adap->udelay); + } + DEB2(if (i) printk("i2c-algo-pcf.o: needed %d retries for %d\n",i, + addr)); + return ret; } -static int pcf_sendbytes(struct i2c_adapter *i2c_adap,const char *buf, int count) +static int pcf_sendbytes(struct i2c_adapter *i2c_adap,const char *buf, + int count) { - struct i2c_algo_pcf_data *adap = i2c_adap->algo_data; - int wrcount, status, timeout; - - for (wrcount=0; wrcountname, buf[wrcount]&0xff)); - i2c_outb(adap, buf[wrcount]); - timeout = wait_for_pin(adap, &status); - if (timeout) { - printk("i2c-algo-pcf.o: %s i2c_write: error - timeout.\n", - i2c_adap->name); - i2c_stop(adap); - return -EREMOTEIO; /* got a better one ?? */ - } - if (status & I2C_PCF_LRB) { - printk("i2c-algo-pcf.o: %s i2c_write: error - no ack.\n", - i2c_adap->name); - i2c_stop(adap); - return -EREMOTEIO; /* got a better one ?? */ - } - } - return (wrcount); + struct i2c_algo_pcf_data *adap = i2c_adap->algo_data; + int wrcount, status, timeout; + + for (wrcount=0; wrcountname, buf[wrcount]&0xff)); + i2c_outb(adap, buf[wrcount]); + timeout = wait_for_pin(adap, &status); + if (timeout) { + printk("i2c-algo-pcf.o: %s i2c_write: error - timeout.\n", + i2c_adap->name); + i2c_stop(adap); + return -EREMOTEIO; /* got a better one ?? */ + } + if (status & I2C_PCF_LRB) { + printk("i2c-algo-pcf.o: %s i2c_write: error - no ack.\n", + i2c_adap->name); + i2c_stop(adap); + return -EREMOTEIO; /* got a better one ?? 
*/ + } + } + return (wrcount); } static int pcf_readbytes(struct i2c_adapter *i2c_adap, char *buf, int count) { - int rdcount=0, i, status, timeout, dummy=1; - struct i2c_algo_pcf_data *adap = i2c_adap->algo_data; + int rdcount=0, i, status, timeout, dummy=1; + struct i2c_algo_pcf_data *adap = i2c_adap->algo_data; - for (i=0; iflags; unsigned char addr; int ret; if ( (flags & I2C_M_TEN) ) { /* a ten bit address */ - addr = 0xf0 | (( msg->addr >> 7) & 0x03); + addr = 0xf0 | (( msg->addr >> 7) & 0x03); DEB2(printk("addr0: %d\n",addr)); /* try extended address code...*/ ret = try_address(adap, addr, retries); @@ -414,6 +383,8 @@ static inline int pcf_doAddress(struct i2c_algo_pcf_data *adap, struct i2c_msg * addr = ( msg->addr << 1 ); if (flags & I2C_M_RD ) addr |= 1; + if (flags & I2C_M_REV_DIR_ADDR ) + addr ^= 1; i2c_outb(adap, addr); } return 0; @@ -423,71 +394,50 @@ static int pcf_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msgs[], int num) { - struct i2c_algo_pcf_data *adap = i2c_adap->algo_data; - struct i2c_msg *pmsg; - int i, ret, timeout, status; - - timeout = wait_for_bb(adap); - if (timeout) { - DEB2(printk("i2c-algo-pcf.o: Timeout waiting for BB in pcf_xfer\n");) - return -EIO; - } - pmsg = &msgs[0]; - ret = pcf_doAddress(adap, pmsg, i2c_adap->retries); - i2c_start(adap); - - for (i=0; iflags & I2C_M_RD ) { - /* read bytes into buffer*/ - ret = pcf_readbytes(i2c_adap, pmsg->buf, pmsg->len); - DEB2(printk("i2c-algo-pcf.o: read %d bytes.\n",ret)); - } else { - /* write bytes from buffer */ - ret = pcf_sendbytes(i2c_adap, pmsg->buf, pmsg->len); - DEB2(printk("i2c-algo-pcf.o: wrote %d bytes.\n",ret)); - } - if (i == (num-1)) { - i2c_stop(adap); - } - else { - i2c_repstart(adap); - } - if (pmsg->flags & I2C_M_RD ) { - pmsg->buf[pmsg->len-1] = i2c_inb(adap); - } - if (i != (num-1)) { - pmsg = &msgs[0]; - ret = pcf_doAddress(adap, pmsg, i2c_adap->retries); - timeout = wait_for_pin(adap, &status); - if (timeout) { - DEB2(printk("i2c-algo-pcf.o: Timeout 
waiting for PIN(2) in pcf_xfer\n");) - return (-EREMOTEIO); - } - if (status & I2C_PCF_LRB) { - i2c_stop(adap); - DEB2(printk("i2c-algo-pcf.o: No LRB(2) in pcf_xfer\n");) - return (-EREMOTEIO); - } - } - } - return (num); + struct i2c_algo_pcf_data *adap = i2c_adap->algo_data; + struct i2c_msg *pmsg; + int i, ret, timeout, status; + + timeout = wait_for_bb(adap); + if (timeout) { + DEB2(printk("i2c-algo-pcf.o: Timeout waiting for BB in pcf_xfer\n");) + return -EIO; + } + i2c_start(adap); + + for (i=0; iflags & I2C_M_NOSTART)) { + if (i) + i2c_repstart(adap); + ret = pcf_doAddress(adap, pmsg, i2c_adap->retries); + timeout = wait_for_pin(adap, &status); + if (timeout) { + DEB2(printk("i2c-algo-pcf.o: Timeout waiting for PIN(1) in pcf_xfer\n");) + return (-EREMOTEIO); + } + if (status & I2C_PCF_LRB) { + i2c_stop(adap); + DEB2(printk("i2c-algo-pcf.o: No LRB(1) in pcf_xfer\n");) + return (-EREMOTEIO); + } + } + DEB3(printk("i2c-algo-pcf.o: Msg %d, addr=0x%x, flags=0x%x, len=%d\n", + i, msgs[i].addr, msgs[i].flags, msgs[i].len);) + if (pmsg->flags & I2C_M_RD ) { + /* read bytes into buffer*/ + ret = pcf_readbytes(i2c_adap, pmsg->buf, pmsg->len); + DEB2(printk("i2c-algo-pcf.o: read %d bytes.\n",ret)); + } else { + /* write bytes from buffer */ + ret = pcf_sendbytes(i2c_adap, pmsg->buf, pmsg->len); + DEB2(printk("i2c-algo-pcf.o: wrote %d bytes.\n",ret)); + } + } + i2c_stop(adap); + return (num); } - static int algo_control(struct i2c_adapter *adapter, unsigned int cmd, unsigned long arg) { @@ -496,7 +446,8 @@ static int algo_control(struct i2c_adapter *adapter, static u32 pcf_func(struct i2c_adapter *adap) { - return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR; + return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR | + I2C_FUNC_PROTOCOL_MANGLING; } /* -----exported algorithm data: ------------------------------------- */ @@ -509,7 +460,7 @@ static struct i2c_algorithm pcf_algo = { NULL, /* slave_xmit */ NULL, /* slave_recv */ algo_control, /* ioctl */ - pcf_func, /* 
functionality */ + pcf_func, /* functionality */ }; /* @@ -526,7 +477,8 @@ int i2c_pcf_add_bus(struct i2c_adapter *adap) return -ENODEV; } - DEB2(printk("i2c-algo-pcf.o: hw routines for %s registered.\n",adap->name)); + DEB2(printk("i2c-algo-pcf.o: hw routines for %s registered.\n", + adap->name)); /* register new adapter to i2c module... */ @@ -545,20 +497,21 @@ int i2c_pcf_add_bus(struct i2c_adapter *adap) /* scan bus */ if (pcf_scan) { - printk(KERN_INFO " i2c-algo-pcf.o: scanning bus %s.\n", adap->name); - for (i = 0x00; i < 0xff; i+=2) { - i2c_outb(pcf_adap, i); - i2c_start(pcf_adap); - if ((wait_for_pin(pcf_adap, &status) >= 0) && - ((status && I2C_PCF_LRB) == 0)) { - printk("(%02x)",i>>1); - } else { - printk("."); - } - i2c_stop(pcf_adap); - udelay(pcf_adap->udelay); - } - printk("\n"); + printk(KERN_INFO " i2c-algo-pcf.o: scanning bus %s.\n", + adap->name); + for (i = 0x00; i < 0xff; i+=2) { + i2c_outb(pcf_adap, i); + i2c_start(pcf_adap); + if ((wait_for_pin(pcf_adap, &status) >= 0) && + ((status && I2C_PCF_LRB) == 0)) { + printk("(%02x)",i>>1); + } else { + printk("."); + } + i2c_stop(pcf_adap); + udelay(pcf_adap->udelay); + } + printk("\n"); } return 0; } @@ -577,7 +530,7 @@ int i2c_pcf_del_bus(struct i2c_adapter *adap) int __init i2c_algo_pcf_init (void) { - printk("i2c-algo-pcf.o: i2c pcf8584 algorithm module\n"); + printk("i2c-algo-pcf.o: i2c pcf8584 algorithm module\n"); return 0; } @@ -595,7 +548,8 @@ MODULE_PARM(i2c_debug,"i"); MODULE_PARM_DESC(pcf_test, "Test if the I2C bus is available"); MODULE_PARM_DESC(pcf_scan, "Scan for active chips on the bus"); -MODULE_PARM_DESC(i2c_debug,"debug level - 0 off; 1 normal; 2,3 more verbose; 9 pcf-protocol"); +MODULE_PARM_DESC(i2c_debug, + "debug level - 0 off; 1 normal; 2,3 more verbose; 9 pcf-protocol"); int init_module(void) diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 2bb265fc499a..c98cb7c6fab7 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -20,7 +20,7 @@ /* With 
some changes from Kyösti Mälkki . All SMBus-related things are written by Frodo Looijaard */ -/* $Id: i2c-core.c,v 1.48 2000/01/24 21:41:19 frodo Exp $ */ +/* $Id: i2c-core.c,v 1.50 2000/02/02 23:29:54 frodo Exp $ */ #include #include @@ -474,38 +474,58 @@ ssize_t i2cproc_bus_read(struct file * file, char * buf,size_t count, struct inode * inode = file->f_dentry->d_inode; char *kbuf; struct i2c_client *client; - int i,j,len=0; + int i,j,k,order_nr,len=0,len_total; + int order[I2C_CLIENT_MAX]; if (count < 0) - return -EINVAL; - if (count > 4000) - count = 4000; + return -EINVAL; + len_total = file->f_pos + count; + /* Too bad if this gets longer (unlikely) */ + if (len_total > 4000) + len_total = 4000; for (i = 0; i < I2C_ADAP_MAX; i++) if (adapters[i]->inode == inode->i_ino) { /* We need a bit of slack in the kernel buffer; this makes the sprintf safe. */ if (! (kbuf = kmalloc(count + 80,GFP_KERNEL))) return -ENOMEM; - for (j = 0; j < I2C_CLIENT_MAX; j++) - if ((client = adapters[i]->clients[j])) - /* Filter out dummy clients */ - if (client->driver->id != I2C_DRIVERID_I2CDEV) - len += sprintf(kbuf+len,"%02x\t%-32s\t%-32s\n", - client->addr, - client->name,client->driver->name); - if (file->f_pos+len > count) - len = count - file->f_pos; - len = len - file->f_pos; - if (len < 0) - len = 0; - if (copy_to_user (buf,kbuf+file->f_pos, - len)) { - kfree(kbuf); - return -EFAULT; - } - file->f_pos += len; - kfree(kbuf); - return len; + /* Order will hold the indexes of the clients + sorted by address */ + order_nr=0; + for (j = 0; j < I2C_CLIENT_MAX; j++) { + if ((client = adapters[i]->clients[j]) && + (client->driver->id != I2C_DRIVERID_I2CDEV)) { + for(k = order_nr; + (k > 0) && + adapters[i]->clients[order[k-1]]-> + addr > client->addr; + k--) + order[k] = order[k-1]; + order[k] = j; + order_nr++; + } + } + + + for (j = 0; (j < order_nr) && (len < len_total); j++) { + client = adapters[i]->clients[order[j]]; + len += sprintf(kbuf+len,"%02x\t%-32s\t%-32s\n", + 
client->addr, + client->name, + client->driver->name); + } + len = len - file->f_pos; + if (len > count) + len = count; + if (len < 0) + len = 0; + if (copy_to_user (buf,kbuf+file->f_pos, len)) { + kfree(kbuf); + return -EFAULT; + } + file->f_pos += len; + kfree(kbuf); + return len; } return -ENOENT; } diff --git a/drivers/i2c/i2c-elv.c b/drivers/i2c/i2c-elv.c index 1eb17cacf0af..adae40c4ce95 100644 --- a/drivers/i2c/i2c-elv.c +++ b/drivers/i2c/i2c-elv.c @@ -1,7 +1,7 @@ /* ------------------------------------------------------------------------- */ /* i2c-elv.c i2c-hw access for philips style parallel port adapters */ /* ------------------------------------------------------------------------- */ -/* Copyright (C) 1995-99 Simon G. Vogl +/* Copyright (C) 1995-2000 Simon G. Vogl This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -16,39 +16,21 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ -/* ------------------------------------------------------------------------- +/* ------------------------------------------------------------------------- */ /* With some changes from Kyösti Mälkki and even Frodo Looijaard */ -/* $Id: i2c-elv.c,v 1.12 1999/12/21 23:45:58 frodo Exp $ */ +/* $Id: i2c-elv.c,v 1.16 2000/01/18 23:54:07 frodo Exp $ */ #include #include #include #include #include -#if LINUX_VERSION_CODE >= 0x020135 #include -#else -#define __init -#endif - -/* 2.0.0 kernel compatibility */ -#if LINUX_VERSION_CODE < 0x020100 -#define MODULE_AUTHOR(noone) -#define MODULE_DESCRIPTION(none) -#define MODULE_PARM(no,param) -#define MODULE_PARM_DESC(no,description) -#define EXPORT_SYMBOL(noexport) -#define EXPORT_NO_SYMBOLS -#endif -#if LINUX_VERSION_CODE >= 0x020100 -# include -#else -# include -#endif +#include #include #include @@ -66,11 +48,11 @@ static unsigned char PortData = 0; #define DEBE(x) x /* error messages */ #define DEBINIT(x) x /* detection status messages */ -/* --- Convenience defines for the parallel port: */ -#define BASE (unsigned int)(data) -#define DATA BASE /* Centronics data port */ -#define STAT (BASE+1) /* Centronics status port */ -#define CTRL (BASE+2) /* Centronics control port */ +/* --- Convenience defines for the parallel port: */ +#define BASE (unsigned int)(data) +#define DATA BASE /* Centronics data port */ +#define STAT (BASE+1) /* Centronics status port */ +#define CTRL (BASE+2) /* Centronics control port */ /* ----- local functions ---------------------------------------------- */ @@ -127,8 +109,8 @@ static int bit_elv_init(void) request_region(base,(base == 0x3bc)? 
3 : 8, "i2c (ELV adapter)"); PortData = 0; - bit_elv_setsda((void*)base,1); - bit_elv_setscl((void*)base,1); + bit_elv_setsda((void*)base,1); + bit_elv_setscl((void*)base,1); } return 0; } @@ -140,7 +122,7 @@ static void bit_elv_exit(void) static int bit_elv_reg(struct i2c_client *client) { - return 0; + return 0; } static int bit_elv_unreg(struct i2c_client *client) @@ -151,14 +133,14 @@ static int bit_elv_unreg(struct i2c_client *client) static void bit_elv_inc_use(struct i2c_adapter *adap) { #ifdef MODULE - MOD_INC_USE_COUNT; + MOD_INC_USE_COUNT; #endif } static void bit_elv_dec_use(struct i2c_adapter *adap) { #ifdef MODULE - MOD_DEC_USE_COUNT; + MOD_DEC_USE_COUNT; #endif } @@ -172,7 +154,7 @@ static struct i2c_algo_bit_data bit_elv_data = { bit_elv_setscl, bit_elv_getsda, bit_elv_getscl, - 80, 80, 100, /* waits, timeout */ + 80, 80, 100, /* waits, timeout */ }; static struct i2c_adapter bit_elv_ops = { @@ -186,30 +168,30 @@ static struct i2c_adapter bit_elv_ops = { bit_elv_unreg, }; -int __init i2c_bitelv_init(void) +int __init i2c_bitelv_init(void) { printk("i2c-elv.o: i2c ELV parallel port adapter module\n"); - if (base==0) { - /* probe some values */ - base=DEFAULT_BASE; - bit_elv_data.data=(void*)DEFAULT_BASE; - if (bit_elv_init()==0) { - if(i2c_bit_add_bus(&bit_elv_ops) < 0) + if (base==0) { + /* probe some values */ + base=DEFAULT_BASE; + bit_elv_data.data=(void*)DEFAULT_BASE; + if (bit_elv_init()==0) { + if(i2c_bit_add_bus(&bit_elv_ops) < 0) return -ENODEV; - } else { - return -ENODEV; - } - } else { - bit_elv_ops.data=(void*)base; - if (bit_elv_init()==0) { - if(i2c_bit_add_bus(&bit_elv_ops) < 0) + } else { + return -ENODEV; + } + } else { + bit_elv_ops.data=(void*)base; + if (bit_elv_init()==0) { + if(i2c_bit_add_bus(&bit_elv_ops) < 0) return -ENODEV; - } else { - return -ENODEV; - } - } - printk("i2c-elv.o: found device at %#x.\n",base); - return 0; + } else { + return -ENODEV; + } + } + printk("i2c-elv.o: found device at %#x.\n",base); + return 0; } 
@@ -224,13 +206,13 @@ MODULE_PARM(base, "i"); int init_module(void) { - return i2c_bitelv_init(); + return i2c_bitelv_init(); } void cleanup_module(void) { - i2c_bit_del_bus(&bit_elv_ops); - bit_elv_exit(); + i2c_bit_del_bus(&bit_elv_ops); + bit_elv_exit(); } #endif diff --git a/drivers/i2c/i2c-philips-par.c b/drivers/i2c/i2c-philips-par.c index 52079eb8856b..ba72ddddb90e 100644 --- a/drivers/i2c/i2c-philips-par.c +++ b/drivers/i2c/i2c-philips-par.c @@ -1,7 +1,7 @@ /* ------------------------------------------------------------------------- */ /* i2c-philips-par.c i2c-hw access for philips style parallel port adapters */ /* ------------------------------------------------------------------------- */ -/* Copyright (C) 1995-99 Simon G. Vogl +/* Copyright (C) 1995-2000 Simon G. Vogl This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -16,34 +16,20 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ -/* ------------------------------------------------------------------------- +/* ------------------------------------------------------------------------- */ /* With some changes from Kyösti Mälkki and even Frodo Looijaard */ -/* $Id: i2c-philips-par.c,v 1.12 1999/12/21 23:45:58 frodo Exp $ */ +/* $Id: i2c-philips-par.c,v 1.16 2000/01/18 23:54:07 frodo Exp $ */ #include #include #include -#if LINUX_VERSION_CODE >= 0x020135 #include -#else -#define __init -#endif #include #include -/* 2.0.0 kernel compatibility */ -#if LINUX_VERSION_CODE < 0x020100 -#define MODULE_AUTHOR(noone) -#define MODULE_DESCRIPTION(none) -#define MODULE_PARM(no,param) -#define MODULE_PARM_DESC(no,description) -#define EXPORT_SYMBOL(noexport) -#define EXPORT_NO_SYMBOLS -#endif - #include #include @@ -147,12 +133,12 @@ static int bit_lp_unreg(struct i2c_client *client) static void bit_lp_inc_use(struct i2c_adapter *adap) { - MOD_INC_USE_COUNT; + MOD_INC_USE_COUNT; } static void bit_lp_dec_use(struct i2c_adapter *adap) { - MOD_DEC_USE_COUNT; + MOD_DEC_USE_COUNT; } /* ------------------------------------------------------------------------ @@ -183,7 +169,7 @@ static struct i2c_adapter bit_lp_ops = { int __init i2c_bitlp_init(void) { - printk("i2c-philips-par.o: i2c Philips parallel port adapter module\n"); + printk("i2c-philips-par.o: i2c Philips parallel port adapter module\n"); if (base==0) { /* probe some values */ base=DEFAULT_BASE; diff --git a/drivers/i2c/i2c-velleman.c b/drivers/i2c/i2c-velleman.c index 7a3bc3522a7e..95c12a289826 100644 --- a/drivers/i2c/i2c-velleman.c +++ b/drivers/i2c/i2c-velleman.c @@ -1,7 +1,7 @@ /* ------------------------------------------------------------------------- */ /* i2c-velleman.c i2c-hw access for Velleman K9000 adapters */ /* ------------------------------------------------------------------------- */ -/* Copyright (C) 1995-96 Simon G. Vogl +/* Copyright (C) 1995-96, 2000 Simon G. 
Vogl This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -16,32 +16,18 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ -/* ------------------------------------------------------------------------- +/* ------------------------------------------------------------------------- */ -/* $Id: i2c-velleman.c,v 1.14 1999/12/21 23:45:58 frodo Exp $ */ +/* $Id: i2c-velleman.c,v 1.19 2000/01/24 02:06:33 mds Exp $ */ #include #include #include -#if LINUX_VERSION_CODE >= 0x020135 #include -#else -#define __init -#endif #include /* for 2.0 kernels to get NULL */ #include /* for 2.0 kernels to get ENODEV */ #include -/* 2.0.0 kernel compatibility */ -#if LINUX_VERSION_CODE < 0x020100 -#define MODULE_AUTHOR(noone) -#define MODULE_DESCRIPTION(none) -#define MODULE_PARM(no,param) -#define MODULE_PARM_DESC(no,description) -#define EXPORT_SYMBOL(noexport) -#define EXPORT_NO_SYMBOLS -#endif - #include #include @@ -55,7 +41,7 @@ #define I2C_SCL 0x08 /* ctrl bit 3 (inv) */ #define I2C_SDAIN 0x10 /* stat bit 4 */ -#define I2C_SCLIN 0x08 /* ctrl bit 3 (inv) (reads own output) */ +#define I2C_SCLIN 0x08 /* ctrl bit 3 (inv)(reads own output)*/ #define I2C_DMASK 0xfd #define I2C_CMASK 0xf7 @@ -105,7 +91,8 @@ static int bit_velle_getsda(void *data) static int bit_velle_init(void) { if (check_region(base,(base == 0x3bc)? 3 : 8) < 0 ) { - DEBE(printk("i2c-velleman.o: Port %#x already in use.\n", base)); + DEBE(printk("i2c-velleman.o: Port %#x already in use.\n", + base)); return -ENODEV; } else { request_region(base, (base == 0x3bc)? 
3 : 8, @@ -173,7 +160,7 @@ static struct i2c_adapter bit_velle_ops = { int __init i2c_bitvelle_init(void) { - printk("i2c-velleman.o: i2c Velleman K8000 adapter module\n"); + printk("i2c-velleman.o: i2c Velleman K8000 adapter module\n"); if (base==0) { /* probe some values */ base=DEFAULT_BASE; diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c index 1a557f0df7f5..9ed259a2060f 100644 --- a/drivers/macintosh/adb.c +++ b/drivers/macintosh/adb.c @@ -247,6 +247,8 @@ adb_notify_sleep(struct pmu_sleep_notifier *self, int when) switch (when) { case PBOOK_SLEEP_REQUEST: adb_got_sleep = 1; + if (adb_controller->autopoll) + adb_controller->autopoll(0); ret = notifier_call_chain(&adb_client_list, ADB_MSG_POWERDOWN, NULL); if (ret & NOTIFY_STOP_MASK) return PBOOK_SLEEP_REFUSE; @@ -262,6 +264,7 @@ adb_notify_sleep(struct pmu_sleep_notifier *self, int when) break; case PBOOK_WAKE: adb_reset_bus(); + adb_got_sleep = 0; break; } return PBOOK_SLEEP_OK; @@ -271,15 +274,21 @@ adb_notify_sleep(struct pmu_sleep_notifier *self, int when) int adb_reset_bus(void) { - int ret, devs; + int ret, nret, devs; unsigned long flags; if (adb_controller == NULL) return -ENXIO; - ret = notifier_call_chain(&adb_client_list, ADB_MSG_PRE_RESET, NULL); - if (ret & NOTIFY_STOP_MASK) + if (adb_controller->autopoll) + adb_controller->autopoll(0); + + nret = notifier_call_chain(&adb_client_list, ADB_MSG_PRE_RESET, NULL); + if (nret & NOTIFY_STOP_MASK) { + if (adb_controller->autopoll) + adb_controller->autopoll(devs); return -EBUSY; + } save_flags(flags); cli(); @@ -291,18 +300,17 @@ adb_reset_bus(void) else ret = 0; - if (!ret) - { + if (!ret) { devs = adb_scan_bus(); if (adb_controller->autopoll) adb_controller->autopoll(devs); } - ret = notifier_call_chain(&adb_client_list, ADB_MSG_POST_RESET, NULL); - if (ret & NOTIFY_STOP_MASK) + nret = notifier_call_chain(&adb_client_list, ADB_MSG_POST_RESET, NULL); + if (nret & NOTIFY_STOP_MASK) return -EBUSY; - return 1; + return ret; } void @@ -383,6 
+391,12 @@ adb_input(unsigned char *buf, int nb, struct pt_regs *regs, int autopoll) int i, id; static int dump_adb_input = 0; + /* We skip keystrokes and mouse moves when the sleep process + * has been started. We stop autopoll, but this is another security + */ + if (adb_got_sleep) + return; + id = buf[0] >> 4; if (dump_adb_input) { printk(KERN_INFO "adb packet: "); @@ -403,12 +417,8 @@ adb_try_handler_change(int address, int new_id) if (adb_handler[address].handler_id == new_id) return 1; - adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1, - ADB_READREG(address,3)); - if (req.reply_len < 2) - return 0; adb_request(&req, NULL, ADBREQ_SYNC, 3, - ADB_WRITEREG(address, 3), req.reply[1] & 0xF0, new_id); + ADB_WRITEREG(address, 3), address | 0x20, new_id); adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1, ADB_READREG(address, 3)); if (req.reply_len < 2) @@ -611,10 +621,11 @@ static ssize_t adb_write(struct file *file, const char *buf, /* Special case for ADB_BUSRESET request, all others are sent to the controller */ if ((req->data[0] == ADB_PACKET)&&(count > 1) - &&(req->data[1] == ADB_BUSRESET)) + &&(req->data[1] == ADB_BUSRESET)) { ret = adb_reset_bus(); - else - { + atomic_dec(&state->n_pending); + goto out; + } else { req->reply_expected = ((req->data[1] & 0xc) == 0xc); if (adb_controller && adb_controller->send_request) diff --git a/drivers/macintosh/mac_keyb.c b/drivers/macintosh/mac_keyb.c index 4eae1efd9044..2ba5baac21cf 100644 --- a/drivers/macintosh/mac_keyb.c +++ b/drivers/macintosh/mac_keyb.c @@ -16,13 +16,15 @@ * * - Standard 1 button mouse * - All standard Apple Extended protocol (handler ID 4) - * mice & trackballs + * - mouseman and trackman mice & trackballs * - PowerBook Trackpad (default setup: enable tapping) * - MicroSpeed mouse & trackball (needs testing) * - CH Products Trackball Pro (needs testing) * - Contour Design (Contour Mouse) * - Hunter digital (NoHandsMouse) * - Kensignton TurboMouse 5 (needs testing) + * - Mouse Systems A3 
mice and trackballs + * - MacAlly 2-buttons mouse (needs testing) * * To do: * @@ -66,10 +68,10 @@ static unsigned char dont_repeat[128] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, /* esc...option */ - 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, /* fn, num lock */ + 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, /* fn, num lock */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, /* scroll lock */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, /* R modifiers */ }; /* Simple translation table for the SysRq keys */ @@ -239,6 +241,7 @@ static void init_trackpad(int id); static void init_trackball(int id); static void init_turbomouse(int id); static void init_microspeed(int id); +static void init_ms_a3(int id); #ifdef CONFIG_ADBMOUSE /* XXX: Hook for mouse driver */ @@ -268,6 +271,8 @@ static struct adb_ids buttons_ids; #define ADBMOUSE_TURBOMOUSE5 5 /* Turbomouse 5 (previously req. mousehack) */ #define ADBMOUSE_MICROSPEED 6 /* Microspeed mouse (&trackball ?), MacPoint */ #define ADBMOUSE_TRACKBALLPRO 7 /* Trackball Pro (special buttons) */ +#define ADBMOUSE_MS_A3 8 /* Mouse systems A3 trackball (handler 3) */ +#define ADBMOUSE_MACALLY2 9 /* MacAlly 2-button mouse */ static int adb_mouse_kinds[16]; @@ -484,6 +489,19 @@ mouse_input(unsigned char *data, int nb, struct pt_regs *regs, int autopoll) data[3] = byyy bxxx Third button and fourth button. Y is additional high bits of y-axis motion. XY is additional high bits of x-axis motion. + + MacAlly 2-button mouse protocol. + + For MacAlly 2-button mouse protocol the data array will contain the + following values: + + BITS COMMENTS + data[0] = dddd 1100 ADB command: Talk, register 0, for device dddd. + data[1] = bxxx xxxx Left button and x-axis motion. + data[2] = byyy yyyy Right button and y-axis motion. 
+ data[3] = ???? ???? unknown + data[4] = ???? ???? unknown + */ struct kbd_struct *kbd; @@ -510,6 +528,16 @@ mouse_input(unsigned char *data, int nb, struct pt_regs *regs, int autopoll) data[2] = (data[2] & 0x7f) | ((data[3] & 0x01) << 7); data[3] = (data[3] & 0x77) | ((data[3] & 0x02) << 6); break; + case ADBMOUSE_MS_A3: + data[1] = (data[1] & 0x7f) | ((data[3] & 0x01) << 7); + data[2] = (data[2] & 0x7f) | ((data[3] & 0x02) << 6); + data[3] = ((data[3] & 0x04) << 5); + break; + case ADBMOUSE_MACALLY2: + data[3] = (data[2] & 0x80) ? 0x80 : 0x00; + data[2] |= 0x80; /* Right button is mapped as button 3 */ + nb=4; + break; } if (adb_mouse_interrupt_hook) @@ -642,7 +670,6 @@ static int pending_led_end=0; static void real_mackbd_leds(unsigned char leds, int device) { - if (led_request.complete) { adb_request(&led_request, leds_done, 0, 3, ADB_WRITEREG(device, KEYB_LEDREG), 0xff, @@ -715,10 +742,18 @@ void __init mackbd_init_hw(void) static int adb_message_handler(struct notifier_block *this, unsigned long code, void *x) { + unsigned long flags; + switch (code) { case ADB_MSG_PRE_RESET: case ADB_MSG_POWERDOWN: - /* Add unregister_keyboard when merging with Paul Mackerras */ + /* Stop the repeat timer. 
Autopoll is already off at this point */ + save_flags(flags); + cli(); + del_timer(&repeat_timer); + restore_flags(flags); + + /* Stop pending led requests */ while(!led_request.complete) adb_poll(); break; @@ -753,9 +788,12 @@ mackeyb_probe(void) /* Enable full feature set of the keyboard ->get it to send separate codes for left and right shift, control, option keys */ +#if 0 /* handler 5 doesn't send separate codes for R modifiers */ if (adb_try_handler_change(id, 5)) printk("ADB keyboard at %d, handler set to 5\n", id); - else if (adb_try_handler_change(id, 3)) + else +#endif + if (adb_try_handler_change(id, 3)) printk("ADB keyboard at %d, handler set to 3\n", id); else printk("ADB keyboard at %d, handler 1\n", id); @@ -769,10 +807,6 @@ mackeyb_probe(void) printk("ADB mouse at %d, handler set to 4", id); adb_mouse_kinds[id] = ADBMOUSE_EXTENDED; } - else if (adb_try_handler_change(id, 2)) { - printk("ADB mouse at %d, handler set to 2", id); - adb_mouse_kinds[id] = ADBMOUSE_STANDARD_200; - } else if (adb_try_handler_change(id, 0x2F)) { printk("ADB mouse at %d, handler set to 0x2F", id); adb_mouse_kinds[id] = ADBMOUSE_MICROSPEED; @@ -789,6 +823,14 @@ mackeyb_probe(void) printk("ADB mouse at %d, handler set to 0x5F", id); adb_mouse_kinds[id] = ADBMOUSE_MICROSPEED; } + else if (adb_try_handler_change(id, 3)) { + printk("ADB mouse at %d, handler set to 3", id); + adb_mouse_kinds[id] = ADBMOUSE_MS_A3; + } + else if (adb_try_handler_change(id, 2)) { + printk("ADB mouse at %d, handler set to 2", id); + adb_mouse_kinds[id] = ADBMOUSE_STANDARD_200; + } else { printk("ADB mouse at %d, handler 1", id); adb_mouse_kinds[id] = ADBMOUSE_STANDARD_100; @@ -797,6 +839,8 @@ mackeyb_probe(void) if ((adb_mouse_kinds[id] == ADBMOUSE_TRACKBALLPRO) || (adb_mouse_kinds[id] == ADBMOUSE_MICROSPEED)) { init_microspeed(id); + } else if (adb_mouse_kinds[id] == ADBMOUSE_MS_A3) { + init_ms_a3(id); } else if (adb_mouse_kinds[id] == ADBMOUSE_EXTENDED) { /* * Register 1 is usually used for device 
@@ -808,7 +852,8 @@ mackeyb_probe(void) ADB_READREG(id, 1)); if ((req.reply_len) && - (req.reply[1] == 0x9a) && (req.reply[2] == 0x21)) + (req.reply[1] == 0x9a) && ((req.reply[2] == 0x21) + || (req.reply[2] == 0x20))) init_trackball(id); else if ((req.reply_len >= 4) && (req.reply[1] == 0x74) && (req.reply[2] == 0x70) && @@ -818,6 +863,14 @@ mackeyb_probe(void) (req.reply[1] == 0x4b) && (req.reply[2] == 0x4d) && (req.reply[3] == 0x4c) && (req.reply[4] == 0x31)) init_turbomouse(id); + else if ((req.reply_len == 9) && + (req.reply[1] == 0x4b) && (req.reply[2] == 0x4f) && + (req.reply[3] == 0x49) && (req.reply[4] == 0x54)){ + if (adb_try_handler_change(id, 0x42)) { + printk("\nADB MacAlly 2-button mouse at %d, handler set to 0x42", id); + adb_mouse_kinds[id] = ADBMOUSE_MACALLY2; + } + } } printk("\n"); } @@ -880,7 +933,7 @@ init_trackball(int id) { struct adb_request req; - printk(" (trackball)"); + printk(" (trackman/mouseman)"); adb_mouse_kinds[id] = ADBMOUSE_TRACKBALL; @@ -920,13 +973,10 @@ init_turbomouse(int id) adb_request(&req, NULL, ADBREQ_SYNC, 1, ADB_FLUSH(id)); - adb_request(&req, NULL, ADBREQ_SYNC, 3, - ADB_WRITEREG(id,3), 0x20 | id, 4); - - adb_request(&req, NULL, ADBREQ_SYNC, 1, ADB_FLUSH(id)); + adb_request(&req, NULL, ADBREQ_SYNC, 1, ADB_FLUSH(3)); adb_request(&req, NULL, ADBREQ_SYNC, 9, - ADB_WRITEREG(id,2), + ADB_WRITEREG(3,2), 0xe7, 0x8c, 0, @@ -936,10 +986,10 @@ init_turbomouse(int id) 0xff, 0x94); - adb_request(&req, NULL, ADBREQ_SYNC, 1, ADB_FLUSH(id)); + adb_request(&req, NULL, ADBREQ_SYNC, 1, ADB_FLUSH(3)); adb_request(&req, NULL, ADBREQ_SYNC, 9, - ADB_WRITEREG(id,2), + ADB_WRITEREG(3,2), 0xa5, 0x14, 0, @@ -990,3 +1040,17 @@ init_microspeed(int id) adb_request(&req, NULL, ADBREQ_SYNC, 1, ADB_FLUSH(id)); } +static void +init_ms_a3(int id) +{ + struct adb_request req; + + printk(" (Mouse Systems A3 Mouse, or compatible)"); + adb_request(&req, NULL, ADBREQ_SYNC, 3, + ADB_WRITEREG(id, 0x2), + 0x00, + 0x07); + + adb_request(&req, NULL, ADBREQ_SYNC, 
1, ADB_FLUSH(id)); + } + diff --git a/drivers/macintosh/macserial.c b/drivers/macintosh/macserial.c index 6ae478e217a2..2555d5383680 100644 --- a/drivers/macintosh/macserial.c +++ b/drivers/macintosh/macserial.c @@ -8,7 +8,7 @@ * * Receive DMA code by Takashi Oe . * - * $Id: macserial.c,v 1.24.2.3 1999/09/10 02:05:58 paulus Exp $ + * $Id: macserial.c,v 1.24.2.4 1999/10/19 04:36:42 paulus Exp $ */ #include @@ -130,6 +130,22 @@ static int serial_refcount; #define _INLINE_ inline +#ifdef SERIAL_DEBUG_OPEN +#define OPNDBG(fmt, arg...) printk(KERN_INFO fmt , ## arg) +#else +#define OPNDBG(fmt, arg...) do { } while (0) +#endif +#ifdef SERIAL_DEBUG_POWER +#define PWRDBG(fmt, arg...) printk(KERN_INFO fmt , ## arg) +#else +#define PWRDBG(fmt, arg...) do { } while (0) +#endif +#ifdef SERIAL_DEBUG_BAUDS +#define BAUDBG(fmt, arg...) printk(KERN_INFO fmt , ## arg) +#else +#define BAUDBG(fmt, arg...) do { } while (0) +#endif + static void probe_sccs(void); static void change_speed(struct mac_serial *info, struct termios *old); static void rs_wait_until_sent(struct tty_struct *tty, int timeout); @@ -318,7 +334,7 @@ static void dbdma_reset(volatile struct dbdma_regs *dma) * to it. 
- paulus) */ for (i = 200; i > 0; --i) - if (ld_le32(&dma->control) & RUN) + if (ld_le32(&dma->status) & RUN) udelay(1); } @@ -718,14 +734,10 @@ static int startup(struct mac_serial * info, int can_sleep) { int delay; -#ifdef SERIAL_DEBUG_OPEN - printk("startup() (ttyS%d, irq %d)\n", info->line, info->irq); -#endif + OPNDBG("startup() (ttyS%d, irq %d)\n", info->line, info->irq); if (info->flags & ZILOG_INITIALIZED) { -#ifdef SERIAL_DEBUG_OPEN - printk(" -> already inited\n"); -#endif + OPNDBG(" -> already inited\n"); return 0; } @@ -735,17 +747,13 @@ static int startup(struct mac_serial * info, int can_sleep) return -ENOMEM; } -#ifdef SERIAL_DEBUG_OPEN - printk("starting up ttyS%d (irq %d)...\n", info->line, info->irq); -#endif + OPNDBG("starting up ttyS%d (irq %d)...\n", info->line, info->irq); delay = set_scc_power(info, 1); setup_scc(info); -#ifdef SERIAL_DEBUG_OPEN - printk("enabling IRQ on ttyS%d (irq %d)...\n", info->line, info->irq); -#endif + OPNDBG("enabling IRQ on ttyS%d (irq %d)...\n", info->line, info->irq); info->flags |= ZILOG_INITIALIZED; enable_irq(info->irq); @@ -951,9 +959,7 @@ static int setup_scc(struct mac_serial * info) { unsigned long flags; -#ifdef SERIAL_DEBUG_OPEN - printk("setting up ttys%d SCC...\n", info->line); -#endif + OPNDBG("setting up ttys%d SCC...\n", info->line); save_flags(flags); cli(); /* Disable interrupts */ @@ -1050,16 +1056,11 @@ static int setup_scc(struct mac_serial * info) */ static void shutdown(struct mac_serial * info) { -#ifdef SERIAL_DEBUG_OPEN - printk("Shutting down serial port %d (irq %d)....\n", info->line, + OPNDBG("Shutting down serial port %d (irq %d)....\n", info->line, info->irq); -#endif if (!(info->flags & ZILOG_INITIALIZED)) { -#ifdef SERIAL_DEBUG_OPEN - printk("(already shutdown)\n"); -#endif - + OPNDBG("(already shutdown)\n"); return; } @@ -1125,24 +1126,27 @@ static int set_scc_power(struct mac_serial * info, int state) ones, at least whe not using the modem, this should be tested. 
*/ if (state) { -#ifdef SERIAL_DEBUG_POWER - printk(KERN_INFO "ttyS%02d: powering up hardware\n", info->line); -#endif + PWRDBG("ttyS%02d: powering up hardware\n", info->line); if (feature_test(info->dev_node, FEATURE_Serial_enable) == 0) { - feature_clear(info->dev_node, FEATURE_Serial_reset); - mdelay(5); feature_set(info->dev_node, FEATURE_Serial_enable); + mdelay(10); + feature_set(info->dev_node, FEATURE_Serial_reset); + mdelay(15); + feature_clear(info->dev_node, FEATURE_Serial_reset); + mdelay(10); } if (info->zs_chan_a == info->zs_channel) feature_set(info->dev_node, FEATURE_Serial_IO_A); else feature_set(info->dev_node, FEATURE_Serial_IO_B); - delay = 1; - + delay = 10; if (info->is_cobalt_modem){ - feature_set(info->dev_node, FEATURE_Modem_Reset); + mdelay(300); + feature_set(info->dev_node, FEATURE_Modem_power); mdelay(5); - feature_clear(info->dev_node, FEATURE_Modem_Reset); + feature_clear(info->dev_node, FEATURE_Modem_power); + mdelay(10); + feature_set(info->dev_node, FEATURE_Modem_power); delay = 2500; /* wait for 2.5s before using */ } #ifdef CONFIG_PMAC_PBOOK @@ -1150,33 +1154,11 @@ static int set_scc_power(struct mac_serial * info, int state) pmu_enable_irled(1); #endif /* CONFIG_PMAC_PBOOK */ } else { -#ifdef SERIAL_DEBUG_POWER - printk(KERN_INFO "ttyS%02d: shutting down hardware\n", info->line); -#endif -#ifdef CONFIG_KGDB - if (info->kgdb_channel) { -#ifdef SERIAL_DEBUG_POWER - printk(KERN_INFO " (canceled by KGDB)\n"); -#endif - return 0; - } -#endif -#ifdef CONFIG_XMON - if (!info->is_cobalt_modem) { -#ifdef SERIAL_DEBUG_POWER - printk(KERN_INFO " (canceled by XMON)\n"); -#endif - return 0; - } -#endif + PWRDBG("ttyS%02d: shutting down hardware\n", info->line); if (info->is_cobalt_modem) { -#ifdef SERIAL_DEBUG_POWER - printk(KERN_INFO "ttyS%02d: shutting down modem\n", info->line); -#endif - feature_set(info->dev_node, FEATURE_Modem_Reset); - mdelay(15); - feature_clear(info->dev_node, FEATURE_Modem_Reset); - mdelay(25); + PWRDBG("ttyS%02d: 
shutting down modem\n", info->line); + feature_clear(info->dev_node, FEATURE_Modem_power); + mdelay(10); } #ifdef CONFIG_PMAC_PBOOK if (info->is_pwbk_ir) @@ -1184,25 +1166,21 @@ static int set_scc_power(struct mac_serial * info, int state) #endif /* CONFIG_PMAC_PBOOK */ if (info->zs_chan_a == info->zs_channel) { -#ifdef SERIAL_DEBUG_POWER - printk(KERN_INFO "ttyS%02d: shutting down SCC channel A\n", info->line); -#endif + PWRDBG("ttyS%02d: shutting down SCC channel A\n", info->line); feature_clear(info->dev_node, FEATURE_Serial_IO_A); } else { -#ifdef SERIAL_DEBUG_POWER - printk(KERN_INFO "ttyS%02d: shutting down SCC channel B\n", info->line); -#endif + PWRDBG("ttyS%02d: shutting down SCC channel B\n", info->line); feature_clear(info->dev_node, FEATURE_Serial_IO_B); } /* XXX for now, shut down SCC core only on powerbooks */ if (is_powerbook && !(feature_test(info->dev_node, FEATURE_Serial_IO_A) || feature_test(info->dev_node, FEATURE_Serial_IO_B))) { -#ifdef SERIAL_DEBUG_POWER - printk(KERN_INFO "ttyS%02d: shutting down SCC core\n", info->line); -#endif + PWRDBG("ttyS%02d: shutting down SCC core\n", info->line); feature_set(info->dev_node, FEATURE_Serial_reset); - mdelay(10); + mdelay(15); + feature_clear(info->dev_node, FEATURE_Serial_reset); + mdelay(25); feature_clear(info->dev_node, FEATURE_Serial_enable); mdelay(5); } @@ -1249,9 +1227,7 @@ static void change_speed(struct mac_serial *info, struct termios *old_termios) info->zs_baud = baud; info->clk_divisor = 16; -#ifdef SERIAL_DEBUG_BAUDS - printk("set speed to %d bds, ", baud); -#endif + BAUDBG("set speed to %d bds, ", baud); switch (baud) { case ZS_CLOCK/16: /* 230400 */ @@ -1278,34 +1254,26 @@ static void change_speed(struct mac_serial *info, struct termios *old_termios) case CS5: info->curregs[3] |= Rx5; info->curregs[5] |= Tx5; -#ifdef SERIAL_DEBUG_BAUDS - printk("5 bits, "); -#endif + BAUDBG("5 bits, "); bits = 7; break; case CS6: info->curregs[3] |= Rx6; info->curregs[5] |= Tx6; -#ifdef 
SERIAL_DEBUG_BAUDS - printk("6 bits, "); -#endif + BAUDBG("6 bits, "); bits = 8; break; case CS7: info->curregs[3] |= Rx7; info->curregs[5] |= Tx7; -#ifdef SERIAL_DEBUG_BAUDS - printk("7 bits, "); -#endif + BAUDBG("7 bits, "); bits = 9; break; case CS8: default: /* defaults to 8 bits */ info->curregs[3] |= Rx8; info->curregs[5] |= Tx8; -#ifdef SERIAL_DEBUG_BAUDS - printk("8 bits, "); -#endif + BAUDBG("8 bits, "); bits = 10; break; } @@ -1316,21 +1284,15 @@ static void change_speed(struct mac_serial *info, struct termios *old_termios) if (cflag & CSTOPB) { info->curregs[4] |= SB2; bits++; -#ifdef SERIAL_DEBUG_BAUDS - printk("2 stop, "); -#endif + BAUDBG("2 stop, "); } else { info->curregs[4] |= SB1; -#ifdef SERIAL_DEBUG_BAUDS - printk("1 stop, "); -#endif + BAUDBG("1 stop, "); } if (cflag & PARENB) { bits++; info->curregs[4] |= PAR_ENA; -#ifdef SERIAL_DEBUG_BAUDS - printk("parity, "); -#endif + BAUDBG("parity, "); } if (!(cflag & PARODD)) { info->curregs[4] |= PAR_EVEN; @@ -1360,9 +1322,8 @@ static void change_speed(struct mac_serial *info, struct termios *old_termios) info->timeout = ((info->xmit_fifo_size*HZ*bits) / baud); info->timeout += HZ/50+1; /* Add .02 seconds of slop */ -#ifdef SERIAL_DEBUG_BAUDS - printk("timeout=%d/%ds, base:%d\n", (int)info->timeout, (int)HZ, (int)info->baud_base); -#endif + BAUDBG("timeout=%d/%ds, base:%d\n", (int)info->timeout, (int)HZ, + (int)info->baud_base); /* Load up the new values */ load_zsregs(info->zs_channel, info->curregs); @@ -1823,9 +1784,7 @@ static void rs_close(struct tty_struct *tty, struct file * filp) return; } -#ifdef SERIAL_DEBUG_OPEN - printk("rs_close ttys%d, count = %d\n", info->line, info->count); -#endif + OPNDBG("rs_close ttys%d, count = %d\n", info->line, info->count); if ((tty->count == 1) && (info->count != 1)) { /* * Uh, oh. 
tty->count is 1, which means that the tty @@ -1861,9 +1820,7 @@ static void rs_close(struct tty_struct *tty, struct file * filp) * Now we wait for the transmit buffer to clear; and we notify * the line discipline to only process XON/XOFF characters. */ -#ifdef SERIAL_DEBUG_OPEN - printk("waiting end of Tx... (timeout:%d)\n", info->closing_wait); -#endif + OPNDBG("waiting end of Tx... (timeout:%d)\n", info->closing_wait); tty->closing = 1; if (info->closing_wait != ZILOG_CLOSING_WAIT_NONE) { restore_flags(flags); @@ -1887,9 +1844,7 @@ static void rs_close(struct tty_struct *tty, struct file * filp) * Before we drop DTR, make sure the SCC transmitter * has completely drained. */ -#ifdef SERIAL_DEBUG_OPEN - printk("waiting end of Rx...\n"); -#endif + OPNDBG("waiting end of Rx...\n"); restore_flags(flags); rs_wait_until_sent(tty, info->timeout); save_flags(flags); cli(); @@ -2059,10 +2014,8 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp, */ retval = 0; add_wait_queue(&info->open_wait, &wait); -#ifdef SERIAL_DEBUG_OPEN - printk("block_til_ready before block: ttys%d, count = %d\n", + OPNDBG("block_til_ready before block: ttys%d, count = %d\n", info->line, info->count); -#endif cli(); if (!tty_hung_up_p(filp)) info->count--; @@ -2095,10 +2048,8 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp, retval = -ERESTARTSYS; break; } -#ifdef SERIAL_DEBUG_OPEN - printk("block_til_ready blocking: ttys%d, count = %d\n", + OPNDBG("block_til_ready blocking: ttys%d, count = %d\n", info->line, info->count); -#endif schedule(); } current->state = TASK_RUNNING; @@ -2106,10 +2057,8 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp, if (!tty_hung_up_p(filp)) info->count++; info->blocked_open--; -#ifdef SERIAL_DEBUG_OPEN - printk("block_til_ready after blocking: ttys%d, count = %d\n", + OPNDBG("block_til_ready after blocking: ttys%d, count = %d\n", info->line, info->count); -#endif if (retval) return retval; info->flags 
|= ZILOG_NORMAL_ACTIVE; @@ -2144,10 +2093,8 @@ static int rs_open(struct tty_struct *tty, struct file * filp) #endif if (serial_paranoia_check(info, tty->device, "rs_open")) return -ENODEV; -#ifdef SERIAL_DEBUG_OPEN - printk("rs_open %s%d, count = %d\n", tty->driver.name, info->line, + OPNDBG("rs_open %s%d, count = %d\n", tty->driver.name, info->line, info->count); -#endif info->count++; tty->driver_data = info; @@ -2188,10 +2135,8 @@ static int rs_open(struct tty_struct *tty, struct file * filp) retval = block_til_ready(tty, filp, info); if (retval) { -#ifdef SERIAL_DEBUG_OPEN - printk("rs_open returning after block_til_ready with %d\n", + OPNDBG("rs_open returning after block_til_ready with %d\n", retval); -#endif return retval; } @@ -2213,9 +2158,7 @@ static int rs_open(struct tty_struct *tty, struct file * filp) info->session = current->session; info->pgrp = current->pgrp; -#ifdef SERIAL_DEBUG_OPEN - printk("rs_open ttys%d successful...\n", info->line); -#endif + OPNDBG("rs_open ttys%d successful...\n", info->line); return 0; } @@ -2237,6 +2180,10 @@ chan_init(struct mac_serial *zss, struct mac_zschannel *zs_chan, struct device_node *ch = zss->dev_node; char *conn; int len; + struct slot_names_prop { + int count; + char name[1]; + } *slots; zss->irq = ch->intrs[0].line; zss->has_dma = 0; @@ -2262,6 +2209,10 @@ chan_init(struct mac_serial *zss, struct mac_zschannel *zs_chan, should do no harm anyway */ conn = get_property(ch, "AAPL,connector", &len); zss->is_pwbk_ir = conn && (strcmp(conn, "infrared") == 0); + /* 1999 Powerbook G3 has slot-names property instead */ + slots = (struct slot_names_prop *)get_property(ch, "slot-names", &len); + if (slots && slots->count > 0 && strcmp(slots->name, "IrDA") == 0) + zss->is_pwbk_ir = 1; if (zss->has_dma) { zss->dma_priv = NULL; @@ -2506,16 +2457,13 @@ int macserial_init(void) printk(" (powerbook IR)"); printk("\n"); +#ifndef CONFIG_XMON #ifdef CONFIG_KGDB - if (info->kgdb_channel) - continue; -#endif -#ifdef CONFIG_XMON 
- if (!info->is_cobalt_modem) - continue; -#endif + if (!info->kgdb_channel) +#endif /* CONFIG_KGDB */ /* By default, disable the port */ set_scc_power(info, 0); +#endif /* CONFIG_XMON */ } tmp_buf = 0; diff --git a/drivers/macintosh/mediabay.c b/drivers/macintosh/mediabay.c index 2e3d96444dd9..cc1b44d2ce08 100644 --- a/drivers/macintosh/mediabay.c +++ b/drivers/macintosh/mediabay.c @@ -38,6 +38,14 @@ static struct pmu_sleep_notifier mb_sleep_notifier = { #endif #undef MB_USE_INTERRUPTS +#undef MB_DEBUG +#define MB_IGNORE_SIGNALS + +#ifdef MB_DEBUG +#define MBDBG(fmt, arg...) printk(KERN_INFO fmt , ## arg) +#else +#define MBDBG(fmt, arg...) do { } while (0) +#endif struct media_bay_hw { unsigned char b0; @@ -49,17 +57,16 @@ struct media_bay_hw { struct media_bay_info { volatile struct media_bay_hw* addr; int content_id; - int previous_id; - int ready; + int state; int last_value; int value_count; - int reset_timer; + int timer; struct device_node* dev_node; #ifdef CONFIG_BLK_DEV_IDE unsigned long cd_base; int cd_index; int cd_irq; - int cd_timer; + int cd_retry; #endif }; @@ -73,31 +80,79 @@ int media_bay_count = 0; #ifdef CONFIG_BLK_DEV_IDE /* check the busy bit in the media-bay ide interface (assumes the media-bay contains an ide device) */ +//#define MB_IDE_READY(i) ((inb(media_bays[i].cd_base + 0x70) & 0xc0) == 0x40) #define MB_IDE_READY(i) ((inb(media_bays[i].cd_base + 0x70) & 0x80) == 0) #endif +/* Note: All delays are not in milliseconds and converted to HZ relative + * values by the macro below + */ +#define MS_TO_HZ(ms) ((ms * HZ) / 1000) + /* * Consider the media-bay ID value stable if it is the same for - * this many consecutive samples (at intervals of 1/HZ seconds). 
+ * this number of milliseconds */ -#define MB_STABLE_COUNT 4 +#define MB_STABLE_DELAY 40 + +/* Wait after powering up the media bay this delay in ms + * timeout bumped for some powerbooks + */ +#define MB_POWER_DELAY 200 /* * Hold the media-bay reset signal true for this many ticks * after a device is inserted before releasing it. */ -#define MB_RESET_COUNT 40 +#define MB_RESET_DELAY 40 + +/* + * Wait this long after the reset signal is released and before doing + * further operations. After this delay, the IDE reset signal is released + * too for an IDE device + */ +#define MB_SETUP_DELAY 100 /* * Wait this many ticks after an IDE device (e.g. CD-ROM) is inserted - * (or until the device is ready) before registering the IDE interface. + * (or until the device is ready) before waiting for busy bit to disappear + */ +#define MB_IDE_WAIT 1000 + +/* + * Timeout waiting for busy bit of an IDE device to go down + */ +#define MB_IDE_TIMEOUT 5000 + +/* + * Max retries of the full power up/down sequence for an IDE device + */ +#define MAX_CD_RETRIES 3 + +/* + * States of a media bay */ -#define MB_IDE_WAIT 1500 +enum { + mb_empty = 0, /* Idle */ + mb_powering_up, /* power bit set, waiting MB_POWER_DELAY */ + mb_enabling_bay, /* enable bits set, waiting MB_RESET_DELAY */ + mb_resetting, /* reset bit unset, waiting MB_SETUP_DELAY */ + mb_ide_resetting, /* IDE reset bit unser, waiting MB_IDE_WAIT */ + mb_ide_waiting, /* Waiting for BUSY bit to go away until MB_IDE_TIMEOUT */ + mb_up, /* Media bay full */ + mb_powering_down /* Powering down (avoid too fast down/up) */ +}; static void poll_media_bay(int which); static void set_media_bay(int which, int id); +static void set_mb_power(int which, int onoff); +static void media_bay_step(int i); static int media_bay_task(void *); +#ifdef MB_USE_INTERRUPTS +static void media_bay_intr(int irq, void *devid, struct pt_regs *regs); +#endif + /* * It seems that the bit for the media-bay interrupt in the IRQ_LEVEL * register is always set 
when there is something in the media bay. @@ -113,8 +168,7 @@ media_bay_init(void) struct device_node *np; int n,i; - for (i=0; in_addrs == 0) continue; media_bays[n].addr = (volatile struct media_bay_hw *) ioremap(np->addrs[0].address, sizeof(struct media_bay_hw)); #ifdef MB_USE_INTERRUPTS - if (np->n_intrs == 0) - { + if (np->n_intrs == 0) { printk(KERN_ERR "media bay %d has no irq\n",n); continue; } - if (request_irq(np_intrs[0].line, media_bay_intr, 0, "Media bay", NULL)) - { - printk(KERN_ERR "Couldn't get IRQ %d for media bay %d\n", irq, n); + if (request_irq(np->intrs[0].line, media_bay_intr, 0, "Media bay", (void *)n)) { + printk(KERN_ERR "Couldn't get IRQ %d for media bay %d\n", + np->intrs[0].line, n); continue; } #endif media_bay_count++; - set_media_bay(n, MB_CONTENTS(n)); - if (media_bays[n].content_id != MB_NO) { - feature_clear(media_bays[n].dev_node, FEATURE_Mediabay_reset); - udelay(500); - } - media_bays[n].ready = 1; - media_bays[n].previous_id = media_bays[n].content_id; - media_bays[n].reset_timer = 0; media_bays[n].dev_node = np; -#ifdef CONFIG_BLK_DEV_IDE - media_bays[n].cd_timer = 0; -#endif + + /* Force an immediate detect */ + set_mb_power(n,0); + mdelay(MB_POWER_DELAY); + out_8(&media_bays[n].addr->contents, 0x70); + mdelay(MB_STABLE_DELAY); + media_bays[n].content_id = MB_NO; + media_bays[n].last_value = MB_CONTENTS(n); + media_bays[n].value_count = MS_TO_HZ(MB_STABLE_DELAY); + media_bays[n].state = mb_empty; + do { + mdelay(1000/HZ); + media_bay_step(n); + } while((media_bays[n].state != mb_empty) && + (media_bays[n].state != mb_up)); + n++; np=np->next; } @@ -174,17 +231,66 @@ media_bay_init(void) } } -#if 0 -static void +#ifdef MB_USE_INTERRUPTS +static void __pmac media_bay_intr(int irq, void *devid, struct pt_regs *regs) { - int id = MB_CONTENTS(); - - if (id == MB_NO) - set_media_bay(id); } #endif +static void __pmac +set_mb_power(int which, int onoff) +{ + volatile struct media_bay_info* mb = &media_bays[which]; + + if (onoff) { + 
feature_set(mb->dev_node, FEATURE_Mediabay_power); + udelay(10); + feature_set(mb->dev_node, FEATURE_Mediabay_reset); + udelay(10); + mb->state = mb_powering_up; + MBDBG("mediabay%d: powering up\n", which); + } else { + feature_clear(mb->dev_node, FEATURE_Mediabay_floppy_enable); + feature_clear(mb->dev_node, FEATURE_Mediabay_IDE_enable); + feature_clear(mb->dev_node, FEATURE_Mediabay_PCI_enable); + feature_clear(mb->dev_node, FEATURE_SWIM3_enable); + feature_clear(mb->dev_node, FEATURE_Mediabay_power); + mb->state = mb_powering_down; + MBDBG("mediabay%d: powering down\n", which); + } + mb->timer = MS_TO_HZ(MB_POWER_DELAY); +} + +static void __pmac +set_media_bay(int which, int id) +{ + volatile struct media_bay_info* bay; + + bay = &media_bays[which]; + + switch (id) { + case MB_CD: + feature_set(bay->dev_node, FEATURE_Mediabay_IDE_enable); + udelay(10); + feature_set(bay->dev_node, FEATURE_Mediabay_IDE_reset); + printk(KERN_INFO "media bay %d contains a CD-ROM drive\n", which); + break; + case MB_FD: + case MB_FD1: + feature_set(bay->dev_node, FEATURE_Mediabay_floppy_enable); + feature_set(bay->dev_node, FEATURE_SWIM3_enable); + printk(KERN_INFO "media bay %d contains a floppy disk drive\n", which); + break; + case MB_NO: + break; + default: + printk(KERN_INFO "media bay %d contains an unknown device (%d)\n", + which, id); + break; + } +} + int __pmac check_media_bay(struct device_node *which_bay, int what) { @@ -194,7 +300,7 @@ check_media_bay(struct device_node *which_bay, int what) for (i=0; istate != mb_powering_down) + poll_media_bay(i); + + /* If timer expired or polling IDE busy, run state machine */ + if ((bay->state != mb_ide_waiting) && (bay->timer != 0) && ((--bay->timer) != 0)) + return; + + switch(bay->state) { + case mb_powering_up: + set_media_bay(i, bay->last_value); + bay->timer = MS_TO_HZ(MB_RESET_DELAY); + bay->state = mb_enabling_bay; + MBDBG("mediabay%d: enabling (kind:%d)\n", i, bay->content_id); + break; + case mb_enabling_bay: + 
feature_clear(bay->dev_node, FEATURE_Mediabay_reset); + bay->timer = MS_TO_HZ(MB_SETUP_DELAY); + bay->state = mb_resetting; + MBDBG("mediabay%d: waiting reset (kind:%d)\n", i, bay->content_id); + break; + + case mb_resetting: + if (bay->content_id != MB_CD) { + MBDBG("mediabay%d: bay is up (kind:%d)\n", i, bay->content_id); + bay->state = mb_up; + break; + } +#ifdef CONFIG_BLK_DEV_IDE + MBDBG("mediabay%d: waiting IDE reset (kind:%d)\n", i, bay->content_id); + feature_clear(bay->dev_node, FEATURE_Mediabay_IDE_reset); + bay->timer = MS_TO_HZ(MB_IDE_WAIT); + bay->state = mb_ide_resetting; +#else + printk(KERN_DEBUG "media-bay %d is ide (not compiled in kernel)\n", i); + set_mb_power(i, 0); +#endif // #ifdef CONFIG_BLK_DEV_IDE + break; + +#ifdef CONFIG_BLK_DEV_IDE + case mb_ide_resetting: + bay->timer = MS_TO_HZ(MB_IDE_TIMEOUT); + bay->state = mb_ide_waiting; + MBDBG("mediabay%d: waiting IDE ready (kind:%d)\n", i, bay->content_id); + break; + + case mb_ide_waiting: + if (bay->cd_base == 0) { + bay->timer = 0; + bay->state = mb_up; + MBDBG("mediabay%d: up before IDE init\n", i); + break; + } else if (MB_IDE_READY(i)) { + bay->timer = 0; + bay->state = mb_up; + if (bay->cd_index < 0) + bay->cd_index = ide_register(bay->cd_base, 0, bay->cd_irq); + if (bay->cd_index == -1) { + /* We eventually do a retry */ + bay->cd_retry++; + printk("IDE register error\n"); + set_mb_power(i, 0); + } else { + printk(KERN_DEBUG "media-bay %d is ide %d\n", i, bay->cd_index); + MBDBG("mediabay %d IDE ready\n", i); + } + break; + } + if (bay->timer == 0) { + printk("\nIDE Timeout in bay %d !\n", i); + MBDBG("mediabay%d: nIDE Timeout !\n", i); + set_mb_power(i, 0); + } + break; +#endif // #ifdef CONFIG_BLK_DEV_IDE + + case mb_powering_down: + bay->state = mb_empty; +#ifdef CONFIG_BLK_DEV_IDE + if (bay->cd_index >= 0) { + printk(KERN_DEBUG "Unregistering mb %d ide, index:%d\n", i, + bay->cd_index); + ide_unregister(bay->cd_index); + bay->cd_index = -1; + } + if (bay->cd_retry) { + if 
(bay->cd_retry > MAX_CD_RETRIES) { + /* Should add an error sound (sort of beep in dmasound) */ + printk("\nmedia-bay %d, IDE device badly inserted or unrecognised\n", i); + } else { + /* Force a new power down/up sequence */ + bay->content_id = MB_NO; + } + } +#endif + MBDBG("mediabay%d: end of power down\n", i); + break; + } +} + /* * This procedure runs as a kernel thread to poll the media bay * once each tick and register and unregister the IDE interface @@ -252,123 +476,57 @@ media_bay_set_ide_infos(struct device_node* which_bay, unsigned long base, int __pmac media_bay_task(void *x) { - volatile struct media_bay_info* bay; int i = 0; strcpy(current->comm, "media-bay"); - for (;;) - { - bay = &media_bays[i]; - poll_media_bay(i); - if (bay->content_id != bay->previous_id) { - bay->reset_timer = (bay->content_id != MB_NO) ? - MB_RESET_COUNT: 0; - bay->ready = 0; -#ifdef CONFIG_BLK_DEV_IDE - bay->cd_timer = 0; - if (bay->content_id != MB_CD && bay->cd_index >= 0) { - printk(KERN_DEBUG "Unregistering mb %d ide, index:%d\n", i, bay->cd_index); - ide_unregister(bay->cd_index); - bay->cd_index = -1; - } -#endif - } else if (bay->reset_timer) { - if (--bay->reset_timer == 0) { - feature_clear(bay->dev_node, FEATURE_Mediabay_reset); - bay->ready = 1; -#ifdef CONFIG_BLK_DEV_IDE - bay->cd_timer = 0; - if (bay->content_id == MB_CD && bay->cd_base != 0) - bay->cd_timer = MB_IDE_WAIT; -#endif - } -#ifdef CONFIG_BLK_DEV_IDE - } else if (bay->cd_timer && (--bay->cd_timer == 0 || MB_IDE_READY(i)) - && bay->cd_index < 0) { - bay->cd_timer = 0; - printk(KERN_DEBUG "Registering IDE, base:0x%08lx, irq:%d\n", bay->cd_base, bay->cd_irq); - printk("\n"); - bay->cd_index = ide_register(bay->cd_base, 0, bay->cd_irq); - if (bay->cd_index == -1) - printk("\nCD-ROM badly inserted. 
Remove it and try again !\n"); - else - printk(KERN_DEBUG "media-bay %d is ide %d\n", i, bay->cd_index); +#ifdef MB_IGNORE_SIGNALS + sigfillset(¤t->blocked); #endif - } - bay->previous_id = bay->content_id; + for (;;) { + media_bay_step(i); + + if (++i >= media_bay_count) { + i = 0; current->state = TASK_INTERRUPTIBLE; schedule_timeout(1); if (signal_pending(current)) return 0; - i = (i+1)%media_bay_count; + } } } void __pmac poll_media_bay(int which) { + volatile struct media_bay_info* bay = &media_bays[which]; int id = MB_CONTENTS(which); - if (id == media_bays[which].last_value) { - if (id != media_bays[which].content_id - && ++media_bays[which].value_count >= MB_STABLE_COUNT) { + if (id == bay->last_value) { + if (id != bay->content_id + && ++bay->value_count >= MS_TO_HZ(MB_STABLE_DELAY)) { /* If the device type changes without going thru "MB_NO", we force a pass by "MB_NO" to make sure things are properly reset */ - if ((id != MB_NO) && (media_bays[which].content_id != MB_NO)) { - set_media_bay(which, MB_NO); - udelay(500); + if ((id != MB_NO) && (bay->content_id != MB_NO)) { + id = MB_NO; + MBDBG("mediabay%d: forcing MB_NO\n", which); + } + MBDBG("mediabay%d: switching to %d\n", which, id); + set_mb_power(which, id != MB_NO); + bay->content_id = id; + if (id == MB_NO) { +#ifdef CONFIG_BLK_DEV_IDE + bay->cd_retry = 0; +#endif + printk(KERN_INFO "media bay %d is empty\n", which); } - set_media_bay(which, id); } } else { - media_bays[which].last_value = id; - media_bays[which].value_count = 0; + bay->last_value = id; + bay->value_count = 0; } } -static void __pmac -set_media_bay(int which, int id) -{ - volatile struct media_bay_info* bay; - - bay = &media_bays[which]; - - bay->content_id = id; - bay->last_value = id; - - switch (id) { - case MB_CD: - feature_set(bay->dev_node, FEATURE_Mediabay_enable); - feature_set(bay->dev_node, FEATURE_Mediabay_IDE_enable); - udelay(500); - feature_set(bay->dev_node, FEATURE_CD_power); - printk(KERN_INFO "media bay %d 
contains a CD-ROM drive\n", which); - break; - case MB_FD: - feature_set(bay->dev_node, FEATURE_Mediabay_enable); - feature_set(bay->dev_node, FEATURE_Mediabay_floppy_enable); - feature_set(bay->dev_node, FEATURE_SWIM3_enable); - printk(KERN_INFO "media bay %d contains a floppy disk drive\n", which); - break; - case MB_NO: - feature_clear(bay->dev_node, FEATURE_CD_power); - feature_clear(bay->dev_node, FEATURE_Mediabay_enable); - feature_clear(bay->dev_node, FEATURE_Mediabay_floppy_enable); - feature_clear(bay->dev_node, FEATURE_Mediabay_IDE_enable); - feature_clear(bay->dev_node, FEATURE_SWIM3_enable); - feature_set(bay->dev_node, FEATURE_Mediabay_reset); - printk(KERN_INFO "media bay %d is empty\n", which); - break; - default: - feature_set(bay->dev_node, FEATURE_Mediabay_enable); - printk(KERN_INFO "media bay %d contains an unknown device (%d)\n", - which, id); - break; - } - - udelay(500); -} #ifdef CONFIG_PMAC_PBOOK /* @@ -388,42 +546,34 @@ mb_notify_sleep(struct pmu_sleep_notifier *self, int when) case PBOOK_SLEEP_NOW: for (i=0; idev_node, FEATURE_Mediabay_enable); - feature_clear(bay->dev_node, FEATURE_Mediabay_IDE_enable); - feature_clear(bay->dev_node, FEATURE_SWIM3_enable); - feature_clear(bay->dev_node, FEATURE_Mediabay_floppy_enable); - feature_set(bay->dev_node, FEATURE_Mediabay_reset); - feature_clear(bay->dev_node, FEATURE_CD_power); - out_8(&media_bays[i].addr->contents, 0x70); + set_mb_power(i, 0); + mdelay(10); + out_8(&bay->addr->contents, 0x70); } break; case PBOOK_WAKE: for (i=0; idev_node, FEATURE_Mediabay_enable); - /* I suppose this is enough delay to stabilize MB_CONTENT ... 
*/ - mdelay(10); - /* We re-enable the bay using it's previous content only if - it did not change */ - if (MB_CONTENTS(i) == bay->content_id) { - set_media_bay(i, bay->content_id); - if (bay->content_id != MB_NO) { - mdelay(400); - /* Clear the bay reset */ - feature_clear(bay->dev_node, FEATURE_Mediabay_reset); - /* This small delay makes sure the device has time - to assert the BUSY bit (used by IDE sleep) */ - udelay(100); - /* We reset the state machine timers in case we were in the - middle of a wait loop */ - if (bay->reset_timer) - bay->reset_timer = MB_RESET_COUNT; -#ifdef CONFIG_BLK_DEV_IDE - if (bay->cd_timer) - bay->cd_timer = MB_IDE_WAIT; -#endif - } - } + /* We re-enable the bay using it's previous content + only if it did not change. Note those bozo timings, they seem + to help the 3400 get it right + */ + mdelay(MB_STABLE_DELAY); + out_8(&bay->addr->contents, 0x70); + mdelay(MB_STABLE_DELAY); + if (MB_CONTENTS(i) != bay->content_id) + continue; + set_mb_power(i, 1); + mdelay(MB_POWER_DELAY); + media_bays[i].last_value = bay->content_id; + media_bays[i].value_count = MS_TO_HZ(MB_STABLE_DELAY); + media_bays[i].timer = 0; + media_bays[i].cd_retry = 0; + do { + mdelay(1000/HZ); + media_bay_step(i); + } while((media_bays[i].state != mb_empty) && + (media_bays[i].state != mb_up)); } break; } diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c index cca84d2faf99..4acdc55c5fcf 100644 --- a/drivers/macintosh/via-pmu.c +++ b/drivers/macintosh/via-pmu.c @@ -9,6 +9,12 @@ * and the RTC (real time clock) chip. * * Copyright (C) 1998 Paul Mackerras and Fabio Riccardi. + * + * todo: - Check this driver for smp safety (new Core99 motherboards). + * - Cleanup synchro between VIA interrupt and GPIO-based PMU + * interrupt. 
+ * + * */ #include #include @@ -91,13 +97,16 @@ static unsigned char interrupt_data[32]; static unsigned char *reply_ptr; static int data_index; static int data_len; -static int adb_int_pending; +static volatile int adb_int_pending; static int pmu_adb_flags; static int adb_dev_map = 0; static struct adb_request bright_req_1, bright_req_2, bright_req_3; static struct device_node *vias; static int pmu_kind = PMU_UNKNOWN; static int pmu_fully_inited = 0; +static int pmu_has_adb, pmu_has_backlight; +static unsigned char *gpio_reg = NULL; +static int gpio_irq; int asleep; struct notifier_block *sleep_notifier_list; @@ -118,6 +127,7 @@ static void pmu_done(struct adb_request *req); static void pmu_handle_data(unsigned char *data, int len, struct pt_regs *regs); static void set_volume(int level); +static void gpio1_interrupt(int irq, void *arg, struct pt_regs *regs); #ifdef CONFIG_PMAC_PBOOK static void pmu_pass_intr(unsigned char *data, int len); #endif @@ -191,6 +201,7 @@ static char *pbook_type[] = { "PowerBook 2400/3400/3500(G3)", "PowerBook G3 Series", "1999 PowerBook G3", + "Core99 (iBook/iMac/G4)" }; int __openfirmware @@ -203,9 +214,6 @@ find_via_pmu() return 0; if (vias->next != 0) printk(KERN_WARNING "Warning: only using 1st via-pmu\n"); - - feature_set(vias, FEATURE_VIA_enable); - #if 0 { int i; @@ -218,13 +226,16 @@ find_via_pmu() printk("\n"); } #endif - if (vias->n_addrs != 1 || vias->n_intrs != 1) { + if (vias->n_addrs < 1 || vias->n_intrs < 1) { printk(KERN_ERR "via-pmu: %d addresses, %d interrupts!\n", vias->n_addrs, vias->n_intrs); if (vias->n_addrs < 1 || vias->n_intrs < 1) return 0; } + pmu_has_adb = 1; + pmu_has_backlight = 1; + if (vias->parent->name && ((strcmp(vias->parent->name, "ohare") == 0) || device_is_compatible(vias->parent, "ohare"))) pmu_kind = PMU_OHARE_BASED; @@ -232,12 +243,17 @@ find_via_pmu() pmu_kind = PMU_PADDINGTON_BASED; else if (device_is_compatible(vias->parent, "heathrow")) pmu_kind = PMU_HEATHROW_BASED; - else + else if 
(device_is_compatible(vias->parent, "Keylargo")) { + pmu_kind = PMU_KEYLARGO_BASED; + pmu_has_adb = (find_type_devices("adb") != NULL); + pmu_has_backlight = (find_type_devices("backlight") != NULL); + } else pmu_kind = PMU_UNKNOWN; via = (volatile unsigned char *) ioremap(vias->addrs->address, 0x2000); out_8(&via[IER], IER_CLR | 0x7f); /* disable all intrs */ + out_8(&via[IFR], 0x7f); /* clear IFR */ pmu_state = idle; @@ -289,6 +305,23 @@ void via_pmu_start(void) return; } + if (pmu_kind == PMU_KEYLARGO_BASED) { + struct device_node *gpio, *gpiop; + + gpiop = find_devices("gpio"); + if (gpiop && gpiop->n_addrs) { + gpio_reg = ioremap(gpiop->addrs->address, 0x10); + gpio = find_devices("extint-gpio1"); + if (gpio && gpio->parent == gpiop && gpio->n_intrs) { + gpio_irq = gpio->intrs[0].line; + if (request_irq(gpio_irq, gpio1_interrupt, 0, + "GPIO1/ADB", (void *)0)) + printk(KERN_ERR "pmu: can't get irq %d (GPIO1)\n", + gpio->intrs[0].line); + } + } + } + /* Enable interrupts */ out_8(&via[IER], IER_SET | SR_INT | CB1_INT); @@ -395,6 +428,8 @@ pmu_send_request(struct adb_request *req, int sync) } break; case ADB_PACKET: + if (!pmu_has_adb) + return -ENXIO; for (i = req->nbytes - 1; i > 1; --i) req->data[i+2] = req->data[i]; req->data[3] = req->nbytes - 2; @@ -425,7 +460,7 @@ pmu_adb_autopoll(int devs) { struct adb_request req; - if ((vias == NULL) || (!pmu_fully_inited)) + if ((vias == NULL) || (!pmu_fully_inited) || !pmu_has_adb) return -ENXIO; if (devs) { @@ -447,10 +482,9 @@ static int __openfirmware pmu_adb_reset_bus(void) { struct adb_request req; - long timeout; int save_autopoll = adb_dev_map; - if ((vias == NULL) || (!pmu_fully_inited)) + if ((vias == NULL) || (!pmu_fully_inited) || !pmu_has_adb) return -ENXIO; /* anyone got a better idea?? */ @@ -460,27 +494,17 @@ pmu_adb_reset_bus(void) req.done = NULL; req.data[0] = PMU_ADB_CMD; req.data[1] = 0; - req.data[2] = 3; /* ADB_BUSRESET ??? */ + req.data[2] = ADB_BUSRESET; /* 3 ??? 
*/ req.data[3] = 0; req.data[4] = 0; req.reply_len = 0; req.reply_expected = 1; - if (pmu_queue_request(&req) != 0) - { + if (pmu_queue_request(&req) != 0) { printk(KERN_ERR "pmu_adb_reset_bus: pmu_queue_request failed\n"); return -EIO; } while (!req.complete) pmu_poll(); - timeout = 100000; - while (!req.complete) { - if (--timeout < 0) { - printk(KERN_ERR "pmu_adb_reset_bus (reset): no response from PMU\n"); - return -EIO; - } - udelay(10); - pmu_poll(); - } if (save_autopoll != 0) pmu_adb_autopoll(save_autopoll); @@ -558,6 +582,8 @@ pmu_queue_request(struct adb_request *req) return 0; } +/* New PMU seems to be very sensitive to those timings, so we make sure + * PCI is flushed immediately */ static void __openfirmware send_byte(int x) { @@ -566,6 +592,7 @@ send_byte(int x) out_8(&v[ACR], in_8(&v[ACR]) | SR_OUT | SR_EXT); out_8(&v[SR], x); out_8(&v[B], in_8(&v[B]) & ~TREQ); /* assert TREQ */ + (void)in_8(&v[B]); } static void __openfirmware @@ -575,10 +602,11 @@ recv_byte() out_8(&v[ACR], (in_8(&v[ACR]) & ~SR_OUT) | SR_EXT); in_8(&v[SR]); /* resets SR */ - out_8(&v[B], in_8(&v[B]) & ~0x10); + out_8(&v[B], in_8(&v[B]) & ~TREQ); + (void)in_8(&v[B]); } -static int disable_poll; +static volatile int disable_poll; static void __openfirmware pmu_start() @@ -616,7 +644,8 @@ pmu_poll() return; save_flags(flags); cli(); - if (via[IFR] & (SR_INT | CB1_INT)) + if ((via[IFR] & (SR_INT | CB1_INT)) || + (gpio_reg && (in_8(gpio_reg + 0x9) & 0x02) == 0)) via_pmu_interrupt(0, 0, 0); restore_flags(flags); } @@ -626,7 +655,12 @@ via_pmu_interrupt(int irq, void *arg, struct pt_regs *regs) { int intr; int nloop = 0; + unsigned long flags; + /* Currently, we use brute-force cli() for syncing with GPIO + * interrupt. 
I'll make this smarter later, along with some + * spinlocks for SMP */ + save_flags(flags);cli(); ++disable_poll; while ((intr = in_8(&via[IFR])) != 0) { if (++nloop > 1000) { @@ -645,6 +679,9 @@ via_pmu_interrupt(int irq, void *arg, struct pt_regs *regs) out_8(&via[IFR], intr); } } + if (gpio_reg && (in_8(gpio_reg + 0x9) & 0x02) == 0) + adb_int_pending = 1; + if (pmu_state == idle) { if (adb_int_pending) { pmu_state = intack; @@ -655,6 +692,13 @@ via_pmu_interrupt(int irq, void *arg, struct pt_regs *regs) } } --disable_poll; + restore_flags(flags); +} + +static void __openfirmware +gpio1_interrupt(int irq, void *arg, struct pt_regs *regs) +{ + via_pmu_interrupt(0, 0, 0); } static void __openfirmware @@ -668,9 +712,17 @@ pmu_sr_intr(struct pt_regs *regs) out_8(&via[IFR], SR_INT); return; } - if (via[B] & TACK) + /* This one seems to appear with PMU99. According to OF methods, + * the protocol didn't change... + */ + if (via[B] & TACK) { + while ((in_8(&via[B]) & TACK) != 0) + ; +#if 0 printk(KERN_ERR "PMU: sr_intr but ack still high! 
(%x)\n", via[B]); +#endif + } /* reset TREQ and wait for TACK to go high */ out_8(&via[B], in_8(&via[B]) | TREQ); @@ -832,7 +884,7 @@ pmu_enable_backlight(int on) { struct adb_request req; - if (vias == NULL) + if ((vias == NULL) || !pmu_has_backlight) return; /* first call: get current backlight value */ @@ -853,6 +905,7 @@ pmu_enable_backlight(int on) printk(KERN_DEBUG "pmu: nvram returned bright: %d\n", backlight_level); break; case PMU_PADDINGTON_BASED: + case PMU_KEYLARGO_BASED: /* the G3 PB 1999 has a backlight node and chrp-structured nvram */ /* XXX should read macos's "blkt" property in nvram @@ -883,7 +936,7 @@ pmu_set_brightness(int level) { int bright; - if (vias == NULL) + if ((vias == NULL) || !pmu_has_backlight) return ; backlight_level = level; @@ -963,6 +1016,12 @@ pmu_shutdown(void) ; } +int +pmu_present(void) +{ + return via != 0; +} + #ifdef CONFIG_PMAC_PBOOK static LIST_HEAD(sleep_notifiers); @@ -995,7 +1054,7 @@ pmu_unregister_sleep_notifier(struct pmu_sleep_notifier* n) /* Sleep is broadcast last-to-first */ static int -broadcast_sleep(int when, int can_cancel) +broadcast_sleep(int when, int fallback) { int ret = PBOOK_SLEEP_OK; struct list_head *list; @@ -1005,8 +1064,13 @@ broadcast_sleep(int when, int can_cancel) list = list->prev) { current = list_entry(list, struct pmu_sleep_notifier, list); ret = current->notifier_call(current, when); - if (can_cancel && (ret != PBOOK_SLEEP_OK)) + if (ret != PBOOK_SLEEP_OK) { + for (; list != &sleep_notifiers; list = list->next) { + current = list_entry(list, struct pmu_sleep_notifier, list); + current->notifier_call(current, fallback); + } return ret; + } } return ret; } @@ -1101,6 +1165,24 @@ pbook_pci_restore(void) } } +#if 0 +/* N.B. 
This doesn't work on the 3400 */ +void pmu_blink(int n) +{ + struct adb_request req; + + for (; n > 0; --n) { + pmu_request(&req, NULL, 4, 0xee, 4, 0, 1); + while (!req.complete) pmu_poll(); + udelay(50000); + pmu_request(&req, NULL, 4, 0xee, 4, 0, 0); + while (!req.complete) pmu_poll(); + udelay(50000); + } + udelay(50000); +} +#endif + /* * Put the powerbook to sleep. */ @@ -1127,19 +1209,27 @@ int __openfirmware powerbook_sleep_G3(void) macio_base = (unsigned long) ioremap(macio->addrs[0].address, 0x40); + /* Notify device drivers */ + ret = broadcast_sleep(PBOOK_SLEEP_REQUEST, PBOOK_SLEEP_REJECT); + if (ret != PBOOK_SLEEP_OK) { + printk("pmu: sleep rejected\n"); + return -EBUSY; + } + /* Sync the disks. */ /* XXX It would be nice to have some way to ensure that - * nobody is dirtying any new buffers while we wait. */ + * nobody is dirtying any new buffers while we wait. + * BenH: Moved to _after_ sleep request and changed video + * drivers to vmalloc() during sleep request. This way, all + * vmalloc's are done before actual sleep of block drivers */ fsync_dev(0); - /* Notify device drivers */ - ret = broadcast_sleep(PBOOK_SLEEP_REQUEST, 1); + /* Sleep can fail now. 
May not be very robust but useful for debugging */ + ret = broadcast_sleep(PBOOK_SLEEP_NOW, PBOOK_WAKE); if (ret != PBOOK_SLEEP_OK) { - broadcast_sleep(PBOOK_SLEEP_REJECT, 0); - printk("pmu: sleep rejected\n"); + printk("pmu: sleep failed\n"); return -EBUSY; } - broadcast_sleep(PBOOK_SLEEP_NOW, 0); /* Give the disks a little time to actually finish writing */ for (wait = jiffies + (HZ/4); time_before(jiffies, wait); ) @@ -1191,6 +1281,10 @@ int __openfirmware powerbook_sleep_G3(void) pmcr1 &= ~(GRACKLE_PM|GRACKLE_DOZE|GRACKLE_SLEEP|GRACKLE_NAP); grackle_pcibios_write_config_word(0, 0, 0x70, pmcr1); + /* Make sure the PMU is idle */ + while (pmu_state != idle) + pmu_poll(); + sti(); #if 0 /* According to someone from Apple, this should not be needed, @@ -1202,8 +1296,8 @@ int __openfirmware powerbook_sleep_G3(void) /* Restore L2 cache */ if (save_l2cr) - _set_L2CR(save_l2cr | 0x200000); /* set invalidate bit */ - + _set_L2CR(save_l2cr | 0x200000); /* set invalidate bit */ + /* reenable interrupts */ sleep_restore_intrs(); @@ -1223,19 +1317,27 @@ int __openfirmware powerbook_sleep_3400(void) unsigned long p, wait; struct adb_request sleep_req; + /* Notify device drivers */ + ret = broadcast_sleep(PBOOK_SLEEP_REQUEST, PBOOK_SLEEP_REJECT); + if (ret != PBOOK_SLEEP_OK) { + printk("pmu: sleep rejected\n"); + return -EBUSY; + } + /* Sync the disks. */ /* XXX It would be nice to have some way to ensure that - * nobody is dirtying any new buffers while we wait. */ + * nobody is dirtying any new buffers while we wait. + * BenH: Moved to _after_ sleep request and changed video + * drivers to vmalloc() during sleep request. This way, all + * vmalloc's are done before actual sleep of block drivers */ fsync_dev(0); - /* Notify device drivers */ - ret = broadcast_sleep(PBOOK_SLEEP_REQUEST, 1); + /* Sleep can fail now. 
May not be very robust but useful for debugging */ + ret = broadcast_sleep(PBOOK_SLEEP_NOW, PBOOK_WAKE); if (ret != PBOOK_SLEEP_OK) { - broadcast_sleep(PBOOK_SLEEP_REJECT, 0); - printk("pmu: sleep rejected\n"); + printk("pmu: sleep failed\n"); return -EBUSY; } - broadcast_sleep(PBOOK_SLEEP_NOW, 0); /* Give the disks a little time to actually finish writing */ for (wait = jiffies + (HZ/4); time_before(jiffies, wait); ) @@ -1462,18 +1564,24 @@ static int pmu_ioctl(struct inode * inode, struct file *filp, error = powerbook_sleep_G3(); break; default: - error = ENOSYS; + error = -ENOSYS; } return error; case PMU_IOC_GET_BACKLIGHT: + if (!pmu_has_backlight) + return -ENOSYS; return put_user(backlight_level, (__u32 *)arg); case PMU_IOC_SET_BACKLIGHT: + if (!pmu_has_backlight) + return -ENOSYS; error = get_user(value, (__u32 *)arg); if (!error) pmu_set_brightness(value); return error; case PMU_IOC_GET_MODEL: return put_user(pmu_kind, (__u32 *)arg); + case PMU_IOC_HAS_ADB: + return put_user(pmu_has_adb, (__u32 *)arg); } return -EINVAL; } @@ -1498,3 +1606,70 @@ void pmu_device_init(void) } #endif /* CONFIG_PMAC_PBOOK */ +#if 0 +static inline void polled_handshake(volatile unsigned char *via) +{ + via[B] &= ~TREQ; eieio(); + while ((via[B] & TACK) != 0) + ; + via[B] |= TREQ; eieio(); + while ((via[B] & TACK) == 0) + ; +} + +static inline void polled_send_byte(volatile unsigned char *via, int x) +{ + via[ACR] |= SR_OUT | SR_EXT; eieio(); + via[SR] = x; eieio(); + polled_handshake(via); +} + +static inline int polled_recv_byte(volatile unsigned char *via) +{ + int x; + + via[ACR] = (via[ACR] & ~SR_OUT) | SR_EXT; eieio(); + x = via[SR]; eieio(); + polled_handshake(via); + x = via[SR]; eieio(); + return x; +} + +int +pmu_polled_request(struct adb_request *req) +{ + unsigned long flags; + int i, l, c; + volatile unsigned char *v = via; + + req->complete = 1; + c = req->data[0]; + l = pmu_data_len[c][0]; + if (l >= 0 && req->nbytes != l + 1) + return -EINVAL; + + 
save_flags(flags); cli(); + while (pmu_state != idle) + pmu_poll(); + + polled_send_byte(v, c); + if (l < 0) { + l = req->nbytes - 1; + polled_send_byte(v, l); + } + for (i = 1; i <= l; ++i) + polled_send_byte(v, req->data[i]); + + l = pmu_data_len[c][1]; + if (l < 0) + l = polled_recv_byte(v); + for (i = 0; i < l; ++i) + req->reply[i + req->reply_len] = polled_recv_byte(v); + + if (req->done) + (*req->done)(req); + + restore_flags(flags); + return 0; +} +#endif /* 0 */ diff --git a/drivers/net/3c501.c b/drivers/net/3c501.c index 47ad2e076502..fdf2eeef9a58 100644 --- a/drivers/net/3c501.c +++ b/drivers/net/3c501.c @@ -29,54 +29,64 @@ with a TX-TX optimisation to see if we can touch 180-200K/second as seems theoretically maximum. 19950402 Alan Cox + + Cleaned up for 2.3.x because we broke SMP now. + 20000208 Alan Cox + +*/ - Some notes on this thing if you have to hack it. [Alan] - - 1] Some documentation is available from 3Com. Due to the boards age - standard responses when you ask for this will range from 'be serious' - to 'give it to a museum'. The documentation is incomplete and mostly - of historical interest anyway. - - 2] The basic system is a single buffer which can be used to receive or - transmit a packet. A third command mode exists when you are setting - things up. - - 3] If it's transmitting it's not receiving and vice versa. In fact the - time to get the board back into useful state after an operation is - quite large. - - 4] The driver works by keeping the board in receive mode waiting for a - packet to arrive. When one arrives it is copied out of the buffer - and delivered to the kernel. The card is reloaded and off we go. - - 5] When transmitting dev->tbusy is set and the card is reset (from - receive mode) [possibly losing a packet just received] to command - mode. A packet is loaded and transmit mode triggered. 
The interrupt - handler runs different code for transmit interrupts and can handle - returning to receive mode or retransmissions (yes you have to help - out with those too). - - Problems: - There are a wide variety of undocumented error returns from the card - and you basically have to kick the board and pray if they turn up. Most - only occur under extreme load or if you do something the board doesn't - like (eg touching a register at the wrong time). - - The driver is less efficient than it could be. It switches through - receive mode even if more transmits are queued. If this worries you buy - a real Ethernet card. - - The combination of slow receive restart and no real multicast - filter makes the board unusable with a kernel compiled for IP - multicasting in a real multicast environment. That's down to the board, - but even with no multicast programs running a multicast IP kernel is - in group 224.0.0.1 and you will therefore be listening to all multicasts. - One nv conference running over that Ethernet and you can give up. -*/ +/** + * DOC: 3c501 Card Notes + * + * Some notes on this thing if you have to hack it. [Alan] + * + * Some documentation is available from 3Com. Due to the boards age + * standard responses when you ask for this will range from 'be serious' + * to 'give it to a museum'. The documentation is incomplete and mostly + * of historical interest anyway. + * + * The basic system is a single buffer which can be used to receive or + * transmit a packet. A third command mode exists when you are setting + * things up. + * + * If it's transmitting it's not receiving and vice versa. In fact the + * time to get the board back into useful state after an operation is + * quite large. + * + * The driver works by keeping the board in receive mode waiting for a + * packet to arrive. When one arrives it is copied out of the buffer + * and delivered to the kernel. The card is reloaded and off we go. 
+ * + * When transmitting dev->tbusy is set and the card is reset (from + * receive mode) [possibly losing a packet just received] to command + * mode. A packet is loaded and transmit mode triggered. The interrupt + * handler runs different code for transmit interrupts and can handle + * returning to receive mode or retransmissions (yes you have to help + * out with those too). + * + * DOC: Problems + * + * There are a wide variety of undocumented error returns from the card + * and you basically have to kick the board and pray if they turn up. Most + * only occur under extreme load or if you do something the board doesn't + * like (eg touching a register at the wrong time). + * + * The driver is less efficient than it could be. It switches through + * receive mode even if more transmits are queued. If this worries you buy + * a real Ethernet card. + * + * The combination of slow receive restart and no real multicast + * filter makes the board unusable with a kernel compiled for IP + * multicasting in a real multicast environment. That's down to the board, + * but even with no multicast programs running a multicast IP kernel is + * in group 224.0.0.1 and you will therefore be listening to all multicasts. + * One nv conference running over that Ethernet and you can give up. + * + */ static const char *version = - "3c501.c: 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov).\n"; + "3c501.c: 2000/02/08 Alan Cox (alan@redhat.com).\n"; /* * Braindamage remaining: @@ -210,6 +220,19 @@ struct net_local struct netdev_entry el1_drv = {"3c501", el1_probe1, EL1_IO_EXTENT, netcard_portlist}; #else +/** + * el1_probe: + * @dev: The device structure passed in to probe. + * + * This can be called from two places. The network layer will probe using + * a device structure passed in with the probe information completed. For a + * modular driver we use #init_module to fill in our own structure and probe + * for it. + * + * Returns 0 on success. 
ENXIO if asked not to probe and ENODEV if asked to + * probe and failing to find anything. + */ + int __init el1_probe(struct net_device *dev) { int i; @@ -233,8 +256,17 @@ int __init el1_probe(struct net_device *dev) } #endif -/* - * The actual probe. +/** + * el1_probe: + * @dev: The device structure to use + * @ioaddr: An I/O address to probe at. + * + * The actual probe. This is iterated over by #el1_probe in order to + * check all the applicable device locations. + * + * Returns 0 for a success, in which case the device is activated, + * EAGAIN if the IRQ is in use by another driver, and ENODEV if the + * board cannot be found. */ static int __init el1_probe1(struct net_device *dev, int ioaddr) @@ -310,11 +342,11 @@ static int __init el1_probe1(struct net_device *dev, int ioaddr) if (autoirq) dev->irq = autoirq; - printk("%s: %s EtherLink at %#lx, using %sIRQ %d.\n", dev->name, mname, dev->base_addr, + printk(KERN_INFO "%s: %s EtherLink at %#lx, using %sIRQ %d.\n", dev->name, mname, dev->base_addr, autoirq ? "auto":"assigned ", dev->irq); #ifdef CONFIG_IP_MULTICAST - printk("WARNING: Use of the 3c501 in a multicast kernel is NOT recommended.\n"); + printk(KERN_WARNING "WARNING: Use of the 3c501 in a multicast kernel is NOT recommended.\n"); #endif if (el_debug) @@ -351,13 +383,24 @@ static int __init el1_probe1(struct net_device *dev, int ioaddr) return 0; } -/* - * Open/initialize the board. +/** + * el1_open: + * @dev: device that is being opened + * + * When an ifconfig is issued which changes the device flags to include + * IFF_UP this function is called. It is only called when the change + * occurs, not when the interface remains up. #el1_close will be called + * when it goes down. + * + * Returns 0 for a successful open, or -EAGAIN if someone has run off + * with our interrupt line. 
*/ static int el_open(struct net_device *dev) { int ioaddr = dev->base_addr; + struct net_local *lp = (struct net_local *)dev->priv; + unsigned long flags; if (el_debug > 2) printk("%s: Doing el_open()...", dev->name); @@ -365,7 +408,9 @@ static int el_open(struct net_device *dev) if (request_irq(dev->irq, &el_interrupt, 0, "3c501", dev)) return -EAGAIN; + spin_lock_irqsave(&lp->lock, flags); el_reset(dev); + spin_unlock_irqrestore(&lp->lock, flags); dev->start = 1; @@ -374,6 +419,28 @@ static int el_open(struct net_device *dev) return 0; } +/** + * e1_start_xmit: + * @skb: The packet that is queued to be sent + * @dev: The 3c501 card we want to throw it down + * + * Attempt to send a packet to a 3c501 card. There are some interesting + * catches here because the 3c501 is an extremely old and therefore + * stupid piece of technology. + * + * If we are handling an interrupt on the other CPU we cannot load a packet + * as we may still be attempting to retrieve the last RX packet buffer. + * + * When a transmit times out we dump the card into control mode and just + * start again. It happens enough that it isnt worth logging. + * + * We avoid holding the spin locks when doing the packet load to the board. + * The device is very slow, and its DMA mode is even slower. If we held the + * lock while loading 1500 bytes onto the controller we would drop a lot of + * serial port characters. This requires we do extra locking, but we have + * no real choice. + */ + static int el_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct net_local *lp = (struct net_local *)dev->priv; @@ -385,7 +452,7 @@ static int el_start_xmit(struct sk_buff *skb, struct net_device *dev) if (dev->tbusy) { - if (jiffies - dev->trans_start < 20) + if (jiffies - dev->trans_start < HZ) { if (el_debug > 2) printk(" transmitter busy, deferred.\n"); @@ -473,9 +540,27 @@ load_it_again_sam: } -/* - * The typical workload of the driver: - * Handle the ether interface interrupts. 
+/**
+ * el_interrupt:
+ * @irq: Interrupt number
+ * @dev_id: The 3c501 that burped
+ * @regs: Register data (surplus to our requirements)
+ *
+ * Handle the ether interface interrupts. The 3c501 needs a lot more
+ * hand holding than most cards. In particular we get a transmit interrupt
+ * with a collision error because the board firmware isn't capable of rewinding
+ * its own transmit buffer pointers. It can however count to 16 for us.
+ *
+ * On the receive side the card is also very dumb. It has no buffering to
+ * speak of. We simply pull the packet out of its PIO buffer (which is slow)
+ * and queue it for the kernel. Then we reset the card for the next packet.
+ *
+ * We sometimes get surprise interrupts late both because the SMP IRQ delivery
+ * is message passing and because the card sometimes seems to deliver late. I
+ * think if it is part way through a receive and the mode is changed it carries
+ * on receiving and sends us an interrupt. We have to band aid all these cases
+ * to get a sensible 150kbytes/second performance. Even then you want a small
+ * TCP window.
 */
 
 static void el_interrupt(int irq, void *dev_id, struct pt_regs *regs)
@@ -656,9 +741,14 @@ static void el_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 }
 
 
-/*
- * We have a good packet. Well, not really "good", just mostly not broken.
- * We must check everything to see if it is good.
+/**
+ * el_receive:
+ * @dev: Device to pull the packets from
+ *
+ * We have a good packet. Well, not really "good", just mostly not broken.
+ * We must check everything to see if it is good. In particular we occasionally
+ * get wild packet sizes from the card. If the packet seems sane we PIO it
+ * off the card and queue it for the protocol layers.
 */
 
 static void el_receive(struct net_device *dev)
@@ -717,6 +807,15 @@ static void el_receive(struct net_device *dev)
 	return;
 }
 
+/**
+ * el_reset: Reset a 3c501 card
+ * @dev: The 3c501 card about to get zapped
+ *
+ * Even resetting a 3c501 isn't simple. 
When you activate reset it loses all + * its configuration. You must hold the lock when doing this. The function + * cannot take the lock itself as it is callable from the irq handler. + */ + static void el_reset(struct net_device *dev) { int ioaddr = dev->base_addr; @@ -732,16 +831,25 @@ static void el_reset(struct net_device *dev) } outw(0, RX_BUF_CLR); /* Set rx packet area to 0. */ - cli(); /* Avoid glitch on writes to CMD regs */ outb(TX_NORM, TX_CMD); /* tx irq on done, collision */ outb(RX_NORM, RX_CMD); /* Set Rx commands. */ inb(RX_STATUS); /* Clear status. */ inb(TX_STATUS); dev->interrupt = 0; dev->tbusy = 0; - sti(); } +/** + * el1_close: + * @dev: 3c501 card to shut down + * + * Close a 3c501 card. The IFF_UP flag has been cleared by the user via + * the SIOCSIFFLAGS ioctl. We stop any further transmissions being queued, + * and then disable the interrupts. Finally we reset the chip. The effects + * of the rest will be cleaned up by #el1_open. Always returns 0 indicating + * a success. + */ + static int el1_close(struct net_device *dev) { int ioaddr = dev->base_addr; @@ -751,7 +859,7 @@ static int el1_close(struct net_device *dev) dev->tbusy = 1; dev->start = 0; - + /* * Free and disable the IRQ. */ @@ -763,15 +871,31 @@ static int el1_close(struct net_device *dev) return 0; } +/** + * el1_get_stats: + * @dev: The card to get the statistics for + * + * In smarter devices this function is needed to pull statistics off the + * board itself. The 3c501 has no hardware statistics. We maintain them all + * so they are by definition always up to date. + * + * Returns the statistics for the card from the card private data + */ + static struct net_device_stats *el1_get_stats(struct net_device *dev) { struct net_local *lp = (struct net_local *)dev->priv; return &lp->stats; } -/* - * Set or clear the multicast filter for this adaptor. - * best-effort filtering. 
+/** + * set_multicast_list: + * @dev: The device to adjust + * + * Set or clear the multicast filter for this adaptor to use the best-effort + * filtering supported. The 3c501 supports only three modes of filtering. + * It always receives broadcasts and packets for itself. You can choose to + * optionally receive all packets, or all multicast packets on top of this. */ static void set_multicast_list(struct net_device *dev) @@ -812,6 +936,18 @@ static int irq=5; MODULE_PARM(io, "i"); MODULE_PARM(irq, "i"); +/** + * init_module: + * + * When the driver is loaded as a module this function is called. We fake up + * a device structure with the base I/O and interrupt set as if it was being + * called from Space.c. This minimises the extra code that would otherwise + * be required. + * + * Returns 0 for success or -EIO if a card is not found. Returning an error + * here also causes the module to be unloaded + */ + int init_module(void) { dev_3c501.irq=irq; @@ -821,6 +957,13 @@ int init_module(void) return 0; } +/** + * cleanup_module: + * + * The module is being unloaded. We unhook our network device from the system + * and then free up the resources we took when the card was found. + */ + void cleanup_module(void) { /* diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c index f1449299b5ce..a741009833f6 100644 --- a/drivers/net/3c527.c +++ b/drivers/net/3c527.c @@ -1,4 +1,3 @@ - /* 3c527.c: 3Com Etherlink/MC32 driver for Linux * * (c) Copyright 1998 Red Hat Software Inc @@ -18,11 +17,8 @@ static const char *version = "3c527.c:v0.07 2000/01/18 Alan Cox (alan@redhat.com)\n"; -/* - * Things you need - * o The databook. - * - * Traps for the unwary +/** + * DOC: Traps for the unwary * * The diagram (Figure 1-1) and the POS summary disagree with the * "Interrupt Level" section in the manual. @@ -30,6 +26,32 @@ static const char *version = * The documentation in places seems to miss things. 
In actual fact * I've always eventually found everything is documented, it just * requires careful study. + * + * DOC: Theory Of Operation + * + * The 3com 3c527 is a 32bit MCA bus mastering adapter with a large + * amount of on board intelligence that housekeeps a somewhat dumber + * Intel NIC. For performance we want to keep the transmit queue deep + * as the card can transmit packets while fetching others from main + * memory by bus master DMA. Transmission and reception are driven by + * ring buffers. When updating the ring we are required to do some + * housekeeping work using the mailboxes and the command register. + * + * The mailboxes provide a method for sending control requests to the + * card. The transmit mail box is used to update the transmit ring + * pointers and the receive mail box to update the receive ring + * pointers. The exec mailbox allows a variety of commands to be + * executed. Each command must complete before the next is executed. + * Primarily we use the exec mailbox for controlling the multicast lists. + * We have to do a certain amount of interesting hoop jumping as the + * multicast list changes can occur in interrupt state when the card + * has an exec command pending. We defer such events until the command + * completion interrupt. + * + * The control register is used to pass status information. It tells us + * the transmit and receive status for packets and allows us to control + * the card operation mode. You must stop the card when emptying the + * receive ring, or you will race with the ring buffer and lose packets. */ #include @@ -143,12 +165,14 @@ static void mc32_set_multicast_list(struct net_device *dev); static void mc32_reset_multicast_list(struct net_device *dev); -/* - * Check for a network adaptor of this type, and return '0' iff one exists. - * If dev->base_addr == 0, probe all likely locations. - * If dev->base_addr == 1, always return failure. 
- * If dev->base_addr == 2, allocate space for the device and return success - * (detachable devices only). +/** + * mc32_probe: + * @dev: device to probe + * + * Because MCA bus is a real bus and we can scan for cards we could do a + * single scan for all boards here. Right now we use the passed in device + * structure and scan for only one board. This needs fixing for modules + * in paticular. */ int __init mc32_probe(struct net_device *dev) @@ -182,11 +206,17 @@ int __init mc32_probe(struct net_device *dev) return -ENODEV; } -/* - * This is the real probe routine. Linux has a history of friendly device - * probes on the ISA bus. A good device probes avoids doing writes, and - * verifies that the correct device exists and functions. +/** + * mc32_probe1: + * @dev: Device structure to fill in + * @slot: The MCA bus slot being used by this card + * + * Decode the slot data and configure the card structures. Having done this we + * can reset the card and configure it. The card does a full self test cycle + * in firmware so we have to wait for it to return and post us either a + * failure case or some addresses we use to find the board internals. */ + static int __init mc32_probe1(struct net_device *dev, int slot) { static unsigned version_printed = 0; @@ -239,13 +269,14 @@ static int __init mc32_probe1(struct net_device *dev, int slot) return -ENODEV; } - /* Allocate a new 'dev' if needed. */ - if (dev == NULL) { - /* - * Don't allocate the private data here, it is done later - * This makes it easier to free the memory when this driver - * is used as a module. - */ + /* + * Don't allocate the private data here, it is done later + * This makes it easier to free the memory when this driver + * is used as a module. 
+ */ + + if(dev==NULL) + { dev = init_etherdev(0, 0); if (dev == NULL) return -ENOMEM; @@ -435,8 +466,13 @@ static int __init mc32_probe1(struct net_device *dev, int slot) } -/* - * Polled command stuff +/** + * mc32_ring_poll: + * @dev: The device to wait for + * + * Wait until a command we issues to the control register is completed. + * This actually takes very little time at all, which is fortunate as + * we often have to busy wait it. */ static void mc32_ring_poll(struct net_device *dev) @@ -446,25 +482,20 @@ static void mc32_ring_poll(struct net_device *dev) } -/* - * Send exec commands. This requires a bit of explaining. - * - * You feed the card a command, you wait, it interrupts you get a - * reply. All well and good. The complication arises because you use - * commands for filter list changes which come in at bh level from things - * like IPV6 group stuff. - * - * We have a simple state machine - * - * 0 - nothing issued - * 1 - command issued, wait reply - * 2 - reply waiting - reader then goes to state 0 - * 3 - command issued, trash reply. In which case the irq - * takes it back to state 0 - */ -/* - * Send command from interrupt state +/** + * mc32_command_nowait: + * @dev: The 3c527 to issue the command to + * @cmd: The command word to write to the mailbox + * @data: A data block if the command expects one + * @len: Length of the data block + * + * Send a command from interrupt state. If there is a command currently + * being executed then we return an error of -1. It simply isnt viable + * to wait around as commands may be slow. Providing we get in then + * we send the command and busy wait for the board to acknowledge that + * a command request is pending. We do not wait for the command to + * complete, just for the card to admit to noticing it. 
*/ static int mc32_command_nowait(struct net_device *dev, u16 cmd, void *data, int len) @@ -488,7 +519,35 @@ static int mc32_command_nowait(struct net_device *dev, u16 cmd, void *data, int } -/* +/** + * mc32_command: + * @dev: The 3c527 card to issue the command to + * @cmd: The command word to write to the mailbox + * @data: A data block if the command expects one + * @len: Length of the data block + * + * Sends exec commands in a user context. This permits us to wait around + * for the replies and also to wait for the command buffer to complete + * from a previous command before we execute our command. After our + * command completes we will complete any pending multicast reload + * we blocked off by hogging the exec buffer. + * + * You feed the card a command, you wait, it interrupts you get a + * reply. All well and good. The complication arises because you use + * commands for filter list changes which come in at bh level from things + * like IPV6 group stuff. + * + * We have a simple state machine + * + * 0 - nothing issued + * + * 1 - command issued, wait reply + * + * 2 - reply waiting - reader then goes to state 0 + * + * 3 - command issued, trash reply. In which case the irq + * takes it back to state 0 + * * Send command and block for results. On completion spot and reissue * multicasts */ @@ -548,8 +607,13 @@ static int mc32_command(struct net_device *dev, u16 cmd, void *data, int len) } -/* - * RX abort +/** + * mc32_rx_abort: + * @dev: 3c527 to abort + * + * Peforms a receive abort sequence on the card. In fact after some + * experimenting we now simply tell the card to suspend reception. When + * issuing aborts occasionally odd things happened. 
 */
 
 static void mc32_rx_abort(struct net_device *dev)
@@ -564,8 +628,13 @@ static void mc32_rx_abort(struct net_device *dev)
 }
 
 
-/*
- * RX enable
+/**
+ * mc32_rx_begin:
+ * @dev: 3c527 to enable
+ *
+ * We wait for any pending command to complete and then issue
+ * a start reception command to the board itself. At this point
+ * receive handling continues as it was before.
 */
 
 static void mc32_rx_begin(struct net_device *dev)
@@ -582,6 +651,18 @@ static void mc32_rx_begin(struct net_device *dev)
 	lp->rx_halted=0;
 }
 
+/**
+ * mc32_tx_abort:
+ * @dev: 3c527 to abort
+ *
+ * Performs a transmit abort sequence on the card. In fact after some
+ * experimenting we now simply tell the card to suspend transmits. When
+ * issuing aborts occasionally odd things happened. In theory we want
+ * an abort to be sure we can recycle our buffers. As it happens we
+ * just have to be careful to shut the card down on close, and
+ * boot it carefully from scratch on setup.
+ */
+
 static void mc32_tx_abort(struct net_device *dev)
 {
 	struct mc32_local *lp = (struct mc32_local *)dev->priv;
@@ -625,8 +706,16 @@ static void mc32_tx_abort(struct net_device *dev)
 	lp->tx_skb_top=lp->tx_skb_end=0;
 }
 
-/*
- * TX enable
+/**
+ * mc32_tx_begin:
+ * @dev: 3c527 to enable
+ *
+ * We wait for any pending command to complete and then issue
+ * a start transmit command to the board itself. At this point
+ * transmit handling continues as it was before. The ring must
+ * be set up before you do this and must have an end marker in it.
+ * It turns out we can avoid issuing this specific command when
+ * doing our setup so we avoid it.
 */
 
 static void mc32_tx_begin(struct net_device *dev)
@@ -648,8 +737,17 @@ static void mc32_tx_begin(struct net_device *dev)
 }
 
 
-/*
- * Load the rx ring
+/**
+ * mc32_load_rx_ring:
+ * @dev: 3c527 to build the ring for
+ *
+ * The card sets up the receive ring for us. We are required to
+ * use the ring it provides although we can change the size of the
+ * ring. 
+ *
+ * We allocate an sk_buff for each ring entry in turn and set the entry
+ * up for a single non s/g buffer. The first buffer we mark with the
+ * end marker bits. Finally we clear the rx mailbox.
 */
 
 static int mc32_load_rx_ring(struct net_device *dev)
@@ -686,6 +784,15 @@ static int mc32_load_rx_ring(struct net_device *dev)
 	return 0;
 }
 
+/**
+ * mc32_flush_rx_ring:
+ * @lp: Local data of 3c527 to flush the rx ring of
+ *
+ * Free the buffer for each ring slot. Because of the receive
+ * algorithm we use the ring will always be loaded with a full set
+ * of buffers.
+ */
+
 static void mc32_flush_rx_ring(struct mc32_local *lp)
 {
 	int i;
@@ -693,6 +800,15 @@ static void mc32_flush_rx_ring(struct mc32_local *lp)
 	kfree_skb(lp->rx_skb[i]);
 }
 
+/**
+ * mc32_flush_tx_ring:
+ * @lp: Local data of 3c527 to flush the tx ring of
+ *
+ * We have to consider two cases here. We want to free the pending
+ * buffers only. If the ring buffer head is past the start then the
+ * ring segment we wish to free wraps through zero.
+ */
+
 static void mc32_flush_tx_ring(struct mc32_local *lp)
{
 	int i;
@@ -711,9 +827,20 @@ static void mc32_flush_tx_ring(struct mc32_local *lp)
 	}
 }
 
-/*
- * Open/initialize the board. This is called (in the current kernel)
- * sometime after booting when the 'ifconfig' program is run.
+/**
+ * mc32_open
+ * @dev: device to open
+ *
+ * The user is trying to bring the card into ready state. This requires
+ * a brief dialogue with the card. Firstly we enable interrupts and then
+ * 'indications'. Without these enabled the card doesn't bother telling
+ * us what it has done. This had me puzzled for a week.
+ *
+ * We then load the network address and multicast filters. Turn on the
+ * workaround mode. This works around a bug in the 82586 - it asks the
+ * firmware to do so. It has a performance hit but is needed on busy
+ * [read most] lans. We load the ring with buffers then we kick it
+ * all off. 
*/ static int mc32_open(struct net_device *dev) @@ -781,6 +908,22 @@ static int mc32_open(struct net_device *dev) return 0; } +/** + * mc32_send_packet: + * @skb: buffer to transmit + * @dev: 3c527 to send it out of + * + * Transmit a buffer. This normally means throwing the buffer onto + * the transmit queue as the queue is quite large. If the queue is + * full then we set tx_busy and return. Once the interrupt handler + * gets messages telling it to reclaim transmit queue entries we will + * clear tx_busy and the kernel will start calling this again. + * + * We use cli rather than spinlocks. Since I have no access to an SMP + * MCA machine I don't plan to change it. It is probably the top + * performance hit for this driver on SMP however. + */ + static int mc32_send_packet(struct sk_buff *skb, struct net_device *dev) { struct mc32_local *lp = (struct mc32_local *)dev->priv; @@ -791,7 +934,7 @@ static int mc32_send_packet(struct sk_buff *skb, struct net_device *dev) * There should really be a "kick me" function call instead. */ int tickssofar = jiffies - dev->trans_start; - if (tickssofar < 5) + if (tickssofar < HZ/20) return 1; printk(KERN_WARNING "%s: transmit timed out?\n", dev->name); /* Try to restart the adaptor. */ @@ -864,11 +1007,42 @@ static int mc32_send_packet(struct sk_buff *skb, struct net_device *dev) return 0; } +/** + * mc32_update_stats: + * @dev: 3c527 to service + * + * When the board signals us that its statistics need attention we + * should query the table and clear it. In actual fact we currently + * track all our statistics in software and I haven't implemented it yet. + */ + static void mc32_update_stats(struct net_device *dev) { } - +/** + * mc32_rx_ring: + * @dev: 3c527 that needs its receive ring processing + * + * We have received one or more indications from the card that + * a receive has completed. The ring buffer thus contains dirty + * entries. 
Firstly we tell the card to stop receiving, then We walk + * the ring from the first filled entry, which is pointed to by the + * card rx mailbox and for each completed packet we will either copy + * it and pass it up the stack or if the packet is near MTU sized we + * allocate another buffer and flip the old one up the stack. + * + * We must succeed in keeping a buffer on the ring. If neccessary we + * will toss a received packet rather than lose a ring entry. Once the + * first packet that is unused is found we reload the mailbox with the + * buffer so that the card knows it can use the buffers again. Finally + * we set it receiving again. + * + * We must stop reception during the ring walk. I thought it would be + * neat to avoid it by clever tricks, but it turns out the event order + * on the card means you have to play by the manual. + */ + static void mc32_rx_ring(struct net_device *dev) { struct mc32_local *lp=dev->priv; @@ -877,6 +1051,12 @@ static void mc32_rx_ring(struct net_device *dev) volatile struct skb_header *p; u16 base; u16 top; + + /* Halt RX before walking the ring */ + + while(!(inb(ioaddr+HOST_STATUS)&HOST_STATUS_CRR)); + outb(3<<3, ioaddr+HOST_CMD); + while(inb(ioaddr+HOST_STATUS)&HOST_STATUS_CRR); top = base = lp->rx_box->data[0]; do @@ -927,13 +1107,6 @@ static void mc32_rx_ring(struct net_device *dev) } while(x++<48); - /* - * This is curious. It seems the receive stop and receive continue - * commands race against each other, even though we poll for - * command ready to be issued. The delay is hackish but is a workaround - * while I investigate in depth - */ - while(!(inb(ioaddr+HOST_STATUS)&HOST_STATUS_CRR)); lp->rx_box->mbox=0; lp->rx_box->data[0] = top; @@ -941,10 +1114,20 @@ static void mc32_rx_ring(struct net_device *dev) } -/* - * The typical workload of the driver: - * Handle the network interface interrupts. 
+/** + * mc32_interrupt: + * @irq: Interrupt number + * @dev_id: 3c527 that requires servicing + * @regs: Registers (unused) + * + * The 3c527 interrupts us for four reasons. The command register + * contains the message it wishes to send us packed into a single + * byte field. We keep reading status entries until we have processed + * all the transmit and control items, but simply count receive + * reports. When the receive reports are in we can call the mc32_rx_ring + * and empty the ring. This saves the overhead of multiple command requests */ + static void mc32_interrupt(int irq, void *dev_id, struct pt_regs * regs) { struct net_device *dev = dev_id; @@ -1071,8 +1254,23 @@ static void mc32_interrupt(int irq, void *dev_id, struct pt_regs * regs) } -/* The inverse routine to mc32_open(). */ - +/** + * mc32_close: + * @dev: 3c527 card to shut down + * + * The 3c527 is a bus mastering device. We must be careful how we + * shut it down. It may also be running shared interrupt so we have + * to be sure to silence it properly + * + * We abort any receive and transmits going on and then wait until + * any pending exec commands have completed in other code threads. + * In theory we can't get here while that is true, in practice I am + * paranoid + * + * We turn off the interrupt enable for the board to be sure it can't + * intefere with other devices. + */ + static int mc32_close(struct net_device *dev) { struct mc32_local *lp = (struct mc32_local *)dev->priv; @@ -1116,9 +1314,14 @@ static int mc32_close(struct net_device *dev) } -/* - * Get the current statistics. - * This may be called with the card open or closed. +/** + * mc32_get_stats: + * @dev: The 3c527 card to handle + * + * As we currently handle our statistics in software this one is + * easy to handle. With hardware statistics it will get messy + * as the get_stats call will need to send exec mailbox messages and + * need to lock out the multicast reloads. 
*/ static struct net_device_stats *mc32_get_stats(struct net_device *dev) @@ -1127,13 +1330,24 @@ static struct net_device_stats *mc32_get_stats(struct net_device *dev) return &lp->net_stats; } -/* - * Set or clear the multicast filter for this adaptor. +/** + * do_mc32_set_multicast_list: + * @dev: 3c527 device to load the list on + * @retry: indicates this is not the first call. + * + * Actually set or clear the multicast filter for this adaptor. The locking + * issues are handled by this routine. We have to track state as it may take + * multiple calls to get the command sequence completed. We just keep trying + * to schedule the loads until we manage to process them all. + * * num_addrs == -1 Promiscuous mode, receive all packets + * * num_addrs == 0 Normal mode, clear multicast list + * * num_addrs > 0 Multicast mode, receive normal and MC packets, * and do best-effort filtering. */ + static void do_mc32_set_multicast_list(struct net_device *dev, int retry) { struct mc32_local *lp = (struct mc32_local *)dev->priv; @@ -1189,11 +1403,30 @@ static void do_mc32_set_multicast_list(struct net_device *dev, int retry) } } +/** + * mc32_set_multicast_list: + * @dev: The 3c527 to use + * + * Commence loading the multicast list. This is called when the kernel + * changes the lists. It will override any pending list we are trying to + * load. + */ + static void mc32_set_multicast_list(struct net_device *dev) { do_mc32_set_multicast_list(dev,0); } +/** + * mc32_reset_multicast_list: + * @dev: The 3c527 to use + * + * Attempt the next step in loading the multicast lists. If this attempt + * fails to complete then it will be scheduled and this function called + * again later from elsewhere. 
+ */ + + static void mc32_reset_multicast_list(struct net_device *dev) { do_mc32_set_multicast_list(dev,1); @@ -1208,6 +1441,15 @@ static struct net_device this_device = { 0, 0, /* I/O address, IRQ */ 0, 0, 0, NULL, mc32_probe }; + +/** + * init_module: + * + * Probe and locate a 3c527 card. This really should probe and locate + * all the 3c527 cards in the machine not just one of them. Yes you can + * insmod multiple modules for now but its a hack. + */ + int init_module(void) { int result; @@ -1218,6 +1460,17 @@ int init_module(void) return 0; } +/** + * cleanup_module: + * + * Unloading time. We release the MCA bus resources and the interrupt + * at which point everything is ready to unload. The card must be stopped + * at this point or we would not have been called. When we unload we + * leave the card stopped but not totally shut down. When the card is + * initialized it must be rebooted or the rings reloaded before any + * transmit operations are allowed to start scribbling into memory. + */ + void cleanup_module(void) { int slot; @@ -1227,8 +1480,6 @@ void cleanup_module(void) /* * If we don't do this, we can't re-insmod it later. - * Release irq/dma here, when you have jumpered versions and - * allocate them in mc32_probe1(). 
*/ if (this_device.priv) diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c index 231aa46c52bd..bc155fe23d1b 100644 --- a/drivers/net/3c59x.c +++ b/drivers/net/3c59x.c @@ -504,6 +504,7 @@ static int mdio_read(long ioaddr, int phy_id, int location); static void mdio_write(long ioaddr, int phy_id, int location, int value); static void vortex_timer(unsigned long arg); static int vortex_start_xmit(struct sk_buff *skb, struct net_device *dev); +static void vortex_tx_timeout(struct net_device *dev); static int boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev); static int vortex_rx(struct net_device *dev); static int boomerang_rx(struct net_device *dev); @@ -975,6 +976,8 @@ static struct net_device *vortex_probe1(int pci_bus, int pci_devfn, /* The 3c59x-specific entries in the device structure. */ dev->open = &vortex_open; dev->hard_start_xmit = &vortex_start_xmit; + dev->tx_timeout = &vortex_tx_timeout; + dev->watchdog_timeo = TX_TIMEOUT; dev->stop = &vortex_close; dev->get_stats = &vortex_get_stats; dev->do_ioctl = &vortex_ioctl; @@ -1147,9 +1150,6 @@ vortex_open(struct net_device *dev) outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */ vp->in_interrupt = 0; - dev->tbusy = 0; - dev->interrupt = 0; - dev->start = 1; outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */ outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */ @@ -1169,6 +1169,8 @@ vortex_open(struct net_device *dev) if (vp->cb_fn_base) /* The PCMCIA people are idiots. 
*/ writel(0x8000, vp->cb_fn_base + 4); + netif_start_queue(dev); + MOD_INC_USE_COUNT; return 0; @@ -1329,7 +1331,7 @@ static void vortex_tx_timeout(struct net_device *dev) ioaddr + DownListPtr); if (vp->tx_full && (vp->cur_tx - vp->dirty_tx <= TX_RING_SIZE - 1)) { vp->tx_full = 0; - clear_bit(0, (void*)&dev->tbusy); + netif_wake_queue(dev); } outb(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold); outw(DownUnstall, ioaddr + EL3_CMD); @@ -1443,11 +1445,7 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev) struct vortex_private *vp = (struct vortex_private *)dev->priv; long ioaddr = dev->base_addr; - if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) { - if (jiffies - dev->trans_start >= TX_TIMEOUT) - vortex_tx_timeout(dev); - return 1; - } + netif_stop_queue(dev); /* Put out the doubleword header... */ outl(skb->len, ioaddr + TX_FIFO); @@ -1458,13 +1456,12 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev) outw(len, ioaddr + Wn7_MasterLen); vp->tx_skb = skb; outw(StartDMADown, ioaddr + EL3_CMD); - /* dev->tbusy will be cleared at the DMADone interrupt. */ } else { /* ... and the packet rounded to a doubleword. */ outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2); DEV_FREE_SKB(skb); if (inw(ioaddr + TxFree) > 1536) { - clear_bit(0, (void*)&dev->tbusy); + netif_wake_queue(dev); } else /* Interrupt us when the FIFO has room for max-sized packet. */ outw(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD); @@ -1506,11 +1503,9 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) struct vortex_private *vp = (struct vortex_private *)dev->priv; long ioaddr = dev->base_addr; - if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) { - if (jiffies - dev->trans_start >= TX_TIMEOUT) - vortex_tx_timeout(dev); - return 1; - } else { + netif_stop_queue(dev); + + if (1) { /* Calculate the next Tx descriptor entry. 
*/ int entry = vp->cur_tx % TX_RING_SIZE; struct boom_tx_desc *prev_entry = @@ -1533,6 +1528,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG); vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded); + /* Hmm... And some poor people try to use it on SMP machines 8) */ save_flags(flags); cli(); outw(DownStall, ioaddr + EL3_CMD); @@ -1549,11 +1545,11 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) restore_flags(flags); vp->cur_tx++; - if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) + if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) { vp->tx_full = 1; - else { /* Clear previous interrupt enable. */ + } else { /* Clear previous interrupt enable. */ prev_entry->status &= cpu_to_le32(~TxIntrUploaded); - clear_bit(0, (void*)&dev->tbusy); + netif_wake_queue(dev); } dev->trans_start = jiffies; vp->stats.tx_bytes += skb->len; @@ -1571,23 +1567,6 @@ static void vortex_interrupt(int irq, void *dev_id, struct pt_regs *regs) int latency, status; int work_done = max_interrupt_work; -#if defined(__i386__) - /* A lock to prevent simultaneous entry bug on Intel SMP machines. */ - if (test_and_set_bit(0, (void*)&dev->interrupt)) { - printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n", - dev->name); - dev->interrupt = 0; /* Avoid halting machine. */ - return; - } -#else - if (dev->interrupt) { - printk(KERN_ERR "%s: Re-entering the interrupt handler.\n", dev->name); - return; - } - dev->interrupt = 1; -#endif - - dev->interrupt = 1; ioaddr = dev->base_addr; latency = inb(ioaddr + Timer); status = inw(ioaddr + EL3_STATUS); @@ -1611,8 +1590,7 @@ static void vortex_interrupt(int irq, void *dev_id, struct pt_regs *regs) printk(KERN_DEBUG " TX room bit was handled.\n"); /* There's room in the FIFO for a full-sized packet. 
*/ outw(AckIntr | TxAvailable, ioaddr + EL3_CMD); - clear_bit(0, (void*)&dev->tbusy); - mark_bh(NET_BH); + netif_wake_queue(dev); } if (status & DownComplete) { @@ -1627,7 +1605,7 @@ static void vortex_interrupt(int irq, void *dev_id, struct pt_regs *regs) struct sk_buff *skb = vp->tx_skbuff[entry]; pci_unmap_single(vp->pdev, le32_to_cpu(vp->tx_ring[entry].addr), skb->len); - DEV_FREE_SKB(vp->tx_skbuff[entry]); + dev_kfree_skb_irq(vp->tx_skbuff[entry]); vp->tx_skbuff[entry] = 0; } /* vp->stats.tx_packets++; Counted below. */ @@ -1637,18 +1615,16 @@ static void vortex_interrupt(int irq, void *dev_id, struct pt_regs *regs) outw(AckIntr | DownComplete, ioaddr + EL3_CMD); if (vp->tx_full && (vp->cur_tx - dirty_tx <= TX_RING_SIZE - 1)) { vp->tx_full= 0; - clear_bit(0, (void*)&dev->tbusy); - mark_bh(NET_BH); + netif_wake_queue(dev); } } if (status & DMADone) { if (inw(ioaddr + Wn7_MasterStatus) & 0x1000) { outw(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */ pci_unmap_single(vp->pdev, vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3); - DEV_FREE_SKB(vp->tx_skb); /* Release the transfered buffer */ + dev_kfree_skb_irq(vp->tx_skb); /* Release the transfered buffer */ if (inw(ioaddr + TxFree) > 1536) { - clear_bit(0, (void*)&dev->tbusy); - mark_bh(NET_BH); + netif_wake_queue(dev); } else /* Interrupt when FIFO has room for max-sized packet. 
*/ outw(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD); } @@ -1686,11 +1662,6 @@ static void vortex_interrupt(int irq, void *dev_id, struct pt_regs *regs) printk(KERN_DEBUG "%s: exiting interrupt, status %4.4x.\n", dev->name, status); -#if defined(__i386__) - clear_bit(0, (void*)&dev->interrupt); -#else - dev->interrupt = 0; -#endif return; } @@ -1867,8 +1838,7 @@ vortex_close(struct net_device *dev) long ioaddr = dev->base_addr; int i; - dev->start = 0; - dev->tbusy = 1; + netif_stop_queue(dev); if (vortex_debug > 1) { printk(KERN_DEBUG"%s: vortex_close() status %4.4x, Tx status %2.2x.\n", @@ -1930,7 +1900,7 @@ static struct net_device_stats *vortex_get_stats(struct net_device *dev) struct vortex_private *vp = (struct vortex_private *)dev->priv; unsigned long flags; - if (dev->start) { + if (test_bit(LINK_STATE_START, &dev->state)) { save_flags(flags); cli(); update_stats(dev->base_addr, dev); diff --git a/drivers/net/8390.c b/drivers/net/8390.c index 04c15f61c904..f63e1c3109b2 100644 --- a/drivers/net/8390.c +++ b/drivers/net/8390.c @@ -167,7 +167,8 @@ int ei_open(struct net_device *dev) NS8390_init(dev, 1); /* Set the flag before we drop the lock, That way the IRQ arrives after its set and we get no silly warnings */ - dev->start = 1; + clear_bit(LINK_STATE_RXSEM, &dev->state); + netif_start_queue(dev); spin_unlock_irqrestore(&ei_local->page_lock, flags); ei_local->irqlock = 0; return 0; @@ -186,7 +187,7 @@ int ei_close(struct net_device *dev) spin_lock_irqsave(&ei_local->page_lock, flags); NS8390_init(dev, 0); spin_unlock_irqrestore(&ei_local->page_lock, flags); - dev->start = 0; + netif_stop_queue(dev); return 0; } @@ -198,13 +199,11 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev) unsigned long flags; /* - * We normally shouldn't be called if dev->tbusy is set, but the - * existing code does anyway. If it has been too long since the - * last Tx, we assume the board has died and kick it. We are - * bh_atomic here. 
+ * If it has been too long since the last Tx, we assume the + * board has died and kick it. */ - if (dev->tbusy) + if (test_bit(LINK_STATE_XOFF, &dev->state)) { /* Do timeouts, just like the 8003 driver. */ int txsr; int isr; @@ -225,7 +224,7 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev) ei_local->stat.tx_errors++; isr = inb(e8390_base+EN0_ISR); - if (dev->start == 0) + if (!test_bit(LINK_STATE_START, &dev->state)) { spin_unlock_irqrestore(&ei_local->page_lock, flags); printk(KERN_WARNING "%s: xmit on stopped card\n", dev->name); @@ -289,16 +288,6 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev) spin_lock(&ei_local->page_lock); - if (dev->interrupt) - { - printk(KERN_WARNING "%s: Tx request while isr active.\n",dev->name); - outb_p(ENISR_ALL, e8390_base + EN0_IMR); - spin_unlock(&ei_local->page_lock); - enable_irq(dev->irq); - ei_local->stat.tx_errors++; - dev_kfree_skb(skb); - return 0; - } ei_local->irqlock = 1; send_length = ETH_ZLEN < length ? length : ETH_ZLEN; @@ -332,10 +321,10 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev) else { /* We should never get here. */ if (ei_debug) - printk(KERN_DEBUG "%s: No Tx buffers free! irq=%ld tx1=%d tx2=%d last=%d\n", - dev->name, dev->interrupt, ei_local->tx1, ei_local->tx2, ei_local->lasttx); + printk(KERN_DEBUG "%s: No Tx buffers free! 
tx1=%d tx2=%d last=%d\n", + dev->name, ei_local->tx1, ei_local->tx2, ei_local->lasttx); ei_local->irqlock = 0; - dev->tbusy = 1; + netif_stop_queue(dev); outb_p(ENISR_ALL, e8390_base + EN0_IMR); spin_unlock(&ei_local->page_lock); enable_irq(dev->irq); @@ -368,7 +357,10 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev) } else ei_local->txqueue++; - dev->tbusy = (ei_local->tx1 && ei_local->tx2); + if (ei_local->tx1 && ei_local->tx2) + netif_stop_queue(dev); + else + netif_start_queue(dev); #else /* EI_PINGPONG */ @@ -382,7 +374,7 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev) ei_local->txing = 1; NS8390_trigger_send(dev, send_length, ei_local->tx_start_page); dev->trans_start = jiffies; - dev->tbusy = 1; + netif_stop_queue(dev); #endif /* EI_PINGPONG */ @@ -424,7 +416,7 @@ void ei_interrupt(int irq, void *dev_id, struct pt_regs * regs) spin_lock(&ei_local->page_lock); - if (dev->interrupt || ei_local->irqlock) + if (ei_local->irqlock) { #if 1 /* This might just be an interrupt for a PCI device sharing this line */ /* The "irqlock" check is only for testing. */ @@ -438,8 +430,7 @@ void ei_interrupt(int irq, void *dev_id, struct pt_regs * regs) return; } - - dev->interrupt = 1; + set_bit(LINK_STATE_RXSEM, &dev->state); /* Change to page 0 and read the intr status reg. */ outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD); @@ -451,7 +442,7 @@ void ei_interrupt(int irq, void *dev_id, struct pt_regs * regs) while ((interrupts = inb_p(e8390_base + EN0_ISR)) != 0 && ++nr_serviced < MAX_SERVICE) { - if (dev->start == 0) + if (!test_bit(LINK_STATE_START, &dev->state)) { printk(KERN_WARNING "%s: interrupt from stopped card\n", dev->name); interrupts = 0; @@ -500,7 +491,7 @@ void ei_interrupt(int irq, void *dev_id, struct pt_regs * regs) outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. 
*/ } } - dev->interrupt = 0; + clear_bit(LINK_STATE_RXSEM, &dev->state); spin_unlock(&ei_local->page_lock); return; } @@ -576,7 +567,7 @@ static void ei_tx_intr(struct net_device *dev) printk(KERN_ERR "%s: bogus last_tx_buffer %d, tx1=%d.\n", ei_local->name, ei_local->lasttx, ei_local->tx1); ei_local->tx1 = 0; - dev->tbusy = 0; + netif_start_queue(dev); if (ei_local->tx2 > 0) { ei_local->txing = 1; @@ -593,7 +584,7 @@ static void ei_tx_intr(struct net_device *dev) printk("%s: bogus last_tx_buffer %d, tx2=%d.\n", ei_local->name, ei_local->lasttx, ei_local->tx2); ei_local->tx2 = 0; - dev->tbusy = 0; + netif_start_queue(dev); if (ei_local->tx1 > 0) { ei_local->txing = 1; @@ -613,7 +604,7 @@ static void ei_tx_intr(struct net_device *dev) * Single Tx buffer: mark it free so another packet can be loaded. */ ei_local->txing = 0; - dev->tbusy = 0; + netif_start_queue(dev); #endif /* Minimize Tx latency: update the statistics after we restart TXing. */ @@ -638,7 +629,7 @@ static void ei_tx_intr(struct net_device *dev) if (status & ENTSR_OWC) ei_local->stat.tx_window_errors++; } - mark_bh (NET_BH); + netif_wake_queue(dev); } /* We have a good packet(s), get it/them out of the buffers. @@ -849,7 +840,7 @@ static struct net_device_stats *get_stats(struct net_device *dev) unsigned long flags; /* If the card is stopped, just return the present stats. */ - if (dev->start == 0) + if (!test_bit(LINK_STATE_START, &dev->state)) return &ei_local->stat; spin_lock_irqsave(&ei_local->page_lock,flags); @@ -945,7 +936,7 @@ static void do_set_multicast_list(struct net_device *dev) * Ultra32 EISA) appears to have this bug fixed. 
*/ - if (dev->start) + if (test_bit(LINK_STATE_START, &dev->state)) outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD); for(i = 0; i < 8; i++) @@ -1064,8 +1055,7 @@ void NS8390_init(struct net_device *dev, int startp) outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG); outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); - dev->tbusy = 0; - dev->interrupt = 0; + netif_start_queue(dev); ei_local->tx1 = ei_local->tx2 = 0; ei_local->txing = 0; diff --git a/drivers/net/Config.in b/drivers/net/Config.in index f041c81b112f..af3fa30a2417 100644 --- a/drivers/net/Config.in +++ b/drivers/net/Config.in @@ -33,6 +33,7 @@ if [ "$CONFIG_NET_ETHERNET" = "y" ]; then if [ "$CONFIG_PPC" = "y" ]; then tristate ' MACE (Power Mac ethernet) support' CONFIG_MACE tristate ' BMAC (G3 ethernet) support' CONFIG_BMAC + tristate ' GMAC (G4/iBook ethernet) support' CONFIG_GMAC tristate ' Symbios 53c885 (Synergy ethernet) support' CONFIG_NCR885E tristate ' National DP83902AV (Oak ethernet) support' CONFIG_OAKNET fi diff --git a/drivers/net/Makefile b/drivers/net/Makefile index d1585c870daf..0231c4e71551 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -248,6 +248,7 @@ obj-$(CONFIG_MACSONIC) += macsonic.o obj-$(CONFIG_MACMACE) += macmace.o obj-$(CONFIG_MAC89x0) += mac89x0.o obj-$(CONFIG_BMAC) += bmac.o +obj-$(CONFIG_GMAC) += gmac.o obj-$(CONFIG_NCR885E) += ncr885e.o obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o obj-$(CONFIG_OAKNET) += oaknet.o 8390.o diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c index 4a90a220a63b..1329b66c75de 100644 --- a/drivers/net/acenic.c +++ b/drivers/net/acenic.c @@ -1698,14 +1698,14 @@ static void ace_interrupt(int irq, void *dev_id, struct pt_regs *ptregs) * Ie. skip the comparison of the tx producer vs. the * consumer. 
*/ - if (ap->tx_full && dev->tbusy) { + if (ap->tx_full && + test_bit(LINK_STATE_XOFF, &dev->state)) { ap->tx_full = 0; /* * This does not need to be atomic (and expensive), * I've seen cases where it would fail otherwise ;-( */ - clear_bit(0, &dev->tbusy); - mark_bh(NET_BH); + netif_wake_queue(dev); /* * TX ring is no longer full, aka the @@ -1730,7 +1730,7 @@ static void ace_interrupt(int irq, void *dev_id, struct pt_regs *ptregs) * This has to go last in the interrupt handler and run with * the spin lock released ... what lock? */ - if (dev->start) { + if (test_bit(LINK_STATE_START, &dev->state)) { int cur_size; int run_bh = 0; @@ -1835,10 +1835,6 @@ static int ace_open(struct net_device *dev) ace_issue_cmd(regs, &cmd); #endif - dev->tbusy = 0; - dev->interrupt = 0; - dev->start = 1; - MOD_INC_USE_COUNT; /* @@ -1868,8 +1864,7 @@ static int ace_close(struct net_device *dev) unsigned long flags; short i; - dev->start = 0; - set_bit(0, &dev->tbusy); + netif_stop_queue(dev); ap = (struct ace_private *)dev->priv; regs = ap->regs; @@ -1928,9 +1923,6 @@ static int ace_start_xmit(struct sk_buff *skb, struct net_device *dev) unsigned long addr; u32 idx, flagsize; - if (test_and_set_bit(0, &dev->tbusy)) - return 1; - idx = ap->tx_prd; if ((idx + 1) % TX_RING_ENTRIES == ap->tx_ret_csm) { @@ -1976,7 +1968,7 @@ static int ace_start_xmit(struct sk_buff *skb, struct net_device *dev) /* * No need for it to be atomic - seems it needs to be */ - clear_bit(0, &dev->tbusy); + netif_stop_queue(dev); } dev->trans_start = jiffies; @@ -2148,7 +2140,7 @@ static int ace_set_mac_addr(struct net_device *dev, void *p) u16 *da; struct cmd cmd; - if(dev->start) + if(test_bit(LINK_STATE_START, &dev->state)) return -EBUSY; memcpy(dev->dev_addr, addr->sa_data,dev->addr_len); diff --git a/drivers/net/de4x5.c b/drivers/net/de4x5.c index a42f1bc7ff42..a6df21d81b7a 100644 --- a/drivers/net/de4x5.c +++ b/drivers/net/de4x5.c @@ -1403,8 +1403,6 @@ de4x5_open(struct net_device *dev) } } - dev->tbusy = 
0; - dev->start = 1; lp->interrupt = UNMASK_INTERRUPTS; dev->trans_start = jiffies; @@ -1440,7 +1438,7 @@ static int de4x5_init(struct net_device *dev) { /* Lock out other processes whilst setting up the hardware */ - test_and_set_bit(0, (void *)&dev->tbusy); + netif_stop_queue(dev); de4x5_sw_reset(dev); @@ -1536,7 +1534,7 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev) int status = 0; u_long flags = 0; - test_and_set_bit(0, (void*)&dev->tbusy); /* Stop send re-tries */ + netif_stop_queue(dev); if (lp->tx_enable == NO) { /* Cannot send for now */ return -1; } @@ -1555,14 +1553,15 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev) return -1; /* Transmit descriptor ring full or stale skb */ - if (dev->tbusy || (u_long) lp->tx_skb[lp->tx_new] > 1) { + if (test_bit(LINK_STATE_XOFF, &dev->state) || + (u_long) lp->tx_skb[lp->tx_new] > 1) { if (lp->interrupt) { de4x5_putb_cache(dev, skb); /* Requeue the buffer */ } else { de4x5_put_cache(dev, skb); } if (de4x5_debug & DEBUG_TX) { - printk("%s: transmit busy, lost media or stale skb found:\n STS:%08x\n tbusy:%ld\n IMR:%08x\n OMR:%08x\n Stale skb: %s\n",dev->name, inl(DE4X5_STS), dev->tbusy, inl(DE4X5_IMR), inl(DE4X5_OMR), ((u_long) lp->tx_skb[lp->tx_new] > 1) ? "YES" : "NO"); + printk("%s: transmit busy, lost media or stale skb found:\n STS:%08x\n tbusy:%d\n IMR:%08x\n OMR:%08x\n Stale skb: %s\n",dev->name, inl(DE4X5_STS), test_bit(LINK_STATE_XOFF, &dev->state), inl(DE4X5_IMR), inl(DE4X5_OMR), ((u_long) lp->tx_skb[lp->tx_new] > 1) ? 
"YES" : "NO"); } } else if (skb->len > 0) { /* If we already have stuff queued locally, use that first */ @@ -1571,9 +1570,9 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev) skb = de4x5_get_cache(dev); } - while (skb && !dev->tbusy && (u_long) lp->tx_skb[lp->tx_new] <= 1) { + while (skb && !test_bit(LINK_STATE_XOFF, &dev->state) && (u_long) lp->tx_skb[lp->tx_new] <= 1) { spin_lock_irqsave(&lp->lock, flags); - test_and_set_bit(0, (void*)&dev->tbusy); + netif_stop_queue(dev); load_packet(dev, skb->data, TD_IC | TD_LS | TD_FS | skb->len, skb); lp->stats.tx_bytes += skb->len; outl(POLL_DEMAND, DE4X5_TPD);/* Start the TX */ @@ -1582,7 +1581,7 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev) dev->trans_start = jiffies; if (TX_BUFFS_AVAIL) { - dev->tbusy = 0; /* Another pkt may be queued */ + netif_start_queue(dev); /* Another pkt may be queued */ } skb = de4x5_get_cache(dev); spin_unlock_irqrestore(&lp->lock, flags); @@ -1659,7 +1658,7 @@ de4x5_interrupt(int irq, void *dev_id, struct pt_regs *regs) /* Load the TX ring with any locally stored packets */ if (!test_and_set_bit(0, (void *)&lp->cache.lock)) { - while (lp->cache.skb && !dev->tbusy && lp->tx_enable) { + while (lp->cache.skb && !test_bit(LINK_STATE_XOFF, &dev->state) && lp->tx_enable) { de4x5_queue_pkt(de4x5_get_cache(dev), dev); } lp->cache.lock = 0; @@ -1802,9 +1801,12 @@ de4x5_tx(struct net_device *dev) lp->tx_old = (++lp->tx_old) % lp->txRingSize; } - if (TX_BUFFS_AVAIL && dev->tbusy) { /* Any resources available? */ - dev->tbusy = 0; /* Clear TX busy flag */ - if (lp->interrupt) mark_bh(NET_BH); + /* Any resources available? 
*/ + if (TX_BUFFS_AVAIL && test_bit(LINK_STATE_XOFF, &dev->state)) { + if (lp->interrupt) + netif_wake_queue(dev); + else + netif_start_queue(dev); } return 0; @@ -1885,8 +1887,8 @@ de4x5_close(struct net_device *dev) s32 imr, omr; disable_ast(dev); - dev->start = 0; - dev->tbusy = 1; + + netif_stop_queue(dev); if (de4x5_debug & DEBUG_CLOSE) { printk("%s: Shutting down ethercard, status was %8.8x.\n", @@ -3288,10 +3290,10 @@ de4x5_init_connection(struct net_device *dev) de4x5_rst_desc_ring(dev); de4x5_setup_intr(dev); lp->tx_enable = YES; - dev->tbusy = 0; spin_unlock_irqrestore(&lp->lock, flags); outl(POLL_DEMAND, DE4X5_TPD); - mark_bh(NET_BH); + + netif_wake_queue(dev); return; } @@ -5597,12 +5599,13 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) } build_setup_frame(dev, PHYS_ADDR_ONLY); /* Set up the descriptor and give ownership to the card */ - while (test_and_set_bit(0, (void *)&dev->tbusy) != 0) barrier(); + while (test_and_set_bit(LINK_STATE_XOFF, &dev->state) != 0) + barrier(); load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET | SETUP_FRAME_LEN, (struct sk_buff *)1); lp->tx_new = (++lp->tx_new) % lp->txRingSize; outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */ - dev->tbusy = 0; /* Unlock the TX ring */ + netif_start_queue(dev); /* Unlock the TX ring */ break; case DE4X5_SET_PROM: /* Set Promiscuous Mode */ @@ -5754,7 +5757,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) } tmp.addr[j++] = lp->txRingSize; - tmp.addr[j++] = dev->tbusy; + tmp.addr[j++] = test_bit(LINK_STATE_XOFF, &dev->state); ioc->len = j; if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT; diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c index 745df62dac1a..95712c29ac88 100644 --- a/drivers/net/eepro100.c +++ b/drivers/net/eepro100.c @@ -81,11 +81,11 @@ static int debug = -1; /* The debug level */ #include #include -#include #include #include #include #include +#include #ifdef HAS_PCI_NETIF #include "pci-netif.h" 
#else @@ -466,7 +466,6 @@ struct speedo_private { struct descriptor *mc_setup_frm; /* ..multicast setup frame. */ int mc_setup_busy; /* Avoid double-use of setup frame. */ dma_addr_t mc_setup_dma; - unsigned long in_interrupt; /* Word-aligned dev->interrupt */ char rx_mode; /* Current PROMISC/ALLMULTI setting. */ unsigned int tx_full:1; /* The Tx queue is full. */ unsigned int full_duplex:1; /* Full-duplex operation requested. */ @@ -478,7 +477,6 @@ struct speedo_private { unsigned short phy[2]; /* PHY media interfaces available. */ unsigned short advertising; /* Current PHY advertised caps. */ unsigned short partner; /* Link partner caps. */ - long last_reset; }; /* The parameters for a CmdConfigure operation. @@ -846,6 +844,8 @@ static struct net_device *speedo_found1(int pci_bus, int pci_devfn, /* The Speedo-specific entries in the device structure. */ dev->open = &speedo_open; dev->hard_start_xmit = &speedo_start_xmit; + dev->tx_timeout = &speedo_tx_timeout; + dev->watchdog_timeo = TX_TIMEOUT; dev->stop = &speedo_close; dev->get_stats = &speedo_get_stats; dev->set_multicast_list = &set_rx_mode; @@ -942,7 +942,6 @@ speedo_open(struct net_device *dev) sp->last_cmd = 0; sp->tx_full = 0; spin_lock_init(&sp->lock); - sp->in_interrupt = 0; /* .. we can safely take handler calls during init. */ if (request_irq(dev->irq, &speedo_interrupt, SA_SHIRQ, dev->name, dev)) { @@ -975,9 +974,8 @@ speedo_open(struct net_device *dev) /* Fire up the hardware. */ speedo_resume(dev); - dev->tbusy = 0; - dev->interrupt = 0; - dev->start = 1; + clear_bit(LINK_STATE_RXSEM, &dev->state); + netif_start_queue(dev); /* Setup the chip and configure the multicast list. */ sp->mc_setup_frm = NULL; @@ -1099,12 +1097,6 @@ static void speedo_timer(unsigned long data) printk(KERN_DEBUG "%s: Media control tick, status %4.4x.\n", dev->name, inw(ioaddr + SCBStatus)); } - /* This has a small false-trigger window. 
*/ - if (test_bit(0, (void*)&dev->tbusy) && - (jiffies - dev->trans_start) > TX_TIMEOUT) { - speedo_tx_timeout(dev); - sp->last_reset = jiffies; - } if (sp->rx_mode < 0 || (sp->rx_bug && jiffies - sp->last_rx_time > 2*HZ)) { /* We haven't received a packet in a Long Time. We might have been @@ -1242,22 +1234,6 @@ speedo_start_xmit(struct sk_buff *skb, struct net_device *dev) long ioaddr = dev->base_addr; int entry; - /* Block a timer-based transmit from overlapping. This could better be - done with atomic_swap(1, dev->tbusy), but set_bit() works as well. - If this ever occurs the queue layer is doing something evil! */ - if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) { - int tickssofar = jiffies - dev->trans_start; - if (tickssofar < TX_TIMEOUT - 2) - return 1; - if (tickssofar < TX_TIMEOUT) { - /* Reap sent packets from the full Tx queue. */ - outw(SCBTriggerIntr, ioaddr + SCBCmd); - return 1; - } - speedo_tx_timeout(dev); - return 1; - } - /* Caution: the write order is important here, set the base address with the "ownership" bits last. */ @@ -1296,10 +1272,10 @@ speedo_start_xmit(struct sk_buff *skb, struct net_device *dev) sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry]; last_cmd->cmd_status &= cpu_to_le32(~(CmdSuspend | CmdIntr)); } - if (sp->cur_tx - sp->dirty_tx >= TX_QUEUE_LIMIT) + if (sp->cur_tx - sp->dirty_tx >= TX_QUEUE_LIMIT) { sp->tx_full = 1; - else - clear_bit(0, (void*)&dev->tbusy); + netif_stop_queue(dev); + } spin_unlock_irqrestore(&sp->lock, flags); } wait_for_cmd_done(ioaddr + SCBCmd); @@ -1327,16 +1303,6 @@ static void speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs) ioaddr = dev->base_addr; sp = (struct speedo_private *)dev->priv; -#ifndef final_version - /* A lock to prevent simultaneous entry on SMP machines. */ - if (test_and_set_bit(0, (void*)&sp->in_interrupt)) { - printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n", - dev->name); - sp->in_interrupt = 0; /* Avoid halting machine. 
*/ - return; - } - dev->interrupt = 1; -#endif do { status = inw(ioaddr + SCBStatus); @@ -1392,7 +1358,7 @@ static void speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs) pci_unmap_single(sp->pdev, le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0), sp->tx_skbuff[entry]->len); - dev_free_skb(sp->tx_skbuff[entry]); + dev_kfree_skb_irq(sp->tx_skbuff[entry]); sp->tx_skbuff[entry] = 0; } else if ((status & 0x70000) == CmdNOp) { if (sp->mc_setup_busy) @@ -1418,7 +1384,6 @@ static void speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs) && sp->cur_tx - dirty_tx < TX_QUEUE_LIMIT - 1) { /* The ring is no longer full, clear tbusy. */ sp->tx_full = 0; - clear_bit(0, (void*)&dev->tbusy); spin_unlock(&sp->lock); netif_wake_queue(dev); } else @@ -1438,8 +1403,6 @@ static void speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs) printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n", dev->name, inw(ioaddr + SCBStatus)); - dev->interrupt = 0; - clear_bit(0, (void*)&sp->in_interrupt); return; } @@ -1560,8 +1523,7 @@ speedo_close(struct net_device *dev) struct speedo_private *sp = (struct speedo_private *)dev->priv; int i; - dev->start = 0; - dev->tbusy = 1; + netif_stop_queue(dev); if (speedo_debug > 1) printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n", @@ -1654,7 +1616,7 @@ speedo_get_stats(struct net_device *dev) sp->stats.rx_fifo_errors += le32_to_cpu(sp->lstats->rx_overrun_errs); sp->stats.rx_length_errors += le32_to_cpu(sp->lstats->rx_runt_errs); sp->lstats->done_marker = 0x0000; - if (dev->start) { + if (test_bit(LINK_STATE_START, &dev->state)) { wait_for_cmd_done(ioaddr + SCBCmd); outw(CUDumpStats, ioaddr + SCBCmd); } diff --git a/drivers/net/ethertap.c b/drivers/net/ethertap.c index 0b5aa3db807b..c35d2ff8b618 100644 --- a/drivers/net/ethertap.c +++ b/drivers/net/ethertap.c @@ -123,9 +123,7 @@ static int ethertap_open(struct net_device *dev) MOD_DEC_USE_COUNT; return -ENOBUFS; } - - dev->start = 1; - 
dev->tbusy = 0; + netif_start_queue(dev); return 0; } @@ -319,8 +317,7 @@ static int ethertap_close(struct net_device *dev) if (ethertap_debug > 2) printk("%s: Shutting down.\n", dev->name); - dev->tbusy = 1; - dev->start = 0; + netif_stop_queue(dev); if (sk) { lp->nl = NULL; diff --git a/drivers/net/gmac.c b/drivers/net/gmac.c new file mode 100644 index 000000000000..3df9d924f4cd --- /dev/null +++ b/drivers/net/gmac.c @@ -0,0 +1,614 @@ +/* + * Network device driver for the GMAC ethernet controller on + * Apple G4 Powermacs. + * + * Copyright (C) 2000 Paul Mackerras. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "gmac.h" + +#define DEBUG_PHY + +#define NTX 32 /* must be power of 2 */ +#define NRX 32 /* must be power of 2 */ +#define RX_BUFLEN (ETH_FRAME_LEN + 8) + +struct gmac_dma_desc { + unsigned int cmd; + unsigned int status; + unsigned int address; /* phys addr, low 32 bits */ + unsigned int hi_addr; +}; + +/* Bits in cmd */ +#define RX_OWN 0x80000000 /* 1 = owned by chip */ +#define TX_SOP 0x80000000 +#define TX_EOP 0x40000000 + +struct gmac { + volatile unsigned int *regs; /* hardware registers, virtual addr */ + volatile unsigned int *sysregs; + unsigned long desc_page; /* page for DMA descriptors */ + volatile struct gmac_dma_desc *rxring; + struct sk_buff *rx_buff[NRX]; + int next_rx; + volatile struct gmac_dma_desc *txring; + struct sk_buff *tx_buff[NTX]; + int next_tx; + int tx_gone; + unsigned char tx_full; + int phy_addr; + int full_duplex; + struct net_device_stats stats; +}; + +#define GM_OUT(r, v) out_le32(gm->regs + (r)/4, (v)) +#define GM_IN(r) in_le32(gm->regs + (r)/4) +#define GM_BIS(r, v) GM_OUT((r), GM_IN(r) | (v)) +#define GM_BIC(r, v) GM_OUT((r), GM_IN(r) & ~(v)) + +#define PHY_B5400 0x6040 +#define PHY_B5201 0x6212 + +static unsigned char dummy_buf[RX_BUFLEN+2]; +static struct net_device *gmacs = NULL; + +/* Prototypes */ +static int mii_read(struct gmac *gm, int phy, 
int r); +static int mii_write(struct gmac *gm, int phy, int r, int v); +static void powerup_transceiver(struct gmac *gm); +static int gmac_reset(struct net_device *dev); +static void gmac_mac_init(struct gmac *gm, unsigned char *mac_addr); +static void gmac_init_rings(struct gmac *gm); +static void gmac_start_dma(struct gmac *gm); +static int gmac_open(struct net_device *dev); +static int gmac_close(struct net_device *dev); +static int gmac_xmit_start(struct sk_buff *skb, struct net_device *dev); +static int gmac_tx_cleanup(struct gmac *gm); +static void gmac_receive(struct net_device *dev); +static void gmac_interrupt(int irq, void *dev_id, struct pt_regs *regs); +static struct net_device_stats *gmac_stats(struct net_device *dev); +int gmac_probe(struct net_device *dev); + +/* Stuff for talking to the physical-layer chip */ +static int +mii_read(struct gmac *gm, int phy, int r) +{ + int timeout; + + GM_OUT(MIFFRAME, 0x60020000 | (phy << 23) | (r << 18)); + for (timeout = 1000; timeout > 0; --timeout) { + udelay(20); + if (GM_IN(MIFFRAME) & 0x10000) + return GM_IN(MIFFRAME) & 0xffff; + } + return -1; +} + +static int +mii_write(struct gmac *gm, int phy, int r, int v) +{ + int timeout; + + GM_OUT(MIFFRAME, 0x50020000 | (phy << 23) | (r << 18) | (v & 0xffff)); + for (timeout = 1000; timeout > 0; --timeout) { + udelay(20); + if (GM_IN(MIFFRAME) & 0x10000) + return 0; + } + return -1; +} + +static void +mii_poll_start(struct gmac *gm) +{ + unsigned int tmp; + + /* Start the MIF polling on the external transceiver. */ + tmp = GM_IN(MIFCONFIG); + tmp &= ~(GMAC_MIF_CFGPR_MASK | GMAC_MIF_CFGPD_MASK); + tmp |= ((gm->phy_addr & 0x1f) << GMAC_MIF_CFGPD_SHIFT); + tmp |= (0x19 << GMAC_MIF_CFGPR_SHIFT); + tmp |= GMAC_MIF_CFGPE; + GM_OUT(MIFCONFIG, tmp); + + /* Let the bits set. 
*/ + udelay(GMAC_MIF_POLL_DELAY); + + GM_OUT(MIFINTMASK, 0xffc0); +} + +static void +mii_poll_stop(struct gmac *gm) +{ + GM_OUT(MIFINTMASK, 0xffff); + GM_BIC(MIFCONFIG, GMAC_MIF_CFGPE); + udelay(GMAC_MIF_POLL_DELAY); +} + +static void +mii_interrupt(struct gmac *gm) +{ + unsigned long flags; + int phy_status; + + save_flags(flags); + cli(); + + mii_poll_stop(gm); + + /* May the status change before polling is re-enabled ? */ + mii_poll_start(gm); + + /* We read the Auxilliary Status Summary register */ + phy_status = mii_read(gm, gm->phy_addr, 0x19); +#ifdef DEBUG_PHY + printk("mii_interrupt, phy_status: %x\n", phy_status); +#endif + /* Auto-neg. complete ? */ + if (phy_status & 0x8000) { + int full_duplex = 0; + switch((phy_status >> 8) & 0x7) { + case 2: + case 5: + full_duplex = 1; + break; + } + if (full_duplex != gm->full_duplex) { + GM_BIC(TXMAC_CONFIG, 1); + udelay(200); + if (full_duplex) { + printk("full duplex active\n"); + GM_OUT(TXMAC_CONFIG, 6); + GM_OUT(XIF_CONFIG, 1); + } else { + printk("half duplex active\n"); + GM_OUT(TXMAC_CONFIG, 0); + GM_OUT(XIF_CONFIG, 5); + } + GM_BIS(TXMAC_CONFIG, 1); + gm->full_duplex = full_duplex; + } + } + + restore_flags(flags); +} + +static void +powerup_transceiver(struct gmac *gm) +{ + int phytype = mii_read(gm, 0, 3); +#ifdef DEBUG_PHY + int i; +#endif + switch (phytype) { + case PHY_B5400: + mii_write(gm, 0, 0, mii_read(gm, 0, 0) & ~0x800); + mii_write(gm, 31, 30, mii_read(gm, 31, 30) & ~8); + break; + case PHY_B5201: + mii_write(gm, 0, 30, mii_read(gm, 0, 30) & ~8); + break; + default: + printk(KERN_ERR "GMAC: unknown PHY type %x\n", phytype); + } + /* Check this */ + gm->phy_addr = 0; + gm->full_duplex = 0; + +#ifdef DEBUG_PHY + printk("PHY regs:\n"); + for (i=0; i<0x20; i++) { + printk("%04x ", mii_read(gm, 0, i)); + if ((i % 4) == 3) + printk("\n"); + } +#endif +} + +static int +gmac_reset(struct net_device *dev) +{ + struct gmac *gm = (struct gmac *) dev->priv; + int timeout; + + /* turn on GB clock */ + 
out_le32(gm->sysregs + 0x20/4, in_le32(gm->sysregs + 0x20/4) | 2); + udelay(10); + GM_OUT(SW_RESET, 3); + for (timeout = 100; timeout > 0; --timeout) { + mdelay(10); + if ((GM_IN(SW_RESET) & 3) == 0) + return 0; + } + printk(KERN_ERR "GMAC: reset failed!\n"); + return -1; +} + +static void +gmac_mac_init(struct gmac *gm, unsigned char *mac_addr) +{ + int i; + + GM_OUT(RANSEED, 937); + GM_OUT(DATAPATHMODE, 4); + mii_write(gm, 0, 0, 0x1000); + GM_OUT(TXDMA_CONFIG, 0xffc00); + GM_OUT(RXDMA_CONFIG, 0); + GM_OUT(MACPAUSE, 0x1bf0); + GM_OUT(IPG0, 0); + GM_OUT(IPG1, 8); + GM_OUT(IPG2, 4); + GM_OUT(MINFRAMESIZE, 64); + GM_OUT(MAXFRAMESIZE, 2000); + GM_OUT(PASIZE, 7); + GM_OUT(JAMSIZE, 4); + GM_OUT(ATTEMPT_LIMIT, 16); + GM_OUT(SLOTTIME, 64); + GM_OUT(MACCNTL_TYPE, 0x8808); + GM_OUT(MAC_ADDR_0, (mac_addr[4] << 8) + mac_addr[5]); + GM_OUT(MAC_ADDR_1, (mac_addr[2] << 8) + mac_addr[3]); + GM_OUT(MAC_ADDR_2, (mac_addr[0] << 8) + mac_addr[1]); + GM_OUT(MAC_ADDR_3, 0); + GM_OUT(MAC_ADDR_4, 0); + GM_OUT(MAC_ADDR_5, 0); + GM_OUT(MAC_ADDR_6, 0x0180); + GM_OUT(MAC_ADDR_7, 0xc200); + GM_OUT(MAC_ADDR_8, 0x0001); + GM_OUT(MAC_ADDR_FILTER_0, 0); + GM_OUT(MAC_ADDR_FILTER_1, 0); + GM_OUT(MAC_ADDR_FILTER_2, 0); + GM_OUT(MAC_ADDR_FILTER_MASK21, 0); + GM_OUT(MAC_ADDR_FILTER_MASK0, 0); + for (i = 0; i < 27; ++i) + GM_OUT(MAC_HASHTABLE + i, 0); + GM_OUT(MACCNTL_CONFIG, 0); + /* default to half duplex */ + GM_OUT(TXMAC_CONFIG, 0); + GM_OUT(XIF_CONFIG, 5); +} + +static void +gmac_init_rings(struct gmac *gm) +{ + int i; + struct sk_buff *skb; + unsigned char *data; + struct gmac_dma_desc *ring; + + /* init rx ring */ + ring = (struct gmac_dma_desc *) gm->rxring; + memset(ring, 0, NRX * sizeof(struct gmac_dma_desc)); + for (i = 0; i < NRX; ++i, ++ring) { + data = dummy_buf; + gm->rx_buff[i] = skb = dev_alloc_skb(RX_BUFLEN + 2); + if (skb != 0) { + /*skb_reserve(skb, 2);*/ + data = skb->data; + } + st_le32(&ring->address, virt_to_bus(data)); + st_le32(&ring->cmd, RX_OWN); + } + + /* init tx ring */ + 
ring = (struct gmac_dma_desc *) gm->txring; + memset(ring, 0, NRX * sizeof(struct gmac_dma_desc)); + + /* set pointers in chip */ + mb(); + GM_OUT(RXDMA_BASE_HIGH, 0); + GM_OUT(RXDMA_BASE_LOW, virt_to_bus(gm->rxring)); + GM_OUT(TXDMA_BASE_HIGH, 0); + GM_OUT(TXDMA_BASE_LOW, virt_to_bus(gm->txring)); +} + +static void +gmac_start_dma(struct gmac *gm) +{ + GM_BIS(RXDMA_CONFIG, 1); + GM_BIS(RXMAC_CONFIG, 1); + GM_OUT(RXDMA_KICK, NRX); + GM_BIS(TXDMA_CONFIG, 1); + GM_BIS(TXMAC_CONFIG, 1); +} + +static int gmac_open(struct net_device *dev) +{ + struct gmac *gm = (struct gmac *) dev->priv; + + if (gmac_reset(dev)) + return -EIO; + + MOD_INC_USE_COUNT; + + powerup_transceiver(gm); + gmac_mac_init(gm, dev->dev_addr); + gmac_init_rings(gm); + gmac_start_dma(gm); + mii_interrupt(gm); + + GM_OUT(INTR_DISABLE, 0xfffdffe8); + + return 0; +} + +static int gmac_close(struct net_device *dev) +{ + struct gmac *gm = (struct gmac *) dev->priv; + int i; + + mii_poll_stop(gm); + + GM_BIC(RXDMA_CONFIG, 1); + GM_BIC(RXMAC_CONFIG, 1); + GM_BIC(TXDMA_CONFIG, 1); + GM_BIC(TXMAC_CONFIG, 1); + GM_OUT(INTR_DISABLE, ~0U); + for (i = 0; i < NRX; ++i) { + if (gm->rx_buff[i] != 0) { + dev_kfree_skb(gm->rx_buff[i]); + gm->rx_buff[i] = 0; + } + } + for (i = 0; i < NTX; ++i) { + if (gm->tx_buff[i] != 0) { + dev_kfree_skb(gm->tx_buff[i]); + gm->tx_buff[i] = 0; + } + } + + MOD_DEC_USE_COUNT; + return 0; +} + +static int gmac_xmit_start(struct sk_buff *skb, struct net_device *dev) +{ + struct gmac *gm = (struct gmac *) dev->priv; + volatile struct gmac_dma_desc *dp; + unsigned long flags; + int i; + + save_flags(flags); cli(); + i = gm->next_tx; + if (gm->tx_buff[i] != 0) { + /* buffer is full, can't send this packet at the moment */ + dev->tbusy = 1; + gm->tx_full = 1; + restore_flags(flags); + return 1; + } + gm->next_tx = (i + 1) & (NTX - 1); + gm->tx_buff[i] = skb; + restore_flags(flags); + + dp = &gm->txring[i]; + dp->status = 0; + dp->hi_addr = 0; + st_le32(&dp->address, virt_to_bus(skb->data)); + 
mb(); + st_le32(&dp->cmd, TX_SOP | TX_EOP | skb->len); + mb(); + + GM_OUT(TXDMA_KICK, gm->next_tx); + + return 0; +} + +static int gmac_tx_cleanup(struct gmac *gm) +{ + int i = gm->tx_gone; + volatile struct gmac_dma_desc *dp; + struct sk_buff *skb; + int ret = 0; + int gone = GM_IN(TXDMA_COMPLETE); + + while (i != gone) { + skb = gm->tx_buff[i]; + if (skb == NULL) + break; + dp = &gm->txring[i]; + gm->stats.tx_bytes += skb->len; + ++gm->stats.tx_packets; + gm->tx_buff[i] = NULL; + dev_kfree_skb(skb); + if (++i >= NTX) + i = 0; + } + if (i != gm->tx_gone) { + ret = gm->tx_full; + gm->tx_gone = i; + gm->tx_full = 0; + } + return ret; +} + +static void gmac_receive(struct net_device *dev) +{ + struct gmac *gm = (struct gmac *) dev->priv; + int i = gm->next_rx; + volatile struct gmac_dma_desc *dp; + struct sk_buff *skb; + int len; + unsigned char *data; + + for (;;) { + dp = &gm->rxring[i]; + if (ld_le32(&dp->cmd) & RX_OWN) + break; + len = (ld_le32(&dp->cmd) >> 16) & 0x7fff; + skb = gm->rx_buff[i]; + if (skb == 0) { + ++gm->stats.rx_dropped; + } else if (ld_le32(&dp->status) & 0x40000000) { + ++gm->stats.rx_errors; + dev_kfree_skb(skb); + } else { + skb_put(skb, len); + skb->dev = dev; + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + gm->stats.rx_bytes += skb->len; + ++gm->stats.rx_packets; + } + data = dummy_buf; + gm->rx_buff[i] = skb = dev_alloc_skb(RX_BUFLEN + 2); + if (skb != 0) { + /*skb_reserve(skb, 2);*/ + data = skb->data; + } + st_le32(&dp->address, virt_to_bus(data)); + dp->hi_addr = 0; + mb(); + st_le32(&dp->cmd, RX_OWN); + if (++i >= NRX) + i = 0; + } + gm->next_rx = i; +} + +static void gmac_interrupt(int irq, void *dev_id, struct pt_regs *regs) +{ + struct net_device *dev = (struct net_device *) dev_id; + struct gmac *gm = (struct gmac *) dev->priv; + unsigned int status; + + status = GM_IN(INTR_STATUS); + GM_OUT(INTR_ACK, status); + + if (status & GMAC_IRQ_MIF) { + mii_interrupt(gm); + } + gmac_receive(dev); + if (gmac_tx_cleanup(gm)){ + 
dev->tbusy = 0; + mark_bh(NET_BH); + } +} + +static struct net_device_stats *gmac_stats(struct net_device *dev) +{ + struct gmac *gm = (struct gmac *) dev->priv; + + return &gm->stats; +} + +int gmac_probe(struct net_device *dev) +{ + static int gmacs_found; + static struct device_node *next_gmac; + struct device_node *gmac; + struct gmac *gm; + unsigned long descpage; + unsigned char *addr; + int i; + + /* + * We could (and maybe should) do this using PCI scanning + * for vendor/net_device ID 0x106b/0x21. + */ + if (!gmacs_found) { + next_gmac = find_compatible_devices("network", "gmac"); + gmacs_found = 1; + } + if ((gmac = next_gmac) == 0) + return -ENODEV; + next_gmac = gmac->next; + + if (gmac->n_addrs < 1 || gmac->n_intrs < 1) { + printk(KERN_ERR "can't use GMAC %s: %d addrs and %d intrs\n", + gmac->full_name, gmac->n_addrs, gmac->n_intrs); + return -ENODEV; + } + + dev = init_etherdev(0, sizeof(struct gmac)); + memset(dev->priv, 0, sizeof(struct gmac)); + + gm = (struct gmac *) dev->priv; + dev->base_addr = gmac->addrs[0].address; + gm->regs = (volatile unsigned int *) + ioremap(gmac->addrs[0].address, 0x10000); + gm->sysregs = (volatile unsigned int *) ioremap(0xf8000000, 0x1000); + dev->irq = gmac->intrs[0].line; + + addr = get_property(gmac, "local-mac-address", NULL); + if (addr == NULL) { + printk(KERN_ERR "Can't get mac-address for GMAC %s\n", + gmac->full_name); + return -EAGAIN; + } + + printk(KERN_INFO "%s: GMAC at", dev->name); + for (i = 0; i < 6; ++i) { + dev->dev_addr[i] = addr[i]; + printk("%c%.2x", (i? 
':': ' '), addr[i]); + } + printk("\n"); + + descpage = get_free_page(GFP_KERNEL); + if (descpage == 0) { + printk(KERN_ERR "GMAC: can't get a page for descriptors\n"); + return -EAGAIN; + } + + gm->desc_page = descpage; + gm->rxring = (volatile struct gmac_dma_desc *) descpage; + gm->txring = (volatile struct gmac_dma_desc *) (descpage + 0x800); + + gm->phy_addr = 0; + + dev->open = gmac_open; + dev->stop = gmac_close; + dev->hard_start_xmit = gmac_xmit_start; + dev->get_stats = gmac_stats; + + ether_setup(dev); + + if (request_irq(dev->irq, gmac_interrupt, 0, "GMAC", dev)) { + printk(KERN_ERR "GMAC: can't get irq %d\n", dev->irq); + return -EAGAIN; + } + + gmacs = dev; + + return 0; +} + +#ifdef MODULE + +MODULE_AUTHOR("Paul Mackerras"); +MODULE_DESCRIPTION("PowerMac GMAC driver."); + +int init_module(void) +{ + if (gmacs != NULL) + return -EBUSY; + return gmac_probe(NULL); +} + +void cleanup_module(void) +{ + struct gmac *gm; + + /* XXX should handle more than one */ + if (gmacs == NULL) + return; + + gm = (struct gmac *) gmacs->priv; + free_irq(gmacs->irq, gmacs); + free_page(gm->desc_page); + unregister_netdev(gmacs); + kfree(gmacs); + gmacs = NULL; +} + +#endif diff --git a/drivers/net/gmac.h b/drivers/net/gmac.h new file mode 100644 index 000000000000..2e50f6072e8a --- /dev/null +++ b/drivers/net/gmac.h @@ -0,0 +1,113 @@ +/* + * Definitions for the GMAC ethernet chip, used in the + * Apple G4 powermac. 
+ */ + +/* Register offsets */ +#define INTR_STATUS 0x000c +#define INTR_DISABLE 0x0010 +#define INTR_ACK 0x0014 +#define SW_RESET 0x1010 +#define TXDMA_KICK 0x2000 +#define TXDMA_CONFIG 0x2004 +#define TXDMA_BASE_LOW 0x2008 +#define TXDMA_BASE_HIGH 0x200c +#define TXDMA_STATE_MACH 0x2028 +#define TXDMA_COMPLETE 0x2100 +#define RXDMA_CONFIG 0x4000 +#define RXDMA_BASE_LOW 0x4004 +#define RXDMA_BASE_HIGH 0x4008 +#define RXDMA_KICK 0x4100 +#define MACPAUSE 0x6008 +#define TXMAC_STATUS 0x6010 +#define TXMAC_CONFIG 0x6030 +#define RXMAC_CONFIG 0x6034 +#define MACCNTL_CONFIG 0x6038 +#define XIF_CONFIG 0x603c +#define IPG0 0x6040 +#define IPG1 0x6044 +#define IPG2 0x6048 +#define SLOTTIME 0x604c +#define MINFRAMESIZE 0x6050 +#define MAXFRAMESIZE 0x6054 +#define PASIZE 0x6058 +#define JAMSIZE 0x605c +#define ATTEMPT_LIMIT 0x6060 +#define MACCNTL_TYPE 0x6064 +#define MAC_ADDR_0 0x6080 +#define MAC_ADDR_1 0x6084 +#define MAC_ADDR_2 0x6088 +#define MAC_ADDR_3 0x608c +#define MAC_ADDR_4 0x6090 +#define MAC_ADDR_5 0x6094 +#define MAC_ADDR_6 0x6098 +#define MAC_ADDR_7 0x609c +#define MAC_ADDR_8 0x60a0 +#define MAC_ADDR_FILTER_0 0x60a4 +#define MAC_ADDR_FILTER_1 0x60a8 +#define MAC_ADDR_FILTER_2 0x60ac +#define MAC_ADDR_FILTER_MASK21 0x60b0 +#define MAC_ADDR_FILTER_MASK0 0x60b4 +#define MAC_HASHTABLE 0x60c0 +#define RANSEED 0x6130 +#define MIFFRAME 0x620c +#define MIFCONFIG 0x6210 +#define MIFINTMASK 0x6214 +#define MIFSTATUS 0x6218 +#define DATAPATHMODE 0x9050 + +/* -- 0x000C R-C Global Interrupt status. 
+ * d: 0x00000000 bits 0-6 cleared on read (C) + */ +#define GMAC_IRQ_TX_INT_ME 0x00000001 /* C Frame with INT_ME bit set in fifo */ +#define GMAC_IRQ_TX_ALL 0x00000002 /* C TX descriptor ring empty */ +#define GMAC_IRQ_TX_DONE 0x00000004 /* C moved from host to TX fifo */ +#define GMAC_IRQ_RX_DONE 0x00000010 /* C moved from RX fifo to host */ +#define GMAC_IRQ_RX_NO_BUF 0x00000020 /* C No RX buffer available */ +#define GMAC_IRQ_RX_TAG_ERR 0x00000040 /* C RX tag error */ + +#define GMAC_IRQ_PCS 0x00002000 /* PCS interrupt ? */ +#define GMAC_IRQ_MAC_TX 0x00004000 /* MAC tx register set */ +#define GMAC_IRQ_MAC_RX 0x00008000 /* MAC rx register set */ +#define GMAC_IRQ_MAC_CTRL 0x00010000 /* MAC control register set */ +#define GMAC_IRQ_MIF 0x00020000 /* MIF status register set */ +#define GMAC_IRQ_BUS_ERROR 0x00040000 /* Bus error status register set */ + +#define GMAC_IRQ_TX_COMP 0xfff80000 /* TX completion mask */ + +/* -- 0x6210 RW MIF config reg + */ + +#define GMAC_MIF_CFGPS 0x00000001 /* PHY Select */ +#define GMAC_MIF_CFGPE 0x00000002 /* Poll Enable */ +#define GMAC_MIF_CFGBB 0x00000004 /* Bit Bang Enable */ +#define GMAC_MIF_CFGPR_MASK 0x000000f8 /* Poll Register address */ +#define GMAC_MIF_CFGPR_SHIFT 3 +#define GMAC_MIF_CFGM0 0x00000100 /* MDIO_0 Data / MDIO_0 attached */ +#define GMAC_MIF_CFGM1 0x00000200 /* MDIO_1 Data / MDIO_1 attached */ +#define GMAC_MIF_CFGPD_MASK 0x00007c00 /* Poll Device PHY address */ +#define GMAC_MIF_CFGPD_SHIFT 10 + +#define GMAC_MIF_POLL_DELAY 200 + +#define GMAC_INTERNAL_PHYAD 1 /* PHY address for int. transceiver */ +#define GMAC_EXTERNAL_PHYAD 0 /* PHY address for ext. transceiver */ + + +/* -- 0x6214 RW MIF interrupt mask reg + * same as basic/status Register + */ + +/* -- 0x6214 RW MIF basic/status reg + * The Basic portion of this register indicates the last + * value of the register read indicated in the POLL REG field + * of the Configuration Register. + * The Status portion indicates bit(s) that have changed. 
+ * The MIF Mask register is corresponding to this register in + * terms of the bit(s) that need to be masked for generating + * interrupt on the MIF Interrupt Bit of the Global Status Rgister. + */ + +#define GMAC_MIF_STATUS 0x0000ffff /* 0-15 : Status */ +#define GMAC_MIF_BASIC 0xffff0000 /* 16-31 : Basic register */ + diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index ad0191b56301..1281fff820d3 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -107,17 +107,10 @@ static struct net_device_stats *get_stats(struct net_device *dev) return (struct net_device_stats *)dev->priv; } -static int loopback_open(struct net_device *dev) -{ - dev->flags|=IFF_LOOPBACK; - return 0; -} - /* Initialize the rest of the LOOPBACK device. */ int __init loopback_init(struct net_device *dev) { dev->mtu = LOOPBACK_MTU; - dev->tbusy = 0; dev->hard_start_xmit = loopback_xmit; dev->hard_header = eth_header; dev->hard_header_cache = eth_header_cache; @@ -127,7 +120,6 @@ int __init loopback_init(struct net_device *dev) dev->tx_queue_len = 0; dev->type = ARPHRD_LOOPBACK; /* 0x0001 */ dev->rebuild_header = eth_rebuild_header; - dev->open = loopback_open; dev->flags = IFF_LOOPBACK; dev->priv = kmalloc(sizeof(struct net_device_stats), GFP_KERNEL); if (dev->priv == NULL) diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c index 1266c55e264d..23bb50c64735 100644 --- a/drivers/net/myri_sbus.c +++ b/drivers/net/myri_sbus.c @@ -551,7 +551,6 @@ static void myri_interrupt(int irq, void *dev_id, struct pt_regs *regs) DIRQ(("IRQ_DISAB ")); myri_disable_irq(lregs, mp->cregs); - dev->interrupt = 1; softstate = sbus_readl(&chan->state); DIRQ(("state[%08x] ", softstate)); if (softstate != STATE_READY) { @@ -562,7 +561,6 @@ static void myri_interrupt(int irq, void *dev_id, struct pt_regs *regs) myri_rx(mp, dev); DIRQ(("\nistat=ISTAT_HOST ")); sbus_writel(ISTAT_HOST, lregs + LANAI_ISTAT); - dev->interrupt = 0; DIRQ(("IRQ_ENAB ")); myri_enable_irq(lregs, mp->cregs); } 
@@ -584,6 +582,17 @@ static int myri_close(struct net_device *dev) return 0; } +static void myri_tx_timeout(struct net_device *dev) +{ + struct myri_eth *mp = (struct myri_eth *) dev->priv; + + printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name); + + mp->enet_stats.tx_errors++; + myri_init(mp, 0); + netif_start_queue(dev); +} + static int myri_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct myri_eth *mp = (struct myri_eth *) dev->priv; @@ -598,29 +607,7 @@ static int myri_start_xmit(struct sk_buff *skb, struct net_device *dev) myri_tx(mp, dev); - if (dev->tbusy) { - int tickssofar = jiffies - dev->trans_start; - - DTX(("tbusy tickssofar[%d] ", tickssofar)); - if (tickssofar < 40) { - DTX(("returning 1\n")); - return 1; - } else { - DTX(("resetting, return 0\n")); - printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name); - mp->enet_stats.tx_errors++; - myri_init(mp, in_interrupt()); - dev->tbusy = 0; - dev->trans_start = jiffies; - return 0; - } - } - - if (test_and_set_bit(0, (void *) &dev->tbusy) != 0) { - DTX(("tbusy, maybe a race? returning 1\n")); - printk("%s: Transmitter access conflict.\n", dev->name); - return 1; - } + netif_stop_queue(dev); /* This is just to prevent multiple PIO reads for TX_BUFFS_AVAIL. 
*/ head = sbus_readl(&sq->head); @@ -677,7 +664,7 @@ static int myri_start_xmit(struct sk_buff *skb, struct net_device *dev) bang_the_chip(mp); DTX(("tbusy=0, returning 0\n")); - dev->tbusy = 0; + netif_start_queue(dev); restore_flags(flags); return 0; } @@ -1059,6 +1046,8 @@ static int __init myri_ether_init(struct net_device *dev, struct sbus_dev *sdev, dev->open = &myri_open; dev->stop = &myri_close; dev->hard_start_xmit = &myri_start_xmit; + dev->tx_timeout = &myri_tx_timeout; + dev->watchdog_timeo = 5*HZ; dev->get_stats = &myri_get_stats; dev->set_multicast_list = &myri_set_multicast; dev->irq = sdev->irqs[0]; diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c index 5e177bb2a628..01f1e2ea7938 100644 --- a/drivers/net/ne2k-pci.c +++ b/drivers/net/ne2k-pci.c @@ -451,9 +451,8 @@ ne2k_pci_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int rin /* This *shouldn't* happen. If it does, it's the last thing you'll see */ if (ei_status.dmaing) { printk("%s: DMAing conflict in ne2k_pci_get_8390_hdr " - "[DMAstat:%d][irqlock:%d][intr:%d].\n", - dev->name, ei_status.dmaing, ei_status.irqlock, - (int)dev->interrupt); + "[DMAstat:%d][irqlock:%d].\n", + dev->name, ei_status.dmaing, ei_status.irqlock); return; } @@ -490,9 +489,8 @@ ne2k_pci_block_input(struct net_device *dev, int count, struct sk_buff *skb, int /* This *shouldn't* happen. If it does, it's the last thing you'll see */ if (ei_status.dmaing) { printk("%s: DMAing conflict in ne2k_pci_block_input " - "[DMAstat:%d][irqlock:%d][intr:%d].\n", - dev->name, ei_status.dmaing, ei_status.irqlock, - (int)dev->interrupt); + "[DMAstat:%d][irqlock:%d].\n", + dev->name, ei_status.dmaing, ei_status.irqlock); return; } ei_status.dmaing |= 0x01; @@ -543,9 +541,8 @@ ne2k_pci_block_output(struct net_device *dev, int count, /* This *shouldn't* happen. If it does, it's the last thing you'll see */ if (ei_status.dmaing) { printk("%s: DMAing conflict in ne2k_pci_block_output." 
- "[DMAstat:%d][irqlock:%d][intr:%d]\n", - dev->name, ei_status.dmaing, ei_status.irqlock, - (int)dev->interrupt); + "[DMAstat:%d][irqlock:%d]\n", + dev->name, ei_status.dmaing, ei_status.irqlock); return; } ei_status.dmaing |= 0x01; diff --git a/drivers/net/net_init.c b/drivers/net/net_init.c index 216aa14cb34d..93b2f9923f87 100644 --- a/drivers/net/net_init.c +++ b/drivers/net/net_init.c @@ -153,7 +153,7 @@ struct net_device *init_etherdev(struct net_device *dev, int sizeof_priv) static int eth_mac_addr(struct net_device *dev, void *p) { struct sockaddr *addr=p; - if(dev->start) + if(test_bit(LINK_STATE_START, &dev->state)) return -EBUSY; memcpy(dev->dev_addr, addr->sa_data,dev->addr_len); return 0; @@ -200,7 +200,7 @@ static int hippi_change_mtu(struct net_device *dev, int new_mtu) static int hippi_mac_addr(struct net_device *dev, void *p) { struct sockaddr *addr = p; - if(dev->start) + if(test_bit(LINK_STATE_START, &dev->state)) return -EBUSY; memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); return 0; diff --git a/drivers/net/oaknet.c b/drivers/net/oaknet.c index 282d436fd54c..3cd09fc30f6e 100644 --- a/drivers/net/oaknet.c +++ b/drivers/net/oaknet.c @@ -1,6 +1,6 @@ /* * - * Copyright (c) 1999 Grant Erickson + * Copyright (c) 1999-2000 Grant Erickson * * Module name: oaknet.c * @@ -9,10 +9,14 @@ * on-board the IBM PowerPC "Oak" evaluation board. Adapted from the * various other 8390 drivers written by Donald Becker and Paul Gortmaker. * + * Additional inspiration from the "tcd8390.c" driver from TiVo, Inc. + * and "enetLib.c" from IBM. + * */ #include #include +#include #include #include @@ -32,20 +36,22 @@ #define FALSE 0 #endif -#define OAKNET_CMD 0x00 -#define OAKNET_DATA 0x10 /* NS-defined port window offset. */ -#define OAKNET_RESET 0x1f /* A read resets, a write clears. 
*/ - #define OAKNET_START_PG 0x20 /* First page of TX buffer */ #define OAKNET_STOP_PG 0x40 /* Last pagge +1 of RX ring */ -#define OAKNET_BASE (dev->base_addr) - #define OAKNET_WAIT (2 * HZ / 100) /* 20 ms */ +/* Experimenting with some fixes for a broken driver... */ + +#define OAKNET_DISINT +#define OAKNET_HEADCHECK +#define OAKNET_RWFIX + /* Global Variables */ +static const char *name = "National DP83902AV"; + #if defined(MODULE) static struct net_device *oaknet_devs; #endif @@ -90,14 +96,25 @@ oaknet_init(void) { register int i; int reg0, regd; - struct net_device *dev = NULL; - unsigned long ioaddr = OAKNET_IO_BASE; - const char *name = "National DP83902AV"; + struct net_device tmp, *dev = NULL; +#if 0 + unsigned long ioaddr = OAKNET_IO_BASE; +#else + unsigned long ioaddr = ioremap(OAKNET_IO_BASE, OAKNET_IO_SIZE); +#endif bd_t *bip = (bd_t *)__res; + /* + * This MUST happen here because of the nic_* macros + * which have an implicit dependency on dev->base_addr. + */ + + tmp.base_addr = ioaddr; + dev = &tmp; + /* Quick register check to see if the device is really there. */ - if ((reg0 = inb_p(ioaddr)) == 0xFF) + if ((reg0 = ei_ibp(ioaddr)) == 0xFF) return (ENODEV); /* @@ -106,17 +123,17 @@ oaknet_init(void) * and semi-functional. */ - outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, ioaddr + E8390_CMD); - regd = inb_p(ioaddr + 0x0D); - outb_p(0xFF, ioaddr + 0x0D); - outb_p(E8390_NODMA + E8390_PAGE0, ioaddr + E8390_CMD); - inb_p(ioaddr + EN0_COUNTER0); + ei_obp(E8390_NODMA + E8390_PAGE1 + E8390_STOP, ioaddr + E8390_CMD); + regd = ei_ibp(ioaddr + 0x0D); + ei_obp(0xFF, ioaddr + 0x0D); + ei_obp(E8390_NODMA + E8390_PAGE0, ioaddr + E8390_CMD); + ei_ibp(ioaddr + EN0_COUNTER0); /* It's no good. Fix things back up and leave. 
*/ - if (inb_p(ioaddr + EN0_COUNTER0) != 0) { - outb_p(reg0, ioaddr); - outb_p(regd, ioaddr + 0x0D); + if (ei_ibp(ioaddr + EN0_COUNTER0) != 0) { + ei_obp(reg0, ioaddr); + ei_obp(regd, ioaddr + 0x0D); dev->base_addr = 0; return (ENODEV); @@ -145,7 +162,7 @@ oaknet_init(void) * and interrupt assignments are pre-assigned and unchageable. */ - dev->base_addr = OAKNET_IO_BASE; + dev->base_addr = ioaddr; dev->irq = OAKNET_INT; /* Allocate 8390-specific device-private area and fields. */ @@ -155,20 +172,13 @@ oaknet_init(void) return (-ENOMEM); } - /* - * Just to be safe, reset the card as we cannot really* be sure - * what state it was last left in. - */ - - oaknet_reset_8390(dev); - /* * Disable all chip interrupts for now and ACK all pending * interrupts. */ - outb_p(0x0, ioaddr + EN0_IMR); - outb_p(0xFF, ioaddr + EN0_ISR); + ei_obp(0x0, ioaddr + EN0_IMR); + ei_obp(0xFF, ioaddr + EN0_ISR); /* Attempt to get the interrupt line */ @@ -273,7 +283,7 @@ oaknet_close(struct net_device *dev) * This routine resets the DP83902 chip. * * Input(s): - * *dev - + * *dev - Pointer to the device structure for this driver. * * Output(s): * N/A @@ -285,36 +295,48 @@ oaknet_close(struct net_device *dev) static void oaknet_reset_8390(struct net_device *dev) { - int base = OAKNET_BASE; - unsigned long start = jiffies; + int base = E8390_BASE; - outb(inb(base + OAKNET_RESET), base + OAKNET_RESET); + /* + * We have no provision of reseting the controller as is done + * in other drivers, such as "ne.c". However, the following + * seems to work well enough in the TiVo driver. 
+ */ + printk("Resetting %s...\n", dev->name); + ei_obp(E8390_STOP | E8390_NODMA | E8390_PAGE0, base + E8390_CMD); ei_status.txing = 0; ei_status.dmaing = 0; - /* This check shouldn't be necessary eventually */ - - while ((inb_p(base + EN0_ISR) & ENISR_RESET) == 0) { - if (jiffies - start > OAKNET_WAIT) { - printk("%s: reset didn't complete\n", dev->name); - break; - } - } - - outb_p(ENISR_RESET, base + EN0_ISR); /* ACK reset interrupt */ - return; } /* - * XXX - Document me. + * static void oaknet_get_8390_hdr() + * + * Description: + * This routine grabs the 8390-specific header. It's similar to the + * block input routine, but we don't need to be concerned with ring wrap + * as the header will be at the start of a page, so we optimize accordingly. + * + * Input(s): + * *dev - Pointer to the device structure for this driver. + * *hdr - Pointer to storage for the 8390-specific packet header. + * ring_page - ? + * + * Output(s): + * *hdr - Pointer to the 8390-specific packet header for the just- + * received frame. + * + * Returns: + * N/A + * */ static void oaknet_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) { - int base = OAKNET_BASE; + int base = dev->base_addr; /* * This should NOT happen. 
If it does, it is the LAST thing you'll @@ -341,6 +363,10 @@ oaknet_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, insb(base + OAKNET_DATA, hdr, sizeof(struct e8390_pkt_hdr)); + /* Byte-swap the packet byte count */ + + hdr->count = le16_to_cpu(hdr->count); + outb_p(ENISR_RDC, base + EN0_ISR); /* ACK Remote DMA interrupt */ ei_status.dmaing &= ~0x01; @@ -367,38 +393,99 @@ oaknet_block_input(struct net_device *dev, int count, struct sk_buff *skb, return; } +#ifdef OAKNET_DISINT + save_flags(flags); + cli(); +#endif + ei_status.dmaing |= 0x01; - outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, base + OAKNET_CMD); - outb_p(count & 0xff, base + EN0_RCNTLO); - outb_p(count >> 8, base + EN0_RCNTHI); - outb_p(ring_offset & 0xff, base + EN0_RSARLO); - outb_p(ring_offset >> 8, base + EN0_RSARHI); - outb_p(E8390_RREAD + E8390_START, base + OAKNET_CMD); + ei_obp(E8390_NODMA + E8390_PAGE0 + E8390_START, base + E8390_CMD); + ei_obp(count & 0xff, base + EN0_RCNTLO); + ei_obp(count >> 8, base + EN0_RCNTHI); + ei_obp(ring_offset & 0xff, base + EN0_RSARLO); + ei_obp(ring_offset >> 8, base + EN0_RSARHI); + ei_obp(E8390_RREAD + E8390_START, base + E8390_CMD); if (ei_status.word16) { - insw(base + OAKNET_DATA, buf, count >> 1); + ei_isw(base + E8390_DATA, buf, count >> 1); if (count & 0x01) { - buf[count-1] = inb(base + OAKNET_DATA); + buf[count - 1] = ei_ib(base + E8390_DATA); +#ifdef OAKNET_HEADCHECK + bytes++; +#endif } } else { - insb(base + OAKNET_DATA, buf, count); + ei_isb(base + E8390_DATA, buf, count); } - outb_p(ENISR_RDC, base + EN0_ISR); /* ACK Remote DMA interrupt */ +#ifdef OAKNET_HEADCHECK + /* + * This was for the ALPHA version only, but enough people have + * been encountering problems so it is still here. If you see + * this message you either 1) have a slightly incompatible clone + * or 2) have noise/speed problems with your bus. + */ + + /* DMA termination address check... 
*/ + { + int addr, tries = 20; + do { + /* DON'T check for 'ei_ibp(EN0_ISR) & ENISR_RDC' here + -- it's broken for Rx on some cards! */ + int high = ei_ibp(base + EN0_RSARHI); + int low = ei_ibp(base + EN0_RSARLO); + addr = (high << 8) + low; + if (((ring_offset + bytes) & 0xff) == low) + break; + } while (--tries > 0); + if (tries <= 0) + printk("%s: RX transfer address mismatch," + "%#4.4x (expected) vs. %#4.4x (actual).\n", + dev->name, ring_offset + bytes, addr); + } +#endif + ei_obp(ENISR_RDC, base + EN0_ISR); /* ACK Remote DMA interrupt */ ei_status.dmaing &= ~0x01; +#ifdef OAKNET_DISINT + restore_flags(flags); +#endif + return; } /* - * XXX - Document me. + * static void oaknet_block_output() + * + * Description: + * This routine... + * + * Input(s): + * *dev - Pointer to the device structure for this driver. + * count - Number of bytes to be transferred. + * *buf - + * start_page - + * + * Output(s): + * N/A + * + * Returns: + * N/A + * */ static void oaknet_block_output(struct net_device *dev, int count, const unsigned char *buf, int start_page) { - int base = OAKNET_BASE; + int base = E8390_BASE; +#if 0 int bug; +#endif unsigned long start; - unsigned char lobyte; +#ifdef OAKNET_DISINT + unsigned long flags; +#endif +#ifdef OAKNET_HEADCHECK + int retries = 0; +#endif /* Round the count up for word writes. */ @@ -415,12 +502,22 @@ oaknet_block_output(struct net_device *dev, int count, return; } +#ifdef OAKNET_DISINT + save_flags(flags); + cli(); +#endif + ei_status.dmaing |= 0x01; /* Make sure we are in page 0. */ - outb_p(E8390_PAGE0 + E8390_START + E8390_NODMA, base + OAKNET_CMD); + ei_obp(E8390_PAGE0 + E8390_START + E8390_NODMA, base + E8390_CMD); +#ifdef OAKNET_HEADCHECK +retry: +#endif + +#if 0 /* * The 83902 documentation states that the processor needs to * do a "dummy read" before doing the remote write to work @@ -433,60 +530,131 @@ oaknet_block_output(struct net_device *dev, int count, unsigned int rdlo; /* Now the normal output. 
*/ - outb_p(ENISR_RDC, base + EN0_ISR); - outb_p(count & 0xff, base + EN0_RCNTLO); - outb_p(count >> 8, base + EN0_RCNTHI); - outb_p(0x00, base + EN0_RSARLO); - outb_p(start_page, base + EN0_RSARHI); + ei_obp(ENISR_RDC, base + EN0_ISR); + ei_obp(count & 0xff, base + EN0_RCNTLO); + ei_obp(count >> 8, base + EN0_RCNTHI); + ei_obp(0x00, base + EN0_RSARLO); + ei_obp(start_page, base + EN0_RSARHI); if (bug++) break; /* Perform the dummy read */ - rdhi = inb_p(base + EN0_CRDAHI); - rdlo = inb_p(base + EN0_CRDALO); - outb_p(E8390_RREAD + E8390_START, base + OAKNET_CMD); + rdhi = ei_ibp(base + EN0_CRDAHI); + rdlo = ei_ibp(base + EN0_CRDALO); + ei_obp(E8390_RREAD + E8390_START, base + E8390_CMD); while (1) { unsigned int nrdhi; unsigned int nrdlo; - nrdhi = inb_p(base + EN0_CRDAHI); - nrdlo = inb_p(base + EN0_CRDALO); + nrdhi = ei_ibp(base + EN0_CRDAHI); + nrdlo = ei_ibp(base + EN0_CRDALO); if ((rdhi != nrdhi) || (rdlo != nrdlo)) break; } } +#else +#ifdef OAKNET_RWFIX + /* + * Handle the read-before-write bug the same way as the + * Crynwr packet driver -- the Nat'l Semi. method doesn't work. + * Actually this doesn't always work either, but if you have + * problems with your 83902 this is better than nothing! + */ + + ei_obp(0x42, base + EN0_RCNTLO); + ei_obp(0x00, base + EN0_RCNTHI); + ei_obp(0x42, base + EN0_RSARLO); + ei_obp(0x00, base + EN0_RSARHI); + ei_obp(E8390_RREAD + E8390_START, base + E8390_CMD); + /* Make certain that the dummy read has occurred. */ + udelay(6); +#endif - outb_p(E8390_RWRITE+E8390_START, base + OAKNET_CMD); + ei_obp(ENISR_RDC, base + EN0_ISR); + + /* Now the normal output. 
*/ + ei_obp(count & 0xff, base + EN0_RCNTLO); + ei_obp(count >> 8, base + EN0_RCNTHI); + ei_obp(0x00, base + EN0_RSARLO); + ei_obp(start_page, base + EN0_RSARHI); +#endif /* 0/1 */ + + ei_obp(E8390_RWRITE + E8390_START, base + E8390_CMD); if (ei_status.word16) { - outsw(OAKNET_BASE + OAKNET_DATA, buf, count >> 1); + ei_osw(E8390_BASE + E8390_DATA, buf, count >> 1); } else { - outsb(OAKNET_BASE + OAKNET_DATA, buf, count); + ei_osb(E8390_BASE + E8390_DATA, buf, count); } +#ifdef OAKNET_DISINT + restore_flags(flags); +#endif + start = jiffies; - while (((lobyte = inb_p(base + EN0_ISR)) & ENISR_RDC) == 0) { +#ifdef OAKNET_HEADCHECK + /* + * This was for the ALPHA version only, but enough people have + * been encountering problems so it is still here. + */ + + { + /* DMA termination address check... */ + int addr, tries = 20; + do { + int high = ei_ibp(base + EN0_RSARHI); + int low = ei_ibp(base + EN0_RSARLO); + addr = (high << 8) + low; + if ((start_page << 8) + count == addr) + break; + } while (--tries > 0); + + if (tries <= 0) { + printk("%s: Tx packet transfer address mismatch," + "%#4.4x (expected) vs. %#4.4x (actual).\n", + dev->name, (start_page << 8) + count, addr); + if (retries++ == 0) + goto retry; + } + } +#endif + + while ((ei_ibp(base + EN0_ISR) & ENISR_RDC) == 0) { if (jiffies - start > OAKNET_WAIT) { - unsigned char hicnt, locnt; - hicnt = inb_p(base + EN0_CRDAHI); - locnt = inb_p(base + EN0_CRDALO); - printk("%s: timeout waiting for Tx RDC, stat = 0x%x\n", - dev->name, lobyte); - printk("\tstart address 0x%x, current address 0x%x, count %d\n", - (start_page << 8), (hicnt << 8) | locnt, count); + printk("%s: timeout waiting for Tx RDC.\n", dev->name); oaknet_reset_8390(dev); NS8390_init(dev, TRUE); break; } } - outb_p(ENISR_RDC, base + EN0_ISR); /* Ack intr. */ + ei_obp(ENISR_RDC, base + EN0_ISR); /* Ack intr. 
*/ ei_status.dmaing &= ~0x01; return; } +/* + * static void oaknet_dma_error() + * + * Description: + * This routine prints out a last-ditch informative message to the console + * indicating that a DMA error occured. If you see this, it's the last + * thing you'll see. + * + * Input(s): + * *dev - Pointer to the device structure for this driver. + * *name - Informative text (e.g. function name) indicating where the + * DMA error occurred. + * + * Output(s): + * N/A + * + * Returns: + * N/A + * + */ static void oaknet_dma_error(struct net_device *dev, const char *name) { diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c index 575a912718fb..e47f6145b721 100644 --- a/drivers/net/pcmcia/pcnet_cs.c +++ b/drivers/net/pcmcia/pcnet_cs.c @@ -315,7 +315,8 @@ static dev_link_t *pcnet_attach(void) dev->open = &pcnet_open; dev->stop = &pcnet_close; dev->set_config = &set_config; - dev->tbusy = 1; + + netif_stop_queue(dev); /* Register with Card Services */ link->next = dev_list; @@ -681,7 +682,7 @@ static void pcnet_config(dev_link_t *link) } else { dev->if_port = 0; } - dev->tbusy = 0; + netif_start_queue(dev); if (register_netdev(dev) != 0) { printk(KERN_NOTICE "pcnet_cs: register_netdev() failed\n"); goto failed; @@ -806,7 +807,8 @@ static int pcnet_event(event_t event, int priority, case CS_EVENT_CARD_REMOVAL: link->state &= ~DEV_PRESENT; if (link->state & DEV_CONFIG) { - info->dev.tbusy = 1; info->dev.start = 0; + netif_stop_queue(&info->dev); + clear_bit(LINK_STATE_START, &info->dev.state); link->release.expires = jiffies + HZ/20; link->state |= DEV_RELEASE_PENDING; add_timer(&link->release); @@ -822,7 +824,8 @@ static int pcnet_event(event_t event, int priority, case CS_EVENT_RESET_PHYSICAL: if (link->state & DEV_CONFIG) { if (link->open) { - info->dev.tbusy = 1; info->dev.start = 0; + netif_stop_queue(&info->dev); + clear_bit(LINK_STATE_START, &info->dev); } CardServices(ReleaseConfiguration, link->handle); } @@ -836,7 +839,8 @@ static int 
pcnet_event(event_t event, int priority, if (link->open) { pcnet_reset_8390(&info->dev); NS8390_init(&info->dev, 1); - info->dev.tbusy = 0; info->dev.start = 1; + netif_start_queue(&info->dev); + set_bit(LINK_STATE_START, &info->dev); } } break; @@ -903,7 +907,8 @@ static int pcnet_close(struct net_device *dev) free_irq(dev->irq, dev); - link->open--; dev->start = 0; + link->open--; + clear_bit(LINK_STATE_START, &dev->state); del_timer(&info->watchdog); if (link->state & DEV_STALE_CONFIG) { link->release.expires = jiffies + HZ/20; @@ -979,7 +984,8 @@ static void ei_watchdog(u_long arg) struct net_device *dev = &info->dev; ioaddr_t nic_base = dev->base_addr; - if (dev->start == 0) goto reschedule; + if (!test_bit(LINK_STATE_START, &dev->state)) + goto reschedule; /* Check for pending interrupt with expired latency timer: with this, we can limp along even if the interrupt is blocked */ @@ -1023,9 +1029,8 @@ static void dma_get_8390_hdr(struct net_device *dev, if (ei_status.dmaing) { printk(KERN_NOTICE "%s: DMAing conflict in dma_block_input." - "[DMAstat:%1x][irqlock:%1x][intr:%ld]\n", - dev->name, ei_status.dmaing, ei_status.irqlock, - (long)dev->interrupt); + "[DMAstat:%1x][irqlock:%1x]\n", + dev->name, ei_status.dmaing, ei_status.irqlock); return; } @@ -1061,9 +1066,8 @@ static void dma_block_input(struct net_device *dev, int count, #endif if (ei_status.dmaing) { printk(KERN_NOTICE "%s: DMAing conflict in dma_block_input." - "[DMAstat:%1x][irqlock:%1x][intr:%ld]\n", - dev->name, ei_status.dmaing, ei_status.irqlock, - (long)dev->interrupt); + "[DMAstat:%1x][irqlock:%1x]\n", + dev->name, ei_status.dmaing, ei_status.irqlock); return; } ei_status.dmaing |= 0x01; @@ -1127,9 +1131,8 @@ static void dma_block_output(struct net_device *dev, int count, count++; if (ei_status.dmaing) { printk(KERN_NOTICE "%s: DMAing conflict in dma_block_output." 
- "[DMAstat:%1x][irqlock:%1x][intr:%ld]\n", - dev->name, ei_status.dmaing, ei_status.irqlock, - (long)dev->interrupt); + "[DMAstat:%1x][irqlock:%1x]\n", + dev->name, ei_status.dmaing, ei_status.irqlock); return; } ei_status.dmaing |= 0x01; diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c index 7deaf807be07..fad5da9fe6e6 100644 --- a/drivers/net/ppp_generic.c +++ b/drivers/net/ppp_generic.c @@ -527,7 +527,7 @@ static int ppp_ioctl(struct inode *inode, struct file *file, } else { ppp->npmode[i] = npi.mode; /* we may be able to transmit more packets now (??) */ - mark_bh(NET_BH); + netif_wake_queue(ppp->dev); } err = 0; break; @@ -626,11 +626,7 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev) pp[0] = proto >> 8; pp[1] = proto; - /* - * ppp->xq should only ever have more than 1 data packet on it - * if the core net code calls us when dev->tbusy == 1. - */ - dev->tbusy = 1; + netif_stop_queue(dev); skb_queue_tail(&ppp->xq, skb); if (trylock_xmit_path(ppp)) ppp_xmit_unlock(ppp, 0); @@ -731,12 +727,9 @@ ppp_xmit_unlock(struct ppp *ppp, int do_mark_bh) while (ppp->xmit_pending == 0 && (skb = skb_dequeue(&ppp->xq)) != 0) ppp_send_frame(ppp, skb); - if (ppp->xmit_pending == 0 && skb_peek(&ppp->xq) == 0 - && ppp->dev->tbusy) { - ppp->dev->tbusy = 0; - if (do_mark_bh) - mark_bh(NET_BH); - } + if (ppp->xmit_pending == 0 && skb_peek(&ppp->xq) == 0) + netif_wake_queue(ppp->dev); + /* Now unlock the transmit path, let others in. */ unlock_xmit_path(ppp); /* Check whether any work was queued up diff --git a/drivers/net/rtl8139.c b/drivers/net/rtl8139.c index 3e83a83c15e5..e5a5c6115505 100644 --- a/drivers/net/rtl8139.c +++ b/drivers/net/rtl8139.c @@ -500,6 +500,8 @@ static struct net_device *rtl8129_probe1(struct pci_dev *pdev, int pci_bus, /* The Rtl8129-specific entries in the device structure. 
*/ dev->open = &rtl8129_open; dev->hard_start_xmit = &rtl8129_start_xmit; + dev->tx_timeout = &rtl8129_tx_timeout; + dev->watchdog_timeo = TX_TIMEOUT; dev->stop = &rtl8129_close; dev->get_stats = &rtl8129_get_stats; dev->set_multicast_list = &set_rx_mode; @@ -742,10 +744,6 @@ rtl8129_open(struct net_device *dev) outb(CmdRxEnb | CmdTxEnb, ioaddr + ChipCmd); - dev->tbusy = 0; - dev->interrupt = 0; - dev->start = 1; - /* Enable all known interrupts by setting the interrupt mask. */ outw(PCIErr | PCSTimeout | RxUnderrun | RxOverflow | RxFIFOOver | TxErr | TxOK | RxErr | RxOK, ioaddr + IntrMask); @@ -796,7 +794,8 @@ static void rtl8129_timer(unsigned long data) rtl8129_interrupt(dev->irq, dev, 0); } } - if (dev->tbusy && jiffies - dev->trans_start >= 2*TX_TIMEOUT) + if (test_bit(LINK_STATE_XOFF, &dev->state) && + (jiffies - dev->trans_start) >= 2*TX_TIMEOUT) rtl8129_tx_timeout(dev); #if 0 @@ -944,7 +943,7 @@ static void rtl8129_tx_timeout(struct net_device *dev) i++; } if (tp->cur_tx - tp->dirty_tx < NUM_TX_DESC) {/* Typical path */ - dev->tbusy = 0; + netif_wake_queue(dev); tp->tx_full = 0; } else { tp->tx_full = 1; @@ -985,13 +984,7 @@ rtl8129_start_xmit(struct sk_buff *skb, struct net_device *dev) long ioaddr = dev->base_addr; int entry; - /* Block a timer-based transmit from overlapping. This could better be - done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */ - if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) { - if (jiffies - dev->trans_start >= TX_TIMEOUT) - rtl8129_tx_timeout(dev); - return 1; - } + netif_stop_queue(dev); /* Calculate the next Tx descriptor entry. 
*/ entry = tp->cur_tx % NUM_TX_DESC; @@ -1012,7 +1005,7 @@ rtl8129_start_xmit(struct sk_buff *skb, struct net_device *dev) ioaddr + TxStatus0 + entry*4); if (++tp->cur_tx - tp->dirty_tx < NUM_TX_DESC) { /* Typical path */ - clear_bit(0, (void*)&dev->tbusy); + netif_start_queue(dev); } else { tp->tx_full = 1; } @@ -1035,22 +1028,6 @@ static void rtl8129_interrupt(int irq, void *dev_instance, struct pt_regs *regs) int status, link_changed = 0; long ioaddr = dev->base_addr; -#if defined(__i386__) - /* A lock to prevent simultaneous entry bug on Intel SMP machines. */ - if (test_and_set_bit(0, (void*)&dev->interrupt)) { - printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n", - dev->name); - dev->interrupt = 0; /* Avoid halting machine. */ - return; - } -#else - if (dev->interrupt) { - printk(KERN_ERR "%s: Re-entering the interrupt handler.\n", dev->name); - return; - } - dev->interrupt = 1; -#endif - do { status = inw(ioaddr + IntrStatus); /* Acknowledge all of the current interrupt sources ASAP, but @@ -1122,10 +1099,9 @@ static void rtl8129_interrupt(int irq, void *dev_instance, struct pt_regs *regs) dev_free_skb(tp->tx_info[entry].skb); tp->tx_info[entry].skb = NULL; if (tp->tx_full) { - /* The ring is no longer full, clear tbusy. */ + /* The ring is no longer full, wake the queue. 
*/ tp->tx_full = 0; - clear_bit(0, (void*)&dev->tbusy); - mark_bh(NET_BH); + netif_wake_queue(dev); } dirty_tx++; } @@ -1198,12 +1174,6 @@ static void rtl8129_interrupt(int irq, void *dev_instance, struct pt_regs *regs) if (rtl8129_debug > 3) printk(KERN_DEBUG"%s: exiting interrupt, intr_status=%#4.4x.\n", dev->name, inl(ioaddr + IntrStatus)); - -#if defined(__i386__) - clear_bit(0, (void*)&dev->interrupt); -#else - dev->interrupt = 0; -#endif return; } @@ -1325,8 +1295,7 @@ rtl8129_close(struct net_device *dev) struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv; int i; - dev->start = 0; - dev->tbusy = 1; + netif_stop_queue(dev); if (rtl8129_debug > 1) printk(KERN_DEBUG"%s: Shutting down ethercard, status was 0x%4.4x.\n", @@ -1403,7 +1372,7 @@ rtl8129_get_stats(struct net_device *dev) struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv; long ioaddr = dev->base_addr; - if (dev->start) { + if (test_bit(LINK_STATE_START, &dev->state)) { tp->stats.rx_missed_errors += inl(ioaddr + RxMissed); outl(0, ioaddr + RxMissed); } diff --git a/drivers/net/setup.c b/drivers/net/setup.c index cf972e7e8b68..f57b97d491c6 100644 --- a/drivers/net/setup.c +++ b/drivers/net/setup.c @@ -29,6 +29,7 @@ extern int awc4500_365_probe(void); extern int arcnet_init(void); extern int bigmac_probe(void); extern int bmac_probe(void); +extern int gmac_probe(void); extern int cpm_enet_init(void); extern int oaknet_init(void); extern int dlci_setup(void); @@ -167,6 +168,9 @@ struct net_probe pci_probes[] __initdata = { #ifdef CONFIG_BMAC {bmac_probe, 0}, #endif +#ifdef CONFIG_GMAC + {gmac_probe, 0}, +#endif #ifdef CONFIG_NCR885E {ncr885e_probe, 0}, #endif diff --git a/drivers/net/shaper.c b/drivers/net/shaper.c index 02c76f50a5ec..89cbb0e9e94b 100644 --- a/drivers/net/shaper.c +++ b/drivers/net/shaper.c @@ -287,6 +287,7 @@ static void shaper_timer(unsigned long data) { struct shaper *sh=(struct shaper *)data; shaper_kick(sh); + timer_exit(&sh->timer); } /* @@ -404,9 +405,7 
@@ static int shaper_close(struct net_device *dev) { struct shaper *shaper=dev->priv; shaper_flush(shaper); - start_bh_atomic(); - del_timer(&shaper->timer); - end_bh_atomic(); + del_timer_sync(&shaper->timer); MOD_DEC_USE_COUNT; return 0; } diff --git a/drivers/net/sk98lin/skge.c b/drivers/net/sk98lin/skge.c index decb0f961c18..7e834bd7b3fb 100644 --- a/drivers/net/sk98lin/skge.c +++ b/drivers/net/sk98lin/skge.c @@ -653,7 +653,7 @@ SK_EVPARA EvPara; pAC = (SK_AC*)root_dev->priv; next = pAC->Next; - root_dev->tbusy = 1; + netif_stop_queue(root_dev); SkGeYellowLED(pAC, pAC->IoBase, 0); if(pAC->BoardLevel == 2) { @@ -1432,10 +1432,6 @@ SK_EVPARA EvPara; /* an event parameter union */ SkEventDispatcher(pAC, pAC->IoBase); spin_unlock_irqrestore(&pAC->SlowPathLock, Flags); - dev->tbusy = 0; - dev->interrupt = 0; - dev->start = 1; - MOD_INC_USE_COUNT; SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY, @@ -1464,9 +1460,8 @@ unsigned int Flags; /* for spin lock */ int i; SK_EVPARA EvPara; - dev->start = 0; - set_bit(0, (void*)&dev->tbusy); - + netif_stop_queue(dev); + pAC = (SK_AC*) dev->priv; SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY, @@ -1533,7 +1528,7 @@ int Rc; /* return code of XmitFrame */ if (Rc == 0) { /* transmitter out of resources */ - set_bit(0, (void*) &dev->tbusy); + netif_stop_queue(dev); /* give buffer ownership back to the queueing layer */ return (1); @@ -1688,13 +1683,13 @@ SK_U64 PhysAddr; /* address of DMA mapping */ * freed ( -> ring completely free now). 
*/ pTxPort->pTxdRingTail = pTxd; - pAC->dev->tbusy = 0; + netif_start_queue(pAC->dev); return; } if (Control & TX_CTRL_OWN_BMU) { pTxPort->pTxdRingTail = pTxd; if (pTxPort->TxdRingFree > 0) { - pAC->dev->tbusy = 0; + netif_start_queue(pAC->dev); } return; } @@ -2300,7 +2295,7 @@ unsigned int Flags; SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY, ("SkGeSetMacAddr starts now...\n")); - if(dev->start) { + if(test_bit(LINK_STATE_START, &dev->state)) { return -EBUSY; } memcpy(dev->dev_addr, addr->sa_data,dev->addr_len); @@ -2426,7 +2421,7 @@ SK_EVPARA EvPara; spin_lock_irqsave( &pAC->TxPort[i][TX_PRIO_LOW].TxDesRingLock, Flags); } - pAC->dev->tbusy = 1; + netif_stop_queue(pAC->dev); /* * adjust number of rx buffers allocated @@ -2515,7 +2510,7 @@ SK_EVPARA EvPara; } #endif - pAC->dev->tbusy = 0; + netif_start_queue(pAC->dev); for (i=pAC->GIni.GIMacsFound-1; i>=0; i--) { spin_unlock_irqrestore( &pAC->TxPort[i][TX_PRIO_LOW].TxDesRingLock, Flags); diff --git a/drivers/net/skeleton.c b/drivers/net/skeleton.c index 5b3cfababf59..21628c1fc8b9 100644 --- a/drivers/net/skeleton.c +++ b/drivers/net/skeleton.c @@ -53,6 +53,7 @@ static const char *version = #include #include #include +#include #include #include #include @@ -83,10 +84,19 @@ static unsigned int net_debug = NET_DEBUG; /* The number of low I/O ports used by the ethercard. */ #define NETCARD_IO_EXTENT 32 +#define MY_TX_TIMEOUT ((400*HZ)/1000) + /* Information that need to be kept for each board. */ struct net_local { struct net_device_stats stats; long open_time; /* Useless example local info. */ + + /* Tx control lock. This protects the transmit buffer ring + * state along with the "tx full" state of the driver. This + * means all netif_queue flow control actions are protected + * by this lock as well. + */ + spinlock_t lock; }; /* The station (ethernet) address prefix, used for IDing the board. 
*/ @@ -106,6 +116,8 @@ static void net_rx(struct net_device *dev); static int net_close(struct net_device *dev); static struct net_device_stats *net_get_stats(struct net_device *dev); static void set_multicast_list(struct net_device *dev); +static void net_tx_timeout(struct net_device *dev); + /* Example routines you must write ;->. */ #define tx_done(dev) 1 @@ -157,6 +169,7 @@ netcard_probe(struct net_device *dev) */ static int __init netcard_probe1(struct net_device *dev, int ioaddr) { + struct net_local *np; static unsigned version_printed = 0; int i; @@ -282,6 +295,9 @@ static int __init netcard_probe1(struct net_device *dev, int ioaddr) memset(dev->priv, 0, sizeof(struct net_local)); + np = (struct net_local *)dev->priv; + spin_lock_init(&np->lock); + /* Grab the region so that no one else tries to probe our ioports. */ request_region(ioaddr, NETCARD_IO_EXTENT, cardname); @@ -291,12 +307,41 @@ static int __init netcard_probe1(struct net_device *dev, int ioaddr) dev->get_stats = net_get_stats; dev->set_multicast_list = &set_multicast_list; + dev->tx_timeout = &net_tx_timeout; + dev->watchdog_timeo = MY_TX_TIMEOUT; + /* Fill in the fields of the device structure with ethernet values. */ ether_setup(dev); return 0; } +static void net_tx_timeout(struct net_device *dev) +{ + struct net_local *np = (struct net_local *)dev->priv; + + printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name, + tx_done(dev) ? "IRQ conflict" : "network cable problem"); + + /* Try to restart the adaptor. */ + chipset_init(dev, 1); + + np->stats.tx_errors++; + + /* If we have space available to accept new transmit + * requests, wake up the queueing layer. This would + * be the case if the chipset_init() call above just + * flushes out the tx queue and empties it. + * + * If instead, the tx queue is retained then the + * netif_wake_queue() call should be placed in the + * TX completion interrupt handler of the driver instead + * of here. 
+ */ + if (!tx_full(dev)) + netif_wake_queue(dev); +} + /* * Open/initialize the board. This is called (in the current kernel) * sometime after booting when the 'ifconfig' program is run. @@ -308,7 +353,7 @@ static int __init netcard_probe1(struct net_device *dev, int ioaddr) static int net_open(struct net_device *dev) { - struct net_local *lp = (struct net_local *)dev->priv; + struct net_local *np = (struct net_local *)dev->priv; int ioaddr = dev->base_addr; /* * This is used if the interrupt line can turned off (shared). @@ -327,100 +372,155 @@ net_open(struct net_device *dev) } /* Reset the hardware here. Don't forget to set the station address. */ - /*chipset_init(dev, 1);*/ + chipset_init(dev, 1); outb(0x00, ioaddr); - lp->open_time = jiffies; + np->open_time = jiffies; - dev->tbusy = 0; - dev->interrupt = 0; - dev->start = 1; + /* We are now ready to accept transmit requeusts from + * the queueing layer of the networking. + */ + netif_start_queue(dev); MOD_INC_USE_COUNT; return 0; } +/* This will only be invoked if your driver is _not_ in XOFF state. + * What this means is that you need not check it, and that this + * invariant will hold if you make sure that the netif_*_queue() + * calls are done at the proper times. + */ static int net_send_packet(struct sk_buff *skb, struct net_device *dev) { - struct net_local *lp = (struct net_local *)dev->priv; + struct net_local *np = (struct net_local *)dev->priv; int ioaddr = dev->base_addr; + short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; + unsigned char *buf = skb->data; + + /* If some error occurs while trying to transmit this + * packet, you should return '1' from this function. + * In such a case you _may not_ do anything to the + * SKB, it is still owned by the network queueing + * layer when an error is returned. This means you + * may not modify any SKB fields, you may not free + * the SKB, etc. + */ - if (dev->tbusy) { - /* - * If we get here, some higher level has decided we are broken. 
- * There should really be a "kick me" function call instead. - */ - int tickssofar = jiffies - dev->trans_start; - if (tickssofar < 5) - return 1; - printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name, - tx_done(dev) ? "IRQ conflict" : "network cable problem"); - /* Try to restart the adaptor. */ - chipset_init(dev, 1); - dev->tbusy=0; - dev->trans_start = jiffies; - } +#if TX_RING + /* This is the most common case for modern hardware. + * The spinlock protects this code from the TX complete + * hardware interrupt handler. Queue flow control is + * thus managed under this lock as well. + */ + spin_lock_irq(&np->lock); - /* - * Block a timer-based transmit from overlapping. This could better be - * done with atomic_swap(1, dev->tbusy), but set_bit() works as well. + add_to_tx_ring(np, skb, length); + dev->trans_start = jiffied; + + /* If we just used up the very last entry in the + * TX ring on this device, tell the queueing + * layer to send no more. */ - if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) - printk(KERN_WARNING "%s: Transmitter access conflict.\n", dev->name); - else { - short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; - unsigned char *buf = skb->data; - lp->stats.tx_bytes+=skb->len; - hardware_send_packet(ioaddr, buf, length); - dev->trans_start = jiffies; - } - dev_kfree_skb (skb); + if (tx_full(dev)) + netif_stop_queue(dev); + + /* When the TX completion hw interrupt arrives, this + * is when the transmit statistics are updated. + */ + + spin_unlock_irq(&np->lock); +#else + /* This is the case for older hardware which takes + * a single transmit buffer at a time, and it is + * just written to the device via PIO. + * + * No spin locking is needed since there is no TX complete + * event. If by chance your card does have a TX complete + * hardware IRQ then you may need to utilize np->lock here. 
+ */ + hardware_send_packet(ioaddr, buf, length); + np->stats.tx_bytes += skb->len; + + dev->trans_start = jiffies; /* You might need to clean up and record Tx statistics here. */ if (inw(ioaddr) == /*RU*/81) - lp->stats.tx_aborted_errors++; + np->stats.tx_aborted_errors++; + dev_kfree_skb (skb); +#endif return 0; } +#if TX_RING +/* This handles TX complete events posted by the device + * via interrupts. + */ +void net_tx(struct net_device *dev) +{ + struct net_local *np = (struct net_local *)dev->priv; + int entry; + + /* This protects us from concurrent execution of + * our dev->hard_start_xmit function above. + */ + spin_lock(&np->lock); + + entry = np->tx_old; + while (tx_entry_is_sent(np, entry)) { + struct sk_buff *skb = np->skbs[entry]; + + np->stats.tx_bytes += skb->len; + dev_kfree_skb_irq (skb); + + entry = next_tx_entry(np, entry); + } + np->tx_old = entry; + + /* If we had stopped the queue due to a "tx full" + * condition, and space has now been made available, + * wake up the queue. + */ + if (test_bit(LINK_STATE_XOFF, &dev->state) && + ! tx_full(dev)) + netif_wake_queue(dev); + + spin_unlock(&np->lock); +} +#endif + /* * The typical workload of the driver: - * Handle the network interface interrupts. + * Handle the network interface interrupts. */ static void net_interrupt(int irq, void *dev_id, struct pt_regs * regs) { struct net_device *dev = dev_id; - struct net_local *lp; - int ioaddr, status, boguscount = 0; - - if (dev == NULL) { - printk(KERN_WARNING "%s: irq %d for unknown device.\n", cardname, irq); - return; - } - dev->interrupt = 1; + struct net_local *np; + int ioaddr, status; ioaddr = dev->base_addr; - lp = (struct net_local *)dev->priv; - status = inw(ioaddr + 0); - do { - if (status /*& RX_INTR*/) { - /* Got a packet(s). */ - net_rx(dev); - } - if (status /*& TX_INTR*/) { - lp->stats.tx_packets++; - dev->tbusy = 0; - mark_bh(NET_BH); /* Inform upper layers. 
*/ - } - if (status /*& COUNTERS_INTR*/) { - /* Increment the appropriate 'localstats' field. */ - lp->stats.tx_window_errors++; - } - } while (++boguscount < 20) ; + np = (struct net_local *)dev->priv; + status = inw(ioaddr + 0); - dev->interrupt = 0; - return; + if (status & RX_INTR) { + /* Got a packet(s). */ + net_rx(dev); + } +#if TX_RING + if (status & TX_INTR) { + /* Transmit complete. */ + net_tx(dev); + np->stats.tx_packets++; + netif_wake_queue(dev); + } +#endif + if (status & COUNTERS_INTR) { + /* Increment the appropriate 'localstats' field. */ + np->stats.tx_window_errors++; + } } /* We have a good packet(s), get it/them out of the buffers. */ @@ -470,11 +570,6 @@ net_rx(struct net_device *dev) } } while (--boguscount); - /* - * If any worth-while packets have been received, dev_rint() - * has done a mark_bh(NET_BH) for us and will work on them - * when we get to the bottom-half routine. - */ return; } @@ -487,8 +582,7 @@ net_close(struct net_device *dev) lp->open_time = 0; - dev->tbusy = 1; - dev->start = 0; + netif_stop_queue(dev); /* Flush the Tx and disable Rx here. 
*/ diff --git a/drivers/net/slip.c b/drivers/net/slip.c index 7bf1692e3471..749e8e150a0a 100644 --- a/drivers/net/slip.c +++ b/drivers/net/slip.c @@ -169,9 +169,9 @@ sl_alloc_bufs(struct slip *sl, int mtu) if (slcomp == NULL) goto err_exit; #endif - start_bh_atomic(); + spin_lock_bh(&sl->lock); if (sl->tty == NULL) { - end_bh_atomic(); + spin_unlock_bh(&sl->lock); err = -ENODEV; goto err_exit; } @@ -189,7 +189,7 @@ sl_alloc_bufs(struct slip *sl, int mtu) sl->xbits = 0; #endif #endif - end_bh_atomic(); + spin_unlock_bh(&sl->lock); err = 0; /* Cleanup */ @@ -268,7 +268,7 @@ static int sl_realloc_bufs(struct slip *sl, int mtu) goto done; } - start_bh_atomic(); + spin_lock_bh(&sl->lock); err = -ENODEV; if (sl->tty == NULL) @@ -304,7 +304,7 @@ static int sl_realloc_bufs(struct slip *sl, int mtu) err = 0; done_on_bh: - end_bh_atomic(); + spin_unlock_bh(&sl->lock); done: if (xbuff) @@ -323,9 +323,7 @@ done: static inline void sl_lock(struct slip *sl) { - if (test_and_set_bit(0, (void *) &sl->dev->tbusy)) { - printk("%s: trying to lock already locked device!\n", sl->dev->name); - } + netif_stop_queue(sl->dev); } @@ -333,9 +331,7 @@ sl_lock(struct slip *sl) static inline void sl_unlock(struct slip *sl) { - if (!test_and_clear_bit(0, (void *)&sl->dev->tbusy)) { - printk("%s: trying to unlock already unlocked device!\n", sl->dev->name); - } + netif_wake_queue(sl->dev); } /* Send one completely decapsulated IP datagram to the IP layer. */ @@ -453,7 +449,7 @@ static void slip_write_wakeup(struct tty_struct *tty) struct slip *sl = (struct slip *) tty->disc_data; /* First make sure we're connected. 
*/ - if (!sl || sl->magic != SLIP_MAGIC || !sl->dev->start) { + if (!sl || sl->magic != SLIP_MAGIC || !test_bit(LINK_STATE_START, &sl->dev->state)) { return; } if (sl->xleft <= 0) { @@ -462,7 +458,6 @@ static void slip_write_wakeup(struct tty_struct *tty) sl->tx_packets++; tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP); sl_unlock(sl); - mark_bh(NET_BH); return; } @@ -471,40 +466,25 @@ static void slip_write_wakeup(struct tty_struct *tty) sl->xhead += actual; } -/* Encapsulate an IP datagram and kick it into a TTY queue. */ -static int -sl_xmit(struct sk_buff *skb, struct net_device *dev) +static void sl_tx_timeout(struct net_device *dev) { struct slip *sl = (struct slip*)(dev->priv); - if (!dev->start) { - printk("%s: xmit call when iface is down\n", dev->name); - dev_kfree_skb(skb); - return 0; - } - if (sl->tty == NULL) { - dev_kfree_skb(skb); - return 0; - } + spin_lock(&sl->lock); + + if (test_bit(LINK_STATE_XOFF, &dev->state)) { + struct slip *sl = (struct slip*)(dev->priv); + + if (!test_bit(LINK_STATE_START, &dev->state)) + goto out; - /* - * If we are busy already- too bad. We ought to be able - * to queue things at this point, to allow for a little - * frame buffer. Oh well... - * ----------------------------------------------------- - * I hate queues in SLIP driver. May be it's efficient, - * but for me latency is more important. ;) - * So, no queues ! - * 14 Oct 1994 Dmitry Gorodchanin. - */ - if (dev->tbusy) { /* May be we must check transmitter timeout here ? * 14 Oct 1994 Dmitry Gorodchanin. */ #ifdef SL_CHECK_TRANSMIT if (jiffies - dev->trans_start < 20 * HZ) { /* 20 sec timeout not reached */ - return 1; + goto out; } printk("%s: transmit timed out, %s?\n", dev->name, (sl->tty->driver.chars_in_buffer(sl->tty) || sl->xleft) ? @@ -512,19 +492,39 @@ sl_xmit(struct sk_buff *skb, struct net_device *dev) sl->xleft = 0; sl->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP); sl_unlock(sl); -#else - return 1; #endif } - /* We were not busy, so we are now... 
:-) */ - if (skb != NULL) - { - sl_lock(sl); - sl->tx_bytes+=skb->len; - sl_encaps(sl, skb->data, skb->len); +out: + spin_unlock(&sl->lock); +} + + +/* Encapsulate an IP datagram and kick it into a TTY queue. */ +static int +sl_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct slip *sl = (struct slip*)(dev->priv); + + spin_lock(&sl->lock); + if (!test_bit(LINK_STATE_START, &dev->state)) { + spin_unlock(&sl->lock); + printk("%s: xmit call when iface is down\n", dev->name); dev_kfree_skb(skb); + return 0; } + if (sl->tty == NULL) { + spin_unlock(&sl->lock); + dev_kfree_skb(skb); + return 0; + } + + sl_lock(sl); + sl->tx_bytes+=skb->len; + sl_encaps(sl, skb->data, skb->len); + spin_unlock(&sl->lock); + + dev_kfree_skb(skb); return 0; } @@ -540,16 +540,15 @@ sl_close(struct net_device *dev) { struct slip *sl = (struct slip*)(dev->priv); - start_bh_atomic(); + spin_lock_bh(&sl->lock); if (sl->tty) { /* TTY discipline is running. */ sl->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP); } - dev->tbusy = 1; - dev->start = 0; + netif_stop_queue(dev); sl->rcount = 0; sl->xleft = 0; - end_bh_atomic(); + spin_unlock_bh(&sl->lock); MOD_DEC_USE_COUNT; return 0; @@ -565,8 +564,7 @@ static int sl_open(struct net_device *dev) return -ENODEV; sl->flags &= (1 << SLF_INUSE); - dev->start = 1; - dev->tbusy = 0; + netif_start_queue(dev); MOD_INC_USE_COUNT; return 0; } @@ -634,6 +632,10 @@ static int sl_init(struct net_device *dev) dev->mtu = sl->mtu; dev->hard_start_xmit = sl_xmit; +#ifdef SL_CHECK_TRANSMIT + dev->tx_timeout = sl_tx_timeout; + dev->watchdog_timeo = 20*HZ; +#endif dev->open = sl_open; dev->stop = sl_close; dev->get_stats = sl_get_stats; @@ -676,7 +678,8 @@ static void slip_receive_buf(struct tty_struct *tty, const unsigned char *cp, ch { struct slip *sl = (struct slip *) tty->disc_data; - if (!sl || sl->magic != SLIP_MAGIC || !sl->dev->start) + if (!sl || sl->magic != SLIP_MAGIC || + !test_bit(LINK_STATE_START, &sl->dev->state)) return; /* Read the characters out of 
the buffer */ @@ -800,6 +803,7 @@ sl_alloc(kdev_t line) /* Initialize channel control data */ sl->magic = SLIP_MAGIC; sl->dev = &slp->dev; + spin_lock_init(&sl->lock); sl->mode = SL_MODE_DEFAULT; sprintf(slp->if_name, "sl%d", i); slp->dev.name = slp->if_name; @@ -946,10 +950,8 @@ slip_close(struct tty_struct *tty) /* VSV = very important to remove timers */ #ifdef CONFIG_SLIP_SMART - if (sl->keepalive) - del_timer (&sl->keepalive_timer); - if (sl->outfill) - del_timer (&sl->outfill_timer); + del_timer_sync(&sl->keepalive_timer); + del_timer_sync(&sl->outfill_timer); #endif /* Count references from TTY module */ @@ -1182,20 +1184,18 @@ slip_ioctl(struct tty_struct *tty, void *file, int cmd, void *arg) if (tmp > 255) /* max for unchar */ return -EINVAL; - start_bh_atomic(); + spin_lock_bh(&sl->lock); if (!sl->tty) { - end_bh_atomic(); + spin_unlock_bh(&sl->lock); return -ENODEV; } - if (sl->keepalive) - (void)del_timer (&sl->keepalive_timer); if ((sl->keepalive = (unchar) tmp) != 0) { - sl->keepalive_timer.expires=jiffies+sl->keepalive*HZ; - add_timer(&sl->keepalive_timer); + mod_timer(&sl->keepalive_timer, jiffies+sl->keepalive*HZ); set_bit(SLF_KEEPTEST, &sl->flags); - } - end_bh_atomic(); - + } else { + del_timer (&sl->keepalive_timer); + } + spin_unlock_bh(&sl->lock); return 0; case SIOCGKEEPALIVE: @@ -1208,19 +1208,18 @@ slip_ioctl(struct tty_struct *tty, void *file, int cmd, void *arg) return -EFAULT; if (tmp > 255) /* max for unchar */ return -EINVAL; - start_bh_atomic(); + spin_lock_bh(&sl->lock); if (!sl->tty) { - end_bh_atomic(); + spin_unlock_bh(&sl->lock); return -ENODEV; } - if (sl->outfill) - (void)del_timer (&sl->outfill_timer); if ((sl->outfill = (unchar) tmp) != 0){ - sl->outfill_timer.expires=jiffies+sl->outfill*HZ; - add_timer(&sl->outfill_timer); + mod_timer(&sl->outfill_timer, jiffies+sl->outfill*HZ); set_bit(SLF_OUTWAIT, &sl->flags); + } else { + del_timer (&sl->outfill_timer); } - end_bh_atomic(); + spin_unlock_bh(&sl->lock); return 0; case 
SIOCGOUTFILL: @@ -1253,10 +1252,10 @@ static int sl_ioctl(struct net_device *dev,struct ifreq *rq,int cmd) if (sl == NULL) /* Allocation failed ?? */ return -ENODEV; - start_bh_atomic(); /* Hangup would kill us */ + spin_lock_bh(&sl->lock); if (!sl->tty) { - end_bh_atomic(); + spin_unlock_bh(&sl->lock); return -ENODEV; } @@ -1265,14 +1264,15 @@ static int sl_ioctl(struct net_device *dev,struct ifreq *rq,int cmd) /* max for unchar */ if (((unsigned int)((unsigned long)rq->ifr_data)) > 255) return -EINVAL; - if (sl->keepalive) - (void)del_timer (&sl->keepalive_timer); sl->keepalive = (unchar) ((unsigned long)rq->ifr_data); if (sl->keepalive != 0) { sl->keepalive_timer.expires=jiffies+sl->keepalive*HZ; - add_timer(&sl->keepalive_timer); + mod_timer(&sl->keepalive_timer, jiffies+sl->keepalive*HZ); set_bit(SLF_KEEPTEST, &sl->flags); - } + } else { + del_timer(&sl->keepalive_timer); + } + spin_unlock_bh(&sl->lock); break; case SIOCGKEEPALIVE: @@ -1282,12 +1282,11 @@ static int sl_ioctl(struct net_device *dev,struct ifreq *rq,int cmd) case SIOCSOUTFILL: if (((unsigned)((unsigned long)rq->ifr_data)) > 255) /* max for unchar */ return -EINVAL; - if (sl->outfill) - del_timer (&sl->outfill_timer); if ((sl->outfill = (unchar)((unsigned long) rq->ifr_data)) != 0){ - sl->outfill_timer.expires=jiffies+sl->outfill*HZ; - add_timer(&sl->outfill_timer); + mod_timer(&sl->outfill_timer, jiffies+sl->outfill*HZ); set_bit(SLF_OUTWAIT, &sl->flags); + } else { + del_timer (&sl->outfill_timer); } break; @@ -1300,7 +1299,7 @@ static int sl_ioctl(struct net_device *dev,struct ifreq *rq,int cmd) and opened by another process device. 
*/ if (sl->tty != current->tty && sl->pid != current->pid) { - end_bh_atomic(); + spin_unlock_bh(&sl->lock); return -EPERM; } sl->leased = 0; @@ -1311,7 +1310,7 @@ static int sl_ioctl(struct net_device *dev,struct ifreq *rq,int cmd) case SIOCGLEASE: rq->ifr_data=(caddr_t)((unsigned long)sl->leased); }; - end_bh_atomic(); + spin_unlock_bh(&sl->lock); return 0; } #endif @@ -1399,15 +1398,19 @@ cleanup_module(void) } busy = 0; - start_bh_atomic(); + local_bh_disable(); for (i = 0; i < slip_maxdev; i++) { struct slip_ctrl *slc = slip_ctrls[i]; - if (slc && slc->ctrl.tty) { + if (!slc) + continue; + spin_lock(&slc->ctrl.lock); + if (slc->ctrl.tty) { busy++; tty_hangup(slc->ctrl.tty); } + spin_unlock(&slc->ctrl.lock); } - end_bh_atomic(); + local_bh_enable(); } while (busy && jiffies - start < 1*HZ); busy = 0; @@ -1449,8 +1452,10 @@ static void sl_outfill(unsigned long sls) { struct slip *sl=(struct slip *)sls; - if (sl==NULL || sl->tty == NULL) - return; + spin_lock(&sl->lock); + + if (sl->tty == NULL) + goto out; if(sl->outfill) { @@ -1463,7 +1468,7 @@ static void sl_outfill(unsigned long sls) unsigned char s = END; #endif /* put END into tty queue. Is it right ??? 
*/ - if (!test_bit(0, (void *) &sl->dev->tbusy)) + if (!test_bit(LINK_STATE_XOFF, &sl->dev->state)) { /* if device busy no outfill */ sl->tty->driver.write(sl->tty, 0, &s, 1); @@ -1471,18 +1476,22 @@ static void sl_outfill(unsigned long sls) } else set_bit(SLF_OUTWAIT, &sl->flags); - (void)del_timer(&sl->outfill_timer); - sl->outfill_timer.expires=jiffies+sl->outfill*HZ; - add_timer(&sl->outfill_timer); + + mod_timer(&sl->outfill_timer, jiffies+sl->outfill*HZ); } +out: + spin_unlock(&sl->lock); + timer_exit(&sl->outfill_timer); } static void sl_keepalive(unsigned long sls) { struct slip *sl=(struct slip *)sls; - if (sl == NULL || sl->tty == NULL) - return; + spin_lock(&sl->lock); + + if (sl->tty == NULL) + goto out; if( sl->keepalive) { @@ -1494,13 +1503,17 @@ static void sl_keepalive(unsigned long sls) printk("%s: no packets received during keepalive timeout, hangup.\n", sl->dev->name); tty_hangup(sl->tty); /* this must hangup tty & close slip */ /* I think we need not something else */ - return; + goto out; } else set_bit(SLF_KEEPTEST, &sl->flags); - sl->keepalive_timer.expires=jiffies+sl->keepalive*HZ; - add_timer(&sl->keepalive_timer); + + mod_timer(&sl->keepalive_timer, jiffies+sl->keepalive*HZ); } + +out: + spin_unlock(&sl->lock); + timer_exit(&sl->keepalive_timer); } #endif diff --git a/drivers/net/slip.h b/drivers/net/slip.h index 6172f2a1f3e4..c9f064b1d5e2 100644 --- a/drivers/net/slip.h +++ b/drivers/net/slip.h @@ -52,7 +52,9 @@ struct slip { /* Various fields. 
*/ struct tty_struct *tty; /* ptr to TTY structure */ - struct net_device *dev; /* easy for intr handling */ + struct net_device *dev; /* easy for intr handling */ + spinlock_t lock; + #ifdef SL_INCLUDE_CSLIP struct slcompress *slcomp; /* for header compression */ unsigned char *cbuff; /* compression buffer */ diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c index b8ba1eb6d0c8..4f0e291fdd41 100644 --- a/drivers/net/starfire.c +++ b/drivers/net/starfire.c @@ -562,6 +562,8 @@ starfire_probe1(struct pci_dev *pdev, int pci_bus, int pci_devfn, long ioaddr, i /* The chip-specific entries in the device structure. */ dev->open = &netdev_open; dev->hard_start_xmit = &start_tx; + dev->tx_timeout = tx_timeout; + dev->watchdog_timeo = TX_TIMEOUT; dev->stop = &netdev_close; dev->get_stats = &get_stats; dev->set_multicast_list = &set_rx_mode; @@ -701,17 +703,12 @@ static int netdev_open(struct net_device *dev) if (dev->if_port == 0) dev->if_port = np->default_port; - dev->tbusy = 0; - dev->interrupt = 0; - if (debug > 1) printk(KERN_DEBUG "%s: Setting the Rx and Tx modes.\n", dev->name); set_rx_mode(dev); check_duplex(dev, 1); - dev->start = 1; - /* Set the interrupt mask and enable PCI interrupts. */ writel(IntrRxDone | IntrRxEmpty | IntrRxPCIErr | IntrTxDone | IntrTxEmpty | IntrTxPCIErr | @@ -821,7 +818,7 @@ static void tx_timeout(struct net_device *dev) /* Trigger an immediate transmit demand. */ - dev->trans_start = jiffies; + netif_wake_queue(dev); np->stats.tx_errors++; return; } @@ -881,14 +878,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev) struct netdev_private *np = (struct netdev_private *)dev->priv; unsigned entry; - /* Block a timer-based transmit from overlapping. This could better be - done with atomic_swap(1, dev->tbusy), but set_bit() works as well. 
*/ - if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) { - if (jiffies - dev->trans_start < TX_TIMEOUT) - return 1; - tx_timeout(dev); - return 1; - } + netif_stop_queue(dev); /* Caution: the write order is important here, set the field with the "ownership" bits last. */ @@ -925,7 +915,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev) if (np->cur_tx - np->dirty_tx >= TX_RING_SIZE - 1) np->tx_full = 1; if (! np->tx_full) - clear_bit(0, (void*)&dev->tbusy); + netif_start_queue(dev); dev->trans_start = jiffies; if (debug > 4) { @@ -953,21 +943,6 @@ static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs) ioaddr = dev->base_addr; np = (struct netdev_private *)dev->priv; -#if defined(__i386__) - /* A lock to prevent simultaneous entry bug on Intel SMP machines. */ - if (test_and_set_bit(0, (void*)&dev->interrupt)) { - printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n", - dev->name); - dev->interrupt = 0; /* Avoid halting machine. */ - return; - } -#else - if (dev->interrupt) { - printk(KERN_ERR "%s: Re-entering the interrupt handler.\n", dev->name); - return; - } - dev->interrupt = 1; -#endif do { u32 intr_status = readl(ioaddr + IntrClear); @@ -1027,10 +1002,9 @@ static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs) writew(np->tx_done, ioaddr + CompletionQConsumerIdx + 2); } if (np->tx_full && np->cur_tx - np->dirty_tx < TX_RING_SIZE - 4) { - /* The ring is no longer full, clear tbusy. */ + /* The ring is no longer full, wake the queue. */ np->tx_full = 0; - clear_bit(0, (void*)&dev->tbusy); - mark_bh(NET_BH); + netif_wake_queue(dev); } /* Abnormal error summary/uncommon events handlers. */ @@ -1049,23 +1023,6 @@ static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs) printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n", dev->name, readl(ioaddr + IntrStatus)); -#ifndef final_version - /* Code that should never be run! Remove after testing.. 
*/ - { - static int stopit = 10; - if (dev->start == 0 && --stopit < 0) { - printk(KERN_ERR "%s: Emergency stop, looping startup interrupt.\n", - dev->name); - free_irq(irq, dev); - } - } -#endif - -#if defined(__i386__) - clear_bit(0, (void*)&dev->interrupt); -#else - dev->interrupt = 0; -#endif return; } @@ -1357,8 +1314,7 @@ static int netdev_close(struct net_device *dev) struct netdev_private *np = (struct netdev_private *)dev->priv; int i; - dev->start = 0; - dev->tbusy = 1; + netif_stop_queue(dev); if (debug > 1) { printk(KERN_DEBUG "%s: Shutting down ethercard, status was Int %4.4x.\n", diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c index b86c8fdb687d..ad9a1f7c76b1 100644 --- a/drivers/net/sunbmac.c +++ b/drivers/net/sunbmac.c @@ -1,4 +1,4 @@ -/* $Id: sunbmac.c,v 1.13 2000/01/28 13:42:29 jj Exp $ +/* $Id: sunbmac.c,v 1.14 2000/02/09 11:15:35 davem Exp $ * sunbmac.c: Driver for Sparc BigMAC 100baseT ethernet adapters. * * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@redhat.com) @@ -190,14 +190,20 @@ static void bigmac_clean_rings(struct bigmac *bp) for (i = 0; i < RX_RING_SIZE; i++) { if (bp->rx_skbs[i] != NULL) { - dev_kfree_skb(bp->rx_skbs[i]); + if (in_irq()) + dev_kfree_skb_irq(bp->rx_skbs[i]); + else + dev_kfree_skb(bp->rx_skbs[i]); bp->rx_skbs[i] = NULL; } } for (i = 0; i < TX_RING_SIZE; i++) { if (bp->tx_skbs[i] != NULL) { - dev_kfree_skb(bp->tx_skbs[i]); + if (in_irq()) + dev_kfree_skb_irq(bp->tx_skbs[i]); + else + dev_kfree_skb(bp->tx_skbs[i]); bp->tx_skbs[i] = NULL; } } @@ -750,8 +756,12 @@ static void bigmac_is_medium_rare(struct bigmac *bp, u32 qec_status, u32 bmac_st static void bigmac_tx(struct bigmac *bp) { struct be_txd *txbase = &bp->bmac_block->be_txd[0]; - int elem = bp->tx_old; + struct net_device *dev = bp->dev; + int elem; + + spin_lock(&bp->lock); + elem = bp->tx_old; DTX(("bigmac_tx: tx_old[%d] ", elem)); while (elem != bp->tx_new) { struct sk_buff *skb; @@ -770,12 +780,18 @@ static void bigmac_tx(struct bigmac 
*bp) DTX(("skb(%p) ", skb)); bp->tx_skbs[elem] = NULL; - dev_kfree_skb(skb); + dev_kfree_skb_irq(skb); elem = NEXT_TX(elem); } DTX((" DONE, tx_old=%d\n", elem)); bp->tx_old = elem; + + if (test_bit(LINK_STATE_XOFF, &dev->state) && + TX_BUFFS_AVAIL(bp) > 0) + netif_wake_queue(bp->dev); + + spin_unlock(&bp->lock); } /* BigMAC receive complete service routines. */ @@ -874,8 +890,6 @@ static void bigmac_interrupt(int irq, void *dev_id, struct pt_regs *regs) bmac_status = sbus_readl(bp->creg + CREG_STAT); qec_status = sbus_readl(bp->gregs + GLOB_STAT); - bp->dev->interrupt = 1; - DIRQ(("qec_status=%08x bmac_status=%08x\n", qec_status, bmac_status)); if ((qec_status & (GLOB_STAT_ER | GLOB_STAT_BM)) || (bmac_status & CREG_STAT_ERRORS)) @@ -886,13 +900,6 @@ static void bigmac_interrupt(int irq, void *dev_id, struct pt_regs *regs) if (bmac_status & CREG_STAT_RXIRQ) bigmac_rx(bp); - - if (bp->dev->tbusy && (TX_BUFFS_AVAIL(bp) > 0)) { - bp->dev->tbusy = 0; - mark_bh(NET_BH); - } - - bp->dev->interrupt = 0; } static int bigmac_open(struct net_device *dev) @@ -928,55 +935,43 @@ static int bigmac_close(struct net_device *dev) return 0; } +static void bigmac_tx_timeout(struct net_device *dev) +{ + struct bigmac *bp = (struct bigmac *) dev->priv; + + bigmac_init(bp, 0); + netif_wake_queue(dev); +} + /* Put a packet on the wire. 
*/ static int bigmac_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct bigmac *bp = (struct bigmac *) dev->priv; int len, entry; - - if (dev->tbusy) { - int tickssofar = jiffies - dev->trans_start; - - if (tickssofar < 40) { - return 1; - } else { - printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name); - bp->enet_stats.tx_errors++; - bigmac_init(bp, 0); - dev->tbusy = 0; - dev->trans_start = jiffies; - dev_kfree_skb(skb); - return 0; - } - } - - if (test_and_set_bit(0, (void *) &dev->tbusy) != 0) { - printk(KERN_ERR "%s: Transmitter access conflict.\n", dev->name); - return 1; - } - - if (!TX_BUFFS_AVAIL(bp)) - return 1; + u32 mapping; len = skb->len; - entry = bp->tx_new; - DTX(("bigmac_start_xmit: len(%d) entry(%d)\n", len, entry)); + mapping = sbus_map_single(bp->bigmac_sdev, skb->data, len); /* Avoid a race... */ + spin_lock_irq(&bp->lock); + entry = bp->tx_new; + DTX(("bigmac_start_xmit: len(%d) entry(%d)\n", len, entry)); bp->bmac_block->be_txd[entry].tx_flags = TXD_UPDATE; bp->tx_skbs[entry] = skb; - bp->bmac_block->be_txd[entry].tx_addr = - sbus_map_single(bp->bigmac_sdev, skb->data, len); + bp->bmac_block->be_txd[entry].tx_addr = mapping; bp->bmac_block->be_txd[entry].tx_flags = (TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH)); - dev->trans_start = jiffies; bp->tx_new = NEXT_TX(entry); + if (TX_BUFFS_AVAIL(bp) <= 0) + netif_stop_queue(dev); + spin_unlock_irq(&bp->lock); /* Get it going. */ sbus_writel(CREG_CTRL_TWAKEUP, bp->creg + CREG_CTRL); - if (TX_BUFFS_AVAIL(bp)) - dev->tbusy = 0; + + dev->trans_start = jiffies; return 0; } @@ -1084,6 +1079,8 @@ static int __init bigmac_ether_init(struct net_device *dev, struct sbus_dev *qec bp->qec_sdev = qec_sdev; bp->bigmac_sdev = qec_sdev->child; + spin_lock_init(&bp->lock); + /* All further failures we find return this. 
*/ res = ENODEV; @@ -1194,6 +1191,9 @@ static int __init bigmac_ether_init(struct net_device *dev, struct sbus_dev *qec dev->get_stats = &bigmac_get_stats; dev->set_multicast_list = &bigmac_set_multicast; + dev->tx_timeout = &bigmac_tx_timeout; + dev->watchdog_timeo = 5*HZ; + /* Finish net device registration. */ dev->irq = bp->bigmac_sdev->irqs[0]; dev->dma = 0; diff --git a/drivers/net/sunbmac.h b/drivers/net/sunbmac.h index 2e452f39c04b..f1d878c66ebc 100644 --- a/drivers/net/sunbmac.h +++ b/drivers/net/sunbmac.h @@ -1,4 +1,4 @@ -/* $Id: sunbmac.h,v 1.5 1999/09/21 14:36:26 davem Exp $ +/* $Id: sunbmac.h,v 1.6 2000/02/09 11:15:36 davem Exp $ * sunbmac.h: Defines for the Sun "Big MAC" 100baseT ethernet cards. * * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) @@ -310,6 +310,8 @@ struct bigmac { struct bmac_init_block *bmac_block; /* RX and TX descriptors */ __u32 bblock_dvma; /* RX and TX descriptors */ + spinlock_t lock; + struct sk_buff *rx_skbs[RX_RING_SIZE]; struct sk_buff *tx_skbs[TX_RING_SIZE]; diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c index 65d8315d97ea..2c628ba9557f 100644 --- a/drivers/net/sunhme.c +++ b/drivers/net/sunhme.c @@ -1,4 +1,4 @@ -/* $Id: sunhme.c,v 1.85 2000/01/28 13:42:27 jj Exp $ +/* $Id: sunhme.c,v 1.86 2000/02/09 11:15:36 davem Exp $ * sunhme.c: Sparc HME/BigMac 10/100baseT half/full duplex auto switching, * auto carrier detecting ethernet driver. Also known as the * "Happy Meal Ethernet" found on SunSwift SBUS cards. 
@@ -1194,7 +1194,10 @@ static void happy_meal_clean_rings(struct happy_meal *hp) rxd = &hp->happy_block->happy_meal_rxd[i]; dma_addr = hme_read_desc32(hp, &rxd->rx_addr); hme_dma_unmap(hp, dma_addr, RX_BUF_ALLOC_SIZE); - dev_kfree_skb(skb); + if (in_irq()) + dev_kfree_skb_irq(skb); + else + dev_kfree_skb(skb); hp->rx_skbs[i] = NULL; } } @@ -1208,7 +1211,10 @@ static void happy_meal_clean_rings(struct happy_meal *hp) txd = &hp->happy_block->happy_meal_txd[i]; dma_addr = hme_read_desc32(hp, &txd->tx_addr); hme_dma_unmap(hp, dma_addr, skb->len); - dev_kfree_skb(skb); + if (in_irq()) + dev_kfree_skb_irq(skb); + else + dev_kfree_skb(skb); hp->tx_skbs[i] = NULL; } } @@ -1871,8 +1877,12 @@ static void happy_meal_tx(struct happy_meal *hp) { struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0]; struct happy_meal_txd *this; - int elem = hp->tx_old; + struct net_device *dev = hp->dev; + int elem; + spin_lock(&hp->happy_lock); + + elem = hp->tx_old; TXD(("TX<")); while (elem != hp->tx_new) { struct sk_buff *skb; @@ -1889,13 +1899,19 @@ static void happy_meal_tx(struct happy_meal *hp) hp->tx_skbs[elem] = NULL; hp->net_stats.tx_bytes += skb->len; - dev_kfree_skb(skb); + dev_kfree_skb_irq(skb); hp->net_stats.tx_packets++; elem = NEXT_TX(elem); } hp->tx_old = elem; TXD((">")); + + if (test_bit(LINK_STATE_XOFF, &dev->state) && + TX_BUFFS_AVAIL(hp) > 0) + netif_wake_queue(dev); + + spin_unlock(&hp->happy_lock); } #ifdef RXDEBUG @@ -2020,14 +2036,10 @@ static void happy_meal_interrupt(int irq, void *dev_id, struct pt_regs *regs) HMD(("happy_meal_interrupt: status=%08x ", happy_status)); - dev->interrupt = 1; - if (happy_status & GREG_STAT_ERRORS) { HMD(("ERRORS ")); - if (happy_meal_is_not_so_happy(hp, /* un- */ happy_status)) { - dev->interrupt = 0; + if (happy_meal_is_not_so_happy(hp, /* un- */ happy_status)) return; - } } if (happy_status & GREG_STAT_MIFIRQ) { @@ -2045,12 +2057,6 @@ static void happy_meal_interrupt(int irq, void *dev_id, struct pt_regs *regs) 
happy_meal_rx(hp, dev); } - if (dev->tbusy && (TX_BUFFS_AVAIL(hp) > 0)) { - hp->dev->tbusy = 0; - mark_bh(NET_BH); - } - - dev->interrupt = 0; HMD(("done\n")); } @@ -2072,14 +2078,10 @@ static void quattro_sbus_interrupt(int irq, void *cookie, struct pt_regs *ptregs GREG_STAT_RXTOHOST))) continue; - dev->interrupt = 1; - if (happy_status & GREG_STAT_ERRORS) { HMD(("ERRORS ")); - if (happy_meal_is_not_so_happy(hp, happy_status)) { - dev->interrupt=0; + if (happy_meal_is_not_so_happy(hp, happy_status)) break; - } } if (happy_status & GREG_STAT_MIFIRQ) { @@ -2096,12 +2098,6 @@ static void quattro_sbus_interrupt(int irq, void *cookie, struct pt_regs *ptregs HMD(("RXTOHOST ")); happy_meal_rx(hp, dev); } - - if (dev->tbusy && (TX_BUFFS_AVAIL(hp) > 0)) { - hp->dev->tbusy = 0; - mark_bh(NET_BH); - } - dev->interrupt = 0; } HMD(("done\n")); } @@ -2161,50 +2157,46 @@ static int happy_meal_close(struct net_device *dev) #define SXD(x) #endif +static void happy_meal_tx_timeout(struct net_device *dev) +{ + struct happy_meal *hp = (struct happy_meal *) dev->priv; + + printk (KERN_ERR "%s: transmit timed out, resetting\n", dev->name); + tx_dump_log(); + printk (KERN_ERR "%s: Happy Status %08x TX[%08x:%08x]\n", dev->name, + hme_read32(hp, hp->gregs + GREG_STAT), + hme_read32(hp, hp->etxregs + ETX_CFG), + hme_read32(hp, hp->bigmacregs + BMAC_TXCFG)); + happy_meal_init(hp, 0); + netif_wake_queue(dev); +} + static int happy_meal_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct happy_meal *hp = (struct happy_meal *) dev->priv; int len, entry; + u32 mapping; - if (test_and_set_bit(0, (void *) &dev->tbusy) != 0) { - int tickssofar = jiffies - dev->trans_start; - - if (tickssofar >= 40) { - printk (KERN_ERR "%s: transmit timed out, resetting\n", dev->name); - hp->net_stats.tx_errors++; - tx_dump_log(); - printk (KERN_ERR "%s: Happy Status %08x TX[%08x:%08x]\n", dev->name, - hme_read32(hp, hp->gregs + GREG_STAT), - hme_read32(hp, hp->etxregs + ETX_CFG), - hme_read32(hp, 
hp->bigmacregs + BMAC_TXCFG)); - happy_meal_init(hp, 0); - dev->tbusy = 0; - dev->trans_start = jiffies; - } else - tx_add_log(hp, TXLOG_ACTION_TXMIT|TXLOG_ACTION_TBUSY, 0); - return 1; - } - - if (!TX_BUFFS_AVAIL(hp)) { - tx_add_log(hp, TXLOG_ACTION_TXMIT|TXLOG_ACTION_NBUFS, 0); - return 1; - } len = skb->len; - entry = hp->tx_new; + mapping = hme_dma_map(hp, skb->data, len); + spin_lock_irq(&hp->happy_lock); + + entry = hp->tx_new; SXD(("SX", len, entry)); hp->tx_skbs[entry] = skb; hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry], (TXFLAG_OWN | TXFLAG_SOP | TXFLAG_EOP | (len & TXFLAG_SIZE)), - hme_dma_map(hp, skb->data, len)); + mapping); hp->tx_new = NEXT_TX(entry); + if (TX_BUFFS_AVAIL(hp) <= 0) + netif_stop_queue(dev); + + spin_unlock_irq(&hp->happy_lock); /* Get it going. */ - dev->trans_start = jiffies; hme_write32(hp, hp->etxregs + ETX_PENDING, ETX_TP_DMAWAKEUP); - - if (TX_BUFFS_AVAIL(hp)) - dev->tbusy = 0; + dev->trans_start = jiffies; tx_add_log(hp, TXLOG_ACTION_TXMIT, 0); return 0; @@ -2228,7 +2220,7 @@ static void happy_meal_set_multicast(struct net_device *dev) u32 crc, poly = CRC_POLYNOMIAL_LE; /* Lock out others. */ - set_bit(0, (void *) &dev->tbusy); + netif_stop_queue(dev); if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) { hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff); @@ -2272,7 +2264,7 @@ static void happy_meal_set_multicast(struct net_device *dev) } /* Let us get going again. */ - dev->tbusy = 0; + netif_wake_queue(dev); } /* Ethtool support... 
*/ @@ -2552,6 +2544,8 @@ static int __init happy_meal_sbus_init(struct net_device *dev, hp->happy_dev = sdev; + spin_lock_init(&hp->happy_lock); + if (sdev->num_registers != 5) { printk(KERN_ERR "happymeal: Device does not have 5 regs, it has %d.\n", sdev->num_registers); @@ -2637,6 +2631,8 @@ static int __init happy_meal_sbus_init(struct net_device *dev, dev->hard_start_xmit = &happy_meal_start_xmit; dev->get_stats = &happy_meal_get_stats; dev->set_multicast_list = &happy_meal_set_multicast; + dev->tx_timeout = &happy_meal_tx_timeout; + dev->watchdog_timeo = 5*HZ; dev->do_ioctl = &happy_meal_ioctl; dev->irq = sdev->irqs[0]; @@ -2741,6 +2737,8 @@ static int __init happy_meal_pci_init(struct net_device *dev, struct pci_dev *pd hp->happy_dev = pdev; + spin_lock_init(&hp->happy_lock); + if (qp != NULL) { hp->qfe_parent = qp; hp->qfe_ent = qfe_slot; diff --git a/drivers/net/sunhme.h b/drivers/net/sunhme.h index 174a9ed3a22f..3708515e3fdd 100644 --- a/drivers/net/sunhme.h +++ b/drivers/net/sunhme.h @@ -1,4 +1,4 @@ -/* $Id: sunhme.h,v 1.28 1999/09/21 14:36:34 davem Exp $ +/* $Id: sunhme.h,v 1.29 2000/02/09 11:15:40 davem Exp $ * sunhme.h: Definitions for Sparc HME/BigMac 10/100baseT ethernet driver. * Also known as the "Happy Meal". * @@ -510,6 +510,8 @@ struct happy_meal { /* This is either a sbus_dev or a pci_dev. */ void *happy_dev; + spinlock_t happy_lock; + struct sk_buff *rx_skbs[RX_RING_SIZE]; struct sk_buff *tx_skbs[TX_RING_SIZE]; diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c index 45b671ae7aab..ba2a62858ab5 100644 --- a/drivers/net/sunlance.c +++ b/drivers/net/sunlance.c @@ -1,4 +1,4 @@ -/* $Id: sunlance.c,v 1.93 2000/01/28 13:42:31 jj Exp $ +/* $Id: sunlance.c,v 1.94 2000/02/09 11:15:40 davem Exp $ * lance.c: Linux/Sparc/Lance driver * * Written 1995, 1996 by Miguel de Icaza @@ -240,6 +240,8 @@ struct lance_private { unsigned long dregs; /* DMA controller regs. 
*/ volatile struct lance_init_block *init_block; + spinlock_t lock; + int rx_new, tx_new; int rx_old, tx_old; @@ -317,7 +319,6 @@ static void load_csrs(struct lance_private *lp) } /* Setup the Lance Rx and Tx rings */ -/* Sets dev->tbusy */ static void lance_init_ring_dvma(struct net_device *dev) { struct lance_private *lp = (struct lance_private *) dev->priv; @@ -327,7 +328,7 @@ static void lance_init_ring_dvma(struct net_device *dev) int i; /* Lock out other processes while setting up hardware */ - dev->tbusy = 1; + netif_stop_queue(dev); lp->rx_new = lp->tx_new = 0; lp->rx_old = lp->tx_old = 0; @@ -383,7 +384,7 @@ static void lance_init_ring_pio(struct net_device *dev) int i; /* Lock out other processes while setting up hardware */ - dev->tbusy = 1; + netif_stop_queue(dev); lp->rx_new = lp->tx_new = 0; lp->rx_old = lp->tx_old = 0; @@ -573,6 +574,8 @@ static void lance_tx_dvma(struct net_device *dev) volatile struct lance_init_block *ib = lp->init_block; int i, j; + spin_lock(&lp->lock); + j = lp->tx_old; for (i = j; i != lp->tx_new; i = j) { volatile struct lance_tx_desc *td = &ib->btx_ring [i]; @@ -637,6 +640,12 @@ static void lance_tx_dvma(struct net_device *dev) j = TX_NEXT(j); } lp->tx_old = j; + + if (test_bit(LINK_STATE_XOFF, &dev->state) && + TX_BUFFS_AVAIL > 0) + netif_wake_queue(dev); + + spin_unlock(&lp->lock); } static void lance_piocopy_to_skb(struct sk_buff *skb, volatile void *piobuf, int len) @@ -736,6 +745,8 @@ static void lance_tx_pio(struct net_device *dev) volatile struct lance_init_block *ib = lp->init_block; int i, j; + spin_lock(&lp->lock); + j = lp->tx_old; for (i = j; i != lp->tx_new; i = j) { volatile struct lance_tx_desc *td = &ib->btx_ring [i]; @@ -800,6 +811,12 @@ static void lance_tx_pio(struct net_device *dev) j = TX_NEXT(j); } lp->tx_old = j; + + if (test_bit(LINK_STATE_XOFF, &dev->state) && + TX_BUFFS_AVAIL > 0) + netif_wake_queue(dev); + + spin_unlock(&lp->lock); } static void lance_interrupt(int irq, void *dev_id, struct pt_regs 
*regs) @@ -808,11 +825,6 @@ static void lance_interrupt(int irq, void *dev_id, struct pt_regs *regs) struct lance_private *lp = (struct lance_private *)dev->priv; int csr0; - if (dev->interrupt) - printk(KERN_ERR "%s: again", dev->name); - - dev->interrupt = 1; - sbus_writew(LE_CSR0, lp->lregs + RAP); csr0 = sbus_readw(lp->lregs + RDP); @@ -833,11 +845,6 @@ static void lance_interrupt(int irq, void *dev_id, struct pt_regs *regs) if (csr0 & LE_C0_TINT) lp->tx(dev); - if ((TX_BUFFS_AVAIL > 0) && dev->tbusy) { - dev->tbusy = 0; - mark_bh(NET_BH); - } - if (csr0 & LE_C0_BABL) lp->stats.tx_errors++; @@ -867,11 +874,10 @@ static void lance_interrupt(int irq, void *dev_id, struct pt_regs *regs) lp->init_ring(dev); load_csrs(lp); init_restart_lance(lp); - dev->tbusy = 0; + netif_wake_queue(dev); } sbus_writew(LE_C0_INEA, lp->lregs + RDP); - dev->interrupt = 0; } /* Build a fake network packet and send it to ourselves. */ @@ -953,9 +959,7 @@ static int lance_open(struct net_device *dev) lp->init_ring(dev); load_csrs(lp); - dev->tbusy = 0; - dev->interrupt = 0; - dev->start = 1; + netif_start_queue(dev); status = init_restart_lance(lp); if (!status && lp->auto_select) { @@ -973,9 +977,8 @@ static int lance_close(struct net_device *dev) { struct lance_private *lp = (struct lance_private *) dev->priv; - dev->start = 0; - dev->tbusy = 1; - del_timer(&lp->multicast_timer); + netif_stop_queue(dev); + del_timer_sync(&lp->multicast_timer); STOP_LANCE(lp); @@ -1007,9 +1010,6 @@ static int lance_reset(struct net_device *dev) lp->init_ring(dev); load_csrs(lp); dev->trans_start = jiffies; - dev->interrupt = 0; - dev->start = 1; - dev->tbusy = 0; status = init_restart_lance(lp); return status; } @@ -1108,40 +1108,30 @@ static void lance_piozero(volatile void *dest, int len) sbus_writeb(0, piobuf); } +static void lance_tx_timeout(struct net_device *dev) +{ + struct lance_private *lp = (struct lance_private *) dev->priv; + + printk(KERN_ERR "%s: transmit timed out, status %04x, reset\n", 
+ dev->name, sbus_readw(lp->lregs + RDP)); + lance_reset(dev); + netif_wake_queue(dev); +} + static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct lance_private *lp = (struct lance_private *) dev->priv; volatile struct lance_init_block *ib = lp->init_block; - unsigned long flags; int entry, skblen, len; - if (test_and_set_bit(0, (void *) &dev->tbusy) != 0) { - int tickssofar = jiffies - dev->trans_start; - - if (tickssofar < 100) - return 1; - - printk(KERN_ERR "%s: transmit timed out, status %04x, reset\n", - dev->name, sbus_readw(lp->lregs + RDP)); - lp->stats.tx_errors++; - lance_reset(dev); - - return 1; - } - skblen = skb->len; - save_and_cli(flags); - - if (!TX_BUFFS_AVAIL) { - restore_flags(flags); - return 1; - } - len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen; lp->stats.tx_bytes += len; + spin_lock_irq(&lp->lock); + entry = lp->tx_new & TX_RING_MOD_MASK; if (lp->pio_buffer) { sbus_writew((-len) | 0xf000, &ib->btx_ring[entry].length); @@ -1161,21 +1151,22 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) lp->tx_new = TX_NEXT(entry); + if (TX_BUFFS_AVAIL <= 0) + netif_stop_queue(dev); + + spin_unlock_irq(&lp->lock); + /* Kick the lance: transmit now */ sbus_writew(LE_C0_INEA | LE_C0_TDMD, lp->lregs + RDP); dev->trans_start = jiffies; dev_kfree_skb(skb); - if (TX_BUFFS_AVAIL) - dev->tbusy = 0; - /* Read back CSR to invalidate the E-Cache. * This is needed, because DMA_DSBL_WR_INV is set. 
*/ if (lp->dregs) sbus_readw(lp->lregs + RDP); - restore_flags(flags); return 0; } @@ -1255,20 +1246,17 @@ static void lance_set_multicast(struct net_device *dev) volatile struct lance_init_block *ib = lp->init_block; u16 mode; - if (!dev->start) + if (!test_bit(LINK_STATE_START, &dev->state)) return; - if (test_and_set_bit(0, (void *)&dev->tbusy)) { - mod_timer(&lp->multicast_timer, jiffies + 2); - return; - } - if (lp->tx_old != lp->tx_new) { mod_timer(&lp->multicast_timer, jiffies + 4); - dev->tbusy = 0; + netif_wake_queue(dev); return; } + netif_stop_queue(dev); + STOP_LANCE(lp); lp->init_ring(dev); @@ -1292,8 +1280,14 @@ static void lance_set_multicast(struct net_device *dev) } load_csrs(lp); init_restart_lance(lp); - dev->tbusy = 0; - mark_bh(NET_BH); + netif_wake_queue(dev); +} + +static void lance_set_multicast_retry(unsigned long _opaque) +{ + struct net_device *dev = (struct net_device *) _opaque; + + lance_set_multicast(dev); } static void lance_free_hwresources(struct lance_private *lp) @@ -1476,6 +1470,8 @@ no_link_test: dev->open = &lance_open; dev->stop = &lance_close; dev->hard_start_xmit = &lance_start_xmit; + dev->tx_timeout = &lance_tx_timeout; + dev->watchdog_timeo = 5*HZ; dev->get_stats = &lance_get_stats; dev->set_multicast_list = &lance_set_multicast; @@ -1491,8 +1487,7 @@ no_link_test: */ init_timer(&lp->multicast_timer); lp->multicast_timer.data = (unsigned long) dev; - lp->multicast_timer.function = - (void (*)(unsigned long)) &lance_set_multicast; + lp->multicast_timer.function = &lance_set_multicast_retry; #ifdef MODULE dev->ifindex = dev_new_index(); diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c index 53e0c756f706..15221ecbc34d 100644 --- a/drivers/net/sunqe.c +++ b/drivers/net/sunqe.c @@ -1,4 +1,4 @@ -/* $Id: sunqe.c,v 1.41 2000/01/28 13:42:30 jj Exp $ +/* $Id: sunqe.c,v 1.43 2000/02/09 21:11:19 davem Exp $ * sunqe.c: Sparc QuadEthernet 10baseT SBUS card driver. 
* Once again I am out to prove that every ethernet * controller out there can be most efficiently programmed @@ -455,6 +455,8 @@ static void qe_rx(struct sunqe *qep) printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n", qep->dev->name); } +static void qe_tx_reclaim(struct sunqe *qep); + /* Interrupts for all QE's get filtered out via the QEC master controller, * so we just run through each qe and check to see who is signaling * and thus needs to be serviced. @@ -470,11 +472,8 @@ static void qec_interrupt(int irq, void *dev_id, struct pt_regs *regs) while (channel < 4) { if (qec_status & 0xf) { struct sunqe *qep = qecp->qes[channel]; - struct net_device *dev = qep->dev; u32 qe_status; - dev->interrupt = 1; - qe_status = sbus_readl(qep->qcregs + CREG_STAT); if (qe_status & CREG_STAT_ERRORS) { if (qe_is_bolixed(qep, qe_status)) @@ -482,8 +481,20 @@ static void qec_interrupt(int irq, void *dev_id, struct pt_regs *regs) } if (qe_status & CREG_STAT_RXIRQ) qe_rx(qep); + if (test_bit(LINK_STATE_XOFF, &qep->dev->state) && + (qe_status & CREG_STAT_TXIRQ)) { + spin_lock(&qep->lock); + qe_tx_reclaim(qep); + if (TX_BUFFS_AVAIL(qep) > 0) { + /* Wake net queue and return to + * lazy tx reclaim. + */ + netif_wake_queue(qep->dev); + sbus_writel(1, qep->qcregs + CREG_TIMASK); + } + spin_unlock(&qep->lock); + } next: - dev->interrupt = 0; } qec_status >>= 4; channel++; @@ -514,11 +525,12 @@ static int qe_close(struct net_device *dev) return 0; } -/* Reclaim TX'd frames from the ring. */ +/* Reclaim TX'd frames from the ring. This must always run under + * the IRQ protected qep->lock. 
+ */ static void qe_tx_reclaim(struct sunqe *qep) { struct qe_txd *txbase = &qep->qe_block->qe_txd[0]; - struct net_device *dev = qep->dev; int elem = qep->tx_old; while (elem != qep->tx_new) { @@ -529,11 +541,31 @@ static void qe_tx_reclaim(struct sunqe *qep) elem = NEXT_TX(elem); } qep->tx_old = elem; +} - if (dev->tbusy && (TX_BUFFS_AVAIL(qep) > 0)) { - dev->tbusy = 0; - mark_bh(NET_BH); - } +static void qe_tx_timeout(struct net_device *dev) +{ + struct sunqe *qep = (struct sunqe *) dev->priv; + int tx_full; + + spin_lock_irq(&qep->lock); + + /* Try to reclaim, if that frees up some tx + * entries, we're fine. + */ + qe_tx_reclaim(qep); + tx_full = TX_BUFFS_AVAIL(qep) <= 0; + + spin_unlock_irq(&qep->lock); + + if (! tx_full) + goto out; + + printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name); + qe_init(qep, 1); + +out: + netif_wake_queue(dev); } /* Get a packet queued to go onto the wire. */ @@ -545,19 +577,9 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev) unsigned char *txbuf; int len, entry; - qe_tx_reclaim(qep); - - if (test_and_set_bit(0, (void *) &dev->tbusy) != 0) { - long tickssofar = jiffies - dev->trans_start; + spin_lock_irq(&qep->lock); - if (tickssofar >= 40) { - printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name); - qe_init(qep, 1); - dev->tbusy = 0; - dev->trans_start = jiffies; - } - return 1; - } + qe_tx_reclaim(qep); len = skb->len; entry = qep->tx_new; @@ -583,10 +605,18 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev) qep->net_stats.tx_packets++; qep->net_stats.tx_bytes += len; - dev_kfree_skb(skb); + if (TX_BUFFS_AVAIL(qep) <= 0) { + /* Halt the net queue and enable tx interrupts. + * When the tx queue empties the tx irq handler + * will wake up the queue and return us back to + * the lazy tx reclaim scheme. 
+ */ + netif_stop_queue(dev); + sbus_writel(0, qep->qcregs + CREG_TIMASK); + } + spin_unlock_irq(&qep->lock); - if (TX_BUFFS_AVAIL(qep)) - dev->tbusy = 0; + dev_kfree_skb(skb); return 0; } @@ -611,7 +641,7 @@ static void qe_set_multicast(struct net_device *dev) u32 crc, poly = CRC_POLYNOMIAL_LE; /* Lock out others. */ - set_bit(0, (void *) &dev->tbusy); + netif_stop_queue(dev); if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) { sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET, @@ -673,7 +703,7 @@ static void qe_set_multicast(struct net_device *dev) sbus_writeb(qep->mconfig, qep->mregs + MREGS_MCONFIG); /* Let us get going again. */ - dev->tbusy = 0; + netif_wake_queue(dev); } /* This is only called once at boot time for each card probed. */ @@ -722,6 +752,7 @@ static int __init qec_ether_init(struct net_device *dev, struct sbus_dev *sdev) qe_devs[0] = dev; qeps[0] = (struct sunqe *) dev->priv; qeps[0]->channel = 0; + spin_lock_init(&qeps[0]->lock); for (j = 0; j < 6; j++) qe_devs[0]->dev_addr[j] = idprom->id_ethaddr[j]; @@ -857,6 +888,8 @@ static int __init qec_ether_init(struct net_device *dev, struct sbus_dev *sdev) qe_devs[i]->hard_start_xmit = qe_start_xmit; qe_devs[i]->get_stats = qe_get_stats; qe_devs[i]->set_multicast_list = qe_set_multicast; + qe_devs[i]->tx_timeout = qe_tx_timeout; + qe_devs[i]->watchdog_timeo = 5*HZ; qe_devs[i]->irq = sdev->irqs[0]; qe_devs[i]->dma = 0; ether_setup(qe_devs[i]); diff --git a/drivers/net/sunqe.h b/drivers/net/sunqe.h index 83c1c04eebd6..b90ed5db409d 100644 --- a/drivers/net/sunqe.h +++ b/drivers/net/sunqe.h @@ -1,4 +1,4 @@ -/* $Id: sunqe.h,v 1.12 1999/09/21 14:36:44 davem Exp $ +/* $Id: sunqe.h,v 1.13 2000/02/09 11:15:42 davem Exp $ * sunqe.h: Definitions for the Sun QuadEthernet driver. * * Copyright (C) 1996 David S. 
Miller (davem@caip.rutgers.edu) @@ -335,6 +335,7 @@ struct sunqe { unsigned long mregs; /* Per-channel MACE Registers */ struct qe_init_block *qe_block; /* RX and TX descriptors */ __u32 qblock_dvma; /* RX and TX descriptors */ + spinlock_t lock; /* Protects txfull state */ int rx_new, rx_old; /* RX ring extents */ int tx_new, tx_old; /* TX ring extents */ struct sunqe_buffers *buffers; /* CPU visible address. */ diff --git a/drivers/net/tulip.c b/drivers/net/tulip.c index 5f5b16ee9358..278b5e3d4da9 100644 --- a/drivers/net/tulip.c +++ b/drivers/net/tulip.c @@ -374,10 +374,7 @@ struct tulip_private { struct enet_statistics stats; #endif struct timer_list timer; /* Media selection timer. */ - int interrupt; /* In-interrupt flag. */ -#ifdef SMP_CHECK - int smp_proc_id; /* Which processor in IRQ handler. */ -#endif + spinlock_t tx_lock; unsigned int cur_rx, cur_tx; /* The next free ring entry */ unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */ unsigned int tx_full:1; /* The Tx queue is full. */ @@ -753,6 +750,8 @@ static struct net_device *tulip_probe1(int pci_bus, int pci_device_fn, /* The Tulip-specific entries in the device structure. */ dev->open = &tulip_open; dev->hard_start_xmit = &tulip_start_xmit; + dev->tx_timeout = &tulip_tx_timeout; + dev->watchdog_timeo = TX_TIMEOUT; dev->stop = &tulip_close; dev->get_stats = &tulip_get_stats; #ifdef HAVE_PRIVATE_IOCTL @@ -1249,6 +1248,7 @@ tulip_open(struct net_device *dev) MOD_INC_USE_COUNT; + spin_lock_init(&tp->tx_lock); tulip_init_ring(dev); /* This is set_rx_mode(), but without starting the transmitter. */ @@ -1329,10 +1329,6 @@ media_picked: outl(tp->csr6, ioaddr + CSR6); outl(tp->csr6 | 0x2000, ioaddr + CSR6); - dev->tbusy = 0; - tp->interrupt = 0; - dev->start = 1; - /* Enable interrupts by setting the interrupt mask. 
*/ outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5); outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7); @@ -1352,6 +1348,8 @@ media_picked: tp->timer.function = tulip_tbl[tp->chip_id].media_timer; add_timer(&tp->timer); + netif_start_queue(dev); + return 0; } @@ -1958,9 +1956,13 @@ static void tulip_tx_timeout(struct net_device *dev) struct tulip_private *tp = (struct tulip_private *)dev->priv; long ioaddr = dev->base_addr; + printk("%s: transmit timed out\n", dev->name); + if (media_cap[dev->if_port] & MediaIsMII) { /* Do nothing -- the media monitor should handle this. */ +#if 0 if (tulip_debug > 1) +#endif printk(KERN_WARNING "%s: Transmit timeout using MII device.\n", dev->name); dev->trans_start = jiffies; @@ -2090,19 +2092,13 @@ tulip_start_xmit(struct sk_buff *skb, struct net_device *dev) struct tulip_private *tp = (struct tulip_private *)dev->priv; int entry; u32 flag; - - /* Block a timer-based transmit from overlapping. This could better be - done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */ - if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) { - if (jiffies - dev->trans_start < TX_TIMEOUT) - return 1; - tulip_tx_timeout(dev); - return 1; - } + unsigned long cpuflags; /* Caution: the write order is important here, set the base address with the "ownership" bits last. */ + spin_lock_irqsave(&tp->tx_lock, cpuflags); + /* Calculate the next Tx descriptor entry. */ entry = tp->cur_tx % TX_RING_SIZE; @@ -2111,17 +2107,15 @@ tulip_start_xmit(struct sk_buff *skb, struct net_device *dev) if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */ flag = 0x60000000; /* No interrupt */ - dev->tbusy = 0; } else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) { flag = 0xe0000000; /* Tx-done intr. */ - dev->tbusy = 0; } else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) { flag = 0x60000000; /* No Tx-done intr. */ - dev->tbusy = 0; } else { /* Leave room for set_rx_mode() to fill entries. 
*/ flag = 0xe0000000; /* Tx-done intr. */ tp->tx_full = 1; + netif_stop_queue(dev); } if (entry == TX_RING_SIZE-1) flag |= 0xe2000000; @@ -2129,6 +2123,8 @@ tulip_start_xmit(struct sk_buff *skb, struct net_device *dev) tp->tx_ring[entry].length = skb->len | flag; tp->tx_ring[entry].status = 0x80000000; /* Pass ownership to the chip. */ tp->cur_tx++; + spin_unlock_irqrestore(&tp->tx_lock, cpuflags); + /* Trigger an immediate transmit demand. */ outl(0, dev->base_addr + CSR1); @@ -2159,20 +2155,6 @@ static void tulip_interrupt IRQ(int irq, void *dev_instance, struct pt_regs *reg ioaddr = dev->base_addr; tp = (struct tulip_private *)dev->priv; - if (test_and_set_bit(0, (void*)&tp->interrupt)) { -#ifdef SMP_CHECK - printk(KERN_ERR "%s: Re-entering the interrupt handler with proc %d," - " proc %d already handling.\n", dev->name, - tp->smp_proc_id, smp_processor_id()); -#else - printk(KERN_ERR "%s: Re-entering the interrupt handler.\n", dev->name); -#endif - return; - } - dev->interrupt = 1; -#ifdef SMP_CHECK - tp->smp_proc_id = smp_processor_id(); -#endif do { csr5 = inl(ioaddr + CSR5); @@ -2189,6 +2171,8 @@ static void tulip_interrupt IRQ(int irq, void *dev_instance, struct pt_regs *reg if (csr5 & (RxIntr | RxNoBuf)) work_budget -= tulip_rx(dev); + spin_lock(&tp->tx_lock); + if (csr5 & (TxNoBuf | TxDied | TxIntr)) { unsigned int dirty_tx; @@ -2233,7 +2217,7 @@ static void tulip_interrupt IRQ(int irq, void *dev_instance, struct pt_regs *reg /* Free the original skb. */ #if (LINUX_VERSION_CODE > 0x20155) - dev_kfree_skb(tp->tx_skbuff[entry]); + dev_kfree_skb_irq(tp->tx_skbuff[entry]); #else dev_kfree_skb(tp->tx_skbuff[entry], FREE_WRITE); #endif @@ -2248,15 +2232,14 @@ static void tulip_interrupt IRQ(int irq, void *dev_instance, struct pt_regs *reg } #endif - if (tp->tx_full && dev->tbusy - && tp->cur_tx - dirty_tx < TX_RING_SIZE - 2) { + if (tp->tx_full && tp->cur_tx - dirty_tx < TX_RING_SIZE - 2) { /* The ring is no longer full, clear tbusy. 
*/ tp->tx_full = 0; - dev->tbusy = 0; - mark_bh(NET_BH); + netif_wake_queue(dev); } tp->dirty_tx = dirty_tx; + if (csr5 & TxDied) { if (tulip_debug > 1) printk(KERN_WARNING "%s: The transmitter stopped!" @@ -2266,6 +2249,7 @@ static void tulip_interrupt IRQ(int irq, void *dev_instance, struct pt_regs *reg outl(tp->csr6 | 0x2002, ioaddr + CSR6); } } + spin_unlock(&tp->tx_lock); /* Log errors. */ if (csr5 & AbnormalIntr) { /* Abnormal error summary bit. */ @@ -2316,8 +2300,6 @@ static void tulip_interrupt IRQ(int irq, void *dev_instance, struct pt_regs *reg printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n", dev->name, inl(ioaddr + CSR5)); - dev->interrupt = 0; - clear_bit(0, (void*)&tp->interrupt); return; } @@ -2429,8 +2411,7 @@ tulip_close(struct net_device *dev) struct tulip_private *tp = (struct tulip_private *)dev->priv; int i; - dev->start = 0; - dev->tbusy = 1; + netif_stop_queue(dev); if (tulip_debug > 1) printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n", @@ -2495,7 +2476,7 @@ tulip_get_stats(struct net_device *dev) struct tulip_private *tp = (struct tulip_private *)dev->priv; long ioaddr = dev->base_addr; - if (dev->start) + if (test_bit(LINK_STATE_START, &dev->state)) tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff; return &tp->stats; @@ -2601,6 +2582,7 @@ static void set_rx_mode(struct net_device *dev, int num_addrs, void *addrs) long ioaddr = dev->base_addr; int csr6 = inl(ioaddr + CSR6) & ~0x00D5; struct tulip_private *tp = (struct tulip_private *)dev->priv; + unsigned long cpuflags; tp->csr6 &= ~0x00D5; if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ @@ -2660,13 +2642,12 @@ static void set_rx_mode(struct net_device *dev, int num_addrs, void *addrs) *setup_frm++ = eaddrs[2]; } while (++i < 15); /* Now add this frame to the Tx list. */ + spin_lock_irqsave(&tp->tx_lock, cpuflags); if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) { /* Same setup recently queued, we need not add it. 
*/ } else { - unsigned long flags; unsigned int entry, dummy = 0; - save_flags(flags); cli(); entry = tp->cur_tx++ % TX_RING_SIZE; if (entry != 0) { @@ -2688,12 +2669,12 @@ static void set_rx_mode(struct net_device *dev, int num_addrs, void *addrs) tp->tx_ring[entry].buffer1 = virt_to_bus(tp->setup_frame); tp->tx_ring[entry].status = 0x80000000; if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2) { - dev->tbusy = 1; + netif_stop_queue(dev); tp->tx_full = 1; } if (dummy >= 0) tp->tx_ring[dummy].status = DescOwned; - restore_flags(flags); + spin_unlock_irqrestore(&tp->tx_lock, cpuflags); /* Trigger an immediate transmit demand. */ outl(0, ioaddr + CSR1); } diff --git a/drivers/parport/ieee1284_ops.c b/drivers/parport/ieee1284_ops.c index df2779bb5d3d..a3b76ffbc18a 100644 --- a/drivers/parport/ieee1284_ops.c +++ b/drivers/parport/ieee1284_ops.c @@ -257,7 +257,7 @@ size_t parport_ieee1284_read_byte (struct parport *port, if (parport_read_status (port) & PARPORT_STATUS_ERROR) { port->physport->ieee1284.phase = IEEE1284_PH_HBUSY_DNA; DPRINTK (KERN_DEBUG - "%s: No more byte data (%d bytes)\n", + "%s: No more byte data (%Zd bytes)\n", port->name, count); /* Go to reverse idle phase. 
*/ @@ -735,23 +735,29 @@ size_t parport_ieee1284_epp_write_data (struct parport *port, PARPORT_CONTROL_SELECT); port->ops->data_forward (port); for (; len > 0; len--, bp++) { + /* Event 62: Write data and strobe data */ parport_write_data (port, *bp); - - if (parport_wait_peripheral (port, PARPORT_STATUS_BUSY, - PARPORT_STATUS_BUSY)) - break; - - /* Strobe data */ parport_frob_control (port, PARPORT_CONTROL_AUTOFD, PARPORT_CONTROL_AUTOFD); - if (parport_wait_peripheral (port, PARPORT_STATUS_BUSY, 0)) + /* Event 58 */ + if (parport_poll_peripheral (port, PARPORT_STATUS_BUSY, 0, 10)) break; + /* Event 63 */ parport_frob_control (port, PARPORT_CONTROL_AUTOFD, 0); + + /* Event 60 */ + if (parport_poll_peripheral (port, PARPORT_STATUS_BUSY, + PARPORT_STATUS_BUSY, 5)) + break; + ret++; } + /* Event 61 */ + parport_frob_control (port, PARPORT_CONTROL_STROBE, 0); + return ret; } @@ -766,23 +772,24 @@ size_t parport_ieee1284_epp_read_data (struct parport *port, parport_frob_control (port, PARPORT_CONTROL_STROBE | - PARPORT_CONTROL_AUTOFD | PARPORT_CONTROL_SELECT, 0); port->ops->data_reverse (port); for (; len > 0; len--, bp++) { - if (parport_wait_peripheral (port, PARPORT_STATUS_BUSY, - PARPORT_STATUS_BUSY)) + parport_frob_control (port, PARPORT_CONTROL_AUTOFD, 0); + + /* Event 58 */ + if (parport_poll_peripheral (port, PARPORT_STATUS_BUSY, + PARPORT_STATUS_BUSY, 10)) break; + *bp = parport_read_data (port); + parport_frob_control (port, PARPORT_CONTROL_AUTOFD, PARPORT_CONTROL_AUTOFD); - if (parport_wait_peripheral (port, PARPORT_STATUS_BUSY, 0)) + if (parport_poll_peripheral (port, PARPORT_STATUS_BUSY, 0, 5)) break; - *bp = parport_read_data (port); - - parport_frob_control (port, PARPORT_CONTROL_AUTOFD, 0); ret++; } port->ops->data_forward (port); @@ -807,23 +814,25 @@ size_t parport_ieee1284_epp_write_addr (struct parport *port, PARPORT_CONTROL_SELECT); port->ops->data_forward (port); for (; len > 0; len--, bp++) { + /* Write data and assert nAStrb. 
*/ parport_write_data (port, *bp); - - if (parport_wait_peripheral (port, PARPORT_STATUS_BUSY, - PARPORT_STATUS_BUSY)) - break; - - /* Strobe data */ parport_frob_control (port, PARPORT_CONTROL_SELECT, PARPORT_CONTROL_SELECT); - if (parport_wait_peripheral (port, PARPORT_STATUS_BUSY, 0)) + if (parport_poll_peripheral (port, PARPORT_STATUS_BUSY, + PARPORT_STATUS_BUSY, 10)) break; parport_frob_control (port, PARPORT_CONTROL_SELECT, 0); + + if (parport_poll_peripheral (port, PARPORT_STATUS_BUSY, 0, 5)) + break; + ret++; } + parport_frob_control (port, PARPORT_CONTROL_STROBE, 0); + return ret; } @@ -838,23 +847,24 @@ size_t parport_ieee1284_epp_read_addr (struct parport *port, parport_frob_control (port, PARPORT_CONTROL_STROBE | - PARPORT_CONTROL_SELECT | PARPORT_CONTROL_AUTOFD, 0); port->ops->data_reverse (port); for (; len > 0; len--, bp++) { - if (parport_wait_peripheral (port, PARPORT_STATUS_BUSY, - PARPORT_STATUS_BUSY)) + parport_frob_control (port, PARPORT_CONTROL_SELECT, 0); + + /* Event 58 */ + if (parport_poll_peripheral (port, PARPORT_STATUS_BUSY, + PARPORT_STATUS_BUSY, 10)) break; + *bp = parport_read_data (port); + parport_frob_control (port, PARPORT_CONTROL_SELECT, PARPORT_CONTROL_SELECT); - if (parport_wait_peripheral (port, PARPORT_STATUS_BUSY, 0)) + if (parport_poll_peripheral (port, PARPORT_STATUS_BUSY, 0, 5)) break; - *bp = parport_read_data (port); - - parport_frob_control (port, PARPORT_CONTROL_SELECT, 0); ret++; } port->ops->data_forward (port); diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c index 61cf80900a34..29206729ad94 100644 --- a/drivers/parport/parport_pc.c +++ b/drivers/parport/parport_pc.c @@ -551,21 +551,22 @@ static size_t parport_pc_fifo_write_block_dma (struct parport *port, unsigned long dmaflag; size_t left = length; const struct parport_pc_private *priv = port->physport->private_data; - unsigned long dma_addr; + dma_addr_t dma_addr, dma_handle; size_t maxlen = 0x10000; /* max 64k per DMA transfer */ 
unsigned long start = (unsigned long) buf; unsigned long end = (unsigned long) buf + length - 1; - /* above 16 MB we use a bounce buffer as ISA-DMA is not possible */ - if (end <= MAX_DMA_ADDRESS) { - /* If it would cross a 64k boundary, cap it at the end. */ - if ((start ^ end) & ~0xffff) - maxlen = (0x10000 - start) & 0xffff; + if (end < MAX_DMA_ADDRESS) { + /* If it would cross a 64k boundary, cap it at the end. */ + if ((start ^ end) & ~0xffffUL) + maxlen = (0x10000 - start) & 0xffff; - dma_addr = virt_to_bus(buf); + dma_addr = dma_handle = pci_map_single(priv->dev, (void *)buf, length); } else { - dma_addr = priv->dma_handle; + /* above 16 MB we use a bounce buffer as ISA-DMA is not possible */ maxlen = PAGE_SIZE; /* sizeof(priv->dma_buf) */ + dma_addr = priv->dma_handle; + dma_handle = 0; } port = port->physport; @@ -585,7 +586,7 @@ static size_t parport_pc_fifo_write_block_dma (struct parport *port, if (count > maxlen) count = maxlen; - if (maxlen == PAGE_SIZE) /* bounce buffer ! */ + if (!dma_handle) /* bounce buffer ! */ memcpy(priv->dma_buf, buf, count); dmaflag = claim_dma_lock(); @@ -607,6 +608,7 @@ static size_t parport_pc_fifo_write_block_dma (struct parport *port, /* assume DMA will be successful */ left -= count; buf += count; + if (dma_handle) dma_addr += count; /* Wait for interrupt. */ false_alarm: @@ -645,6 +647,7 @@ static size_t parport_pc_fifo_write_block_dma (struct parport *port, /* update for possible DMA residue ! */ buf -= count; left += count; + if (dma_handle) dma_addr -= count; } /* Maybe got here through break, so adjust for DMA residue! 
*/ @@ -656,6 +659,9 @@ static size_t parport_pc_fifo_write_block_dma (struct parport *port, /* Turn off DMA mode */ frob_econtrol (port, 1<<3, 0); + + if (dma_handle) + pci_unmap_single(priv->dev, dma_handle, length); return length - left; } diff --git a/drivers/parport/probe.c b/drivers/parport/probe.c index 9cfb00794790..6772ea6d8837 100644 --- a/drivers/parport/probe.c +++ b/drivers/parport/probe.c @@ -164,8 +164,9 @@ ssize_t parport_device_id (int devnum, char *buffer, size_t len) retval = parport_read (dev->port, buffer, len); if (retval != len) - printk (KERN_DEBUG "%s: only read %d of %d ID bytes\n", - dev->port->name, retval, len); + printk (KERN_DEBUG "%s: only read %Zd of %Zd ID bytes\n", + dev->port->name, retval, + len); /* Some printer manufacturers mistakenly believe that the length field is supposed to be _exclusive_. */ @@ -179,7 +180,7 @@ ssize_t parport_device_id (int devnum, char *buffer, size_t len) if (diff) printk (KERN_DEBUG "%s: device reported incorrect " - "length field (%d, should be %d)\n", + "length field (%d, should be %Zd)\n", dev->port->name, idlen, retval); else { /* One semi-colon short of a device ID. */ diff --git a/drivers/sbus/audio/audio.c b/drivers/sbus/audio/audio.c index 86335cef45c3..62d295a8edad 100644 --- a/drivers/sbus/audio/audio.c +++ b/drivers/sbus/audio/audio.c @@ -1,4 +1,4 @@ -/* $Id: audio.c,v 1.47 1999/12/15 22:30:16 davem Exp $ +/* $Id: audio.c,v 1.48 2000/02/09 22:33:19 davem Exp $ * drivers/sbus/audio/audio.c * * Copyright 1996 Thomas K. Dyas (tdyas@noc.rutgers.edu) diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c index 9a6b414012ad..08acf4f81533 100644 --- a/drivers/sbus/char/envctrl.c +++ b/drivers/sbus/char/envctrl.c @@ -1,4 +1,4 @@ -/* $Id: envctrl.c,v 1.14 2000/01/09 15:43:45 ecd Exp $ +/* $Id: envctrl.c,v 1.15 2000/02/09 22:33:23 davem Exp $ * envctrl.c: Temperature and Fan monitoring on Machines providing it. * * Copyright (C) 1998 Eddie C. 
Dost (ecd@skynet.be) diff --git a/drivers/sbus/char/flash.c b/drivers/sbus/char/flash.c index f88cdedf1f53..ab7d94bb1756 100644 --- a/drivers/sbus/char/flash.c +++ b/drivers/sbus/char/flash.c @@ -1,4 +1,4 @@ -/* $Id: flash.c,v 1.15 1999/12/09 00:44:22 davem Exp $ +/* $Id: flash.c,v 1.16 2000/02/09 22:33:24 davem Exp $ * flash.c: Allow mmap access to the OBP Flash, for OBP updates. * * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be) diff --git a/drivers/sbus/char/pcikbd.c b/drivers/sbus/char/pcikbd.c index fd1d6f0d6220..97ebda430633 100644 --- a/drivers/sbus/char/pcikbd.c +++ b/drivers/sbus/char/pcikbd.c @@ -1,4 +1,4 @@ -/* $Id: pcikbd.c,v 1.41 2000/01/08 07:01:20 davem Exp $ +/* $Id: pcikbd.c,v 1.43 2000/02/09 22:33:25 davem Exp $ * pcikbd.c: Ultra/AX PC keyboard support. * * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be) @@ -21,6 +21,7 @@ #include #include #include +#include #include #include @@ -325,7 +326,7 @@ pcikbd_interrupt(int irq, void *dev_id, struct pt_regs *regs) handle_scancode(scancode, !(scancode & 0x80)); status = pcikbd_inb(pcikbd_iobase + KBD_STATUS_REG); } while(status & KBD_STAT_OBF); - mark_bh(KEYBOARD_BH); + tasklet_schedule(&keyboard_tasklet); } static int send_data(unsigned char data) @@ -713,12 +714,13 @@ static DECLARE_MUTEX(aux_sema4); static inline void aux_start_atomic(void) { down(&aux_sema4); - disable_bh(KEYBOARD_BH); + tasklet_disable_nosync(&keyboard_tasklet); + tasklet_unlock_wait(&keyboard_tasklet); } static inline void aux_end_atomic(void) { - enable_bh(KEYBOARD_BH); + tasklet_enable(&keyboard_tasklet); up(&aux_sema4); } @@ -1022,6 +1024,9 @@ int __init ps2kbd_probe(void) goto found; } #endif + if (!pci_present()) + goto do_enodev; + /* * Get the nodes for keyboard and mouse from aliases on normal systems. 
*/ diff --git a/drivers/sbus/char/rtc.c b/drivers/sbus/char/rtc.c index 4a39da89945b..31f3596d5042 100644 --- a/drivers/sbus/char/rtc.c +++ b/drivers/sbus/char/rtc.c @@ -1,4 +1,4 @@ -/* $Id: rtc.c,v 1.18 1999/08/31 18:51:36 davem Exp $ +/* $Id: rtc.c,v 1.19 2000/02/09 22:33:26 davem Exp $ * * Linux/SPARC Real Time Clock Driver * Copyright (C) 1996 Thomas K. Dyas (tdyas@eden.rutgers.edu) diff --git a/drivers/sbus/char/su.c b/drivers/sbus/char/su.c index 9c1e9f54f9f6..6e30f9b58246 100644 --- a/drivers/sbus/char/su.c +++ b/drivers/sbus/char/su.c @@ -1,4 +1,4 @@ -/* $Id: su.c,v 1.34 1999/12/02 09:55:21 davem Exp $ +/* $Id: su.c,v 1.36 2000/02/09 21:11:22 davem Exp $ * su.c: Small serial driver for keyboard/mouse interface on sparc32/PCI * * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be) @@ -2223,7 +2223,7 @@ done: */ static __inline__ void __init show_su_version(void) { - char *revision = "$Revision: 1.34 $"; + char *revision = "$Revision: 1.36 $"; char *version, *p; version = strchr(revision, ' '); diff --git a/drivers/sbus/char/sunkbd.c b/drivers/sbus/char/sunkbd.c index 118ff45eb9a8..7e26f3491d45 100644 --- a/drivers/sbus/char/sunkbd.c +++ b/drivers/sbus/char/sunkbd.c @@ -243,19 +243,13 @@ static unsigned char sunkbd_clickp; #define KEY_ALT 0x86 #define KEY_L1 0x87 -/* Do to sun_kbd_init() being called before rs_init(), and sun_kbd_init() doing: +/* Due to sun_kbd_init() being called before rs_init(), and sun_kbd_init() doing: * - * init_bh(KEYBOARD_BH, kbd_bh); - * mark_bh(KEYBOARD_BH); + * tasklet_enable(&keyboard_tasklet); + * tasklet_schedule(&keyboard_tasklet); * * this might well be called before some driver has claimed interest in * handling the keyboard input/output. So we need to assign an initial nop. 
- * - * Otherwise this would lead to the following (DaveM might want to look at): - * - * sparc64_dtlb_refbit_catch(), - * do_sparc64_fault(), - * kernel NULL pointer dereference at do_sparc64_fault + 0x2c0 ;-( */ static void nop_kbd_put_char(unsigned char c) { } static void (*kbd_put_char)(unsigned char) = nop_kbd_put_char; @@ -460,6 +454,10 @@ keyboard_timer (unsigned long ignored) restore_flags(flags); } +#ifndef CONFIG_PCI +DECLARE_TASKLET_DISABLED(keyboard_tasklet, sun_kbd_bh, 0); +#endif + /* #define SKBD_DEBUG */ /* This is our keyboard 'interrupt' routine. */ void sunkbd_inchar(unsigned char ch, struct pt_regs *regs) @@ -610,7 +608,7 @@ void sunkbd_inchar(unsigned char ch, struct pt_regs *regs) } } out: - mark_bh(KEYBOARD_BH); + tasklet_schedule(&keyboard_tasklet); } static void put_queue(int ch) @@ -1086,7 +1084,6 @@ static void do_lock(unsigned char value, char up_flag) */ static unsigned char ledstate = 0xff; /* undefined */ -static unsigned char sunkbd_ledstate = 0xff; /* undefined */ static unsigned char ledioctl; unsigned char sun_getledstate(void) { @@ -1163,7 +1160,8 @@ static inline unsigned char getleds(void){ * used, but this allows for easy and efficient race-condition * prevention later on. 
*/ -static void kbd_bh(void) +static unsigned char sunkbd_ledstate = 0xff; /* undefined */ +void sun_kbd_bh(unsigned long dummy) { unsigned char leds = getleds(); unsigned char kbd_leds = vcleds_to_sunkbd(leds); @@ -1247,8 +1245,12 @@ int __init sun_kbd_init(void) } else { sunkbd_clickp = 0; } - init_bh(KEYBOARD_BH, kbd_bh); - mark_bh(KEYBOARD_BH); + + keyboard_tasklet.func = sun_kbd_bh; + + tasklet_enable(&keyboard_tasklet); + tasklet_schedule(&keyboard_tasklet); + return 0; } diff --git a/drivers/sbus/char/sunkbd.h b/drivers/sbus/char/sunkbd.h index a1bfeb85b326..1cf212902325 100644 --- a/drivers/sbus/char/sunkbd.h +++ b/drivers/sbus/char/sunkbd.h @@ -1,4 +1,4 @@ -/* $Id: sunkbd.h,v 1.3 1997/09/08 03:05:10 tdyas Exp $ +/* $Id: sunkbd.h,v 1.4 2000/02/09 11:15:54 davem Exp $ * sunkbd.h: Defines needed by SUN Keyboard drivers * * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be) @@ -26,6 +26,7 @@ extern void keyboard_zsinit(void (*kbd_put_char)(unsigned char)); extern void sunkbd_inchar(unsigned char, struct pt_regs *); extern void batten_down_hatches(void); +extern void sun_kbd_bh(unsigned long); extern int sun_kbd_init(void); extern void sun_compute_shiftstate(void); extern void sun_setledstate(struct kbd_struct *, unsigned int); diff --git a/drivers/sbus/char/uctrl.c b/drivers/sbus/char/uctrl.c index fed2885409cb..78f45613dd60 100644 --- a/drivers/sbus/char/uctrl.c +++ b/drivers/sbus/char/uctrl.c @@ -1,4 +1,4 @@ -/* $Id: uctrl.c,v 1.6 2000/01/22 05:22:07 anton Exp $ +/* $Id: uctrl.c,v 1.7 2000/02/09 22:33:28 davem Exp $ * uctrl.c: TS102 Microcontroller interface on Tadpole Sparcbook 3 * * Copyright 1999 Derrick J Brashear (shadow@dementia.org) diff --git a/drivers/sbus/char/zs.c b/drivers/sbus/char/zs.c index 77f031cce23e..b08b73e18b46 100644 --- a/drivers/sbus/char/zs.c +++ b/drivers/sbus/char/zs.c @@ -1,4 +1,4 @@ -/* $Id: zs.c,v 1.53 2000/01/29 01:29:38 anton Exp $ +/* $Id: zs.c,v 1.55 2000/02/09 21:11:24 davem Exp $ * zs.c: Zilog serial port driver for the 
Sparc. * * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) @@ -1928,7 +1928,7 @@ int zs_open(struct tty_struct *tty, struct file * filp) static void show_serial_version(void) { - char *revision = "$Revision: 1.53 $"; + char *revision = "$Revision: 1.55 $"; char *version, *p; version = strchr(revision, ' '); diff --git a/drivers/scsi/Config.in b/drivers/scsi/Config.in index 1e85f4f36c21..a2dd9fb7f2ff 100644 --- a/drivers/scsi/Config.in +++ b/drivers/scsi/Config.in @@ -1,10 +1,22 @@ comment 'SCSI support type (disk, tape, CD-ROM)' dep_tristate ' SCSI disk support' CONFIG_BLK_DEV_SD $CONFIG_SCSI + +if [ "$CONFIG_BLK_DEV_SD" != "n" ]; then + int 'Maximum number of SCSI disks that can be loaded as modules' CONFIG_SD_EXTRA_DEVS 40 +fi + dep_tristate ' SCSI tape support' CONFIG_CHR_DEV_ST $CONFIG_SCSI + +if [ "$CONFIG_BLK_DEV_ST" != "n" ]; then + int 'Maximum number of SCSI tapes that can be loaded as modules' CONFIG_ST_EXTRA_DEVS 2 +fi + dep_tristate ' SCSI CD-ROM support' CONFIG_BLK_DEV_SR $CONFIG_SCSI + if [ "$CONFIG_BLK_DEV_SR" != "n" ]; then bool ' Enable vendor-specific extensions (for SCSI CDROM)' CONFIG_BLK_DEV_SR_VENDOR + int 'Maximum number of CDROM devices that can be loaded as modules' CONFIG_SR_EXTRA_DEVS 2 fi dep_tristate ' SCSI generic support' CONFIG_CHR_DEV_SG $CONFIG_SCSI diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c index 93387842f5c3..9e2f14548ab8 100644 --- a/drivers/scsi/advansys.c +++ b/drivers/scsi/advansys.c @@ -10505,8 +10505,8 @@ asc_prt_scsi_host(struct Scsi_Host *s) (ulong) s->host_queue, (ulong) s->hostt, (ulong) s->block); printk( -" wish_block %d, base %lu, io_port %lu, n_io_port %u, irq %d,\n", - s->wish_block, (ulong) s->base, (ulong) s->io_port, s->n_io_port, +" base %lu, io_port %lu, n_io_port %u, irq %d,\n", + (ulong) s->base, (ulong) s->io_port, s->n_io_port, s->irq); printk( diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c index be0f0a5f2f6b..b6a058c031aa 100644 --- a/drivers/scsi/eata.c +++ 
b/drivers/scsi/eata.c @@ -1049,7 +1049,7 @@ static inline int port_detect \ sh[j]->unchecked_isa_dma = FALSE; else { unsigned long flags; -//FIXME// sh[j]->wish_block = TRUE; + scsi_register_blocked_host(sh[j]); sh[j]->unchecked_isa_dma = TRUE; flags=claim_dma_lock(); @@ -2274,6 +2274,10 @@ int eata2x_release(struct Scsi_Host *shpnt) { if (sh[j] == NULL) panic("%s: release, invalid Scsi_Host pointer.\n", driver_name); + if( sh[j]->unchecked_isa_dma ) { + scsi_deregister_blocked_host(sh[j]); + } + for (i = 0; i < sh[j]->can_queue; i++) if ((&HD(j)->cp[i])->sglist) kfree((&HD(j)->cp[i])->sglist); diff --git a/drivers/scsi/eata_dma.c b/drivers/scsi/eata_dma.c index 97f94f442c68..0f072c46eead 100644 --- a/drivers/scsi/eata_dma.c +++ b/drivers/scsi/eata_dma.c @@ -131,12 +131,11 @@ int eata_release(struct Scsi_Host *sh) if (sh->irq && reg_IRQ[sh->irq] == 1) free_irq(sh->irq, NULL); else reg_IRQ[sh->irq]--; - scsi_init_free((void *)status, 512); - scsi_init_free((void *)dma_scratch - 4, 1024); + kfree((void *)status); + kfree((void *)dma_scratch - 4); for (i = 0; i < sh->can_queue; i++){ /* Free all SG arrays */ if(SD(sh)->ccb[i].sg_list != NULL) - scsi_init_free((void *) SD(sh)->ccb[i].sg_list, - sh->sg_tablesize * sizeof(struct eata_sg_list)); + kfree((void *) SD(sh)->ccb[i].sg_list); } if (SD(sh)->channel == 0) { @@ -908,9 +907,9 @@ char * get_board_data(u32 base, u32 irq, u32 id) static char *buff; ulong i; - cp = (struct eata_ccb *) scsi_init_malloc(sizeof(struct eata_ccb), - GFP_ATOMIC | GFP_DMA); - sp = (struct eata_sp *) scsi_init_malloc(sizeof(struct eata_sp), + cp = (struct eata_ccb *) kmalloc(sizeof(struct eata_ccb), + GFP_ATOMIC | GFP_DMA); + sp = (struct eata_sp *) kmalloc(sizeof(struct eata_sp), GFP_ATOMIC | GFP_DMA); buff = dma_scratch; @@ -954,8 +953,8 @@ char * get_board_data(u32 base, u32 irq, u32 id) fake_int_result, (u32) (sp->hba_stat /*& 0x7f*/), (u32) sp->scsi_stat, buff, sp)); - scsi_init_free((void *)cp, sizeof(struct eata_ccb)); - 
scsi_init_free((void *)sp, sizeof(struct eata_sp)); + kfree((void *)cp); + kfree((void *)sp); if ((fake_int_result & HA_SERROR) || time_after(jiffies, i)){ printk(KERN_WARNING "eata_dma: trying to reset HBA at %x to clear " @@ -1287,8 +1286,6 @@ short register_HBA(u32 base, struct get_conf *gc, Scsi_Host_Template * tpnt, else hd->primary = TRUE; -//FIXME// sh->wish_block = FALSE; - if (hd->bustype != IS_ISA) { sh->unchecked_isa_dma = FALSE; } else { @@ -1459,8 +1456,8 @@ int eata_detect(Scsi_Host_Template * tpnt) tpnt->proc_name = "eata_dma"; - status = scsi_init_malloc(512, GFP_ATOMIC | GFP_DMA); - dma_scratch = scsi_init_malloc(1024, GFP_ATOMIC | GFP_DMA); + status = kmalloc(512, GFP_ATOMIC | GFP_DMA); + dma_scratch = kmalloc(1024, GFP_ATOMIC | GFP_DMA); if(status == NULL || dma_scratch == NULL) { printk("eata_dma: can't allocate enough memory to probe for hosts !\n"); @@ -1511,10 +1508,10 @@ int eata_detect(Scsi_Host_Template * tpnt) HBA_ptr = SD(HBA_ptr)->next; } } else { - scsi_init_free((void *)status, 512); + kfree((void *)status); } - scsi_init_free((void *)dma_scratch - 4, 1024); + kfree((void *)dma_scratch - 4); DBG(DPT_DEBUG, DELAY(12)); diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index e98b4b15fa2b..0f8454516fbb 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -698,7 +698,7 @@ scsi_unregister(struct Scsi_Host * sh){ } } next_scsi_host--; - scsi_init_free((char *) sh, sizeof(struct Scsi_Host) + sh->extra_bytes); + kfree((char *) sh); } /* We call this when we come across a new host adapter. We only do this @@ -708,8 +708,9 @@ scsi_unregister(struct Scsi_Host * sh){ struct Scsi_Host * scsi_register(Scsi_Host_Template * tpnt, int j){ struct Scsi_Host * retval, *shpnt; - retval = (struct Scsi_Host *)scsi_init_malloc(sizeof(struct Scsi_Host) + j, - (tpnt->unchecked_isa_dma && j ? GFP_DMA : 0) | GFP_ATOMIC); + retval = (struct Scsi_Host *)kmalloc(sizeof(struct Scsi_Host) + j, + (tpnt->unchecked_isa_dma && j ? 
GFP_DMA : 0) | GFP_ATOMIC); + memset(retval, 0, sizeof(struct Scsi_Host) + j); atomic_set(&retval->host_active,0); retval->host_busy = 0; retval->host_failed = 0; @@ -754,7 +755,7 @@ struct Scsi_Host * scsi_register(Scsi_Host_Template * tpnt, int j){ retval->unchecked_isa_dma = tpnt->unchecked_isa_dma; retval->use_clustering = tpnt->use_clustering; - retval->select_queue_depths = NULL; + retval->select_queue_depths = tpnt->select_queue_depths; if(!scsi_hostlist) scsi_hostlist = retval; diff --git a/drivers/scsi/hosts.h b/drivers/scsi/hosts.h index 1ce17904a04e..30e97ff3edf4 100644 --- a/drivers/scsi/hosts.h +++ b/drivers/scsi/hosts.h @@ -210,6 +210,12 @@ typedef struct SHT */ int (* bios_param)(Disk *, kdev_t, int []); + + /* + * Used to set the queue depth for a specific device. + */ + void (*select_queue_depths)(struct Scsi_Host *, Scsi_Device *); + /* * This determines if we will use a non-interrupt driven * or an interrupt driven scheme, It is set to the maximum number @@ -417,16 +423,6 @@ extern void build_proc_dir_entries(Scsi_Host_Template *); * scsi_init initializes the scsi hosts. */ -/* - * We use these goofy things because the MM is not set up when we init - * the scsi subsystem. By using these functions we can write code that - * looks normal. Also, it makes it possible to use the same code for a - * loadable module. 
- */ - -extern void * scsi_init_malloc(unsigned int size, int priority); -extern void scsi_init_free(char * ptr, unsigned int size); - extern int next_scsi_host; extern int scsi_loadable_module_flag; @@ -434,7 +430,8 @@ unsigned int scsi_init(void); extern struct Scsi_Host * scsi_register(Scsi_Host_Template *, int j); extern void scsi_unregister(struct Scsi_Host * i); -extern request_fn_proc * scsi_get_request_handler(Scsi_Device * SDpnt, struct Scsi_Host * SHpnt); +extern void scsi_register_blocked_host(struct Scsi_Host * SHpnt); +extern void scsi_deregister_blocked_host(struct Scsi_Host * SHpnt); /* * Prototypes for functions/data in scsi_scan.c @@ -473,6 +470,8 @@ struct Scsi_Device_Template Selects command for blkdevs */ }; +void scsi_initialize_queue(Scsi_Device * SDpnt, struct Scsi_Host * SHpnt); + extern struct Scsi_Device_Template sd_template; extern struct Scsi_Device_Template st_template; extern struct Scsi_Device_Template sr_template; @@ -499,10 +498,14 @@ extern void scsi_unregister_module(int, void *); * * Even bigger hack for SparcSTORAGE arrays. Those are at least 6 disks, but * usually up to 30 disks, so everyone would need to change this. -jj + * + * Note: These things are all evil and all need to go away. My plan is to + * tackle the character devices first, as there aren't any locking implications + * in the block device layer. The block devices will require more work. 
*/ -#define SD_EXTRA_DEVS 40 -#define ST_EXTRA_DEVS 2 -#define SR_EXTRA_DEVS 2 +#define SD_EXTRA_DEVS CONFIG_SD_EXTRA_DEVS +#define ST_EXTRA_DEVS CONFIG_ST_EXTRA_DEVS +#define SR_EXTRA_DEVS CONFIG_SR_EXTRA_DEVS #define SG_EXTRA_DEVS (SD_EXTRA_DEVS + SR_EXTRA_DEVS + ST_EXTRA_DEVS) #endif diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c index a5dd9d29b81e..22f98b60df81 100644 --- a/drivers/scsi/ips.c +++ b/drivers/scsi/ips.c @@ -459,7 +459,6 @@ ips_detect(Scsi_Host_Template *SHT) { sh->cmd_per_lun = sh->hostt->cmd_per_lun; sh->unchecked_isa_dma = sh->hostt->unchecked_isa_dma; sh->use_clustering = sh->hostt->use_clustering; -//FIXME// sh->wish_block = FALSE; /* Store info in HA structure */ ha->io_addr = io_addr; diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c index 7c14a6c3f962..0ede63b87fd2 100644 --- a/drivers/scsi/mesh.c +++ b/drivers/scsi/mesh.c @@ -253,7 +253,7 @@ mesh_detect(Scsi_Host_Template *tp) continue; } mesh_host->unique_id = nmeshes; -#ifndef MODULE +#if !defined(MODULE) && (defined(CONFIG_PMAC) || defined(CONFIG_ALL_PPC)) note_scsi_host(mesh, mesh_host); #endif @@ -305,7 +305,9 @@ mesh_detect(Scsi_Host_Template *tp) if (mesh_sync_period < minper) mesh_sync_period = minper; +#if defined(CONFIG_PMAC) || defined(CONFIG_ALL_PPC) feature_set(mesh, FEATURE_MESH_enable); +#endif mdelay(200); mesh_init(ms); diff --git a/drivers/scsi/pluto.c b/drivers/scsi/pluto.c index c955bf94ae25..b54f074e9aed 100644 --- a/drivers/scsi/pluto.c +++ b/drivers/scsi/pluto.c @@ -112,7 +112,7 @@ int __init pluto_detect(Scsi_Host_Template *tpnt) #endif return 0; } - fcs = (struct ctrl_inquiry *) scsi_init_malloc (sizeof (struct ctrl_inquiry) * fcscount, GFP_DMA); + fcs = (struct ctrl_inquiry *) kmalloc (sizeof (struct ctrl_inquiry) * fcscount, GFP_DMA); if (!fcs) { printk ("PLUTO: Not enough memory to probe\n"); return 0; @@ -265,7 +265,7 @@ int __init pluto_detect(Scsi_Host_Template *tpnt) } else fc->fcp_register(fc, TYPE_SCSI_FCP, 1); } - scsi_init_free((char *)fcs, 
sizeof (struct ctrl_inquiry) * fcscount); + kfree((char *)fcs); if (nplutos) printk ("PLUTO: Total of %d SparcSTORAGE Arrays found\n", nplutos); return nplutos; diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 2472fe16737c..2d86cbf95c62 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -147,7 +147,7 @@ extern void scsi_old_times_out(Scsi_Cmnd * SCpnt); /* - * Function: scsi_get_request_handler() + * Function: scsi_initialize_queue() * * Purpose: Selects queue handler function for a device. * @@ -165,20 +165,17 @@ extern void scsi_old_times_out(Scsi_Cmnd * SCpnt); * For this case, we have a special handler function, which * does some checks and ultimately calls scsi_request_fn. * - * As a future enhancement, it might be worthwhile to add support - * for stacked handlers - there might get to be too many permutations - * otherwise. Then again, we might just have one handler that does - * all of the special cases (a little bit slower), and those devices - * that don't need the special case code would directly call - * scsi_request_fn. - * - * As it stands, I can think of a number of special cases that - * we might need to handle. This would not only include the blocked - * case, but single_lun (for changers), and any special handling - * we might need for a spun-down disk to spin it back up again. + * The single_lun feature is a similar special case. + * + * We handle these things by stacking the handlers. The + * special case handlers simply check a few conditions, + * and return if they are not supposed to do anything. + * In the event that things are OK, then they call the next + * handler in the list - ultimately they call scsi_request_fn + * to do the dirty deed. 
*/ -request_fn_proc * scsi_get_request_handler(Scsi_Device * SDpnt, struct Scsi_Host * SHpnt) { - return scsi_request_fn; +void scsi_initialize_queue(Scsi_Device * SDpnt, struct Scsi_Host * SHpnt) { + blk_init_queue(&SDpnt->request_queue, scsi_request_fn); } #ifdef MODULE @@ -530,7 +527,7 @@ int scsi_dispatch_cmd(Scsi_Cmnd * SCpnt) #endif struct Scsi_Host *host; int rtn = 0; - unsigned long flags; + unsigned long flags = 0; unsigned long timeout; ASSERT_LOCK(&io_request_lock, 0); @@ -1075,45 +1072,46 @@ static void scsi_unregister_host(Scsi_Host_Template *); int scsi_loadable_module_flag; /* Set after we scan builtin drivers */ -void *scsi_init_malloc(unsigned int size, int gfp_mask) -{ - void *retval; - - /* - * For buffers used by the DMA pool, we assume page aligned - * structures. - */ - if ((size % PAGE_SIZE) == 0) { - int order, a_size; - for (order = 0, a_size = PAGE_SIZE; - a_size < size; order++, a_size <<= 1); - retval = (void *) __get_free_pages(gfp_mask | GFP_DMA, order); - } else - retval = kmalloc(size, gfp_mask); - - if (retval) - memset(retval, 0, size); - return retval; -} - - -void scsi_init_free(char *ptr, unsigned int size) +/* + * Function: scsi_release_commandblocks() + * + * Purpose: Release command blocks associated with a device. + * + * Arguments: SDpnt - device + * + * Returns: Nothing + * + * Lock status: No locking assumed or required. + * + * Notes: + */ +void scsi_release_commandblocks(Scsi_Device * SDpnt) { - /* - * We need this special code here because the DMA pool assumes - * page aligned data. Besides, it is wasteful to allocate - * page sized chunks with kmalloc. 
- */ - if ((size % PAGE_SIZE) == 0) { - int order, a_size; + Scsi_Cmnd *SCpnt; + unsigned long flags; - for (order = 0, a_size = PAGE_SIZE; - a_size < size; order++, a_size <<= 1); - free_pages((unsigned long) ptr, order); - } else - kfree(ptr); + spin_lock_irqsave(&device_request_lock, flags); + for (SCpnt = SDpnt->device_queue; SCpnt; SCpnt = SCpnt->next) { + SDpnt->device_queue = SCpnt->next; + kfree((char *) SCpnt); + } + SDpnt->has_cmdblocks = 0; + spin_unlock_irqrestore(&device_request_lock, flags); } +/* + * Function: scsi_build_commandblocks() + * + * Purpose: Allocate command blocks associated with a device. + * + * Arguments: SDpnt - device + * + * Returns: Nothing + * + * Lock status: No locking assumed or required. + * + * Notes: + */ void scsi_build_commandblocks(Scsi_Device * SDpnt) { unsigned long flags; @@ -1129,9 +1127,10 @@ void scsi_build_commandblocks(Scsi_Device * SDpnt) for (j = 0; j < SDpnt->queue_depth; j++) { SCpnt = (Scsi_Cmnd *) - scsi_init_malloc(sizeof(Scsi_Cmnd), + kmalloc(sizeof(Scsi_Cmnd), GFP_ATOMIC | (host->unchecked_isa_dma ? GFP_DMA : 0)); + memset(SCpnt, 0, sizeof(Scsi_Cmnd)); if (NULL == SCpnt) break; /* If not, the next line will oops ... */ memset(&SCpnt->eh_timeout, 0, sizeof(SCpnt->eh_timeout)); @@ -1323,7 +1322,6 @@ stop_output: static int proc_scsi_gen_write(struct file * file, const char * buf, unsigned long length, void *data) { - Scsi_Cmnd *SCpnt; struct Scsi_Device_Template *SDTpnt; Scsi_Device *scd; struct Scsi_Host *HBA_ptr; @@ -1537,10 +1535,8 @@ static int proc_scsi_gen_write(struct file * file, const char * buf, * Nobody is using this device any more. * Free all of the command structures. 
*/ - for (SCpnt = scd->device_queue; SCpnt; SCpnt = SCpnt->next) { - scd->device_queue = SCpnt->next; - scsi_init_free((char *) SCpnt, sizeof(*SCpnt)); - } + scsi_release_commandblocks(scd); + /* Now we can remove the device structure */ if (scd->next != NULL) scd->next->prev = scd->prev; @@ -1552,7 +1548,7 @@ static int proc_scsi_gen_write(struct file * file, const char * buf, HBA_ptr->host_queue = scd->next; } blk_cleanup_queue(&scd->request_queue); - scsi_init_free((char *) scd, sizeof(Scsi_Device)); + kfree((char *) scd); } else { goto out; } @@ -1865,17 +1861,12 @@ static void scsi_unregister_host(Scsi_Host_Template * tpnt) } for (SDpnt = shpnt->host_queue; SDpnt; SDpnt = shpnt->host_queue) { - while (SDpnt->device_queue) { - SCpnt = SDpnt->device_queue->next; - scsi_init_free((char *) SDpnt->device_queue, sizeof(Scsi_Cmnd)); - SDpnt->device_queue = SCpnt; - } - SDpnt->has_cmdblocks = 0; + scsi_release_commandblocks(SDpnt); blk_cleanup_queue(&SDpnt->request_queue); /* Next free up the Scsi_Device structures for this host */ shpnt->host_queue = SDpnt->next; - scsi_init_free((char *) SDpnt, sizeof(Scsi_Device)); + kfree((char *) SDpnt); } } @@ -2023,7 +2014,6 @@ static int scsi_register_device_module(struct Scsi_Device_Template *tpnt) static int scsi_unregister_device(struct Scsi_Device_Template *tpnt) { Scsi_Device *SDpnt; - Scsi_Cmnd *SCpnt; struct Scsi_Host *shpnt; struct Scsi_Device_Template *spnt; struct Scsi_Device_Template *prev_spnt; @@ -2050,13 +2040,7 @@ static int scsi_unregister_device(struct Scsi_Device_Template *tpnt) * Nobody is using this device any more. Free all of the * command structures. 
*/ - for (SCpnt = SDpnt->device_queue; SCpnt; - SCpnt = SCpnt->next) { - if (SCpnt == SDpnt->device_queue) - SDpnt->device_queue = SCpnt->next; - scsi_init_free((char *) SCpnt, sizeof(*SCpnt)); - } - SDpnt->has_cmdblocks = 0; + scsi_release_commandblocks(SDpnt); } } } @@ -2309,7 +2293,7 @@ void cleanup_module(void) Scsi_Device * scsi_get_host_dev(struct Scsi_Host * SHpnt) { Scsi_Device * SDpnt; - Scsi_Cmnd * SCpnt; + /* * Attach a single Scsi_Device to the Scsi_Host - this should * be made to look like a "pseudo-device" that points to the @@ -2330,18 +2314,9 @@ Scsi_Device * scsi_get_host_dev(struct Scsi_Host * SHpnt) SDpnt->type = -1; SDpnt->queue_depth = 1; - SCpnt = kmalloc(sizeof(Scsi_Cmnd), GFP_ATOMIC); - memset(SCpnt, 0, sizeof(Scsi_Cmnd)); - SCpnt->host = SHpnt; - SCpnt->device = SDpnt; - SCpnt->target = SDpnt->id; - SCpnt->state = SCSI_STATE_UNUSED; - SCpnt->owner = SCSI_OWNER_NOBODY; - SCpnt->request.rq_status = RQ_INACTIVE; - - SDpnt->device_queue = SCpnt; - - blk_init_queue(&SDpnt->request_queue, scsi_get_request_handler(SDpnt, SDpnt->host)); + scsi_build_commandblocks(SDpnt); + + scsi_initialize_queue(SDpnt, SHpnt); blk_queue_headactive(&SDpnt->request_queue, 0); SDpnt->request_queue.queuedata = (void *) SDpnt; @@ -2380,7 +2355,7 @@ void scsi_free_host_dev(Scsi_Device * SDpnt) * We only have a single SCpnt attached to this device. Free * it now. 
*/ - kfree(SDpnt->device_queue); + scsi_release_commandblocks(SDpnt); kfree(SDpnt); } diff --git a/drivers/scsi/scsi.h b/drivers/scsi/scsi.h index f8f615c97a6d..df294f2e482c 100644 --- a/drivers/scsi/scsi.h +++ b/drivers/scsi/scsi.h @@ -415,8 +415,7 @@ extern int scsi_mlqueue_insert(Scsi_Cmnd * cmd, int reason); /* * Prototypes for functions in scsi_lib.c */ -extern void scsi_maybe_unblock_host(Scsi_Device * SDpnt); -extern void scsi_blocked_request_fn(request_queue_t * q); +extern int scsi_maybe_unblock_host(Scsi_Device * SDpnt); extern Scsi_Cmnd *scsi_end_request(Scsi_Cmnd * SCpnt, int uptodate, int sectors); extern struct Scsi_Device_Template *scsi_get_request_dev(struct request *); @@ -426,13 +425,14 @@ extern void scsi_io_completion(Scsi_Cmnd * SCpnt, int good_sectors, int block_sectors); extern void scsi_queue_next_request(request_queue_t * q, Scsi_Cmnd * SCpnt); extern void scsi_request_fn(request_queue_t * q); - +extern int scsi_starvation_completion(Scsi_Device * SDpnt); /* * Prototypes for functions in scsi.c */ extern int scsi_dispatch_cmd(Scsi_Cmnd * SCpnt); extern void scsi_bottom_half_handler(void); +extern void scsi_release_commandblocks(Scsi_Device * SDpnt); extern void scsi_build_commandblocks(Scsi_Device * SDpnt); extern void scsi_done(Scsi_Cmnd * SCpnt); extern void scsi_finish_command(Scsi_Cmnd *); @@ -449,6 +449,7 @@ extern void scsi_wait_cmd(Scsi_Cmnd *, const void *cmnd, extern int scsi_dev_init(void); + /* * Prototypes for functions/data in hosts.c */ diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 94fa44f3c123..773dc00871e1 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -972,9 +972,13 @@ int scsi_decide_disposition(Scsi_Cmnd * SCpnt) * When the low level driver returns DID_SOFT_ERROR, * it is responsible for keeping an internal retry counter * in order to avoid endless loops (DB) + * + * Actually this is a bug in this function here. 
We should + * be mindful of the maximum number of retries specified + * and not get stuck in a loop. */ case DID_SOFT_ERROR: - return NEEDS_RETRY; + goto maybe_retry; case DID_BUS_BUSY: case DID_PARITY: @@ -1830,6 +1834,8 @@ void scsi_error_handler(void *data) */ if( host->loaded_as_module ) { siginitsetinv(¤t->blocked, SHUTDOWN_SIGS); + } else { + siginitsetinv(¤t->blocked, 0); } lock_kernel(); @@ -1865,13 +1871,20 @@ void scsi_error_handler(void *data) * trying to unload a module. */ SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler sleeping\n")); - if( host->loaded_as_module ) { - down_interruptible(&sem); - if (signal_pending(current)) - break; - } else { - down(&sem); + /* + * Note - we always use down_interruptible with the semaphore + * even if the module was loaded as part of the kernel. The + * reason is that down() will cause this thread to be counted + * in the load average as a running process, and down + * interruptible doesn't. Given that we need to allow this + * thread to die if the driver was loaded as a module, using + * semaphores isn't unreasonable. + */ + down_interruptible(&sem); + if( host->loaded_as_module ) { + if (signal_pending(current)) + break; } SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler waking up\n")); diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c index f62c1f015347..56ad676465cf 100644 --- a/drivers/scsi/scsi_ioctl.c +++ b/drivers/scsi/scsi_ioctl.c @@ -323,7 +323,7 @@ int scsi_ioctl_send_command(Scsi_Device * dev, Scsi_Ioctl_Command * sic) { int i; printk("scsi_ioctl : device %d. command = ", dev->id); - for (i = 0; i < 12; ++i) + for (i = 0; i < cmdlen; ++i) printk("%02x ", cmd[i]); printk("\nbuffer ="); for (i = 0; i < 20; ++i) diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 7d2c021e92bc..9ec2fe2813c8 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -50,12 +50,6 @@ * This entire source file deals with the new queueing code. 
*/ -/* - * For hosts that request single-file access to the ISA bus, this is a pointer to - * the currently active host. - */ -volatile struct Scsi_Host *host_active = NULL; - /* * Function: scsi_insert_special_cmd() @@ -191,7 +185,6 @@ int scsi_init_cmd_errh(Scsi_Cmnd * SCpnt) return 1; } - /* * Function: scsi_queue_next_request() * @@ -1010,3 +1003,20 @@ void scsi_request_fn(request_queue_t * q) spin_lock_irq(&io_request_lock); } } + +/* + * FIXME(eric) - these are empty stubs for the moment. I need to re-implement + * host blocking from scratch. The theory is that hosts that wish to block + * will register/deregister using these functions instead of the old way + * of setting the wish_block flag. + * + * The details of the implementation remain to be settled, however the + * stubs are here now so that the actual drivers will properly compile. + */ +void scsi_register_blocked_host(struct Scsi_Host * SHpnt) +{ +} + +void scsi_deregister_blocked_host(struct Scsi_Host * SHpnt) +{ +} diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index ed30ae0913ce..645edb67cb76 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -42,7 +42,7 @@ static void print_inquiry(unsigned char *data); static int scan_scsis_single(int channel, int dev, int lun, int *max_scsi_dev, - int *sparse_lun, Scsi_Device ** SDpnt, Scsi_Cmnd * SCpnt, + int *sparse_lun, Scsi_Device ** SDpnt, struct Scsi_Host *shpnt, char *scsi_result); struct dev_info { @@ -119,6 +119,7 @@ static struct dev_info device_list[] = {"IOMEGA", "Io20S *F", "*", BLIST_KEY}, {"INSITE", "Floptical F*8I", "*", BLIST_KEY}, {"INSITE", "I325VM", "*", BLIST_KEY}, + {"LASOUND","CDX7405","3.10", BLIST_MAX5LUN | BLIST_SINGLELUN}, {"NRC", "MBR-7", "*", BLIST_FORCELUN | BLIST_SINGLELUN}, {"NRC", "MBR-7.4", "*", BLIST_FORCELUN | BLIST_SINGLELUN}, {"REGAL", "CDC-4X", "*", BLIST_MAX5LUN | BLIST_SINGLELUN}, @@ -259,7 +260,6 @@ void scan_scsis(struct Scsi_Host *shpnt, int dev; int lun; int max_dev_lun; - 
Scsi_Cmnd *SCpnt; unsigned char *scsi_result; unsigned char scsi_result0[256]; Scsi_Device *SDpnt; @@ -267,29 +267,26 @@ void scan_scsis(struct Scsi_Host *shpnt, int sparse_lun; scsi_result = NULL; - SCpnt = (Scsi_Cmnd *) kmalloc(sizeof(Scsi_Cmnd), - GFP_ATOMIC | GFP_DMA); - if (SCpnt) { - memset(SCpnt, 0, sizeof(Scsi_Cmnd)); - SDpnt = (Scsi_Device *) kmalloc(sizeof(Scsi_Device), - GFP_ATOMIC); - if (SDpnt) { - memset(SDpnt, 0, sizeof(Scsi_Device)); - /* - * Register the queue for the device. All I/O requests will come - * in through here. We also need to register a pointer to - * ourselves, since the queue handler won't know what device - * the queue actually represents. We could look it up, but it - * is pointless work. - */ - blk_init_queue(&SDpnt->request_queue, scsi_get_request_handler(SDpnt, shpnt)); - blk_queue_headactive(&SDpnt->request_queue, 0); - SDpnt->request_queue.queuedata = (void *) SDpnt; - /* Make sure we have something that is valid for DMA purposes */ - scsi_result = ((!shpnt->unchecked_isa_dma) - ? &scsi_result0[0] : kmalloc(512, GFP_DMA)); - } + + SDpnt = (Scsi_Device *) kmalloc(sizeof(Scsi_Device), + GFP_ATOMIC); + if (SDpnt) { + memset(SDpnt, 0, sizeof(Scsi_Device)); + /* + * Register the queue for the device. All I/O requests will come + * in through here. We also need to register a pointer to + * ourselves, since the queue handler won't know what device + * the queue actually represents. We could look it up, but it + * is pointless work. + */ + scsi_initialize_queue(SDpnt, shpnt); + blk_queue_headactive(&SDpnt->request_queue, 0); + SDpnt->request_queue.queuedata = (void *) SDpnt; + /* Make sure we have something that is valid for DMA purposes */ + scsi_result = ((!shpnt->unchecked_isa_dma) + ? 
&scsi_result0[0] : kmalloc(512, GFP_DMA)); } + if (scsi_result == NULL) { printk("Unable to obtain scsi_result buffer\n"); goto leave; @@ -297,11 +294,12 @@ void scan_scsis(struct Scsi_Host *shpnt, /* * We must chain ourself in the host_queue, so commands can time out */ - SCpnt->next = NULL; - SDpnt->device_queue = SCpnt; + SDpnt->queue_depth = 1; SDpnt->host = shpnt; SDpnt->online = TRUE; + scsi_build_commandblocks(SDpnt); + initialize_merge_fn(SDpnt); /* @@ -329,9 +327,6 @@ void scan_scsis(struct Scsi_Host *shpnt, * We need to increment the counter for this one device so we can track when * things are quiet. */ - atomic_inc(&shpnt->host_active); - atomic_inc(&SDpnt->device_active); - if (hardcoded == 1) { Scsi_Device *oldSDpnt = SDpnt; struct Scsi_Device_Template *sdtpnt; @@ -345,7 +340,7 @@ void scan_scsis(struct Scsi_Host *shpnt, if (lun >= shpnt->max_lun) goto leave; scan_scsis_single(channel, dev, lun, &max_dev_lun, &sparse_lun, - &SDpnt, SCpnt, shpnt, scsi_result); + &SDpnt, shpnt, scsi_result); if (SDpnt != oldSDpnt) { /* it could happen the blockdevice hasn't yet been inited */ @@ -397,7 +392,7 @@ void scan_scsis(struct Scsi_Host *shpnt, sparse_lun = 0; for (lun = 0; lun < max_dev_lun; ++lun) { if (!scan_scsis_single(channel, order_dev, lun, &max_dev_lun, - &sparse_lun, &SDpnt, SCpnt, shpnt, + &sparse_lun, &SDpnt, shpnt, scsi_result) && !sparse_lun) break; /* break means don't probe further for luns!=0 */ @@ -407,13 +402,6 @@ void scan_scsis(struct Scsi_Host *shpnt, } /* for channel ends */ } /* if/else hardcoded */ - /* - * We need to decrement the counter for this one device - * so we know when everything is quiet. - */ - atomic_dec(&shpnt->host_active); - atomic_dec(&SDpnt->device_active); - leave: { /* Unchain SCpnt from host_queue */ @@ -434,13 +422,12 @@ void scan_scsis(struct Scsi_Host *shpnt, } } + scsi_release_commandblocks(SDpnt); + /* Last device block does not exist. Free memory. 
*/ if (SDpnt != NULL) kfree((char *) SDpnt); - if (SCpnt != NULL) - kfree((char *) SCpnt); - /* If we allocated a buffer so we could do DMA, free it now */ if (scsi_result != &scsi_result0[0] && scsi_result != NULL) { kfree(scsi_result); @@ -464,13 +451,14 @@ void scan_scsis(struct Scsi_Host *shpnt, * Returning 0 means Please don't ask further for lun!=0, 1 means OK go on. * Global variables used : scsi_devices(linked list) */ -int scan_scsis_single(int channel, int dev, int lun, int *max_dev_lun, - int *sparse_lun, Scsi_Device ** SDpnt2, Scsi_Cmnd * SCpnt, +static int scan_scsis_single(int channel, int dev, int lun, int *max_dev_lun, + int *sparse_lun, Scsi_Device ** SDpnt2, struct Scsi_Host *shpnt, char *scsi_result) { unsigned char scsi_cmd[MAX_COMMAND_SIZE]; struct Scsi_Device_Template *sdtpnt; Scsi_Device *SDtail, *SDpnt = *SDpnt2; + Scsi_Cmnd * SCpnt; int bflags, type = -1; static int ghost_channel=-1, ghost_dev=-1; int org_lun = lun; @@ -505,6 +493,8 @@ int scan_scsis_single(int channel, int dev, int lun, int *max_dev_lun, scsi_cmd[1] = lun << 5; scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[4] = scsi_cmd[5] = 0; + SCpnt = scsi_allocate_device(SDpnt, 0, 0); + SCpnt->host = SDpnt->host; SCpnt->device = SDpnt; SCpnt->target = SDpnt->id; @@ -527,10 +517,14 @@ int scan_scsis_single(int channel, int dev, int lun, int *max_dev_lun, ((SCpnt->sense_buffer[0] & 0x70) >> 4) == 7) { if (((SCpnt->sense_buffer[2] & 0xf) != NOT_READY) && ((SCpnt->sense_buffer[2] & 0xf) != UNIT_ATTENTION) && - ((SCpnt->sense_buffer[2] & 0xf) != ILLEGAL_REQUEST || lun > 0)) + ((SCpnt->sense_buffer[2] & 0xf) != ILLEGAL_REQUEST || lun > 0)) { + scsi_release_command(SCpnt); return 1; - } else + } + } else { + scsi_release_command(SCpnt); return 0; + } } SCSI_LOG_SCAN_BUS(3, printk("scsi: performing INQUIRY\n")); /* @@ -551,14 +545,17 @@ int scan_scsis_single(int channel, int dev, int lun, int *max_dev_lun, SCSI_LOG_SCAN_BUS(3, printk("scsi: INQUIRY %s with code 0x%x\n", SCpnt->result ? 
"failed" : "successful", SCpnt->result)); - if (SCpnt->result) + if (SCpnt->result) { + scsi_release_command(SCpnt); return 0; /* assume no peripheral if any sort of error */ + } /* * Check the peripheral qualifier field - this tells us whether LUNS * are supported here or not. */ if ((scsi_result[0] >> 5) == 3) { + scsi_release_command(SCpnt); return 0; /* assume no peripheral if any sort of error */ } @@ -703,11 +700,11 @@ int scan_scsis_single(int channel, int dev, int lun, int *max_dev_lun, (void *) scsi_result, 0x2a, SCSI_TIMEOUT, 3); } - /* - * Detach the command from the device. It was just a temporary to be used while - * scanning the bus - the real ones will be allocated later. - */ - SDpnt->device_queue = NULL; + + scsi_release_command(SCpnt); + SCpnt = NULL; + + scsi_release_commandblocks(SDpnt); /* * This device was already hooked up to the host in question, @@ -715,13 +712,19 @@ int scan_scsis_single(int channel, int dev, int lun, int *max_dev_lun, * allocate a new one and attach it to the host so that we can further scan the bus. */ SDpnt = (Scsi_Device *) kmalloc(sizeof(Scsi_Device), GFP_ATOMIC); - *SDpnt2 = SDpnt; if (!SDpnt) { printk("scsi: scan_scsis_single: Cannot malloc\n"); return 0; } memset(SDpnt, 0, sizeof(Scsi_Device)); + *SDpnt2 = SDpnt; + SDpnt->queue_depth = 1; + SDpnt->host = shpnt; + SDpnt->online = TRUE; + + scsi_build_commandblocks(SDpnt); + /* * Register the queue for the device. All I/O requests will come * in through here. We also need to register a pointer to @@ -729,17 +732,15 @@ int scan_scsis_single(int channel, int dev, int lun, int *max_dev_lun, * the queue actually represents. We could look it up, but it * is pointless work. 
*/ - blk_init_queue(&SDpnt->request_queue, scsi_get_request_handler(SDpnt, shpnt)); + scsi_initialize_queue(SDpnt, shpnt); blk_queue_headactive(&SDpnt->request_queue, 0); SDpnt->request_queue.queuedata = (void *) SDpnt; SDpnt->host = shpnt; initialize_merge_fn(SDpnt); /* - * And hook up our command block to the new device we will be testing - * for. + * Mark this device as online, or otherwise we won't be able to do much with it. */ - SDpnt->device_queue = SCpnt; SDpnt->online = TRUE; /* diff --git a/drivers/scsi/scsi_syms.c b/drivers/scsi/scsi_syms.c index c05f59fdaa02..3379299d8d56 100644 --- a/drivers/scsi/scsi_syms.c +++ b/drivers/scsi/scsi_syms.c @@ -50,8 +50,6 @@ EXPORT_SYMBOL(scsi_allocate_device); EXPORT_SYMBOL(scsi_do_cmd); EXPORT_SYMBOL(scsi_wait_cmd); EXPORT_SYMBOL(scsi_command_size); -EXPORT_SYMBOL(scsi_init_malloc); -EXPORT_SYMBOL(scsi_init_free); EXPORT_SYMBOL(scsi_ioctl); EXPORT_SYMBOL(print_command); EXPORT_SYMBOL(print_sense); @@ -80,6 +78,9 @@ EXPORT_SYMBOL(proc_scsi); EXPORT_SYMBOL(scsi_io_completion); EXPORT_SYMBOL(scsi_end_request); +EXPORT_SYMBOL(scsi_register_blocked_host); +EXPORT_SYMBOL(scsi_deregister_blocked_host); + /* * These are here only while I debug the rest of the scsi stuff. 
*/ diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index ccbacf12737f..a897c959713a 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -960,15 +960,15 @@ static int sd_init() return 0; rscsi_disks = (Scsi_Disk *) - scsi_init_malloc(sd_template.dev_max * sizeof(Scsi_Disk), GFP_ATOMIC); + kmalloc(sd_template.dev_max * sizeof(Scsi_Disk), GFP_ATOMIC); memset(rscsi_disks, 0, sd_template.dev_max * sizeof(Scsi_Disk)); /* for every (necessary) major: */ - sd_sizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) * sizeof(int), GFP_ATOMIC); + sd_sizes = (int *) kmalloc((sd_template.dev_max << 4) * sizeof(int), GFP_ATOMIC); memset(sd_sizes, 0, (sd_template.dev_max << 4) * sizeof(int)); - sd_blocksizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) * sizeof(int), GFP_ATOMIC); - sd_hardsizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) * sizeof(int), GFP_ATOMIC); + sd_blocksizes = (int *) kmalloc((sd_template.dev_max << 4) * sizeof(int), GFP_ATOMIC); + sd_hardsizes = (int *) kmalloc((sd_template.dev_max << 4) * sizeof(int), GFP_ATOMIC); for (i = 0; i < sd_template.dev_max << 4; i++) { sd_blocksizes[i] = 1024; @@ -979,9 +979,10 @@ static int sd_init() blksize_size[SD_MAJOR(i)] = sd_blocksizes + i * (SCSI_DISKS_PER_MAJOR << 4); hardsect_size[SD_MAJOR(i)] = sd_hardsizes + i * (SCSI_DISKS_PER_MAJOR << 4); } - sd = (struct hd_struct *) scsi_init_malloc((sd_template.dev_max << 4) * - sizeof(struct hd_struct), - GFP_ATOMIC); + sd = (struct hd_struct *) kmalloc((sd_template.dev_max << 4) * + sizeof(struct hd_struct), + GFP_ATOMIC); + memset(sd, 0, (sd_template.dev_max << 4) * sizeof(struct hd_struct)); if (N_USED_SD_MAJORS > 1) sd_gendisks = (struct gendisk *) @@ -1215,13 +1216,11 @@ void cleanup_module(void) sd_registered--; if (rscsi_disks != NULL) { - scsi_init_free((char *) rscsi_disks, - sd_template.dev_max * sizeof(Scsi_Disk)); - scsi_init_free((char *) sd_sizes, sd_template.dev_max * sizeof(int)); - scsi_init_free((char *) sd_blocksizes, 
sd_template.dev_max * sizeof(int)); - scsi_init_free((char *) sd_hardsizes, sd_template.dev_max * sizeof(int)); - scsi_init_free((char *) sd, - (sd_template.dev_max << 4) * sizeof(struct hd_struct)); + kfree((char *) rscsi_disks); + kfree((char *) sd_sizes); + kfree((char *) sd_blocksizes); + kfree((char *) sd_hardsizes); + kfree((char *) sd); /* * Now remove sd_gendisks from the linked list diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 5e1abb8347c7..7529c30636db 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -6,7 +6,7 @@ * * Original driver (sg.c): * Copyright (C) 1992 Lawrence Foard - * 2.x extensions to driver: + * Version 2 and 3 extensions to driver: * Copyright (C) 1998, 1999 Douglas Gilbert * * This program is free software; you can redistribute it and/or modify @@ -14,33 +14,26 @@ * the Free Software Foundation; either version 2, or (at your option) * any later version. * - * Borrows code from st driver. Thanks to Alessandro Rubini's "dd" book. */ - static char * sg_version_str = "Version: 2.3.35 (990708)"; - static int sg_version_num = 20335; /* 2 digits for each component */ + static char * sg_version_str = "Version: 3.1.10 (20000123)"; + static int sg_version_num = 30110; /* 2 digits for each component */ /* * D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes: * - scsi logging is available via SCSI_LOG_TIMEOUT macros. First * the kernel/module needs to be built with CONFIG_SCSI_LOGGING - * (otherwise the macros compile to empty statements). - * Then before running the program to be debugged enter: - * # echo "scsi log timeout 7" > /proc/scsi/scsi + * (otherwise the macros compile to empty statements). + * Then before running the program to be debugged enter: + * # echo "scsi log timeout 7" > /proc/scsi/scsi * This will send copious output to the console and the log which * is usually /var/log/messages. 
To turn off debugging enter: - * # echo "scsi log timeout 0" > /proc/scsi/scsi + * # echo "scsi log timeout 0" > /proc/scsi/scsi * The 'timeout' token was chosen because it is relatively unused. * The token 'hlcomplete' should be used but that triggers too * much output from the sd device driver. To dump the current * state of the SCSI mid level data structures enter: - * # echo "scsi dump 1" > /proc/scsi/scsi - * To dump the state of sg's data structures get the 'sg_debug' - * program from the utilities and enter: - * # sg_debug /dev/sga - * or any valid sg device name. The state of _all_ sg devices - * will be sent to the console and the log. - * - * - The 'alt_address' field in the scatter_list structure and the - * related 'mem_src' indicate the source of the heap allocation. + * # echo "scsi dump 1" > /proc/scsi/scsi + * To dump the state of sg's data structures use: + * # cat /proc/scsi/sg/debug * */ #include @@ -54,6 +47,7 @@ #include #include #include +#include #include #include #include @@ -65,13 +59,29 @@ #include #include -static spinlock_t sg_request_lock = SPIN_LOCK_UNLOCKED; +#ifdef CONFIG_PROC_FS +#include +static int sg_proc_init(void); +static void sg_proc_cleanup(void); +#endif -int sg_big_buff = SG_DEF_RESERVED_SIZE; /* sg_big_buff is ro through sysctl */ -/* N.B. This global is here to keep existing software happy. It now holds - the size of the reserve buffer of the most recent sucessful sg_open(). - Only available when 'sg' compiled into kernel (rather than a module). - This is deprecated (use SG_GET_RESERVED_SIZE ioctl() instead). */ +#ifndef LINUX_VERSION_CODE +#include +#endif /* LINUX_VERSION_CODE */ + +/* #define SG_ALLOW_DIO */ +#ifdef SG_ALLOW_DIO +#include +#endif + +int sg_big_buff = SG_DEF_RESERVED_SIZE; +/* N.B. This variable is readable and writeable via + /proc/scsi/sg/def_reserved_size . 
Each time sg_open() is called a buffer + of this size (or less if there is not enough memory) will be reserved + for use by this file descriptor. [Deprecated usage: this variable is also + readable via /proc/sys/kernel/sg-big-buff if the sg driver is built into + the kernel (i.e. it is not a module).] */ +static int def_reserved_size = -1; /* picks up init parameter */ #define SG_SECTOR_SZ 512 #define SG_SECTOR_MSK (SG_SECTOR_SZ - 1) @@ -81,17 +91,10 @@ int sg_big_buff = SG_DEF_RESERVED_SIZE; /* sg_big_buff is ro through sysctl */ static int sg_pool_secs_avail = SG_MAX_POOL_SECTORS; -/* #define SG_DEBUG */ /* for counting varieties of allocations */ - -#ifdef SG_DEBUG -static int sg_num_kmal = 0; -static int sg_num_pool = 0; -static int sg_num_page = 0; -#endif - #define SG_HEAP_PAGE 1 /* heap from kernel via get_free_pages() */ #define SG_HEAP_KMAL 2 /* heap from kernel via kmalloc() */ #define SG_HEAP_POOL 3 /* heap from scsi dma pool (mid-level) */ +#define SG_USER_MEM 4 /* memory belongs to user space */ static int sg_init(void); @@ -100,28 +103,36 @@ static void sg_finish(void); static int sg_detect(Scsi_Device *); static void sg_detach(Scsi_Device *); +static Scsi_Cmnd * dummy_cmdp = 0; /* only used for sizeof */ -struct Scsi_Device_Template sg_template = + +static spinlock_t sg_request_lock = SPIN_LOCK_UNLOCKED; + +struct Scsi_Device_Template sg_template = { - tag:"sg", - scsi_type:0xff, - major:SCSI_GENERIC_MAJOR, - detect:sg_detect, - init:sg_init, - finish:sg_finish, - attach:sg_attach, - detach:sg_detach + tag:"sg", + scsi_type:0xff, + major:SCSI_GENERIC_MAJOR, + detect:sg_detect, + init:sg_init, + finish:sg_finish, + attach:sg_attach, + detach:sg_detach }; +/* Need to add 'rwlock_t sg_rw_lock = RW_LOCK_UNLOCKED;' for list protection */ typedef struct sg_scatter_hold /* holding area for scsi scatter gather info */ { - unsigned short use_sg; /* Number of pieces of scatter-gather */ + unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */ 
unsigned short sglist_len; /* size of malloc'd scatter-gather list */ unsigned bufflen; /* Size of (aggregate) data buffer */ unsigned b_malloc_len; /* actual len malloc'ed in buffer */ void * buffer; /* Data buffer or scatter list,12 bytes each*/ - char mem_src; /* heap whereabouts of 'buffer' */ + struct kiobuf * kiobp; /* for direct IO information */ + char mapped; /* indicates kiobp has locked pages */ + char buffer_mem_src; /* heap whereabouts of 'buffer' */ + unsigned char cmd_opcode; /* first byte of command */ } Sg_scatter_hold; /* 20 bytes long on i386 */ struct sg_device; /* forward declarations */ @@ -129,20 +140,23 @@ struct sg_fd; typedef struct sg_request /* SG_MAX_QUEUE requests outstanding per file */ { - Scsi_Cmnd * my_cmdp; /* NULL -> ready to read, else id */ + Scsi_Cmnd * my_cmdp; /* != 0 when request with lower levels */ struct sg_request * nextrp; /* NULL -> tail request (slist) */ struct sg_fd * parentfp; /* NULL -> not in use */ Sg_scatter_hold data; /* hold buffer, perhaps scatter list */ - struct sg_header header; /* scsi command+info, see */ + sg_io_hdr_t header; /* scsi command+info, see */ + unsigned char sense_b[sizeof(dummy_cmdp->sense_buffer)]; char res_used; /* 1 -> using reserve buffer, 0 -> not ... */ -} Sg_request; /* 72 bytes long on i386 */ + char orphan; /* 1 -> drop on sight, 0 -> normal */ + char sg_io_owned; /* 1 -> packet belongs to SG_IO */ + char done; /* 1 -> bh handler done, 0 -> prior to bh */ +} Sg_request; /* 168 bytes long on i386 */ typedef struct sg_fd /* holds the state of a file descriptor */ { struct sg_fd * nextfp; /* NULL when last opened fd on this device */ struct sg_device * parentdp; /* owning device */ wait_queue_head_t read_wait; /* queue read until command done */ - wait_queue_head_t write_wait; /* write waits on pending read */ int timeout; /* defaults to SG_DEFAULT_TIMEOUT */ Sg_scatter_hold reserve; /* buffer held for this file descriptor */ unsigned save_scat_len; /* original length of trunc. 
scat. element */ @@ -152,60 +166,73 @@ typedef struct sg_fd /* holds the state of a file descriptor */ char low_dma; /* as in parent but possibly overridden to 1 */ char force_packid; /* 1 -> pack_id input to read(), 0 -> ignored */ char closed; /* 1 -> fd closed but request(s) outstanding */ - char my_mem_src; /* heap whereabouts of this Sg_fd object */ + char fd_mem_src; /* heap whereabouts of this Sg_fd object */ char cmd_q; /* 1 -> allow command queuing, 0 -> don't */ - char underrun_flag; /* 1 -> flag underruns, 0 -> don't, 2 -> test */ char next_cmd_len; /* 0 -> automatic (def), >0 -> use on next write() */ + char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */ } Sg_fd; /* 1212 bytes long on i386 */ typedef struct sg_device /* holds the state of each scsi generic device */ { Scsi_Device * device; - wait_queue_head_t o_excl_wait; /* queue open() when O_EXCL in use */ + wait_queue_head_t o_excl_wait; /* queue open() when O_EXCL in use */ int sg_tablesize; /* adapter's max scatter-gather table size */ Sg_fd * headfp; /* first open fd belonging to this device */ kdev_t i_rdev; /* holds device major+minor number */ char exclude; /* opened for exclusive access */ char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */ - unsigned char merge_fd; /* 0->sequencing per fd, else fd count */ } Sg_device; /* 24 bytes long on i386 */ static int sg_fasync(int fd, struct file * filp, int mode); -static void sg_command_done(Scsi_Cmnd * SCpnt); -static int sg_start_req(Sg_request * srp, int max_buff_size, - const char * inp, int num_write_xfer); -static void sg_finish_rem_req(Sg_request * srp, char * outp, - int num_read_xfer); -static int sg_build_scat(Sg_scatter_hold * schp, int buff_size, - const Sg_fd * sfp); -static void sg_write_xfer(Sg_scatter_hold * schp, const char * inp, - int num_write_xfer); +static void sg_cmd_done_bh(Scsi_Cmnd * SCpnt); +static int sg_start_req(Sg_request * srp); +static void sg_finish_rem_req(Sg_request * srp); +static 
int sg_build_indi(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size); +static int sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp); +static ssize_t sg_new_read(Sg_fd * sfp, char * buf, size_t count, + Sg_request * srp); +static ssize_t sg_new_write(Sg_fd * sfp, const char * buf, size_t count, + int blocking, int read_only, Sg_request ** o_srp); +static int sg_common_write(Sg_fd * sfp, Sg_request * srp, + unsigned char * cmnd, int timeout, int blocking); +static int sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind, + int wr_xf, int * countp, unsigned char ** up); +static int sg_write_xfer(Sg_request * srp); +static int sg_read_xfer(Sg_request * srp); +static void sg_read_oxfer(Sg_request * srp, char * outp, int num_read_xfer); static void sg_remove_scat(Sg_scatter_hold * schp); -static void sg_read_xfer(Sg_scatter_hold * schp, char * outp, - int num_read_xfer); +static char * sg_get_sgat_msa(Sg_scatter_hold * schp); static void sg_build_reserve(Sg_fd * sfp, int req_size); static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size); static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp); -static char * sg_malloc(const Sg_fd * sfp, int size, int * retSzp, +static char * sg_malloc(const Sg_fd * sfp, int size, int * retSzp, int * mem_srcp); static void sg_free(char * buff, int size, int mem_src); -static char * sg_low_malloc(int rqSz, int lowDma, int mem_src, +static char * sg_low_malloc(int rqSz, int lowDma, int mem_src, int * retSzp); static void sg_low_free(char * buff, int size, int mem_src); -static Sg_fd * sg_add_sfp(Sg_device * sdp, int dev, int get_reserved); +static Sg_fd * sg_add_sfp(Sg_device * sdp, int dev); static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp); static Sg_request * sg_get_request(const Sg_fd * sfp, int pack_id); static Sg_request * sg_add_request(Sg_fd * sfp); -static int sg_remove_request(Sg_fd * sfp, const Sg_request * srp); +static int sg_remove_request(Sg_fd * sfp, Sg_request * srp); static int 
sg_res_in_use(const Sg_fd * sfp); +static int sg_dio_in_use(const Sg_fd * sfp); static void sg_clr_scpnt(Scsi_Cmnd * SCpnt); static void sg_shorten_timeout(Scsi_Cmnd * scpnt); -static void sg_debug(const Sg_device * sdp, const Sg_fd * sfp, int part_of); -static void sg_debug_all(const Sg_fd * sfp); +static int sg_ms_to_jif(unsigned int msecs); +static unsigned sg_jif_to_ms(int jifs); +static int sg_allow_access(unsigned char opcode, char dev_type); +static int sg_last_dev(void); +static int sg_build_dir(Sg_request * srp, Sg_fd * sfp, int dxfer_len); +static void sg_unmap_and(Sg_scatter_hold * schp, int free_also); static Sg_device * sg_dev_arr = NULL; static const int size_sg_header = sizeof(struct sg_header); +static const int size_sg_io_hdr = sizeof(sg_io_hdr_t); +static const int size_sg_iovec = sizeof(sg_iovec_t); +static const int size_sg_req_info = sizeof(sg_req_info_t); static int sg_open(struct inode * inode, struct file * filp) @@ -240,8 +267,8 @@ static int sg_open(struct inode * inode, struct file * filp) if (sdp->headfp && (filp->f_flags & O_NONBLOCK)) return -EBUSY; res = 0; /* following is a macro that beats race condition */ - __wait_event_interruptible(sdp->o_excl_wait, - ((sdp->headfp || sdp->exclude) ? 0 : (sdp->exclude = 1)), + __wait_event_interruptible(sdp->o_excl_wait, + ((sdp->headfp || sdp->exclude) ? 0 : (sdp->exclude = 1)), res); if (res) return res; /* -ERESTARTSYS because signal hit process */ @@ -257,15 +284,9 @@ static int sg_open(struct inode * inode, struct file * filp) if (! 
sdp->headfp) { /* no existing opens on this device */ sdp->sgdebug = 0; sdp->sg_tablesize = sdp->device->host->sg_tablesize; - sdp->merge_fd = 0; /* A little tricky if SG_DEF_MERGE_FD set */ } - if ((sfp = sg_add_sfp(sdp, dev, O_RDWR == (flags & O_ACCMODE)))) { + if ((sfp = sg_add_sfp(sdp, dev))) filp->private_data = sfp; -#if SG_DEF_MERGE_FD - if (0 == sdp->merge_fd) - sdp->merge_fd = 1; -#endif - } else { if (flags & O_EXCL) sdp->exclude = 0; /* undo if error */ return -ENOMEM; @@ -287,12 +308,10 @@ static int sg_release(struct inode * inode, struct file * filp) if ((! (sfp = (Sg_fd *)filp->private_data)) || (! (sdp = sfp->parentdp))) return -ENXIO; SCSI_LOG_TIMEOUT(3, printk("sg_release: dev=%d\n", MINOR(sdp->i_rdev))); - sg_fasync(-1, filp, 0); /* remove filp from async notification list */ + sg_fasync(-1, filp, 0); /* remove filp from async notification list */ sg_remove_sfp(sdp, sfp); - if (! sdp->headfp) { + if (! sdp->headfp) filp->private_data = NULL; - sdp->merge_fd = 0; - } if (sdp->device->host->hostt->module) __MOD_DEC_USE_COUNT(sdp->device->host->hostt->module); @@ -311,70 +330,162 @@ static ssize_t sg_read(struct file * filp, char * buf, Sg_fd * sfp; Sg_request * srp; int req_pack_id = -1; - struct sg_header * shp = (struct sg_header *)buf; + struct sg_header old_hdr; + sg_io_hdr_t new_hdr; + sg_io_hdr_t * hp; if ((! (sfp = (Sg_fd *)filp->private_data)) || (! (sdp = sfp->parentdp))) return -ENXIO; - SCSI_LOG_TIMEOUT(3, printk("sg_read: dev=%d, count=%d\n", + SCSI_LOG_TIMEOUT(3, printk("sg_read: dev=%d, count=%d\n", MINOR(sdp->i_rdev), (int)count)); - + if(! scsi_block_when_processing_errors(sdp->device)) return -ENXIO; if (ppos != &filp->f_pos) ; /* FIXME: Hmm. Seek to the right place, or fail? 
*/ if ((k = verify_area(VERIFY_WRITE, buf, count))) return k; - if (sfp->force_packid && (count >= size_sg_header)) - req_pack_id = shp->pack_id; + if (sfp->force_packid && (count >= size_sg_header)) { + __copy_from_user(&old_hdr, buf, size_sg_header); + if (old_hdr.reply_len < 0) { + if (count >= size_sg_io_hdr) { + __copy_from_user(&new_hdr, buf, size_sg_io_hdr); + req_pack_id = new_hdr.pack_id; + } + } + else + req_pack_id = old_hdr.pack_id; + } srp = sg_get_request(sfp, req_pack_id); if (! srp) { /* now wait on packet to arrive */ if (filp->f_flags & O_NONBLOCK) return -EAGAIN; - res = 0; /* following is a macro that beats race condition */ - __wait_event_interruptible(sfp->read_wait, + while (1) { + int dio = sg_dio_in_use(sfp); + res = 0; /* following is a macro that beats race condition */ + __wait_event_interruptible(sfp->read_wait, (srp = sg_get_request(sfp, req_pack_id)), res); - if (res) - return res; /* -ERESTARTSYS because signal hit process */ + if (0 == res) + break; + else if (! dio) /* only let signal out if no dio */ + return res; /* -ERESTARTSYS because signal hit process */ + } + } + if (srp->header.interface_id != '\0') + return sg_new_read(sfp, buf, count, srp); + + hp = &srp->header; + memset(&old_hdr, 0, size_sg_header); + old_hdr.reply_len = (int)hp->timeout; + old_hdr.pack_len = old_hdr.reply_len; /* very old, strange behaviour */ + old_hdr.pack_id = hp->pack_id; + old_hdr.twelve_byte = + ((srp->data.cmd_opcode >= 0xc0) && (12 == hp->cmd_len)) ? 
1 : 0; + old_hdr.target_status = hp->masked_status; + old_hdr.host_status = hp->host_status; + old_hdr.driver_status = hp->driver_status; + if ((CHECK_CONDITION & hp->masked_status) || + (DRIVER_SENSE & hp->driver_status)) + memcpy(old_hdr.sense_buffer, srp->sense_b, + sizeof(old_hdr.sense_buffer)); + switch (hp->host_status) + { /* This setup of 'result' is for backward compatibility and is best + ignored by the user who should use target, host + driver status */ + case DID_OK: + case DID_PASSTHROUGH: + case DID_SOFT_ERROR: + old_hdr.result = 0; + break; + case DID_NO_CONNECT: + case DID_BUS_BUSY: + case DID_TIME_OUT: + old_hdr.result = EBUSY; + break; + case DID_BAD_TARGET: + case DID_ABORT: + case DID_PARITY: + case DID_RESET: + case DID_BAD_INTR: + old_hdr.result = EIO; + break; + case DID_ERROR: + old_hdr.result = + (srp->sense_b[0] == 0 && hp->masked_status == GOOD) ? 0 : EIO; + break; + default: + old_hdr.result = EIO; + break; } - if (2 != sfp->underrun_flag) - srp->header.pack_len = srp->header.reply_len; /* Why ????? */ /* Now copy the result back to the user buffer. */ if (count >= size_sg_header) { - __copy_to_user(buf, &srp->header, size_sg_header); + __copy_to_user(buf, &old_hdr, size_sg_header); buf += size_sg_header; - if (count > srp->header.reply_len) - count = srp->header.reply_len; - if (count > size_sg_header) /* release does copy_to_user */ - sg_finish_rem_req(srp, buf, count - size_sg_header); - else - sg_finish_rem_req(srp, NULL, 0); + if (count > old_hdr.reply_len) + count = old_hdr.reply_len; + if (count > size_sg_header) + sg_read_oxfer(srp, buf, count - size_sg_header); } - else { - count = (srp->header.result == 0) ? 0 : -EIO; - sg_finish_rem_req(srp, NULL, 0); - } - if (! sfp->cmd_q) - wake_up_interruptible(&sfp->write_wait); + else + count = (old_hdr.result == 0) ? 
0 : -EIO; + sg_finish_rem_req(srp); return count; } -static ssize_t sg_write(struct file * filp, const char * buf, +static ssize_t sg_new_read(Sg_fd * sfp, char * buf, size_t count, + Sg_request * srp) +{ + Sg_device * sdp = sfp->parentdp; + sg_io_hdr_t * hp = &srp->header; + int k, len; + + if(! scsi_block_when_processing_errors(sdp->device) ) + return -ENXIO; + if (count < size_sg_io_hdr) + return -EINVAL; + + hp->sb_len_wr = 0; + if ((hp->mx_sb_len > 0) && hp->sbp) { + if ((CHECK_CONDITION & hp->masked_status) || + (DRIVER_SENSE & hp->driver_status)) { + int sb_len = sizeof(dummy_cmdp->sense_buffer); + sb_len = (hp->mx_sb_len > sb_len) ? sb_len : hp->mx_sb_len; + len = 8 + (int)srp->sense_b[7]; /* Additional sense length field */ + len = (len > sb_len) ? sb_len : len; + if ((k = verify_area(VERIFY_WRITE, hp->sbp, len))) + return k; + __copy_to_user(hp->sbp, srp->sense_b, len); + hp->sb_len_wr = len; + } + } + if (hp->masked_status || hp->host_status || hp->driver_status) + hp->info |= SG_INFO_CHECK; + copy_to_user(buf, hp, size_sg_io_hdr); + + k = sg_read_xfer(srp); + if (k) return k; /* probably -EFAULT, bad addr in dxferp or iovec list */ + sg_finish_rem_req(srp); + return count; +} + + +static ssize_t sg_write(struct file * filp, const char * buf, size_t count, loff_t *ppos) { int mxsize, cmd_size, k; - unsigned char cmnd[MAX_COMMAND_SIZE]; - int input_size; + int input_size, blocking; unsigned char opcode; - Scsi_Cmnd * SCpnt; Sg_device * sdp; Sg_fd * sfp; Sg_request * srp; + struct sg_header old_hdr; + sg_io_hdr_t * hp; + unsigned char cmnd[sizeof(dummy_cmdp->cmnd)]; if ((! (sfp = (Sg_fd *)filp->private_data)) || (! (sdp = sfp->parentdp))) return -ENXIO; - SCSI_LOG_TIMEOUT(3, printk("sg_write: dev=%d, count=%d\n", + SCSI_LOG_TIMEOUT(3, printk("sg_write: dev=%d, count=%d\n", MINOR(sdp->i_rdev), (int)count)); if(! 
scsi_block_when_processing_errors(sdp->device) ) @@ -384,33 +495,26 @@ static ssize_t sg_write(struct file * filp, const char * buf, if ((k = verify_area(VERIFY_READ, buf, count))) return k; /* protects following copy_from_user()s + get_user()s */ + if (count < size_sg_header) + return -EIO; + __copy_from_user(&old_hdr, buf, size_sg_header); + blocking = !(filp->f_flags & O_NONBLOCK); + if (old_hdr.reply_len < 0) + return sg_new_write(sfp, buf, count, blocking, 0, NULL); if (count < (size_sg_header + 6)) - return -EIO; /* The minimum scsi command length is 6 bytes. */ + return -EIO; /* The minimum scsi command length is 6 bytes. */ if (! (srp = sg_add_request(sfp))) { - if (sfp->cmd_q) { - SCSI_LOG_TIMEOUT(1, printk("sg_write: queue full\n")); - return -EDOM; - } - else { /* old semantics: wait for pending read() to finish */ - if (filp->f_flags & O_NONBLOCK) - return -EAGAIN; - k = 0; - __wait_event_interruptible(sfp->write_wait, - (srp = sg_add_request(sfp)), - k); - if (k) - return k; /* -ERESTARTSYS because signal hit process */ - } + SCSI_LOG_TIMEOUT(1, printk("sg_write: queue full\n")); + return -EDOM; } - __copy_from_user(&srp->header, buf, size_sg_header); buf += size_sg_header; - srp->header.pack_len = count; __get_user(opcode, buf); if (sfp->next_cmd_len > 0) { if (sfp->next_cmd_len > MAX_COMMAND_SIZE) { SCSI_LOG_TIMEOUT(1, printk("sg_write: command length too long\n")); sfp->next_cmd_len = 0; + sg_remove_request(sfp, srp); return -EIO; } cmd_size = sfp->next_cmd_len; @@ -418,87 +522,204 @@ static ssize_t sg_write(struct file * filp, const char * buf, } else { cmd_size = COMMAND_SIZE(opcode); /* based on SCSI command group */ - if ((opcode >= 0xc0) && srp->header.twelve_byte) + if ((opcode >= 0xc0) && old_hdr.twelve_byte) cmd_size = 12; } - SCSI_LOG_TIMEOUT(4, printk("sg_write: scsi opcode=0x%02x, cmd_size=%d\n", + SCSI_LOG_TIMEOUT(4, printk("sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int)opcode, cmd_size)); /* Determine buffer size. 
*/ input_size = count - cmd_size; - mxsize = (input_size > srp->header.reply_len) ? input_size : - srp->header.reply_len; + mxsize = (input_size > old_hdr.reply_len) ? input_size : + old_hdr.reply_len; mxsize -= size_sg_header; input_size -= size_sg_header; if (input_size < 0) { sg_remove_request(sfp, srp); return -EIO; /* User did not pass enough bytes for this command. */ } - if ((k = sg_start_req(srp, mxsize, buf + cmd_size, input_size))) { - SCSI_LOG_TIMEOUT(1, printk("sg_write: build err=%d\n", k)); - sg_finish_rem_req(srp, NULL, 0); + hp = &srp->header; + hp->interface_id = '\0'; /* indicator of old interface tunnelled */ + hp->cmd_len = (unsigned char)cmd_size; + hp->iovec_count = 0; + hp->mx_sb_len = 0; + if (input_size > 0) + hp->dxfer_direction = ((old_hdr.reply_len - size_sg_header) > 0) ? + SG_DXFER_TO_FROM_DEV : SG_DXFER_TO_DEV; + else + hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : + SG_DXFER_NONE; + hp->dxfer_len = mxsize; + hp->dxferp = (unsigned char *)buf + cmd_size; + hp->sbp = NULL; + hp->timeout = old_hdr.reply_len; /* structure abuse ... */ + hp->flags = input_size; /* structure abuse ... */ + hp->pack_id = old_hdr.pack_id; + hp->usr_ptr = NULL; + __copy_from_user(cmnd, buf, cmd_size); + k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking); + return (k < 0) ? k : count; +} + +static ssize_t sg_new_write(Sg_fd * sfp, const char * buf, size_t count, + int blocking, int read_only, Sg_request ** o_srp) +{ + int k; + Sg_request * srp; + sg_io_hdr_t * hp; + unsigned char cmnd[sizeof(dummy_cmdp->cmnd)]; + int timeout; + + if (count < size_sg_io_hdr) + return -EINVAL; + if ((k = verify_area(VERIFY_READ, buf, count))) + return k; /* protects following copy_from_user()s + get_user()s */ + + sfp->cmd_q = 1; /* when sg_io_hdr seen, set command queuing on */ + if (! 
(srp = sg_add_request(sfp))) { + SCSI_LOG_TIMEOUT(1, printk("sg_new_write: queue full\n")); + return -EDOM; + } + hp = &srp->header; + __copy_from_user(hp, buf, size_sg_io_hdr); + if (hp->interface_id != 'S') { + sg_remove_request(sfp, srp); + return -ENOSYS; + } + timeout = sg_ms_to_jif(srp->header.timeout); + if ((! hp->cmdp) || (hp->cmd_len < 6) || (hp->cmd_len > sizeof(cmnd))) { + sg_remove_request(sfp, srp); + return -EMSGSIZE; + } + if ((k = verify_area(VERIFY_READ, hp->cmdp, hp->cmd_len))) { + sg_remove_request(sfp, srp); + return k; /* protects following copy_from_user()s + get_user()s */ + } + __copy_from_user(cmnd, hp->cmdp, hp->cmd_len); + if (read_only && + (! sg_allow_access(cmnd[0], sfp->parentdp->device->type))) { + sg_remove_request(sfp, srp); + return -EACCES; + } + k = sg_common_write(sfp, srp, cmnd, timeout, blocking); + if (k < 0) return k; + if (o_srp) *o_srp = srp; + return count; +} + +static int sg_common_write(Sg_fd * sfp, Sg_request * srp, + unsigned char * cmnd, int timeout, int blocking) +{ + int k; + Scsi_Cmnd * SCpnt; + Sg_device * sdp = sfp->parentdp; + sg_io_hdr_t * hp = &srp->header; + + srp->data.cmd_opcode = cmnd[0]; /* hold opcode of command */ + hp->status = 0; + hp->masked_status = 0; + hp->msg_status = 0; + hp->info = 0; + hp->host_status = 0; + hp->driver_status = 0; + hp->resid = 0; + SCSI_LOG_TIMEOUT(4, + printk("sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n", + (int)cmnd[0], (int)hp->cmd_len)); + + if ((k = sg_start_req(srp))) { + SCSI_LOG_TIMEOUT(1, printk("sg_write: start_req err=%d\n", k)); + sg_finish_rem_req(srp); return k; /* probably out of space --> ENOMEM */ } + if ((k = sg_write_xfer(srp))) { + SCSI_LOG_TIMEOUT(1, printk("sg_write: write_xfer, bad address\n")); + sg_finish_rem_req(srp); + return k; + } /* SCSI_LOG_TIMEOUT(7, printk("sg_write: allocating device\n")); */ - if (! 
(SCpnt = scsi_allocate_device(sdp->device, - !(filp->f_flags & O_NONBLOCK), - TRUE))) { - sg_finish_rem_req(srp, NULL, 0); - if( signal_pending(current) ) - { - return -EINTR; - } - return -EAGAIN; /* No available command blocks at the moment */ + SCpnt = scsi_allocate_device(sdp->device, blocking, TRUE); + if (! SCpnt) { + sg_finish_rem_req(srp); + return (signal_pending(current)) ? -EINTR : -EAGAIN; + /* No available command blocks, or, interrupted while waiting */ } /* SCSI_LOG_TIMEOUT(7, printk("sg_write: device allocated\n")); */ srp->my_cmdp = SCpnt; SCpnt->request.rq_dev = sdp->i_rdev; SCpnt->request.rq_status = RQ_ACTIVE; SCpnt->sense_buffer[0] = 0; - SCpnt->cmd_len = cmd_size; - __copy_from_user(cmnd, buf, cmd_size); + SCpnt->cmd_len = hp->cmd_len; /* Set the LUN field in the command structure, overriding user input */ - cmnd[1]= (cmnd[1] & 0x1f) | (sdp->device->lun << 5); + if (! (hp->flags & SG_FLAG_LUN_INHIBIT)) + cmnd[1] = (cmnd[1] & 0x1f) | (sdp->device->lun << 5); /* SCSI_LOG_TIMEOUT(7, printk("sg_write: do cmd\n")); */ - SCpnt->use_sg = srp->data.use_sg; + SCpnt->use_sg = srp->data.k_use_sg; SCpnt->sglist_len = srp->data.sglist_len; SCpnt->bufflen = srp->data.bufflen; - if (1 == sfp->underrun_flag) - SCpnt->underflow = srp->data.bufflen; - else - SCpnt->underflow = 0; + SCpnt->underflow = 0; SCpnt->buffer = srp->data.buffer; - srp->data.use_sg = 0; + srp->data.k_use_sg = 0; srp->data.sglist_len = 0; srp->data.bufflen = 0; srp->data.buffer = NULL; + hp->duration = jiffies; /* Now send everything of to mid-level. The next time we hear about this - packet is when sg_command_done() is called (ie a callback). */ + packet is when sg_cmd_done_bh() is called (i.e. a callback). 
*/ scsi_do_cmd(SCpnt, (void *)cmnd, - (void *)SCpnt->buffer, mxsize, - sg_command_done, sfp->timeout, SG_DEFAULT_RETRIES); - /* 'mxsize' overwrites SCpnt->bufflen, hence need for b_malloc_len */ -/* SCSI_LOG_TIMEOUT(6, printk("sg_write: sent scsi cmd to mid-level\n")); */ - return count; + (void *)SCpnt->buffer, hp->dxfer_len, + sg_cmd_done_bh, timeout, SG_DEFAULT_RETRIES); + /* dxfer_len overwrites SCpnt->bufflen, hence need for b_malloc_len */ + return 0; } static int sg_ioctl(struct inode * inode, struct file * filp, unsigned int cmd_in, unsigned long arg) { - int result, val; + int result, val, read_only; Sg_device * sdp; Sg_fd * sfp; Sg_request * srp; if ((! (sfp = (Sg_fd *)filp->private_data)) || (! (sdp = sfp->parentdp))) return -ENXIO; - SCSI_LOG_TIMEOUT(3, printk("sg_ioctl: dev=%d, cmd=0x%x\n", + SCSI_LOG_TIMEOUT(3, printk("sg_ioctl: dev=%d, cmd=0x%x\n", MINOR(sdp->i_rdev), (int)cmd_in)); if(! scsi_block_when_processing_errors(sdp->device) ) return -ENXIO; + read_only = (O_RDWR != (filp->f_flags & O_ACCMODE)); switch(cmd_in) { + case SG_IO: + { + int blocking = 1; /* ignore O_NONBLOCK flag */ + + if(! scsi_block_when_processing_errors(sdp->device) ) + return -ENXIO; + result = verify_area(VERIFY_WRITE, (void *)arg, size_sg_io_hdr); + if (result) return result; + result = sg_new_write(sfp, (const char *)arg, size_sg_io_hdr, + blocking, read_only, &srp); + if (result < 0) return result; + srp->sg_io_owned = 1; + while (1) { + int dio = sg_dio_in_use(sfp); + result = 0; /* following macro to beat race condition */ + __wait_event_interruptible(sfp->read_wait, + (sfp->closed || srp->done), result); + if (sfp->closed) + return 0; /* request packet dropped already */ + if (0 == result) + break; + else if (! dio) { /* only let signal out if no dio */ + srp->orphan = 1; + return result; /* -ERESTARTSYS because signal hit process */ + } + } + result = sg_new_read(sfp, (char *)arg, size_sg_io_hdr, srp); + return (result < 0) ? 
result : 0; + } case SG_SET_TIMEOUT: result = get_user(val, (int *)arg); if (result) return result; @@ -525,21 +746,21 @@ static int sg_ioctl(struct inode * inode, struct file * filp, case SG_GET_LOW_DMA: return put_user((int)sfp->low_dma, (int *)arg); case SG_GET_SCSI_ID: - result = verify_area(VERIFY_WRITE, (void *)arg, sizeof(Sg_scsi_id)); + result = verify_area(VERIFY_WRITE, (void *)arg, sizeof(sg_scsi_id_t)); if (result) return result; else { - Sg_scsi_id * sg_idp = (Sg_scsi_id *)arg; + sg_scsi_id_t * sg_idp = (sg_scsi_id_t *)arg; __put_user((int)sdp->device->host->host_no, &sg_idp->host_no); __put_user((int)sdp->device->channel, &sg_idp->channel); __put_user((int)sdp->device->id, &sg_idp->scsi_id); __put_user((int)sdp->device->lun, &sg_idp->lun); __put_user((int)sdp->device->type, &sg_idp->scsi_type); - __put_user((short)sdp->device->host->cmd_per_lun, + __put_user((short)sdp->device->host->cmd_per_lun, &sg_idp->h_cmd_per_lun); - __put_user((short)sdp->device->queue_depth, + __put_user((short)sdp->device->queue_depth, &sg_idp->d_queue_depth); - __put_user(0, &sg_idp->unused1); - __put_user(0, &sg_idp->unused2); + __put_user(0, &sg_idp->unused[0]); + __put_user(0, &sg_idp->unused[1]); return 0; } case SG_SET_FORCE_PACK_ID: @@ -552,7 +773,7 @@ static int sg_ioctl(struct inode * inode, struct file * filp, if (result) return result; srp = sfp->headrp; while (srp) { - if (! srp->my_cmdp) { + if (srp->done && (! srp->sg_io_owned)) { __put_user(srp->header.pack_id, (int *)arg); return 0; } @@ -564,7 +785,7 @@ static int sg_ioctl(struct inode * inode, struct file * filp, srp = sfp->headrp; val = 0; while (srp) { - if (! srp->my_cmdp) + if (srp->done && (! 
srp->sg_io_owned)) ++val; srp = srp->nextrp; } @@ -572,8 +793,6 @@ static int sg_ioctl(struct inode * inode, struct file * filp, case SG_GET_SG_TABLESIZE: return put_user(sdp->sg_tablesize, (int *)arg); case SG_SET_RESERVED_SIZE: - if (O_RDWR != (filp->f_flags & O_ACCMODE)) - return -EACCES; result = get_user(val, (int *)arg); if (result) return result; if (val != sfp->reserve.bufflen) { @@ -586,20 +805,6 @@ static int sg_ioctl(struct inode * inode, struct file * filp, case SG_GET_RESERVED_SIZE: val = (int)sfp->reserve.bufflen; return put_user(val, (int *)arg); - case SG_GET_MERGE_FD: - return put_user((int)sdp->merge_fd, (int *)arg); - case SG_SET_MERGE_FD: - if (O_RDWR != (filp->f_flags & O_ACCMODE)) - return -EACCES; /* require write access since effect wider - then just this fd */ - result = get_user(val, (int *)arg); - if (result) return result; - val = val ? 1 : 0; - if ((val ^ (0 != sdp->merge_fd)) && - sdp->headfp && sdp->headfp->nextfp) - return -EBUSY; /* too much work if multiple fds already */ - sdp->merge_fd = val; - return 0; case SG_SET_COMMAND_Q: result = get_user(val, (int *)arg); if (result) return result; @@ -607,13 +812,13 @@ static int sg_ioctl(struct inode * inode, struct file * filp, return 0; case SG_GET_COMMAND_Q: return put_user((int)sfp->cmd_q, (int *)arg); - case SG_SET_UNDERRUN_FLAG: + case SG_SET_KEEP_ORPHAN: result = get_user(val, (int *)arg); if (result) return result; - sfp->underrun_flag = val; + sfp->keep_orphan = val; return 0; - case SG_GET_UNDERRUN_FLAG: - return put_user((int)sfp->underrun_flag, (int *)arg); + case SG_GET_KEEP_ORPHAN: + return put_user((int)sfp->keep_orphan, (int *)arg); case SG_NEXT_CMD_LEN: result = get_user(val, (int *)arg); if (result) return result; @@ -621,6 +826,32 @@ static int sg_ioctl(struct inode * inode, struct file * filp, return 0; case SG_GET_VERSION_NUM: return put_user(sg_version_num, (int *)arg); + case SG_GET_REQUEST_TABLE: + result = verify_area(VERIFY_WRITE, (void *) arg, + 
size_sg_req_info * SG_MAX_QUEUE); + if (result) return result; + else { + sg_req_info_t rinfo[SG_MAX_QUEUE]; + Sg_request * srp = sfp->headrp; + for (val = 0; val < SG_MAX_QUEUE; + ++val, srp = srp ? srp->nextrp : srp) { + memset(&rinfo[val], 0, size_sg_req_info); + if (srp) { + rinfo[val].req_state = srp->done ? 2 : 1; + rinfo[val].problem = srp->header.masked_status & + srp->header.host_status & srp->header.driver_status; + rinfo[val].duration = srp->done ? + sg_jif_to_ms(srp->header.duration) : + sg_jif_to_ms(jiffies - srp->header.duration); + rinfo[val].orphan = srp->orphan; + rinfo[val].sg_io_owned = srp->sg_io_owned; + rinfo[val].pack_id = srp->header.pack_id; + rinfo[val].usr_ptr = srp->header.usr_ptr; + } + } + __copy_to_user((void *)arg, rinfo, size_sg_req_info * SG_MAX_QUEUE); + return 0; + } case SG_EMULATED_HOST: return put_user(sdp->device->host->hostt->emulated, (int *)arg); case SG_SCSI_RESET: @@ -628,23 +859,22 @@ static int sg_ioctl(struct inode * inode, struct file * filp, return -EBUSY; result = get_user(val, (int *)arg); if (result) return result; - /* Don't do anything till scsi mod level visibility */ + /* Don't do anything till scsi mid level visibility */ return 0; case SCSI_IOCTL_SEND_COMMAND: - /* Allow SCSI_IOCTL_SEND_COMMAND without checking suser() since the - user already has read/write access to the generic device and so - can execute arbitrary SCSI commands. */ - if (O_RDWR != (filp->f_flags & O_ACCMODE)) - return -EACCES; /* very dangerous things can be done here */ + if (read_only) { + unsigned char opcode = WRITE_6; + Scsi_Ioctl_Command * siocp = (void *)arg; + + copy_from_user(&opcode, siocp->data, 1); + if (! 
sg_allow_access(opcode, sdp->device->type)) + return -EACCES; + } return scsi_ioctl_send_command(sdp->device, (void *)arg); case SG_SET_DEBUG: result = get_user(val, (int *)arg); if (result) return result; sdp->sgdebug = (char)val; - if (9 == sdp->sgdebug) - sg_debug(sdp, sfp, 0); - else if (sdp->sgdebug > 9) - sg_debug_all(sfp); return 0; case SCSI_IOCTL_GET_IDLUN: case SCSI_IOCTL_GET_BUS_NUMBER: @@ -652,7 +882,7 @@ static int sg_ioctl(struct inode * inode, struct file * filp, case SG_GET_TRANSFORM: return scsi_ioctl(sdp->device, cmd_in, (void *)arg); default: - if (O_RDWR != (filp->f_flags & O_ACCMODE)) + if (read_only) return -EACCES; /* don't know so take safe approach */ return scsi_ioctl(sdp->device, cmd_in, (void *)arg); } @@ -671,18 +901,18 @@ static unsigned int sg_poll(struct file * filp, poll_table * wait) poll_wait(filp, &sfp->read_wait, wait); srp = sfp->headrp; while (srp) { /* if any read waiting, flag it */ - if (! (res || srp->my_cmdp)) + if ((0 == res) && srp->done && (! srp->sg_io_owned)) res = POLLIN | POLLRDNORM; ++count; srp = srp->nextrp; } - if (0 == sfp->cmd_q) { + if (! sfp->cmd_q) { if (0 == count) res |= POLLOUT | POLLWRNORM; } else if (count < SG_MAX_QUEUE) res |= POLLOUT | POLLWRNORM; - SCSI_LOG_TIMEOUT(3, printk("sg_poll: dev=%d, res=0x%x\n", + SCSI_LOG_TIMEOUT(3, printk("sg_poll: dev=%d, res=0x%x\n", MINOR(sdp->i_rdev), (int)res)); return res; } @@ -695,36 +925,29 @@ static int sg_fasync(int fd, struct file * filp, int mode) if ((! (sfp = (Sg_fd *)filp->private_data)) || (! (sdp = sfp->parentdp))) return -ENXIO; - SCSI_LOG_TIMEOUT(3, printk("sg_fasync: dev=%d, mode=%d\n", + SCSI_LOG_TIMEOUT(3, printk("sg_fasync: dev=%d, mode=%d\n", MINOR(sdp->i_rdev), mode)); retval = fasync_helper(fd, filp, mode, &sfp->async_qp); return (retval < 0) ? retval : 0; } -/* This function is called by the interrupt handler when we - * actually have a command that is complete. 
*/ -static void sg_command_done(Scsi_Cmnd * SCpnt) +/* This function is a "bottom half" handler that is called by the + * mid level when a command is completed (or has failed). */ +static void sg_cmd_done_bh(Scsi_Cmnd * SCpnt) { int dev = MINOR(SCpnt->request.rq_dev); Sg_device * sdp; Sg_fd * sfp; Sg_request * srp = NULL; - int closed = 0; - static const int min_sb_len = - SG_MAX_SENSE > sizeof(SCpnt->sense_buffer) ? - sizeof(SCpnt->sense_buffer) : SG_MAX_SENSE; - if ((NULL == sg_dev_arr) || (dev < 0) || (dev >= sg_template.dev_max)) { - SCSI_LOG_TIMEOUT(1, printk("sg__done: bad args dev=%d\n", dev)); + if ((NULL == sg_dev_arr) || (dev < 0) || (dev >= sg_template.dev_max) + || (NULL == (sdp = &sg_dev_arr[dev]))) { + SCSI_LOG_TIMEOUT(1, printk("sg...bh: bad args dev=%d\n", dev)); scsi_release_command(SCpnt); SCpnt = NULL; return; } - sdp = &sg_dev_arr[dev]; - if (NULL == sdp->device) - return; /* Get out of here quick ... */ - sfp = sdp->headfp; while (sfp) { srp = sfp->headrp; @@ -738,198 +961,77 @@ static void sg_command_done(Scsi_Cmnd * SCpnt) sfp = sfp->nextfp; } if (! srp) { - SCSI_LOG_TIMEOUT(1, printk("sg__done: req missing, dev=%d\n", dev)); + SCSI_LOG_TIMEOUT(1, printk("sg...bh: req missing, dev=%d\n", dev)); scsi_release_command(SCpnt); SCpnt = NULL; return; } -/* First transfer ownership of data buffers to sg_device object. */ - srp->data.use_sg = SCpnt->use_sg; + /* First transfer ownership of data buffers to sg_device object. 
*/ + srp->data.k_use_sg = SCpnt->use_sg; srp->data.sglist_len = SCpnt->sglist_len; srp->data.bufflen = SCpnt->bufflen; srp->data.buffer = SCpnt->buffer; - if (2 == sfp->underrun_flag) - srp->header.pack_len = SCpnt->underflow; sg_clr_scpnt(SCpnt); srp->my_cmdp = NULL; - - SCSI_LOG_TIMEOUT(4, printk("sg__done: dev=%d, scsi_stat=%d, res=0x%x\n", - dev, (int)status_byte(SCpnt->result), (int)SCpnt->result)); - memcpy(srp->header.sense_buffer, SCpnt->sense_buffer, min_sb_len); - switch (host_byte(SCpnt->result)) - { /* This setup of 'result' is for backward compatibility and is best - ignored by the user who should use target, host + driver status */ - case DID_OK: - case DID_PASSTHROUGH: - case DID_SOFT_ERROR: - srp->header.result = 0; - break; - case DID_NO_CONNECT: - case DID_BUS_BUSY: - case DID_TIME_OUT: - srp->header.result = EBUSY; - break; - case DID_BAD_TARGET: - case DID_ABORT: - case DID_PARITY: - case DID_RESET: - case DID_BAD_INTR: - srp->header.result = EIO; - break; - case DID_ERROR: - if (SCpnt->sense_buffer[0] == 0 && - status_byte(SCpnt->result) == GOOD) - srp->header.result = 0; - else - srp->header.result = EIO; - break; - default: - SCSI_LOG_TIMEOUT(1, printk( - "sg: unexpected host_byte=%d, dev=%d in 'done'\n", - host_byte(SCpnt->result), dev)); - srp->header.result = EIO; - break; - } - -/* Following if statement is a patch supplied by Eric Youngdale */ - if (driver_byte(SCpnt->result) != 0 - && (SCpnt->sense_buffer[0] & 0x7f) == 0x70 - && (SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION - && sdp->device->removable) { -/* Detected disc change. Set the bit - this may be used if there are */ -/* filesystems using this device. 
*/ - sdp->device->changed = 1; - } - srp->header.target_status = status_byte(SCpnt->result); - if ((sdp->sgdebug > 0) && - ((CHECK_CONDITION == srp->header.target_status) || - (COMMAND_TERMINATED == srp->header.target_status))) - print_sense("sg_command_done", SCpnt); - srp->header.host_status = host_byte(SCpnt->result); - srp->header.driver_status = driver_byte(SCpnt->result); + srp->done = 1; + + SCSI_LOG_TIMEOUT(4, printk("sg...bh: dev=%d, pack_id=%d, res=0x%x\n", + dev, srp->header.pack_id, (int)SCpnt->result)); + srp->header.resid = SCpnt->resid; + /* sg_unmap_and(&srp->data, 0); */ /* unmap locked pages a.s.a.p. */ + srp->header.duration = sg_jif_to_ms(jiffies - (int)srp->header.duration); + if (0 != SCpnt->result) { + memcpy(srp->sense_b, SCpnt->sense_buffer, sizeof(srp->sense_b)); + srp->header.status = 0xff & SCpnt->result; + srp->header.masked_status = status_byte(SCpnt->result); + srp->header.msg_status = msg_byte(SCpnt->result); + srp->header.host_status = host_byte(SCpnt->result); + srp->header.driver_status = driver_byte(SCpnt->result); + if ((sdp->sgdebug > 0) && + ((CHECK_CONDITION == srp->header.masked_status) || + (COMMAND_TERMINATED == srp->header.masked_status))) + print_sense("sg_cmd_done_bh", SCpnt); + + /* Following if statement is a patch supplied by Eric Youngdale */ + if (driver_byte(SCpnt->result) != 0 + && (SCpnt->sense_buffer[0] & 0x7f) == 0x70 + && (SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION + && sdp->device->removable) { + /* Detected disc change. Set the bit - this may be used if */ + /* there are filesystems using this device. 
*/ + sdp->device->changed = 1; + } + } + /* Rely on write phase to clean out srp status values, so no "else" */ scsi_release_command(SCpnt); SCpnt = NULL; if (sfp->closed) { /* whoops this fd already released, cleanup */ - closed = 1; SCSI_LOG_TIMEOUT(1, - printk("sg__done: already closed, freeing ...\n")); -/* should check if module is unloaded <<<<<<< */ - sg_finish_rem_req(srp, NULL, 0); - if (NULL == sfp->headrp) { + printk("sg...bh: already closed, freeing ...\n")); + /* should check if module is unloaded <<<<<<< */ + sg_finish_rem_req(srp); + srp = NULL; + if (NULL == sfp->headrp) { SCSI_LOG_TIMEOUT(1, - printk("sg__done: already closed, final cleanup\n")); + printk("sg...bh: already closed, final cleanup\n")); sg_remove_sfp(sdp, sfp); + sfp = NULL; } } -/* Now wake up any sg_read() that is waiting for this packet. */ - wake_up_interruptible(&sfp->read_wait); - if ((sfp->async_qp) && (! closed)) - kill_fasync(sfp->async_qp, SIGPOLL, POLL_IN); -} - -static void sg_debug_all(const Sg_fd * sfp) -{ - const Sg_device * sdp = sg_dev_arr; - int k; - - if (NULL == sg_dev_arr) { - printk("sg_debug_all: sg_dev_arr NULL, death is imminent\n"); - return; - } - if (! sfp) - printk("sg_debug_all: sfp (file descriptor pointer) NULL\n"); - - printk("sg_debug_all: dev_max=%d, %s\n", - sg_template.dev_max, sg_version_str); - printk(" scsi_dma_free_sectors=%u, sg_pool_secs_aval=%d\n", - scsi_dma_free_sectors, sg_pool_secs_avail); - printk(" sg_big_buff=%d\n", sg_big_buff); -#ifdef SG_DEBUG - printk(" malloc counts, kmallocs=%d, dma_pool=%d, pages=%d\n", - sg_num_kmal, sg_num_pool, sg_num_page); -#endif - for (k = 0; k < sg_template.dev_max; ++k, ++sdp) { - if (sdp->headfp) { - if (! 
sfp) - sfp = sdp->headfp; /* just to keep things going */ - else if (sdp == sfp->parentdp) - printk(" ***** Invoking device follows *****\n"); - sg_debug(sdp, sfp, 1); + else if (srp && srp->orphan) { + if (sfp->keep_orphan) + srp->sg_io_owned = 0; + else { + sg_finish_rem_req(srp); + srp = NULL; } } -} - -static void sg_debug(const Sg_device * sdp, const Sg_fd * sfp, int part_of) -{ - Sg_fd * fp; - Sg_request * srp; - int dev; - int k; - - if (! sfp) - printk("sg_debug: sfp (file descriptor pointer) NULL\n"); - if (! sdp) { - printk("sg_debug: sdp pointer (to device) NULL\n"); - return; - } - else if (! sdp->device) { - printk("sg_debug: device detached ??\n"); - return; - } - dev = MINOR(sdp->i_rdev); - - if (part_of) - printk(" >>> device=%d(sg%c), ", dev, 'a' + dev); - else - printk("sg_debug: device=%d(sg%c), ", dev, 'a' + dev); - printk("scsi%d chan=%d id=%d lun=%d em=%d\n", sdp->device->host->host_no, - sdp->device->channel, sdp->device->id, sdp->device->lun, - sdp->device->host->hostt->emulated); - printk(" sg_tablesize=%d, excl=%d, sgdebug=%d, merge_fd=%d\n", - sdp->sg_tablesize, sdp->exclude, sdp->sgdebug, sdp->merge_fd); - if (! part_of) { - printk(" scsi_dma_free_sectors=%u, sg_pool_secs_aval=%d\n", - scsi_dma_free_sectors, sg_pool_secs_avail); -#ifdef SG_DEBUG - printk(" mallocs: kmallocs=%d, dma_pool=%d, pages=%d\n", - sg_num_kmal, sg_num_pool, sg_num_page); -#endif - } - - fp = sdp->headfp; - for (k = 1; fp; fp = fp->nextfp, ++k) { - if (sfp == fp) - printk(" *** Following data belongs to invoking FD ***\n"); - else if (! 
fp->parentdp) - printk(">> Following FD has NULL parent pointer ???\n"); - printk(" FD(%d): timeout=%d, bufflen=%d, use_sg=%d\n", - k, fp->timeout, fp->reserve.bufflen, (int)fp->reserve.use_sg); - printk(" low_dma=%d, cmd_q=%d, s_sc_len=%d, f_packid=%d\n", - (int)fp->low_dma, (int)fp->cmd_q, (int)fp->save_scat_len, - (int)fp->force_packid); - printk(" urun_flag=%d, next_cmd_len=%d, closed=%d\n", - (int)fp->underrun_flag, (int)fp->next_cmd_len, - (int)fp->closed); - srp = fp->headrp; - if (NULL == srp) - printk(" No requests active\n"); - while (srp) { - if (srp->res_used) - printk("reserved buff >> "); - else - printk(" "); - if (srp->my_cmdp) - printk("written: pack_id=%d, bufflen=%d, use_sg=%d\n", - srp->header.pack_id, srp->my_cmdp->bufflen, - srp->my_cmdp->use_sg); - else - printk("to_read: pack_id=%d, bufflen=%d, use_sg=%d\n", - srp->header.pack_id, srp->data.bufflen, srp->data.use_sg); - if (! srp->parentfp) - printk(">> request has NULL parent pointer ???\n"); - srp = srp->nextrp; - } + if (sfp && srp) { + /* Now wake up any sg_read() that is waiting for this packet. 
*/ + wake_up_interruptible(&sfp->read_wait); + if (sfp->async_qp) + kill_fasync(sfp->async_qp, SIGPOLL, POLL_IN); } } @@ -956,7 +1058,7 @@ static int sg_detect(Scsi_Device * scsidp) printk("Detected scsi generic sg%c at scsi%d," " channel %d, id %d, lun %d\n", 'a'+sg_template.dev_noticed, - scsidp->host->host_no, scsidp->channel, + scsidp->host->host_no, scsidp->channel, scsidp->id, scsidp->lun); } sg_template.dev_noticed++; @@ -967,6 +1069,7 @@ static int sg_detect(Scsi_Device * scsidp) static int sg_init() { static int sg_registered = 0; + int size; if (sg_template.dev_noticed == 0) return 0; @@ -984,19 +1087,42 @@ static int sg_init() if(sg_dev_arr) return 0; SCSI_LOG_TIMEOUT(3, printk("sg_init\n")); - sg_dev_arr = (Sg_device *) - kmalloc((sg_template.dev_noticed + SG_EXTRA_DEVS) - * sizeof(Sg_device), GFP_ATOMIC); - memset(sg_dev_arr, 0, (sg_template.dev_noticed + SG_EXTRA_DEVS) - * sizeof(Sg_device)); + size = sizeof(Sg_device) * + (sg_template.dev_noticed + SG_EXTRA_DEVS); + sg_dev_arr = (Sg_device *)kmalloc(size, GFP_ATOMIC); + memset(sg_dev_arr, 0, size); if (NULL == sg_dev_arr) { printk("sg_init: no space for sg_dev_arr\n"); return 1; } +#ifdef CONFIG_PROC_FS + sg_proc_init(); +#endif /* CONFIG_PROC_FS */ sg_template.dev_max = sg_template.dev_noticed + SG_EXTRA_DEVS; return 0; } +#ifndef MODULE +static int __init sg_def_reserved_size_setup(char *str) +{ + int tmp; + + if (get_option(&str, &tmp) == 1) { + def_reserved_size = tmp; + if (tmp >= 0) + sg_big_buff = tmp; + return 1; + } else { + printk("sg_def_reserved_size : usage sg_def_reserved_size=n " + "(n could be 65536, 131072 or 262144)\n"); + return 0; + } +} + +__setup("sg_def_reserved_size=", sg_def_reserved_size_setup); +#endif + + static int sg_attach(Scsi_Device * scsidp) { Sg_device * sdp = sg_dev_arr; @@ -1005,6 +1131,8 @@ static int sg_attach(Scsi_Device * scsidp) if ((sg_template.nr_dev >= sg_template.dev_max) || (! 
sdp)) { scsidp->attached--; + printk("sg_attach: rejected since exceeds dev_max=%d\n", + sg_template.dev_max); return 1; } @@ -1018,7 +1146,6 @@ static int sg_attach(Scsi_Device * scsidp) init_waitqueue_head(&sdp->o_excl_wait); sdp->headfp= NULL; sdp->exclude = 0; - sdp->merge_fd = 0; /* Cope with SG_DEF_MERGE_FD on open */ sdp->sgdebug = 0; sdp->sg_tablesize = scsidp->host ? scsidp->host->sg_tablesize : 0; sdp->i_rdev = MKDEV(SCSI_GENERIC_MAJOR, k); @@ -1046,13 +1173,13 @@ static void sg_detach(Scsi_Device * scsidp) if(sdp->device != scsidp) continue; /* dirty but lowers nesting */ if (sdp->headfp) { -/* Need to stop sg_command_done() playing with this list during this loop */ +/* Need to stop sg_cmd_done_bh() playing with this list during this loop */ spin_lock_irqsave(&sg_request_lock, flags); sfp = sdp->headfp; while (sfp) { srp = sfp->headrp; while (srp) { - if (srp->my_cmdp) + if (! srp->done) sg_shorten_timeout(srp->my_cmdp); srp = srp->nextrp; } @@ -1064,7 +1191,7 @@ static void sg_detach(Scsi_Device * scsidp) } else { SCSI_LOG_TIMEOUT(3, printk("sg_detach: dev=%d\n", k)); - sdp->device = NULL; + sdp->device = NULL; } scsidp->attached--; sg_template.nr_dev--; @@ -1078,7 +1205,12 @@ static void sg_detach(Scsi_Device * scsidp) #ifdef MODULE +MODULE_PARM(def_reserved_size, "i"); +MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd"); + int init_module(void) { + if (def_reserved_size >= 0) + sg_big_buff = def_reserved_size; sg_template.module = &__this_module; return scsi_register_module(MODULE_SCSI_DEV, &sg_template); } @@ -1088,10 +1220,13 @@ void cleanup_module( void) scsi_unregister_module(MODULE_SCSI_DEV, &sg_template); unregister_chrdev(SCSI_GENERIC_MAJOR, "sg"); +#ifdef CONFIG_PROC_FS + sg_proc_cleanup(); +#endif /* CONFIG_PROC_FS */ if(sg_dev_arr != NULL) { /* Really worrying situation of writes still pending and get here */ /* Strategy: shorten timeout on release + wait on detach ... 
*/ - kfree((char *) sg_dev_arr); + kfree((char *)sg_dev_arr); sg_dev_arr = NULL; } sg_template.dev_max = 0; @@ -1118,56 +1253,178 @@ static void sg_shorten_timeout(Scsi_Cmnd * scpnt) scsi_add_timer(scpnt, scpnt->timeout_per_command, scsi_old_times_out); #else - spin_unlock_irq(&sg_request_lock); + unsigned long flags = 0; + spin_lock_irqsave(&sg_request_lock, flags); scsi_sleep(HZ); /* just sleep 1 second and hope ... */ - spin_lock_irq(&sg_request_lock); + spin_unlock_irqrestore(&sg_request_lock, flags); #endif } -static int sg_start_req(Sg_request * srp, int max_buff_size, - const char * inp, int num_write_xfer) +static int sg_start_req(Sg_request * srp) { int res; Sg_fd * sfp = srp->parentfp; + sg_io_hdr_t * hp = &srp->header; + int dxfer_len = (int)hp->dxfer_len; Sg_scatter_hold * req_schp = &srp->data; Sg_scatter_hold * rsv_schp = &sfp->reserve; - SCSI_LOG_TIMEOUT(4, printk("sg_start_req: max_buff_size=%d\n", - max_buff_size)); - if ((! sg_res_in_use(sfp)) && (max_buff_size <= rsv_schp->bufflen)) { - sg_link_reserve(sfp, srp, max_buff_size); - sg_write_xfer(req_schp, inp, num_write_xfer); + SCSI_LOG_TIMEOUT(4, printk("sg_start_req: dxfer_len=%d\n", dxfer_len)); + if ((hp->flags & SG_FLAG_DIRECT_IO) && (dxfer_len > 0) && + (hp->dxfer_direction != SG_DXFER_NONE) && (0 == hp->iovec_count) && + (! sfp->parentdp->device->host->unchecked_isa_dma)) { + res = sg_build_dir(srp, sfp, dxfer_len); + if (res <= 0) /* -ve -> error, 0 -> done, 1 -> try indirect */ + return res; + } + if ((! 
sg_res_in_use(sfp)) && (dxfer_len <= rsv_schp->bufflen)) { + sg_link_reserve(sfp, srp, dxfer_len); } else { - res = sg_build_scat(req_schp, max_buff_size, sfp); + res = sg_build_indi(req_schp, sfp, dxfer_len); if (res) { sg_remove_scat(req_schp); return res; } - sg_write_xfer(req_schp, inp, num_write_xfer); } return 0; } -static void sg_finish_rem_req(Sg_request * srp, char * outp, - int num_read_xfer) +static void sg_finish_rem_req(Sg_request * srp) { Sg_fd * sfp = srp->parentfp; Sg_scatter_hold * req_schp = &srp->data; SCSI_LOG_TIMEOUT(4, printk("sg_finish_rem_req: res_used=%d\n", - (int)srp->res_used)); - if (num_read_xfer > 0) - sg_read_xfer(req_schp, outp, num_read_xfer); + (int)srp->res_used)); + sg_unmap_and(&srp->data, 1); if (srp->res_used) sg_unlink_reserve(sfp, srp); - else + else sg_remove_scat(req_schp); sg_remove_request(sfp, srp); } -static int sg_build_scat(Sg_scatter_hold * schp, int buff_size, - const Sg_fd * sfp) +static int sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp) +{ + int mem_src, ret_sz; + int sg_bufflen = PAGE_SIZE; + int elem_sz = sizeof(struct scatterlist) + sizeof(char); + int mx_sc_elems = (sg_bufflen / elem_sz) - 1; + + mem_src = SG_HEAP_KMAL; + schp->buffer = (struct scatterlist *)sg_malloc(sfp, sg_bufflen, + &ret_sz, &mem_src); + schp->buffer_mem_src = (char)mem_src; + if (! 
schp->buffer) + return -ENOMEM; + else if (ret_sz != sg_bufflen) { + sg_bufflen = ret_sz; + mx_sc_elems = (sg_bufflen / elem_sz) - 1; + } + schp->sglist_len = sg_bufflen; + memset(schp->buffer, 0, sg_bufflen); + return mx_sc_elems; /* number of scat_gath elements allocated */ +} + +static void sg_unmap_and(Sg_scatter_hold * schp, int free_also) +{ +#ifdef SG_ALLOW_DIO + if (schp && schp->kiobp) { + if (schp->mapped) { + unmap_kiobuf(schp->kiobp); + schp->mapped = 0; + } + if (free_also) { + free_kiovec(1, &schp->kiobp); + schp->kiobp = NULL; + } + } +#endif +} + +static int sg_build_dir(Sg_request * srp, Sg_fd * sfp, int dxfer_len) +{ +#ifdef SG_ALLOW_DIO + int res, k, split, offset, num, mx_sc_elems, rem_sz; + struct kiobuf * kp; + char * mem_src_arr; + struct scatterlist * sclp; + unsigned long addr, prev_addr; + sg_io_hdr_t * hp = &srp->header; + Sg_scatter_hold * schp = &srp->data; + int sg_tablesize = sfp->parentdp->sg_tablesize; + + res = alloc_kiovec(1, &schp->kiobp); + if (0 != res) { + SCSI_LOG_TIMEOUT(5, printk("sg_build_dir: alloc_kiovec res=%d\n", res)); + return 1; + } + res = map_user_kiobuf((SG_DXFER_TO_DEV == hp->dxfer_direction) ? 1 : 0, + schp->kiobp, (unsigned long)hp->dxferp, dxfer_len); + if (0 != res) { + SCSI_LOG_TIMEOUT(5, + printk("sg_build_dir: map_user_kiobuf res=%d\n", res)); + sg_unmap_and(schp, 1); + return 1; + } + schp->mapped = 1; + kp = schp->kiobp; + prev_addr = page_address(kp->maplist[0]); + for (k = 1, split = 0; k < kp->nr_pages; ++k, prev_addr = addr) { + addr = page_address(kp->maplist[k]); + if ((prev_addr + PAGE_SIZE) != addr) { + split = k; + break; + } + } + if (! 
split) { + schp->k_use_sg = 0; + schp->buffer = (void *)(page_address(kp->maplist[0]) + kp->offset); + schp->bufflen = dxfer_len; + schp->buffer_mem_src = SG_USER_MEM; + schp->b_malloc_len = dxfer_len; + hp->info |= SG_INFO_DIRECT_IO; + return 0; + } + mx_sc_elems = sg_build_sgat(schp, sfp); + if (mx_sc_elems <= 1) { + sg_unmap_and(schp, 1); + sg_remove_scat(schp); + return 1; + } + mem_src_arr = schp->buffer + (mx_sc_elems * sizeof(struct scatterlist)); + for (k = 0, sclp = schp->buffer, rem_sz = dxfer_len; + (k < sg_tablesize) && (rem_sz > 0) && (k < mx_sc_elems); + ++k, ++sclp) { + offset = (0 == k) ? kp->offset : 0; + num = (rem_sz > (PAGE_SIZE - offset)) ? (PAGE_SIZE - offset) : + rem_sz; + sclp->address = (void *)(page_address(kp->maplist[k]) + offset); + sclp->length = num; + mem_src_arr[k] = SG_USER_MEM; + rem_sz -= num; + SCSI_LOG_TIMEOUT(5, + printk("sg_build_dir: k=%d, a=0x%p, len=%d, ms=%d\n", + k, sclp->address, num, mem_src_arr[k])); + } + schp->k_use_sg = k; + SCSI_LOG_TIMEOUT(5, + printk("sg_build_dir: k_use_sg=%d, rem_sz=%d\n", k, rem_sz)); + schp->bufflen = dxfer_len; + if (rem_sz > 0) { /* must have failed */ + sg_unmap_and(schp, 1); + sg_remove_scat(schp); + return 1; /* out of scatter gather elements, try indirect */ + } + hp->info |= SG_INFO_DIRECT_IO; + return 0; +#else + return 1; +#endif /* SG_ALLOW_DIO */ +} + +static int sg_build_indi(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size) { int ret_sz, mem_src; int blk_size = buff_size; @@ -1179,7 +1436,7 @@ static int sg_build_scat(Sg_scatter_hold * schp, int buff_size, ++blk_size; /* don't know why */ /* round request up to next highest SG_SECTOR_SZ byte boundary */ blk_size = (blk_size + SG_SECTOR_MSK) & (~SG_SECTOR_MSK); - SCSI_LOG_TIMEOUT(4, printk("sg_build_scat: buff_size=%d, blk_size=%d\n", + SCSI_LOG_TIMEOUT(4, printk("sg_build_indi: buff_size=%d, blk_size=%d\n", buff_size, blk_size)); if (blk_size <= SG_SCATTER_SZ) { mem_src = SG_HEAP_PAGE; @@ -1187,10 +1444,10 @@ static int 
sg_build_scat(Sg_scatter_hold * schp, int buff_size, if (! p) return -ENOMEM; if (blk_size == ret_sz) { /* got it on the first attempt */ - schp->use_sg = 0; + schp->k_use_sg = 0; schp->buffer = p; schp->bufflen = blk_size; - schp->mem_src = mem_src; + schp->buffer_mem_src = (char)mem_src; schp->b_malloc_len = blk_size; return 0; } @@ -1204,29 +1461,23 @@ static int sg_build_scat(Sg_scatter_hold * schp, int buff_size, /* Want some local declarations, so start new block ... */ { /* lets try and build a scatter gather list */ struct scatterlist * sclp; - int k, rem_sz, num, nxt; - int sc_bufflen = PAGE_SIZE; - int mx_sc_elems = (sc_bufflen / sizeof(struct scatterlist)) - 1; + int k, rem_sz, num; + int mx_sc_elems; int sg_tablesize = sfp->parentdp->sg_tablesize; int first = 1; + char * mem_src_arr; - k = SG_HEAP_KMAL; /* want to protect mem_src, use k as scratch */ - schp->buffer = (struct scatterlist *)sg_malloc(sfp, - sc_bufflen, &num, &k); - schp->mem_src = (char)k; /* N.B. ret_sz and mem_src carried into this block ... */ - if (! schp->buffer) - return -ENOMEM; - else if (num != sc_bufflen) { - sc_bufflen = num; - mx_sc_elems = (sc_bufflen / sizeof(struct scatterlist)) - 1; - } - schp->sglist_len = sc_bufflen; - memset(schp->buffer, 0, sc_bufflen); - for (k = 0, sclp = schp->buffer, rem_sz = blk_size, nxt =0; - (k < sg_tablesize) && (rem_sz > 0) && (k < mx_sc_elems); + mx_sc_elems = sg_build_sgat(schp, sfp); + if (mx_sc_elems < 0) + return mx_sc_elems; /* most likely -ENOMEM */ + mem_src_arr = schp->buffer + + (mx_sc_elems * sizeof(struct scatterlist)); + + for (k = 0, sclp = schp->buffer, rem_sz = blk_size; + (k < sg_tablesize) && (rem_sz > 0) && (k < mx_sc_elems); ++k, rem_sz -= ret_sz, ++sclp) { - if (first) + if (first) first = 0; else { num = (rem_sz > SG_SCATTER_SZ) ? 
SG_SCATTER_SZ : rem_sz; @@ -1237,15 +1488,15 @@ static int sg_build_scat(Sg_scatter_hold * schp, int buff_size, } sclp->address = p; sclp->length = ret_sz; - sclp->alt_address = (char *)(long)mem_src; - - SCSI_LOG_TIMEOUT(5, - printk("sg_build_build: k=%d, a=0x%p, len=%d, ms=%d\n", + mem_src_arr[k] = mem_src; + + SCSI_LOG_TIMEOUT(5, + printk("sg_build_build: k=%d, a=0x%p, len=%d, ms=%d\n", k, sclp->address, ret_sz, mem_src)); } /* end of for loop */ - schp->use_sg = k; - SCSI_LOG_TIMEOUT(5, - printk("sg_build_scat: use_sg=%d, rem_sz=%d\n", k, rem_sz)); + schp->k_use_sg = k; + SCSI_LOG_TIMEOUT(5, + printk("sg_build_indi: k_use_sg=%d, rem_sz=%d\n", k, rem_sz)); schp->bufflen = blk_size; if (rem_sz > 0) /* must have failed */ return -ENOMEM; @@ -1253,74 +1504,254 @@ static int sg_build_scat(Sg_scatter_hold * schp, int buff_size, return 0; } -static void sg_write_xfer(Sg_scatter_hold * schp, const char * inp, - int num_write_xfer) +static int sg_write_xfer(Sg_request * srp) { - SCSI_LOG_TIMEOUT(4, printk("sg_write_xfer: num_write_xfer=%d, use_sg=%d\n", - num_write_xfer, schp->use_sg)); - if ((! inp) || (num_write_xfer <= 0)) - return; - if (schp->use_sg > 0) { - int k, num; - struct scatterlist * sclp = (struct scatterlist *)schp->buffer; - - for (k = 0; (k < schp->use_sg) && sclp->address; ++k, ++sclp) { - num = (int)sclp->length; - if (num > num_write_xfer) { - __copy_from_user(sclp->address, inp, num_write_xfer); - break; + sg_io_hdr_t * hp = &srp->header; + Sg_scatter_hold * schp = &srp->data; + int num_xfer = 0; + int j, k, onum, usglen, ksglen, res, ok; + int iovec_count = (int)hp->iovec_count; + unsigned char * p; + unsigned char * up; + int new_interface = ('\0' == hp->interface_id) ? 0 : 1; + + if ((SG_DXFER_TO_DEV == hp->dxfer_direction) || + (SG_DXFER_TO_FROM_DEV == hp->dxfer_direction)) { + num_xfer = (int)(new_interface ? 
hp->dxfer_len : hp->flags); + if (schp->bufflen < num_xfer) + num_xfer = schp->bufflen; + } + if ((num_xfer <= 0) || (new_interface && (SG_FLAG_NO_DXFER & hp->flags))) + return 0; + + SCSI_LOG_TIMEOUT(4, + printk("sg_write_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n", + num_xfer, iovec_count, schp->k_use_sg)); + if (iovec_count) { + onum = iovec_count; + if ((k = verify_area(VERIFY_READ, hp->dxferp, + size_sg_iovec * onum))) + return k; + } + else + onum = 1; + + if (0 == schp->k_use_sg) { /* kernel has single buffer */ + if (SG_USER_MEM != schp->buffer_mem_src) { /* else nothing to do */ + + for (j = 0, p = schp->buffer; j < onum; ++j) { + res = sg_u_iovec(hp, iovec_count, j, 1, &usglen, &up); + if (res) return res; + usglen = (num_xfer > usglen) ? usglen : num_xfer; + __copy_from_user(p, up, usglen); + p += usglen; + num_xfer -= usglen; + if (num_xfer <= 0) + return 0; } - else { - __copy_from_user(sclp->address, inp, num); - num_write_xfer -= num; - if (num_write_xfer <= 0) + } + } + else { /* kernel using scatter gather list */ + struct scatterlist * sclp = (struct scatterlist *)schp->buffer; + char * mem_src_arr = sg_get_sgat_msa(schp); + ksglen = (int)sclp->length; + p = sclp->address; + + for (j = 0, k = 0; j < onum; ++j) { + res = sg_u_iovec(hp, iovec_count, j, 1, &usglen, &up); + if (res) return res; + + for (; (k < schp->k_use_sg) && p; + ++k, ++sclp, ksglen = (int)sclp->length, p = sclp->address) { + ok = (SG_USER_MEM != mem_src_arr[k]); + if (usglen <= 0) + break; + if (ksglen > usglen) { + if (usglen >= num_xfer) { + if (ok) __copy_from_user(p, up, num_xfer); + return 0; + } + if (ok) __copy_from_user(p, up, usglen); + p += usglen; + ksglen -= usglen; break; - inp += num; + } + else { + if (ksglen >= num_xfer) { + if (ok) __copy_from_user(p, up, num_xfer); + return 0; + } + if (ok) __copy_from_user(p, up, ksglen); + up += ksglen; + usglen -= ksglen; + } } } } - else - __copy_from_user(schp->buffer, inp, num_write_xfer); + return 0; +} + +static 
int sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind, + int wr_xf, int * countp, unsigned char ** up) +{ + int num_xfer = (int)hp->dxfer_len; + unsigned char * p; + int count, k; + sg_iovec_t u_iovec; + + if (0 == sg_num) { + p = (unsigned char *)hp->dxferp; + if (wr_xf && ('\0' == hp->interface_id)) + count = (int)hp->flags; /* holds "old" input_size */ + else + count = num_xfer; + } + else { + __copy_from_user(&u_iovec, + (unsigned char *)hp->dxferp + (ind * size_sg_iovec), + size_sg_iovec); + p = (unsigned char *)u_iovec.iov_base; + count = (int)u_iovec.iov_len; + } + if ((k = verify_area(wr_xf ? VERIFY_READ : VERIFY_WRITE, p, count))) + return k; + if (up) *up = p; + if (countp) *countp = count; + return 0; +} + +static char * sg_get_sgat_msa(Sg_scatter_hold * schp) +{ + int elem_sz = sizeof(struct scatterlist) + sizeof(char); + int mx_sc_elems = (schp->sglist_len / elem_sz) - 1; + return schp->buffer + (sizeof(struct scatterlist) * mx_sc_elems); } static void sg_remove_scat(Sg_scatter_hold * schp) { - SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: use_sg=%d\n", schp->use_sg)); - if(schp->use_sg > 0) { + SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: k_use_sg=%d\n", + schp->k_use_sg)); + if (schp->buffer && schp->sglist_len) { int k, mem_src; struct scatterlist * sclp = (struct scatterlist *)schp->buffer; + char * mem_src_arr = sg_get_sgat_msa(schp); - for (k = 0; (k < schp->use_sg) && sclp->address; ++k, ++sclp) { - mem_src = (int)(long)sclp->alt_address; - SCSI_LOG_TIMEOUT(5, - printk("sg_remove_scat: k=%d, a=0x%p, len=%d, ms=%d\n", + for (k = 0; (k < schp->k_use_sg) && sclp->address; ++k, ++sclp) { + mem_src = mem_src_arr[k]; + SCSI_LOG_TIMEOUT(5, + printk("sg_remove_scat: k=%d, a=0x%p, len=%d, ms=%d\n", k, sclp->address, sclp->length, mem_src)); sg_free(sclp->address, sclp->length, mem_src); sclp->address = NULL; sclp->length = 0; } - sg_free(schp->buffer, schp->sglist_len, schp->mem_src); + sg_free(schp->buffer, schp->sglist_len, schp->buffer_mem_src); } else if 
(schp->buffer) - sg_free(schp->buffer, schp->b_malloc_len, schp->mem_src); - schp->buffer = NULL; - schp->bufflen = 0; - schp->use_sg = 0; - schp->sglist_len = 0; + sg_free(schp->buffer, schp->b_malloc_len, schp->buffer_mem_src); + memset(schp, 0, sizeof(*schp)); +} + +static int sg_read_xfer(Sg_request * srp) +{ + sg_io_hdr_t * hp = &srp->header; + Sg_scatter_hold * schp = &srp->data; + int num_xfer = 0; + int j, k, onum, usglen, ksglen, res, ok; + int iovec_count = (int)hp->iovec_count; + unsigned char * p; + unsigned char * up; + int new_interface = ('\0' == hp->interface_id) ? 0 : 1; + + if ((SG_DXFER_FROM_DEV == hp->dxfer_direction) || + (SG_DXFER_TO_FROM_DEV == hp->dxfer_direction)) { + num_xfer = hp->dxfer_len; + if (schp->bufflen < num_xfer) + num_xfer = schp->bufflen; + } + if ((num_xfer <= 0) || (new_interface && (SG_FLAG_NO_DXFER & hp->flags))) + return 0; + + SCSI_LOG_TIMEOUT(4, + printk("sg_read_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n", + num_xfer, iovec_count, schp->k_use_sg)); + if (iovec_count) { + onum = iovec_count; + if ((k = verify_area(VERIFY_READ, hp->dxferp, + size_sg_iovec * onum))) + return k; + } + else + onum = 1; + + if (0 == schp->k_use_sg) { /* kernel has single buffer */ + if (SG_USER_MEM != schp->buffer_mem_src) { /* else nothing to do */ + + for (j = 0, p = schp->buffer; j < onum; ++j) { + res = sg_u_iovec(hp, iovec_count, j, 0, &usglen, &up); + if (res) return res; + usglen = (num_xfer > usglen) ? 
usglen : num_xfer; + __copy_to_user(up, p, usglen); + p += usglen; + num_xfer -= usglen; + if (num_xfer <= 0) + return 0; + } + } + } + else { /* kernel using scatter gather list */ + struct scatterlist * sclp = (struct scatterlist *)schp->buffer; + char * mem_src_arr = sg_get_sgat_msa(schp); + ksglen = (int)sclp->length; + p = sclp->address; + + for (j = 0, k = 0; j < onum; ++j) { + res = sg_u_iovec(hp, iovec_count, j, 0, &usglen, &up); + if (res) return res; + + for (; (k < schp->k_use_sg) && p; + ++k, ++sclp, ksglen = (int)sclp->length, p = sclp->address) { + ok = (SG_USER_MEM != mem_src_arr[k]); + if (usglen <= 0) + break; + if (ksglen > usglen) { + if (usglen >= num_xfer) { + if (ok) __copy_to_user(up, p, num_xfer); + return 0; + } + if (ok) __copy_to_user(up, p, usglen); + p += usglen; + ksglen -= usglen; + break; + } + else { + if (ksglen >= num_xfer) { + if (ok) __copy_to_user(up, p, num_xfer); + return 0; + } + if (ok) __copy_to_user(up, p, ksglen); + up += ksglen; + usglen -= ksglen; + } + } + } + } + return 0; } -static void sg_read_xfer(Sg_scatter_hold * schp, char * outp, - int num_read_xfer) +static void sg_read_oxfer(Sg_request * srp, char * outp, int num_read_xfer) { - SCSI_LOG_TIMEOUT(4, printk("sg_read_xfer: num_read_xfer=%d\n", - num_read_xfer)); + Sg_scatter_hold * schp = &srp->data; + + SCSI_LOG_TIMEOUT(4, printk("sg_read_oxfer: num_read_xfer=%d\n", + num_read_xfer)); if ((! 
outp) || (num_read_xfer <= 0)) return; - if(schp->use_sg > 0) { + if(schp->k_use_sg > 0) { int k, num; struct scatterlist * sclp = (struct scatterlist *)schp->buffer; - for (k = 0; (k < schp->use_sg) && sclp->address; ++k, ++sclp) { + for (k = 0; (k < schp->k_use_sg) && sclp->address; ++k, ++sclp) { num = (int)sclp->length; if (num > num_read_xfer) { __copy_to_user(outp, sclp->address, num_read_xfer); @@ -1343,11 +1774,11 @@ static void sg_build_reserve(Sg_fd * sfp, int req_size) { Sg_scatter_hold * schp = &sfp->reserve; - SCSI_LOG_TIMEOUT(4, printk("sg_build_reserve: req_size=%d\n", req_size)); + SCSI_LOG_TIMEOUT(4, printk("sg_build_reserve: req_size=%d\n", req_size)); do { if (req_size < PAGE_SIZE) req_size = PAGE_SIZE; - if (0 == sg_build_scat(schp, req_size, sfp)) + if (0 == sg_build_indi(schp, sfp, req_size)) return; else sg_remove_scat(schp); @@ -1360,13 +1791,13 @@ static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size) Sg_scatter_hold * req_schp = &srp->data; Sg_scatter_hold * rsv_schp = &sfp->reserve; - SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size)); - if (rsv_schp->use_sg > 0) { + SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size)); + if (rsv_schp->k_use_sg > 0) { int k, num; int rem = size; struct scatterlist * sclp = (struct scatterlist *)rsv_schp->buffer; - for (k = 0; k < rsv_schp->use_sg; ++k, ++sclp) { + for (k = 0; k < rsv_schp->k_use_sg; ++k, ++sclp) { num = (int)sclp->length; if (rem <= num) { sfp->save_scat_len = num; @@ -1376,23 +1807,23 @@ static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size) else rem -= num; } - if (k < rsv_schp->use_sg) { - req_schp->use_sg = k + 1; /* adjust scatter list length */ + if (k < rsv_schp->k_use_sg) { + req_schp->k_use_sg = k + 1; /* adjust scatter list length */ req_schp->bufflen = size; req_schp->sglist_len = rsv_schp->sglist_len; req_schp->buffer = rsv_schp->buffer; - req_schp->mem_src = rsv_schp->mem_src; + req_schp->buffer_mem_src = 
rsv_schp->buffer_mem_src; req_schp->b_malloc_len = rsv_schp->b_malloc_len; } else - SCSI_LOG_TIMEOUT(1, printk("sg_link_reserve: BAD size\n")); + SCSI_LOG_TIMEOUT(1, printk("sg_link_reserve: BAD size\n")); } else { - req_schp->use_sg = 0; + req_schp->k_use_sg = 0; req_schp->bufflen = size; req_schp->buffer = rsv_schp->buffer; - req_schp->mem_src = rsv_schp->mem_src; - req_schp->use_sg = rsv_schp->use_sg; + req_schp->buffer_mem_src = rsv_schp->buffer_mem_src; + req_schp->k_use_sg = rsv_schp->k_use_sg; req_schp->b_malloc_len = rsv_schp->b_malloc_len; } srp->res_used = 1; @@ -1403,19 +1834,19 @@ static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp) Sg_scatter_hold * req_schp = &srp->data; Sg_scatter_hold * rsv_schp = &sfp->reserve; - SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->use_sg=%d\n", - (int)req_schp->use_sg)); - if (rsv_schp->use_sg > 0) { + SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->k_use_sg=%d\n", + (int)req_schp->k_use_sg)); + if (rsv_schp->k_use_sg > 0) { struct scatterlist * sclp = (struct scatterlist *)rsv_schp->buffer; - if (sfp->save_scat_len > 0) - (sclp + (req_schp->use_sg - 1))->length = + if (sfp->save_scat_len > 0) + (sclp + (req_schp->k_use_sg - 1))->length = (unsigned)sfp->save_scat_len; else SCSI_LOG_TIMEOUT(1, printk( - "sg_unlink_reserve: BAD save_scat_len\n")); + "sg_unlink_reserve: BAD save_scat_len\n")); } - req_schp->use_sg = 0; + req_schp->k_use_sg = 0; req_schp->bufflen = 0; req_schp->buffer = NULL; req_schp->sglist_len = 0; @@ -1428,8 +1859,8 @@ static Sg_request * sg_get_request(const Sg_fd * sfp, int pack_id) Sg_request * resp = NULL; resp = sfp->headrp; - while (resp) { - if ((! resp->my_cmdp) && + while (resp) { /* look for requests that are ready + not SG_IO owned */ + if (resp->done && (! 
resp->sg_io_owned) && ((-1 == pack_id) || (resp->header.pack_id == pack_id))) return resp; resp = resp->nextrp; @@ -1471,15 +1902,21 @@ static Sg_request * sg_add_request(Sg_fd * sfp) resp->parentfp = sfp; resp->nextrp = NULL; resp->res_used = 0; + resp->orphan = 0; + resp->sg_io_owned = 0; + resp->done = 0; memset(&resp->data, 0, sizeof(Sg_scatter_hold)); - memset(&resp->header, 0, sizeof(struct sg_header)); + memset(&resp->header, 0, size_sg_io_hdr); + resp->header.duration = jiffies; resp->my_cmdp = NULL; + resp->data.kiobp = NULL; + resp->data.mapped = 0; } return resp; } /* Return of 1 for found; 0 for not found */ -static int sg_remove_request(Sg_fd * sfp, const Sg_request * srp) +static int sg_remove_request(Sg_fd * sfp, Sg_request * srp) { Sg_request * prev_rp; Sg_request * rp; @@ -1503,30 +1940,23 @@ static int sg_remove_request(Sg_fd * sfp, const Sg_request * srp) return 0; } -static Sg_fd * sg_add_sfp(Sg_device * sdp, int dev, int get_reserved) +static Sg_fd * sg_add_sfp(Sg_device * sdp, int dev) { Sg_fd * sfp; - if (sdp->merge_fd) { - ++sdp->merge_fd; - return sdp->headfp; - } sfp = (Sg_fd *)sg_low_malloc(sizeof(Sg_fd), 0, SG_HEAP_KMAL, 0); - if (sfp) { - memset(sfp, 0, sizeof(Sg_fd)); - sfp->my_mem_src = SG_HEAP_KMAL; - init_waitqueue_head(&sfp->read_wait); - init_waitqueue_head(&sfp->write_wait); - } - else + if (! sfp) return NULL; - + memset(sfp, 0, sizeof(Sg_fd)); + sfp->fd_mem_src = SG_HEAP_KMAL; + init_waitqueue_head(&sfp->read_wait); + sfp->timeout = SG_DEFAULT_TIMEOUT; sfp->force_packid = SG_DEF_FORCE_PACK_ID; sfp->low_dma = (SG_DEF_FORCE_LOW_DMA == 0) ? sdp->device->host->unchecked_isa_dma : 1; sfp->cmd_q = SG_DEF_COMMAND_Q; - sfp->underrun_flag = SG_DEF_UNDERRUN_FLAG; + sfp->keep_orphan = SG_DEF_KEEP_ORPHAN; sfp->parentdp = sdp; if (! 
sdp->headfp) sdp->headfp = sfp; @@ -1537,13 +1967,10 @@ static Sg_fd * sg_add_sfp(Sg_device * sdp, int dev, int get_reserved) pfp->nextfp = sfp; } SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p, m_s=%d\n", - sfp, (int)sfp->my_mem_src)); - if (get_reserved) { - sg_build_reserve(sfp, SG_DEF_RESERVED_SIZE); - sg_big_buff = sfp->reserve.bufflen; /* sysctl shows most recent size */ - SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, use_sg=%d\n", - sfp->reserve.bufflen, sfp->reserve.use_sg)); - } + sfp, (int)sfp->fd_mem_src)); + sg_build_reserve(sfp, sg_big_buff); + SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n", + sfp->reserve.bufflen, sfp->reserve.k_use_sg)); return sfp; } @@ -1554,17 +1981,13 @@ static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp) int dirty = 0; int res = 0; - if (sdp->merge_fd) { - if (--sdp->merge_fd) - return 0; /* if merge_fd then dec merge_fd counter */ - } srp = sfp->headrp; if (srp) { -/* Need to stop sg_command_done() playing with this list during this loop */ +/* Need to stop sg_cmd_done_bh() playing with this list during this loop */ while (srp) { tsrp = srp->nextrp; - if (! 
srp->my_cmdp) - sg_finish_rem_req(srp, NULL, 0); + if (srp->done) + sg_finish_rem_req(srp); else ++dirty; srp = tsrp; @@ -1586,13 +2009,13 @@ static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp) } } if (sfp->reserve.bufflen > 0) { -SCSI_LOG_TIMEOUT(6, printk("sg_remove_sfp: bufflen=%d, use_sg=%d\n", - (int)sfp->reserve.bufflen, (int)sfp->reserve.use_sg)); +SCSI_LOG_TIMEOUT(6, printk("sg_remove_sfp: bufflen=%d, k_use_sg=%d\n", + (int)sfp->reserve.bufflen, (int)sfp->reserve.k_use_sg)); sg_remove_scat(&sfp->reserve); } sfp->parentdp = NULL; - SCSI_LOG_TIMEOUT(6, printk("sg_remove_sfp: sfp=0x%p\n", sfp)); - sg_low_free((char *)sfp, sizeof(Sg_fd), sfp->my_mem_src); + SCSI_LOG_TIMEOUT(6, printk("sg_remove_sfp: sfp=0x%p\n", sfp)); + sg_low_free((char *)sfp, sizeof(Sg_fd), sfp->fd_mem_src); res = 1; } else { @@ -1615,6 +2038,18 @@ static int sg_res_in_use(const Sg_fd * sfp) return 0; } +static int sg_dio_in_use(const Sg_fd * sfp) +{ + const Sg_request * srp = sfp->headrp; + + while (srp) { + if ((! srp->done) && srp->data.kiobp) + return 1; + srp = srp->nextrp; + } + return 0; +} + /* If retSzp==NULL want exact size or fail */ /* sg_low_malloc() should always be called from a process context allowing GFP_KERNEL to be used instead of GFP_ATOMIC */ @@ -1630,9 +2065,6 @@ static char * sg_low_malloc(int rqSz, int lowDma, int mem_src, int * retSzp) /* Seen kmalloc(..,GFP_KERNEL) hang for 40 secs! 
*/ resp = kmalloc(rqSz, page_mask); if (resp && retSzp) *retSzp = rqSz; -#ifdef SG_DEBUG - if (resp) ++sg_num_kmal; -#endif return resp; } if (SG_HEAP_POOL == mem_src) { @@ -1651,9 +2083,6 @@ static char * sg_low_malloc(int rqSz, int lowDma, int mem_src, int * retSzp) if (resp) { if (retSzp) *retSzp = rqSz; sg_pool_secs_avail -= num_sect; -#ifdef SG_DEBUG - ++sg_num_pool; -#endif return resp; } } @@ -1678,16 +2107,13 @@ static char * sg_low_malloc(int rqSz, int lowDma, int mem_src, int * retSzp) resSz = a_size; } if (retSzp) *retSzp = resSz; -#ifdef SG_DEBUG - if (resp) ++sg_num_page; -#endif } else printk("sg_low_malloc: bad mem_src=%d, rqSz=%df\n", mem_src, rqSz); return resp; } -static char * sg_malloc(const Sg_fd * sfp, int size, int * retSzp, +static char * sg_malloc(const Sg_fd * sfp, int size, int * retSzp, int * mem_srcp) { char * resp = NULL; @@ -1699,7 +2125,7 @@ static char * sg_malloc(const Sg_fd * sfp, int size, int * retSzp, int low_dma = sfp->low_dma; int l_ms = -1; /* invalid value */ - switch (*mem_srcp) + switch (*mem_srcp) { case SG_HEAP_PAGE: l_ms = (size < PAGE_SIZE) ? SG_HEAP_POOL : SG_HEAP_PAGE; @@ -1732,45 +2158,54 @@ static char * sg_malloc(const Sg_fd * sfp, int size, int * retSzp, } if (resp) *mem_srcp = l_ms; } - SCSI_LOG_TIMEOUT(6, printk("sg_malloc: size=%d, ms=%d, ret=0x%p\n", + SCSI_LOG_TIMEOUT(6, printk("sg_malloc: size=%d, ms=%d, ret=0x%p\n", size, *mem_srcp, resp)); return resp; } static void sg_low_free(char * buff, int size, int mem_src) { - if (! buff) - return; - if (SG_HEAP_POOL == mem_src) { - int num_sect = size / SG_SECTOR_SZ; - scsi_free(buff, size); - sg_pool_secs_avail += num_sect; - } - else if (SG_HEAP_KMAL == mem_src) - kfree(buff); /* size not used */ - else if (SG_HEAP_PAGE == mem_src) { - int order, a_size; - - for (order = 0, a_size = PAGE_SIZE; - a_size < size; order++, a_size <<= 1) - ; - free_pages((unsigned long)buff, order); - } - else - printk("sg_low_free: bad mem_src=%d, buff=0x%p, rqSz=%df\n", + if (! 
buff) return; + switch (mem_src) { + case SG_HEAP_POOL: + { + int num_sect = size / SG_SECTOR_SZ; + + scsi_free(buff, size); + sg_pool_secs_avail += num_sect; + } + break; + case SG_HEAP_KMAL: + kfree(buff); /* size not used */ + break; + case SG_HEAP_PAGE: + { + int order, a_size; + for (order = 0, a_size = PAGE_SIZE; + a_size < size; order++, a_size <<= 1) + ; + free_pages((unsigned long)buff, order); + } + break; + case SG_USER_MEM: + break; /* nothing to do */ + default: + printk("sg_low_free: bad mem_src=%d, buff=0x%p, rqSz=%df\n", mem_src, buff, size); + break; + } } static void sg_free(char * buff, int size, int mem_src) { - SCSI_LOG_TIMEOUT(6, + SCSI_LOG_TIMEOUT(6, printk("sg_free: buff=0x%p, size=%d\n", buff, size)); if ((! buff) || (size <= 0)) ; else sg_low_free(buff, size, mem_src); } - + static void sg_clr_scpnt(Scsi_Cmnd * SCpnt) { SCpnt->use_sg = 0; @@ -1781,3 +2216,391 @@ static void sg_clr_scpnt(Scsi_Cmnd * SCpnt) SCpnt->request.rq_dev = MKDEV(0, 0); /* "sg" _disowns_ command blk */ } +static int sg_ms_to_jif(unsigned int msecs) +{ + if ((UINT_MAX / 2U) < msecs) + return INT_MAX; /* special case, set largest possible */ + else + return ((int)msecs < (INT_MAX / 1000)) ? (((int)msecs * HZ) / 1000) + : (((int)msecs / 1000) * HZ); +} + +static unsigned sg_jif_to_ms(int jifs) +{ + if (jifs <= 0) + return 0U; + else { + unsigned int j = (unsigned int)jifs; + return (j < (UINT_MAX / 1000)) ? 
((j * 1000) / HZ) : ((j / HZ) * 1000); + } +} + +static unsigned char allow_ops[] = {TEST_UNIT_READY, INQUIRY, +READ_CAPACITY, READ_BUFFER, READ_6, READ_10, READ_12}; + +static int sg_allow_access(unsigned char opcode, char dev_type) +{ + int k; + + if (TYPE_SCANNER == dev_type) /* TYPE_ROM maybe burner */ + return 1; + for (k = 0; k < sizeof(allow_ops); ++k) { + if (opcode == allow_ops[k]) + return 1; + } + return 0; +} + + +static int sg_last_dev() +{ + int k; + for (k = sg_template.dev_max - 1; k >= 0; --k) { + if (sg_dev_arr[k].device) + return k + 1; + } + return 0; /* origin 1 */ +} + +#ifdef CONFIG_PROC_FS + +static struct proc_dir_entry * sg_proc_sgp = NULL; + +static const char * sg_proc_sg_dirname = "sg"; +static const char * sg_proc_leaf_names[] = {"def_reserved_size", "debug", + "devices", "device_hdr", "device_strs", + "hosts", "host_hdr", "host_strs", "version"}; + +static int sg_proc_dressz_read(char * buffer, char ** start, off_t offset, + int size, int * eof, void * data); +static int sg_proc_dressz_info(char * buffer, int * len, off_t * begin, + off_t offset, int size); +static int sg_proc_dressz_write(struct file * filp, const char * buffer, + unsigned long count, void * data); +static int sg_proc_debug_read(char * buffer, char ** start, off_t offset, + int size, int * eof, void * data); +static int sg_proc_debug_info(char * buffer, int * len, off_t * begin, + off_t offset, int size); +static int sg_proc_dev_read(char * buffer, char ** start, off_t offset, + int size, int * eof, void * data); +static int sg_proc_dev_info(char * buffer, int * len, off_t * begin, + off_t offset, int size); +static int sg_proc_devhdr_read(char * buffer, char ** start, off_t offset, + int size, int * eof, void * data); +static int sg_proc_devhdr_info(char * buffer, int * len, off_t * begin, + off_t offset, int size); +static int sg_proc_devstrs_read(char * buffer, char ** start, off_t offset, + int size, int * eof, void * data); +static int sg_proc_devstrs_info(char 
* buffer, int * len, off_t * begin, + off_t offset, int size); +static int sg_proc_host_read(char * buffer, char ** start, off_t offset, + int size, int * eof, void * data); +static int sg_proc_host_info(char * buffer, int * len, off_t * begin, + off_t offset, int size); +static int sg_proc_hosthdr_read(char * buffer, char ** start, off_t offset, + int size, int * eof, void * data); +static int sg_proc_hosthdr_info(char * buffer, int * len, off_t * begin, + off_t offset, int size); +static int sg_proc_hoststrs_read(char * buffer, char ** start, off_t offset, + int size, int * eof, void * data); +static int sg_proc_hoststrs_info(char * buffer, int * len, off_t * begin, + off_t offset, int size); +static int sg_proc_version_read(char * buffer, char ** start, off_t offset, + int size, int * eof, void * data); +static int sg_proc_version_info(char * buffer, int * len, off_t * begin, + off_t offset, int size); +static read_proc_t * sg_proc_leaf_reads[] = { + sg_proc_dressz_read, sg_proc_debug_read, + sg_proc_dev_read, sg_proc_devhdr_read, sg_proc_devstrs_read, + sg_proc_host_read, sg_proc_hosthdr_read, sg_proc_hoststrs_read, + sg_proc_version_read}; +static write_proc_t * sg_proc_leaf_writes[] = { + sg_proc_dressz_write, 0, 0, 0, 0, 0, 0, 0, 0}; + +#define PRINT_PROC(fmt,args...) \ + do { \ + *len += sprintf(buffer + *len, fmt, ##args); \ + if (*begin + *len > offset + size) \ + return 0; \ + if (*begin + *len < offset) { \ + *begin += *len; \ + *len = 0; \ + } \ + } while(0) + +#define SG_PROC_READ_FN(infofp) \ + do { \ + int len = 0; \ + off_t begin = 0; \ + *eof = infofp(buffer, &len, &begin, offset, size); \ + if (offset >= (begin + len)) \ + return 0; \ + *start = buffer + ((begin > offset) ? \ + (begin - offset) : (offset - begin)); \ + return (size < (begin + len - offset)) ? 
\ + size : begin + len - offset; \ + } while(0) + + +static int sg_proc_init() +{ + int k, mask; + int leaves = sizeof(sg_proc_leaf_names) / sizeof(sg_proc_leaf_names[0]); + struct proc_dir_entry * pdep; + + if (! proc_scsi) + return 1; + sg_proc_sgp = create_proc_entry(sg_proc_sg_dirname, + S_IFDIR | S_IRUGO | S_IXUGO, proc_scsi); + if (! sg_proc_sgp) + return 1; + for (k = 0; k < leaves; ++k) { + mask = sg_proc_leaf_writes[k] ? S_IRUGO | S_IWUSR : S_IRUGO; + pdep = create_proc_entry(sg_proc_leaf_names[k], mask, sg_proc_sgp); + if (pdep) { + pdep->read_proc = sg_proc_leaf_reads[k]; + if (sg_proc_leaf_writes[k]) + pdep->write_proc = sg_proc_leaf_writes[k]; + } + } + return 0; +} + +static void sg_proc_cleanup() +{ + int k; + int leaves = sizeof(sg_proc_leaf_names) / sizeof(sg_proc_leaf_names[0]); + + if ((! proc_scsi) || (! sg_proc_sgp)) + return; + for (k = 0; k < leaves; ++k) + remove_proc_entry(sg_proc_leaf_names[k], sg_proc_sgp); + remove_proc_entry(sg_proc_sg_dirname, proc_scsi); +} + +static int sg_proc_dressz_read(char * buffer, char ** start, off_t offset, + int size, int * eof, void * data) +{ SG_PROC_READ_FN(sg_proc_dressz_info); } + +static int sg_proc_dressz_info(char * buffer, int * len, off_t * begin, + off_t offset, int size) +{ + PRINT_PROC("%d\n", sg_big_buff); + return 1; +} + +static int sg_proc_dressz_write(struct file * filp, const char * buffer, + unsigned long count, void * data) +{ + int num; + unsigned long k = ULONG_MAX; + char buff[11]; + + if (! capable(CAP_SYS_ADMIN)) + return -EACCES; + num = (count < 10) ? 
count : 10; + copy_from_user(buff, buffer, num); + buff[count] = '\0'; + k = simple_strtoul(buff, 0, 10); + if (k <= 1048576) { + sg_big_buff = k; + return count; + } + return -ERANGE; +} + +static int sg_proc_debug_read(char * buffer, char ** start, off_t offset, + int size, int * eof, void * data) +{ SG_PROC_READ_FN(sg_proc_debug_info); } + +static int sg_proc_debug_info(char * buffer, int * len, off_t * begin, + off_t offset, int size) +{ + const Sg_device * sdp = sg_dev_arr; + const sg_io_hdr_t * hp; + int j, max_dev; + + if (NULL == sg_dev_arr) { + PRINT_PROC("sg_dev_arr NULL, death is imminent\n"); + return 1; + } + max_dev = sg_last_dev(); + PRINT_PROC("dev_max=%d max_active_device=%d (origin 1)\n", + sg_template.dev_max, max_dev); + PRINT_PROC(" scsi_dma_free_sectors=%u sg_pool_secs_aval=%d " + "def_reserved_size=%d\n", + scsi_dma_free_sectors, sg_pool_secs_avail, sg_big_buff); + max_dev = sg_last_dev(); + for (j = 0; j < max_dev; ++j, ++sdp) { + if (sdp) { + Sg_fd * fp; + Sg_request * srp; + struct scsi_device * scsidp; + int dev, k, blen, usg, crep; + + if (! (scsidp = sdp->device)) { + PRINT_PROC("device %d detached ??\n", j); + continue; + } + dev = MINOR(sdp->i_rdev); + crep = 'a' + dev; + + PRINT_PROC(" >>> device=%d(sg%c) ", dev, crep > 126 ? '?' 
: crep); + PRINT_PROC("scsi%d chan=%d id=%d lun=%d em=%d sg_tablesize=%d" + " excl=%d\n", scsidp->host->host_no, scsidp->channel, + scsidp->id, scsidp->lun, scsidp->host->hostt->emulated, + sdp->sg_tablesize, sdp->exclude); + fp = sdp->headfp; + for (k = 1; fp; fp = fp->nextfp, ++k) { + PRINT_PROC(" FD(%d): timeout=%d bufflen=%d " + "(res)sgat=%d low_dma=%d\n", + k, fp->timeout, fp->reserve.bufflen, + (int)fp->reserve.k_use_sg, (int)fp->low_dma); + PRINT_PROC(" cmd_q=%d f_packid=%d k_orphan=%d closed=%d\n", + (int)fp->cmd_q, (int)fp->force_packid, + (int)fp->keep_orphan, (int)fp->closed); + srp = fp->headrp; + if (NULL == srp) + PRINT_PROC(" No requests active\n"); + while (srp) { + hp = &srp->header; +/* stop indenting so far ... */ + PRINT_PROC(srp->res_used ? " reserved_buff>> " : + ((SG_INFO_DIRECT_IO_MASK & hp->info) ? " dio>> " : " ")); + blen = srp->my_cmdp ? srp->my_cmdp->bufflen : srp->data.bufflen; + usg = srp->my_cmdp ? srp->my_cmdp->use_sg : srp->data.k_use_sg; + PRINT_PROC(srp->done ? "rcv: id=%d" : (srp->my_cmdp ? "act: id=%d" : + "prior: id=%d"), srp->header.pack_id); + if (! srp->res_used) PRINT_PROC(" blen=%d", blen); + if (srp->done) + PRINT_PROC(" dur=%d", sg_jif_to_ms(hp->duration)); + else + PRINT_PROC(" t_o/elap=%d/%d", ((hp->interface_id == '\0') ? + sg_jif_to_ms(fp->timeout) : hp->timeout), + sg_jif_to_ms(hp->duration ? (jiffies - hp->duration) : 0)); + PRINT_PROC(" sgat=%d op=0x%02x\n", usg, (int)srp->data.cmd_opcode); + srp = srp->nextrp; +/* reset indenting */ + } + } + } + } + return 1; +} + +static int sg_proc_dev_read(char * buffer, char ** start, off_t offset, + int size, int * eof, void * data) +{ SG_PROC_READ_FN(sg_proc_dev_info); } + +static int sg_proc_dev_info(char * buffer, int * len, off_t * begin, + off_t offset, int size) +{ + const Sg_device * sdp = sg_dev_arr; + int j, max_dev; + struct scsi_device * scsidp; + + max_dev = sg_last_dev(); + for (j = 0; j < max_dev; ++j, ++sdp) { + if (sdp) { + if (! 
(scsidp = sdp->device)) { + PRINT_PROC("-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n"); + continue; + } + PRINT_PROC("%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n", + scsidp->host->host_no, scsidp->channel, scsidp->id, + scsidp->lun, (int)scsidp->type, (int)scsidp->disconnect, + (int)scsidp->queue_depth, (int)scsidp->tagged_queue); + } + } + return 1; +} + +static int sg_proc_devhdr_read(char * buffer, char ** start, off_t offset, + int size, int * eof, void * data) +{ SG_PROC_READ_FN(sg_proc_devhdr_info); } + +static int sg_proc_devhdr_info(char * buffer, int * len, off_t * begin, + off_t offset, int size) +{ + PRINT_PROC("host\tchan\tid\tlun\ttype\tdiscon\tqdepth\ttq\n"); + return 1; +} + +static int sg_proc_devstrs_read(char * buffer, char ** start, off_t offset, + int size, int * eof, void * data) +{ SG_PROC_READ_FN(sg_proc_devstrs_info); } + +static int sg_proc_devstrs_info(char * buffer, int * len, off_t * begin, + off_t offset, int size) +{ + const Sg_device * sdp = sg_dev_arr; + int j, max_dev; + struct scsi_device * scsidp; + + max_dev = sg_last_dev(); + for (j = 0; j < max_dev; ++j, ++sdp) { + if (sdp) { + if ((scsidp = sdp->device)) + PRINT_PROC("%8.8s\t%16.16s\t%4.4s\n", + scsidp->vendor, scsidp->model, scsidp->rev); + else + PRINT_PROC("\n"); + } + } + return 1; +} + +static int sg_proc_host_read(char * buffer, char ** start, off_t offset, + int size, int * eof, void * data) +{ SG_PROC_READ_FN(sg_proc_host_info); } + +static int sg_proc_host_info(char * buffer, int * len, off_t * begin, + off_t offset, int size) +{ + struct Scsi_Host * shp; + + for (shp = scsi_hostlist; shp; shp = shp->next) + PRINT_PROC("%u\t%hu\t%hd\t%hu\t%d\t%d\n", + shp->unique_id, shp->host_busy, shp->cmd_per_lun, + shp->sg_tablesize, (int)shp->unchecked_isa_dma, + (int)shp->hostt->emulated); + return 1; +} + +static int sg_proc_hosthdr_read(char * buffer, char ** start, off_t offset, + int size, int * eof, void * data) +{ SG_PROC_READ_FN(sg_proc_hosthdr_info); } + +static int sg_proc_hosthdr_info(char * 
buffer, int * len, off_t * begin, + off_t offset, int size) +{ + PRINT_PROC("uid\tbusy\tcpl\tscatg\tisa\temul\n"); + return 1; +} + +static int sg_proc_hoststrs_read(char * buffer, char ** start, off_t offset, + int size, int * eof, void * data) +{ SG_PROC_READ_FN(sg_proc_hoststrs_info); } + +static int sg_proc_hoststrs_info(char * buffer, int * len, off_t * begin, + off_t offset, int size) +{ + struct Scsi_Host * shp; + + for (shp = scsi_hostlist; shp; shp = shp->next) + PRINT_PROC("%s\n", shp->hostt->info ? shp->hostt->info(shp) : + (shp->hostt->name ? shp->hostt->name : "")); + return 1; +} + +static int sg_proc_version_read(char * buffer, char ** start, off_t offset, + int size, int * eof, void * data) +{ SG_PROC_READ_FN(sg_proc_version_info); } + +static int sg_proc_version_info(char * buffer, int * len, off_t * begin, + off_t offset, int size) +{ + PRINT_PROC("%d\t%s\n", sg_version_num, sg_version_str); + return 1; +} +#endif /* CONFIG_PROC_FS */ diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index e683405b7fb5..365c906607fd 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -703,17 +703,17 @@ static int sr_init() return 0; sr_template.dev_max = sr_template.dev_noticed + SR_EXTRA_DEVS; - scsi_CDs = (Scsi_CD *) scsi_init_malloc(sr_template.dev_max * sizeof(Scsi_CD), GFP_ATOMIC); + scsi_CDs = (Scsi_CD *) kmalloc(sr_template.dev_max * sizeof(Scsi_CD), GFP_ATOMIC); memset(scsi_CDs, 0, sr_template.dev_max * sizeof(Scsi_CD)); - sr_sizes = (int *) scsi_init_malloc(sr_template.dev_max * sizeof(int), GFP_ATOMIC); + sr_sizes = (int *) kmalloc(sr_template.dev_max * sizeof(int), GFP_ATOMIC); memset(sr_sizes, 0, sr_template.dev_max * sizeof(int)); - sr_blocksizes = (int *) scsi_init_malloc(sr_template.dev_max * - sizeof(int), GFP_ATOMIC); + sr_blocksizes = (int *) kmalloc(sr_template.dev_max * + sizeof(int), GFP_ATOMIC); - sr_hardsizes = (int *) scsi_init_malloc(sr_template.dev_max * - sizeof(int), GFP_ATOMIC); + sr_hardsizes = (int *) 
kmalloc(sr_template.dev_max * + sizeof(int), GFP_ATOMIC); /* * These are good guesses for the time being. */ @@ -831,16 +831,14 @@ void cleanup_module(void) unregister_blkdev(MAJOR_NR, "sr"); sr_registered--; if (scsi_CDs != NULL) { - scsi_init_free((char *) scsi_CDs, - (sr_template.dev_noticed + SR_EXTRA_DEVS) - * sizeof(Scsi_CD)); + kfree((char *) scsi_CDs); - scsi_init_free((char *) sr_sizes, sr_template.dev_max * sizeof(int)); + kfree((char *) sr_sizes); sr_sizes = NULL; - scsi_init_free((char *) sr_blocksizes, sr_template.dev_max * sizeof(int)); + kfree((char *) sr_blocksizes); sr_blocksizes = NULL; - scsi_init_free((char *) sr_hardsizes, sr_template.dev_max * sizeof(int)); + kfree((char *) sr_hardsizes); sr_hardsizes = NULL; } blksize_size[MAJOR_NR] = NULL; diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c index 6f37bc11e3b0..4aa159decc71 100644 --- a/drivers/scsi/u14-34f.c +++ b/drivers/scsi/u14-34f.c @@ -826,7 +826,7 @@ static inline int port_detect \ } else { unsigned long flags; -//FIXME// sh[j]->wish_block = TRUE; + scsi_register_blocked_host(sh[j]); sh[j]->unchecked_isa_dma = TRUE; flags=claim_dma_lock(); @@ -1948,6 +1948,10 @@ int u14_34f_release(struct Scsi_Host *shpnt) { if (sh[j] == NULL) panic("%s: release, invalid Scsi_Host pointer.\n", driver_name); + if( sh[j]->block != NULL ) { + scsi_deregister_blocked_host(sh[j]); + } + for (i = 0; i < sh[j]->can_queue; i++) if ((&HD(j)->cp[i])->sglist) kfree((&HD(j)->cp[i])->sglist); diff --git a/drivers/sound/Makefile b/drivers/sound/Makefile index f0eca7909a26..b9ae751bfb51 100644 --- a/drivers/sound/Makefile +++ b/drivers/sound/Makefile @@ -53,7 +53,7 @@ obj-$(CONFIG_SOUND_CS4232) += cs4232.o ad1848.o obj-$(CONFIG_SOUND_CS4232) += uart401.o obj-$(CONFIG_SOUND_GUS) += gus.o ad1848.o obj-$(CONFIG_SOUND_MAD16) += mad16.o ad1848.o sb.o uart401.o -obj-$(CONFIG_SOUND_VIA82CXXX) += via82cxxx.o sb.o uart401.o ac97.o +obj-$(CONFIG_SOUND_VIA82CXXX) += via82cxxx_audio.o sb.o uart401.o ac97.o 
obj-$(CONFIG_SOUND_MAUI) += maui.o mpu401.o obj-$(CONFIG_SOUND_MPU401) += mpu401.o obj-$(CONFIG_SOUND_MSNDCLAS) += msnd.o msnd_classic.o diff --git a/drivers/sound/sb_card.c b/drivers/sound/sb_card.c index 106ff23e17f6..40511d52bc39 100644 --- a/drivers/sound/sb_card.c +++ b/drivers/sound/sb_card.c @@ -9,20 +9,25 @@ * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL) * Version 2 (June 1991). See the "COPYING" file distributed with this software * for more info. + * + * + * 26th November 1999 - patched to compile without ISA PnP support in the + * kernel. -Daniel Stone (tamriel@ductape.net) + * + * 06-01-2000 Refined and bugfixed ISA PnP support, added + * CMI 8330 support - Alessandro Zummo + * + * + * 04-02-2000 Added Soundblaster AWE 64 PnP support, isapnpjump + * Alessandro Zummo + * */ -/* 26th Novemner 1999 - patched to compile without ISA PnP support in the - kernel. -Daniel Stone (tamriel@ductape.net) */ - #include -#ifdef CONFIG_MCA #include -#endif #include #include -#ifdef CONFIG_ISAPNP /* Patched so it will compile withOUT ISA PnP */ #include -#endif #include "sound_config.h" #include "soundmodule.h" @@ -36,10 +41,8 @@ static int sbmpu = 0; void attach_sb_card(struct address_info *hw_config) { -#if defined(CONFIG_AUDIO) || defined(CONFIG_MIDI) if(!sb_dsp_init(hw_config)) hw_config->slots[0] = -1; -#endif } int probe_sb(struct address_info *hw_config) @@ -91,7 +94,7 @@ int probe_sb(struct address_info *hw_config) hw_config->driver_use_2 = midiaddr[(pos2 >> 3) & 0x3]; */ - printk("SB: Reply MCA SB at slot=%d \ + printk(KERN_INFO "sb: Reply MCA SB at slot=%d \ iobase=0x%x irq=%d lo_dma=%d hi_dma=%d\n", slot+1, hw_config->io_base, hw_config->irq, @@ -99,12 +102,14 @@ iobase=0x%x irq=%d lo_dma=%d hi_dma=%d\n", } else { - printk ("Reply SB Base I/O address disabled\n"); + printk (KERN_INFO "sb: Reply SB Base I/O address disabled\n"); } } } #endif + /* This is useless since is done by sb_dsp_detect - azummo*/ + if 
(check_region(hw_config->io_base, 16)) { printk(KERN_ERR "sb_card: I/O port %x is already in use\n\n", hw_config->io_base); @@ -127,92 +132,403 @@ extern int esstype; /* ESS chip type */ static struct address_info config; static struct address_info config_mpu; +struct pci_dev *sb_dev = NULL, + *wss_dev = NULL, + *jp_dev = NULL, + *mpu_dev = NULL, + *wt_dev = NULL; /* * Note DMA2 of -1 has the right meaning in the SB16 driver as well - * as here. It will cause either an error if it is needed or a fallback - * to the 8bit channel. + * as here. It will cause either an error if it is needed or a fallback + * to the 8bit channel. */ -int mpu_io = 0; -int io = -1; -int irq = -1; -int dma = -1; -int dma16 = -1; /* Set this for modules that need it */ -int type = 0; /* Can set this to a specific card type */ -int mad16 = 0; /* Set mad16=1 to load this as support for mad16 */ -int trix = 0; /* Set trix=1 to load this as support for trix */ -int pas2 = 0; /* Set pas2=1 to load this as support for pas2 */ -int support = 0; /* Set support to load this as a support module */ -int sm_games = 0; /* Mixer - see sb_mixer.c */ -int acer = 0; /* Do acer notebook init */ - -MODULE_PARM(io, "i"); -MODULE_PARM(irq, "i"); -MODULE_PARM(dma, "i"); -MODULE_PARM(dma16, "i"); -MODULE_PARM(mpu_io, "i"); -MODULE_PARM(type, "i"); -MODULE_PARM(mad16, "i"); -MODULE_PARM(support, "i"); -MODULE_PARM(trix, "i"); -MODULE_PARM(pas2, "i"); -MODULE_PARM(sm_games, "i"); -MODULE_PARM(esstype, "i"); +int mpu_io = 0; +int io = -1; +int irq = -1; +int dma = -1; +int dma16 = -1; /* Set this for modules that need it */ +int type = 0; /* Can set this to a specific card type */ +int mad16 = 0; /* Set mad16=1 to load this as support for mad16 */ +int trix = 0; /* Set trix=1 to load this as support for trix */ +int pas2 = 0; /* Set pas2=1 to load this as support for pas2 */ +int support = 0; /* Set support to load this as a support module */ +int sm_games = 0; /* Mixer - see sb_mixer.c */ +int acer = 0; /* Do acer 
notebook init */ + +#ifdef CONFIG_ISAPNP +int isapnp = 1; +int isapnpjump = 0; +#else +int isapnp = 0; +#endif + +MODULE_DESCRIPTION("Soundblaster driver"); + +MODULE_PARM(io, "i"); +MODULE_PARM(irq, "i"); +MODULE_PARM(dma, "i"); +MODULE_PARM(dma16, "i"); +MODULE_PARM(mpu_io, "i"); +MODULE_PARM(type, "i"); +MODULE_PARM(mad16, "i"); +MODULE_PARM(support, "i"); +MODULE_PARM(trix, "i"); +MODULE_PARM(pas2, "i"); +MODULE_PARM(sm_games, "i"); +MODULE_PARM(esstype, "i"); +MODULE_PARM(acer, "i"); + +#ifdef CONFIG_ISAPNP +MODULE_PARM(isapnp, "i"); +MODULE_PARM(isapnpjump, "i"); +MODULE_PARM_DESC(isapnp, "When set to 0, Plug & Play support will be disabled"); +MODULE_PARM_DESC(isapnpjump, "Jumps to a specific slot in the driver's PnP table. Use the source, Luke."); +#endif + +MODULE_PARM_DESC(io, "Soundblaster i/o base address (0x220,0x240,0x260,0x280)"); +MODULE_PARM_DESC(irq, "IRQ (5,7,9,10)"); +MODULE_PARM_DESC(dma, "8-bit DMA channel (0,1,3)"); +MODULE_PARM_DESC(dma16, "16-bit DMA channel (5,6,7)"); +MODULE_PARM_DESC(mpu_io, "Mpu base address"); +MODULE_PARM_DESC(type, "You can set this to specific card type"); +MODULE_PARM_DESC(mad16, "Enable MAD16 support"); +MODULE_PARM_DESC(trix, "Enable Audiotrix support"); +MODULE_PARM_DESC(pas2, "Enable Pas2 support"); +MODULE_PARM_DESC(support, "Set this to load as generic support module"); +MODULE_PARM_DESC(sm_games, "Enable support for Logitech soundman games"); +MODULE_PARM_DESC(esstype, "ESS chip type"); +MODULE_PARM_DESC(acer, "Set this to detect cards in some ACER notebooks"); void *smw_free = NULL; #ifdef CONFIG_ISAPNP -static struct { unsigned short vendor, function; char *name; } + +/* That's useful. 
*/ + +static int check_base(char *devname, char *resname, struct resource *res) +{ + if (check_region(res->start, res->end - res->start)) + { + printk(KERN_ERR "sb: %s %s error, i/o at %#lx already in use\n", devname, resname, res->start); + return 0; + } + + printk(KERN_INFO "sb: %s %s base located at %#lx\n", devname, resname, res->start); + return 1; +} + + +/* Card's specific initialization functions + */ + +static struct pci_dev *sb_init_generic(struct pci_bus *card, struct address_info *hw_config, struct address_info *mpu_config) +{ + if((sb_dev = isapnp_find_dev(card, + card->vendor, + card->device, + NULL))) + { + sb_dev->prepare(sb_dev); + sb_dev->activate(sb_dev); + + if (!sb_dev->resource[0].start) + return(NULL); + + hw_config->io_base = sb_dev->resource[0].start; + hw_config->irq = sb_dev->irq_resource[0].start; + hw_config->dma = sb_dev->dma_resource[0].start; + hw_config->dma2 = sb_dev->dma_resource[1].start; + mpu_config->io_base = sb_dev->resource[1].start; + } + return(sb_dev); +} + +static struct pci_dev *sb_init_ess(struct pci_bus *card, struct address_info *hw_config, struct address_info *mpu_config) +{ + if((sb_dev = isapnp_find_dev(card, + card->vendor, + card->device, + NULL))) + { + sb_dev->prepare(sb_dev); + sb_dev->activate(sb_dev); + + if (!sb_dev->resource[0].start) + return(NULL); + + hw_config->io_base = sb_dev->resource[0].start; + hw_config->irq = sb_dev->irq_resource[0].start; + hw_config->dma = sb_dev->dma_resource[0].start; + hw_config->dma2 = sb_dev->dma_resource[1].start; + mpu_config->io_base = sb_dev->resource[2].start; + } + return(sb_dev); +} + +static struct pci_dev *sb_init_cmi(struct pci_bus *card, struct address_info *hw_config, struct address_info *mpu_config) +{ + /* What a stupid chip... where did they get all those @@@ ?*/ + + printk(KERN_INFO "sb: CMI8330 detected\n"); + + /* Soundblaster compatible logical device. 
*/ + + if((sb_dev = isapnp_find_dev(card, + ISAPNP_VENDOR('@','X','@'), ISAPNP_FUNCTION(0x0001), NULL))) + { +#ifdef CMI8330_DMA0BAD + int dmahack = 0; +#endif + sb_dev->prepare(sb_dev); + + /* This device doesn't work with DMA 0, so we must allocate + it to prevent PnP routines to assign it to the card. + + I know i could have inlined the following lines, but it's cleaner + this way. + */ + +#ifdef CMI8330_DMA0BAD + if(sb_dev->dma_resource[0].start == 0) + { + if(!request_dma(0, "cmi8330 dma hack")) + { + /* DMA was free, we now have it */ + dmahack = 1; + } + } +#endif + + if(sb_dev->activate(sb_dev) >= 0) + { + hw_config->io_base = sb_dev->resource[0].start; + hw_config->irq = sb_dev->irq_resource[0].start; + hw_config->dma = sb_dev->dma_resource[0].start; + hw_config->dma2 = sb_dev->dma_resource[1].start; + + check_base("CMI8330", "sb", &sb_dev->resource[0]); + } + else + printk(KERN_ERR "sb: CMI8330 sb config failed (out of resources?)\n"); + +#ifdef CMI8330_DMA0BAD + if(dmahack) + free_dma(0); +#endif + } + else + printk(KERN_ERR "sb: CMI8330 panic! sb base not found\n"); + + if((mpu_dev = isapnp_find_dev(card, + ISAPNP_VENDOR('@','H','@'), ISAPNP_FUNCTION(0x0001), NULL))) + { + mpu_dev->prepare(mpu_dev); + + /* This disables the interrupt on this resource. Do we need it ? */ + + mpu_dev->irq_resource[0].flags = 0; + + if(mpu_dev->activate(mpu_dev) >= 0) + { + if( check_base("CMI8330", "mpu", &mpu_dev->resource[0]) ) + mpu_config->io_base = mpu_dev->resource[0].start; + } + else + printk(KERN_ERR "sb: CMI8330 mpu config failed (out of resources?)\n"); + } + else + printk(KERN_ERR "sb: CMI8330 panic! mpu not found\n"); + + + /* Gameport. 
*/ + + if((jp_dev = isapnp_find_dev(card, + ISAPNP_VENDOR('@','P','@'), ISAPNP_FUNCTION(0x0001), NULL))) + { + jp_dev->prepare(jp_dev); + + if(jp_dev->activate(jp_dev) >= 0) + { + check_base("CMI8330", "gameport", &jp_dev->resource[0]); + } + else + printk(KERN_ERR "sb: CMI8330 gameport config failed (out of resources?)\n"); + } + else + printk(KERN_ERR "sb: CMI8330 panic! gameport not found\n"); + + + /* OPL3 support */ + +#if defined(CONFIG_SOUND_YM3812) || defined(CONFIG_SOUND_YM3812_MODULE) + if((wss_dev = isapnp_find_dev(card, + ISAPNP_VENDOR('@','@','@'), ISAPNP_FUNCTION(0x0001), NULL))) + { + wss_dev->prepare(wss_dev); + + /* Let's disable IRQ and DMA for WSS device */ + + wss_dev->irq_resource[0].flags = 0; + wss_dev->dma_resource[0].flags = 0; + + if(wss_dev->activate(wss_dev) >= 0) + { + check_base("CMI8330", "opl3", &wss_dev->resource[1]); + } + else + printk(KERN_ERR "sb: CMI8330 opl3 config failed (out of resources?)\n"); + } + else + printk(KERN_ERR "sb: CMI8330 panic! opl3 not found\n"); +#endif + + printk(KERN_INFO "sb: CMI8330 mail reports to Alessandro Zummo \n"); + + return(sb_dev); +} + +static struct pci_dev *sb_init_awe64(struct pci_bus *card, struct address_info *hw_config, struct address_info *mpu_config) +{ + printk(KERN_INFO "sb: SoundBlaster AWE 64 detected\n"); + + /* CTL0042:Audio. 
*/ + + if((sb_dev = isapnp_find_dev(card, + ISAPNP_VENDOR('C','T','L'), ISAPNP_FUNCTION(0x0042), NULL))) + { + sb_dev->prepare(sb_dev); + + if(sb_dev->activate(sb_dev) >= 0) + { + hw_config->io_base = sb_dev->resource[0].start; + hw_config->irq = sb_dev->irq_resource[0].start; + hw_config->dma = sb_dev->dma_resource[0].start; + hw_config->dma2 = sb_dev->dma_resource[1].start; + + mpu_config->io_base = sb_dev->resource[1].start; + + check_base("AWE64", "sb", &sb_dev->resource[0]); + check_base("AWE64", "mpu", &sb_dev->resource[1]); + check_base("AWE64", "opl3", &sb_dev->resource[2]); + } + else + printk(KERN_ERR "sb: AWE64 sb config failed (out of resources?)\n"); + } + else + printk(KERN_ERR "sb: AWE64 panic! sb base not found\n"); + + + /* CTL7002:Game */ + + if((jp_dev = isapnp_find_dev(card, + ISAPNP_VENDOR('C','T','L'), ISAPNP_FUNCTION(0x7002), NULL))) + { + jp_dev->prepare(jp_dev); + + if(jp_dev->activate(jp_dev) >= 0) + { + check_base("AWE64", "gameport", &jp_dev->resource[0]); + } + else + printk(KERN_ERR "sb: AWE64 gameport config failed (out of resources?)\n"); + } + else + printk(KERN_ERR "sb: AWE64 panic! gameport not found\n"); + + + /* CTL0022:WaveTable */ + + if((wt_dev = isapnp_find_dev(card, + ISAPNP_VENDOR('C','T','L'), ISAPNP_FUNCTION(0x0022), NULL))) + { + wt_dev->prepare(wt_dev); + + if(wt_dev->activate(wt_dev) >= 0) + { + check_base("AWE64", "wavetable", &wt_dev->resource[0]); + check_base("AWE64", "wavetable", &wt_dev->resource[1]); + check_base("AWE64", "wavetable", &wt_dev->resource[2]); + } + else + printk(KERN_ERR "sb: AWE64 wavetable config failed (out of resources?)\n"); + } + else + printk(KERN_ERR "sb: AWE64 panic! 
wavetable not found\n"); + + printk(KERN_INFO "sb: AWE64 mail reports to Alessandro Zummo \n"); + + return(sb_dev); +} + + +static struct { unsigned short vendor, function; struct pci_dev * (*initfunc)(struct pci_bus *, struct address_info *, struct address_info *); char *name; } isapnp_sb_list[] __initdata = { - {ISAPNP_VENDOR('C','T','L'), ISAPNP_FUNCTION(0x0001), "Sound Blaster 16" }, - {ISAPNP_VENDOR('C','T','L'), ISAPNP_FUNCTION(0x0031), "Sound Blaster 16" }, - {ISAPNP_VENDOR('C','T','L'), ISAPNP_FUNCTION(0x0041), "Sound Blaster 16" }, - {ISAPNP_VENDOR('C','T','L'), ISAPNP_FUNCTION(0x0042), "Sound Blaster 16" }, - {ISAPNP_VENDOR('C','T','L'), ISAPNP_FUNCTION(0x0043), "Sound Blaster 16" }, - {ISAPNP_VENDOR('C','T','L'), ISAPNP_FUNCTION(0x0044), "Sound Blaster 16" }, - {ISAPNP_VENDOR('C','T','L'), ISAPNP_FUNCTION(0x0045), "Sound Blaster 16" }, - {ISAPNP_VENDOR('E','S','S'), ISAPNP_FUNCTION(0x1868), "ESS 1868" }, - {ISAPNP_VENDOR('E','S','S'), ISAPNP_FUNCTION(0x8611), "ESS 1868" }, - {ISAPNP_VENDOR('E','S','S'), ISAPNP_FUNCTION(0x1869), "ESS 1869" }, - {ISAPNP_VENDOR('E','S','S'), ISAPNP_FUNCTION(0x1878), "ESS 1878" }, - {ISAPNP_VENDOR('E','S','S'), ISAPNP_FUNCTION(0x1879), "ESS 1879" }, - {0,} + {ISAPNP_VENDOR('C','T','L'), ISAPNP_FUNCTION(0x0001), &sb_init_generic, "Sound Blaster 16" }, + {ISAPNP_VENDOR('C','T','L'), ISAPNP_FUNCTION(0x0031), &sb_init_generic, "Sound Blaster 16" }, + {ISAPNP_VENDOR('C','T','L'), ISAPNP_FUNCTION(0x0041), &sb_init_generic, "Sound Blaster 16" }, + {ISAPNP_VENDOR('C','T','L'), ISAPNP_FUNCTION(0x0042), &sb_init_generic, "Sound Blaster 16" }, + {ISAPNP_VENDOR('C','T','L'), ISAPNP_FUNCTION(0x0043), &sb_init_generic, "Sound Blaster 16" }, + {ISAPNP_VENDOR('C','T','L'), ISAPNP_FUNCTION(0x0044), &sb_init_generic, "Sound Blaster 16" }, + {ISAPNP_VENDOR('C','T','L'), ISAPNP_FUNCTION(0x0045), &sb_init_generic, "Sound Blaster 16" }, + {ISAPNP_VENDOR('C','T','L'), ISAPNP_FUNCTION(0x009D), &sb_init_awe64, "Sound Blaster AWE 64" }, + 
{ISAPNP_VENDOR('E','S','S'), ISAPNP_FUNCTION(0x1868), &sb_init_ess, "ESS 1868" }, + {ISAPNP_VENDOR('E','S','S'), ISAPNP_FUNCTION(0x8611), &sb_init_ess, "ESS 1868" }, + {ISAPNP_VENDOR('E','S','S'), ISAPNP_FUNCTION(0x1869), &sb_init_ess, "ESS 1869" }, + {ISAPNP_VENDOR('E','S','S'), ISAPNP_FUNCTION(0x1878), &sb_init_ess, "ESS 1878" }, + {ISAPNP_VENDOR('E','S','S'), ISAPNP_FUNCTION(0x1879), &sb_init_ess, "ESS 1879" }, + {ISAPNP_VENDOR('C','M','I'), ISAPNP_FUNCTION(0x0001), &sb_init_cmi, "CMI 8330 SoundPRO" }, + {0} }; +/* Actually this routine will detect and configure only the first card with successful + initalization. isapnpjump could be used to jump to a specific entry. + Please always add entries at the end of the array. + Should this be fixed? - azummo +*/ static int __init sb_probe_isapnp(struct address_info *hw_config, struct address_info *mpu_config) { + int i; + + /* Count entries in isapnp_sb_list */ + for (i = 0; isapnp_sb_list[i].vendor != 0; i++); + + /* Check and adjust isapnpjump */ + if( isapnpjump < 0 || isapnpjump > ( i - 1 ) ) + { + printk(KERN_ERR "sb: Valid range for isapnpjump is 0-%d. 
Adjusted to 0.\n", i-1); + isapnpjump = 0; + } - for (i = 0; isapnp_sb_list[i].vendor != 0; i++) { - struct pci_dev *idev = NULL; + for (i = isapnpjump; isapnp_sb_list[i].vendor != 0; i++) { + struct pci_bus *card = NULL; - while ((idev = isapnp_find_dev(NULL, + while ((card = isapnp_find_card( isapnp_sb_list[i].vendor, isapnp_sb_list[i].function, - idev))) { - idev->prepare(idev); - idev->activate(idev); - if (!idev->resource[0].start || check_region(idev->resource[0].start,16)) - continue; - hw_config->io_base = idev->resource[0].start; - hw_config->irq = idev->irq_resource[0].start; - hw_config->dma = idev->dma_resource[0].start; - hw_config->dma2 = idev->dma_resource[1].start; -#ifdef CONFIG_MIDI - if (isapnp_sb_list[i].vendor == ISAPNP_VENDOR('E','S','S')) - mpu_config->io_base = idev->resource[2].start; - else - mpu_config->io_base = idev->resource[1].start; -#endif - break; + card))) { + + /* You missed the init func? That's bad. */ + + if(isapnp_sb_list[i].initfunc) + { + struct pci_dev *idev = NULL; + + /* Initialize this baby. */ + + if((idev = isapnp_sb_list[i].initfunc(card, hw_config, mpu_config))) + { + /* We got it. */ + + printk(KERN_INFO "sb: ISAPnP reports %s at i/o %#x, irq %d, dma %d, %d\n", + isapnp_sb_list[i].name, + hw_config->io_base, hw_config->irq, hw_config->dma, + hw_config->dma2); + return 0; + } + } } - if (!idev) - continue; - printk(KERN_INFO "ISAPnP reports %s at i/o %#x, irq %d, dma %d, %d\n", - isapnp_sb_list[i].name, - hw_config->io_base, hw_config->irq, hw_config->dma, - hw_config->dma2); - return 0; } return -ENODEV; } @@ -224,38 +540,52 @@ int init_module(void) if (mad16 == 0 && trix == 0 && pas2 == 0 && support == 0) { + /* Please remember that even with CONFIG_ISAPNP defined one should still be + able to disable PNP support for this single driver! 
+ */ + #ifdef CONFIG_ISAPNP - if (sb_probe_isapnp(&config, &config_mpu)<0) + if (isapnp) + { + if(sb_probe_isapnp(&config, &config_mpu) < 0 ) + { + printk(KERN_ERR "sb_card: No ISAPnP cards found\n"); + return -EINVAL; + } + } + else { -#endif +#endif if (io == -1 || dma == -1 || irq == -1) { printk(KERN_ERR "sb_card: I/O, IRQ, and DMA are mandatory\n"); return -EINVAL; } - config.io_base = io; - config.irq = irq; - config.dma = dma; - config.dma2 = dma16; - config.card_subtype = type; -#ifdef CONFIG_MIDI - config_mpu.io_base = mpu_io; -#endif + + config.io_base = io; + config.irq = irq; + config.dma = dma; + config.dma2 = dma16; #ifdef CONFIG_ISAPNP } #endif + + /* If this is not before the #ifdef line, there's a reason... */ + config.card_subtype = type; + if (!probe_sb(&config)) return -ENODEV; attach_sb_card(&config); if(config.slots[0]==-1) return -ENODEV; -#ifdef CONFIG_MIDI + + if (isapnp == 0) + config_mpu.io_base = mpu_io; if (probe_sbmpu(&config_mpu)) sbmpu = 1; if (sbmpu) attach_sbmpu(&config_mpu); -#endif } SOUND_LOCK; return 0; @@ -270,6 +600,12 @@ void cleanup_module(void) if (sbmpu) unload_sbmpu(&config_mpu); SOUND_LOCK_END; + + if(sb_dev) sb_dev->deactivate(sb_dev); + if(jp_dev) jp_dev->deactivate(jp_dev); + if(wt_dev) wt_dev->deactivate(wt_dev); + if(mpu_dev) mpu_dev->deactivate(mpu_dev); + if(wss_dev) wss_dev->deactivate(wss_dev); } #else diff --git a/drivers/sound/trident.c b/drivers/sound/trident.c index 1860b4505f45..bbe991eb2502 100644 --- a/drivers/sound/trident.c +++ b/drivers/sound/trident.c @@ -29,6 +29,8 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
* * History + * v0.11.1 Jan 28 2000 Ollie Lho + * small bug in setting sample rate for 4d-nx (reported by Aaron) * v0.11 Jan 27 2000 Ollie Lho * DMA bug, scheduler latency, second try * v0.10 Jan 24 2000 Ollie Lho @@ -85,7 +87,7 @@ #undef DEBUG -#define DRIVER_VERSION "0.11" +#define DRIVER_VERSION "0.11.1" /* magic numbers to protect our data structures */ #define TRIDENT_CARD_MAGIC 0x5072696E /* "Prin" */ @@ -522,7 +524,7 @@ static int trident_write_voice_regs(struct trident_state *state, unsigned int re break; case PCI_DEVICE_ID_TRIDENT_4DWAVE_NX: data[0] = (channel->delta << 24); - data[2] = ((channel->delta << 24) & 0xff000000) | (channel->eso & 0x00ffffff); + data[2] = ((channel->delta << 16) & 0xff000000) | (channel->eso & 0x00ffffff); data[3] = channel->fm_vol & 0xffff; break; default: diff --git a/drivers/sound/via82cxxx.c b/drivers/sound/via82cxxx_audio.c similarity index 99% rename from drivers/sound/via82cxxx.c rename to drivers/sound/via82cxxx_audio.c index cf0fce7c36a2..cee2e7b0ac93 100644 --- a/drivers/sound/via82cxxx.c +++ b/drivers/sound/via82cxxx_audio.c @@ -13,7 +13,7 @@ */ -#define VIA_VERSION "1.1.2" +#define VIA_VERSION "1.1.2.1" @@ -65,7 +65,7 @@ #define LINE_SIZE 10 #define VIA_CARD_NAME "VIA 82Cxxx Audio driver " VIA_VERSION -#define VIA_MODULE_NAME "via82cxxx" +#define VIA_MODULE_NAME "via_audio" #define PFX VIA_MODULE_NAME ": " #define VIA_COUNTER_LIMIT 100000 diff --git a/drivers/usb/usb-ohci.c b/drivers/usb/usb-ohci.c index 758ffae669fe..7ca3b5b59326 100644 --- a/drivers/usb/usb-ohci.c +++ b/drivers/usb/usb-ohci.c @@ -1671,6 +1671,7 @@ static int hc_found_ohci (struct pci_dev *dev, int irq, void * mem_base) static int hc_start_ohci (struct pci_dev * dev) { + u32 cmd; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0) unsigned long mem_base = dev->resource[0].start; #else @@ -1679,6 +1680,11 @@ static int hc_start_ohci (struct pci_dev * dev) mem_base &= PCI_BASE_ADDRESS_MEM_MASK; #endif + /* Some Mac firmware will switch memory response off 
*/ + pci_read_config_dword(dev, PCI_COMMAND, &cmd); + cmd = (cmd | PCI_COMMAND_MEMORY); + pci_write_config_dword(dev, PCI_COMMAND, cmd); + pci_set_master (dev); mem_base = (unsigned long) ioremap_nocache (mem_base, 4096); diff --git a/drivers/video/atyfb.c b/drivers/video/atyfb.c index e6dacc9e2887..9760a6d3f389 100644 --- a/drivers/video/atyfb.c +++ b/drivers/video/atyfb.c @@ -460,7 +460,7 @@ static int atyfb_getcolreg(u_int regno, u_int *red, u_int *green, u_int *blue, static int atyfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int transp, struct fb_info *fb); static void do_install_cmap(int con, struct fb_info *info); -#ifdef CONFIG_PMAC +#ifdef CONFIG_PPC static int read_aty_sense(const struct fb_info_aty *info); #endif @@ -499,8 +499,8 @@ static int default_mclk __initdata = 0; static const char *mode_option __initdata = NULL; #endif -#ifdef CONFIG_PMAC -#ifdef CONFIG_NVRAM +#ifdef CONFIG_PPC +#ifdef CONFIG_NVRAM_NOT_DEFINED static int default_vmode __initdata = VMODE_NVRAM; static int default_cmode __initdata = CMODE_NVRAM; #else @@ -571,7 +571,7 @@ static inline u32 aty_ld_le32(unsigned int regindex, u32 val; temp = info->ati_regbase; - asm volatile("lwbrx %0,%1,%2" : "=r"(val) : "b" (regindex), "r" (temp)); + asm volatile("lwbrx %0,%1,%2;eieio" : "=r"(val) : "b" (regindex), "r" (temp)); return val; #elif defined(__mc68000__) return le32_to_cpu(*((volatile u32 *)(info->ati_regbase+regindex))); @@ -587,7 +587,7 @@ static inline void aty_st_le32(unsigned int regindex, u32 val, unsigned long temp; temp = info->ati_regbase; - asm volatile("stwbrx %0,%1,%2" : : "r" (val), "b" (regindex), "r" (temp) : + asm volatile("stwbrx %0,%1,%2;eieio" : : "r" (val), "b" (regindex), "r" (temp) : "memory"); #elif defined(__mc68000__) *((volatile u32 *)(info->ati_regbase+regindex)) = cpu_to_le32(val); @@ -765,9 +765,7 @@ static void aty_st_514(int offset, u8 val, const struct fb_info_aty *info) aty_st_8(DAC_W_INDEX, offset & 0xff, info); /* left addr byte */ 
aty_st_8(DAC_DATA, (offset >> 8) & 0xff, info); - eieio(); aty_st_8(DAC_MASK, val, info); - eieio(); aty_st_8(DAC_CNTL, 0, info); } @@ -775,10 +773,8 @@ static void aty_st_pll(int offset, u8 val, const struct fb_info_aty *info) { /* write addr byte */ aty_st_8(CLOCK_CNTL + 1, (offset << 2) | PLL_WR_EN, info); - eieio(); /* write the register value */ aty_st_8(CLOCK_CNTL + 2, val, info); - eieio(); aty_st_8(CLOCK_CNTL + 1, (offset << 2) & ~PLL_WR_EN, info); } @@ -788,14 +784,12 @@ static u8 aty_ld_pll(int offset, const struct fb_info_aty *info) /* write addr byte */ aty_st_8(CLOCK_CNTL + 1, (offset << 2), info); - eieio(); /* read the register value */ res = aty_ld_8(CLOCK_CNTL + 2, info); - eieio(); return res; } -#ifdef CONFIG_PMAC +#if defined(CONFIG_PPC) /* * Apple monitor sense @@ -835,7 +829,7 @@ static int read_aty_sense(const struct fb_info_aty *info) return sense; } -#endif /* CONFIG_PMAC */ +#endif /* defined(CONFIG_PPC) */ /* ------------------------------------------------------------------------- */ @@ -2489,7 +2483,7 @@ static void atyfb_set_par(const struct atyfb_par *par, init_engine(par, info); #ifdef CONFIG_FB_COMPAT_XPMAC - if (console_fb_info == &info->fb_info) { + if (!console_fb_info || console_fb_info == &info->fb_info) { struct fb_var_screeninfo var; int vmode, cmode; display_info.height = ((par->crtc.v_tot_disp>>16) & 0x7ff)+1; @@ -3276,7 +3270,7 @@ static int __init aty_init(struct fb_info_aty *info, const char *name) struct display *disp; const char *chipname = NULL, *ramname = NULL, *xtal; int pll, mclk, gtb_memsize; -#ifdef CONFIG_PMAC +#if defined(CONFIG_PPC) int sense; #endif u8 pll_ref_div; @@ -3365,11 +3359,15 @@ static int __init aty_init(struct fb_info_aty *info, const char *name) } else if (Gx == GB_CHIP_ID || Gx == GD_CHIP_ID || Gx == GI_CHIP_ID || Gx == GP_CHIP_ID || Gx == GQ_CHIP_ID || Gx == LB_CHIP_ID || - Gx == LD_CHIP_ID || Gx == LG_CHIP_ID || + Gx == LD_CHIP_ID || Gx == LI_CHIP_ID || Gx == LP_CHIP_ID) { /* RAGE PRO or LT 
PRO */ pll = 230; mclk = 100; + } else if (Gx == LG_CHIP_ID) { + /* Rage LT */ + pll = 230; + mclk = 63; } else { /* other RAGE */ pll = 135; @@ -3538,43 +3536,47 @@ static int __init aty_init(struct fb_info_aty *info, const char *name) var = default_var; #else /* !MODULE */ memset(&var, 0, sizeof(var)); -#ifdef CONFIG_PMAC - /* - * FIXME: The NVRAM stuff should be put in a Mac-specific file, as it - * applies to all Mac video cards - */ - if (mode_option) { - if (!mac_find_mode(&var, &info->fb_info, mode_option, 8)) - var = default_var; - } else { +#ifdef CONFIG_PPC + if (_machine == _MACH_Pmac) { + /* + * FIXME: The NVRAM stuff should be put in a Mac-specific file, as it + * applies to all Mac video cards + */ + if (mode_option) { + if (!mac_find_mode(&var, &info->fb_info, mode_option, 8)) + var = default_var; + } else { #ifdef CONFIG_NVRAM - if (default_vmode == VMODE_NVRAM) { - default_vmode = nvram_read_byte(NV_VMODE); - if (default_vmode <= 0 || default_vmode > VMODE_MAX) - default_vmode = VMODE_CHOOSE; - } + if (default_vmode == VMODE_NVRAM) { + default_vmode = nvram_read_byte(NV_VMODE); + if (default_vmode <= 0 || default_vmode > VMODE_MAX) + default_vmode = VMODE_CHOOSE; + } #endif - if (default_vmode == VMODE_CHOOSE) { - if (Gx == LG_CHIP_ID) - /* G3 PowerBook with 1024x768 LCD */ - default_vmode = VMODE_1024_768_60; - else { - sense = read_aty_sense(info); - default_vmode = mac_map_monitor_sense(sense); - } - } - if (default_vmode <= 0 || default_vmode > VMODE_MAX) - default_vmode = VMODE_640_480_60; + if (default_vmode == VMODE_CHOOSE) { + if (Gx == LG_CHIP_ID) + /* G3 PowerBook with 1024x768 LCD */ + default_vmode = VMODE_1024_768_60; + else { + sense = read_aty_sense(info); + default_vmode = mac_map_monitor_sense(sense); + } + } + if (default_vmode <= 0 || default_vmode > VMODE_MAX) + default_vmode = VMODE_640_480_60; #ifdef CONFIG_NVRAM - if (default_cmode == CMODE_NVRAM) - default_cmode = nvram_read_byte(NV_CMODE); + if (default_cmode == 
CMODE_NVRAM) + default_cmode = nvram_read_byte(NV_CMODE); #endif - if (default_cmode < CMODE_8 || default_cmode > CMODE_32) - default_cmode = CMODE_8; - if (mac_vmode_to_var(default_vmode, default_cmode, &var)) - var = default_var; + if (default_cmode < CMODE_8 || default_cmode > CMODE_32) + default_cmode = CMODE_8; + if (mac_vmode_to_var(default_vmode, default_cmode, &var)) + var = default_var; + } } -#else /* !CONFIG_PMAC */ + else if (!fb_find_mode(&var, &info->fb_info, mode_option, NULL, 0, NULL, 8)) + var = default_var; +#else /* !CONFIG_PPC */ #ifdef __sparc__ if (mode_option) { if (!fb_find_mode(&var, &info->fb_info, mode_option, NULL, 0, NULL, 8)) @@ -3585,7 +3587,7 @@ static int __init aty_init(struct fb_info_aty *info, const char *name) if (!fb_find_mode(&var, &info->fb_info, mode_option, NULL, 0, NULL, 8)) var = default_var; #endif /* !__sparc__ */ -#endif /* !CONFIG_PMAC */ +#endif /* !CONFIG_PPC */ #endif /* !MODULE */ if (noaccel) var.accel_flags &= ~FB_ACCELF_TEXT; @@ -4264,7 +4266,6 @@ static int atyfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, i |= 0x2; /*DAC_CNTL|0x2 turns off the extra brightness for gt*/ aty_st_8(DAC_CNTL, i, info); aty_st_8(DAC_MASK, 0xff, info); - eieio(); scale = ((Gx != GX_CHIP_ID) && (Gx != CX_CHIP_ID) && (info->current_par.crtc.bpp == 16)) ? 
3 : 0; info->aty_cmap_regs->windex = regno << scale; diff --git a/drivers/video/chipsfb.c b/drivers/video/chipsfb.c index 8078a667688e..209662504ef8 100644 --- a/drivers/video/chipsfb.c +++ b/drivers/video/chipsfb.c @@ -727,9 +727,20 @@ chips_sleep_notify(struct pmu_sleep_notifier *self, int when) int nb = p->var.yres * p->fix.line_length; switch (when) { + case PBOOK_SLEEP_REQUEST: + p->save_framebuffer = vmalloc(nb); + if (p->save_framebuffer == NULL) + return PBOOK_SLEEP_REFUSE; + break; + case PBOOK_SLEEP_REJECT: + if (p->save_framebuffer) { + vfree(p->save_framebuffer); + p->save_framebuffer = 0; + } + break; + case PBOOK_SLEEP_NOW: chipsfb_blank(1, (struct fb_info *)p); - p->save_framebuffer = vmalloc(nb); if (p->save_framebuffer) memcpy(p->save_framebuffer, p->frame_buffer, nb); diff --git a/drivers/video/controlfb.c b/drivers/video/controlfb.c index cd269e86441c..847281efc6e6 100644 --- a/drivers/video/controlfb.c +++ b/drivers/video/controlfb.c @@ -528,17 +528,23 @@ static void __init init_control(struct fb_info_control *p) p->sense = read_control_sense(p); printk(KERN_INFO "Monitor sense value = 0x%x, ", p->sense); /* Try to pick a video mode out of NVRAM if we have one. 
*/ - par->vmode = nvram_read_byte(NV_VMODE); - if(par->vmode <= 0 || par->vmode > VMODE_MAX || !control_reg_init[par->vmode - 1]) - par->vmode = VMODE_CHOOSE; - if(par->vmode == VMODE_CHOOSE) - par->vmode = mac_map_monitor_sense(p->sense); - if(!control_reg_init[par->vmode - 1]) - par->vmode = VMODE_640_480_60; - - par->cmode = nvram_read_byte(NV_CMODE); - if(par->cmode < CMODE_8 || par->cmode > CMODE_32) - par->cmode = CMODE_8; + if (default_vmode == VMODE_NVRAM) { + par->vmode = nvram_read_byte(NV_VMODE); + if(par->vmode <= 0 || par->vmode > VMODE_MAX || !control_reg_init[par->vmode - 1]) + par->vmode = VMODE_CHOOSE; + if(par->vmode == VMODE_CHOOSE) + par->vmode = mac_map_monitor_sense(p->sense); + if(!control_reg_init[par->vmode - 1]) + par->vmode = VMODE_640_480_60; + } else + par->vmode=default_vmode; + + if (default_cmode == CMODE_NVRAM){ + par->cmode = nvram_read_byte(NV_CMODE); + if(par->cmode < CMODE_8 || par->cmode > CMODE_32) + par->cmode = CMODE_8;} + else + par->cmode=default_cmode; /* * Reduce the pixel size if we don't have enough VRAM. 
*/ diff --git a/drivers/video/offb.c b/drivers/video/offb.c index edeeae44782a..68e6201cc840 100644 --- a/drivers/video/offb.c +++ b/drivers/video/offb.c @@ -51,6 +51,7 @@ struct fb_info_offb { struct { u_char red, green, blue, pad; } palette[256]; volatile unsigned char *cmap_adr; volatile unsigned char *cmap_data; + int is_rage_128; union { #ifdef FBCON_HAS_CFB16 u16 cfb16[16]; @@ -107,7 +108,8 @@ extern boot_infos_t *boot_infos; static int offb_init_driver(struct device_node *); static void offb_init_nodriver(struct device_node *); static void offb_init_fb(const char *name, const char *full_name, int width, - int height, int depth, int pitch, unsigned long address); + int height, int depth, int pitch, unsigned long address, + struct device_node *dp); /* * Interface to the low level console driver @@ -390,7 +392,7 @@ int __init offb_init(void) boot_infos->dispDeviceRect[2], boot_infos->dispDeviceRect[3], boot_infos->dispDeviceDepth, - boot_infos->dispDeviceRowBytes, addr); + boot_infos->dispDeviceRowBytes, addr, NULL); } } @@ -518,13 +520,14 @@ static void __init offb_init_nodriver(struct device_node *dp) address += 0x1000; } offb_init_fb(dp->name, dp->full_name, width, height, depth, - pitch, address); + pitch, address, dp); } static void offb_init_fb(const char *name, const char *full_name, int width, int height, int depth, - int pitch, unsigned long address) + int pitch, unsigned long address, + struct device_node *dp) { int i; struct fb_fix_screeninfo *fix; @@ -569,10 +572,18 @@ static void offb_init_fb(const char *name, const char *full_name, fix->type = FB_TYPE_PACKED_PIXELS; fix->type_aux = 0; + info->is_rage_128 = 0; if (depth == 8) { /* XXX kludge for ati */ - if (strncmp(name, "ATY,", 4) == 0) { + if (strncmp(name, "ATY,Rage128", 11) == 0) { + if (dp) { + unsigned long regbase = dp->addrs[2].address; + info->cmap_adr = ioremap(regbase, 0x1FFF) + 0x00b0; + info->cmap_data = info->cmap_adr + 4; + info->is_rage_128 = 1; + } + } else if (strncmp(name, 
"ATY,", 4) == 0) { unsigned long base = address & 0xff000000UL; info->cmap_adr = ioremap(base + 0x7ff000, 0x1000) + 0xcc0; info->cmap_data = info->cmap_adr + 1; @@ -853,12 +864,17 @@ static int offb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, *info2->cmap_adr = regno;/* On some chipsets, add << 3 in 15 bits */ mach_eieio(); - *info2->cmap_data = red; - mach_eieio(); - *info2->cmap_data = green; - mach_eieio(); - *info2->cmap_data = blue; - mach_eieio(); + if (info2->is_rage_128) { + out_le32((unsigned int *)info2->cmap_data, + (red << 16 | green << 8 | blue)); + } else { + *info2->cmap_data = red; + mach_eieio(); + *info2->cmap_data = green; + mach_eieio(); + *info2->cmap_data = blue; + mach_eieio(); + } if (regno < 16) switch (info2->var.bits_per_pixel) { diff --git a/fs/adfs/map.c b/fs/adfs/map.c index 62739bb32e5c..0fde11f5da95 100644 --- a/fs/adfs/map.c +++ b/fs/adfs/map.c @@ -44,7 +44,7 @@ lookup_zone(const struct adfs_discmap *dm, const unsigned int idlen, /* * get fragment id */ - asm("@ get fragment id start"); + //asm("@ get fragment id start"); { unsigned long v2; unsigned int tmp; @@ -60,14 +60,14 @@ lookup_zone(const struct adfs_discmap *dm, const unsigned int idlen, frag &= idmask; } - asm("@ get fragment id end"); + //asm("@ get fragment id end"); mapptr = start + idlen; /* * find end of fragment */ - asm("@ find end of fragment start"); + //asm("@ find end of fragment start"); { unsigned long v2; @@ -79,7 +79,7 @@ lookup_zone(const struct adfs_discmap *dm, const unsigned int idlen, mapptr += 1 + ffz(~v2); } - asm("@ find end of fragment end"); + //asm("@ find end of fragment end"); if (frag == frag_id) goto found; @@ -122,7 +122,7 @@ scan_free_map(struct adfs_sb_info *asb, struct adfs_discmap *dm) /* * get fragment id */ - asm("@ get fragment id start"); + //asm("@ get fragment id start"); { unsigned long v2; unsigned int tmp; @@ -138,7 +138,7 @@ scan_free_map(struct adfs_sb_info *asb, struct adfs_discmap *dm) frag &= idmask; } - 
asm("@ get fragment id end"); + //asm("@ get fragment id end"); /* * If the freelink is null, then no free fragments @@ -153,7 +153,7 @@ scan_free_map(struct adfs_sb_info *asb, struct adfs_discmap *dm) /* * get fragment id */ - asm("@ get fragment id start"); + //asm("@ get fragment id start"); { unsigned long v2; unsigned int tmp; @@ -169,14 +169,14 @@ scan_free_map(struct adfs_sb_info *asb, struct adfs_discmap *dm) frag &= idmask; } - asm("@ get fragment id end"); + //asm("@ get fragment id end"); mapptr = start + idlen; /* * find end of fragment */ - asm("@ find end of fragment start"); + //asm("@ find end of fragment start"); { unsigned long v2; @@ -188,7 +188,7 @@ scan_free_map(struct adfs_sb_info *asb, struct adfs_discmap *dm) mapptr += 1 + ffz(~v2); } - asm("@ find end of fragment end"); + //asm("@ find end of fragment end"); total += mapptr - start; } while (frag >= idlen + 1); diff --git a/fs/block_dev.c b/fs/block_dev.c index 47362da04278..b451332ed3d9 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -70,39 +70,53 @@ ssize_t block_write(struct file * filp, const char * buf, if (chars != blocksize) fn = bread; bh = fn(dev, block, blocksize); + if (!bh) + return written ? written : -EIO; + if (!buffer_uptodate(bh)) + wait_on_buffer(bh); } #else bh = getblk(dev, block, blocksize); + if (!bh) + return written ? written : -EIO; - if (chars != blocksize && !buffer_uptodate(bh)) { - if(!filp->f_reada || - !read_ahead[MAJOR(dev)]) { - /* We do this to force the read of a single buffer */ - brelse(bh); - bh = bread(dev,block,blocksize); - } else { - /* Read-ahead before write */ - blocks = read_ahead[MAJOR(dev)] / (blocksize >> 9) / 2; - if (block + blocks > size) blocks = size - block; - if (blocks > NBUF) blocks=NBUF; + if (!buffer_uptodate(bh)) + { + if (chars == blocksize) + wait_on_buffer(bh); + else + { bhlist[0] = bh; - for(i=1; i= 0) brelse(bhlist[i--]); - return written ? 
written : -EIO; - }; - }; + if (!filp->f_reada || !read_ahead[MAJOR(dev)]) { + /* We do this to force the read of a single buffer */ + blocks = 1; + } else { + /* Read-ahead before write */ + blocks = read_ahead[MAJOR(dev)] / (blocksize >> 9) / 2; + if (block + blocks > size) blocks = size - block; + if (blocks > NBUF) blocks=NBUF; + if (!blocks) blocks = 1; + for(i=1; i= 0) brelse(bhlist[i--]); + return written ? written : -EIO; + } + } + } ll_rw_block(READ, blocks, bhlist); for(i=1; ib_data; offset = 0; *ppos += chars; @@ -522,7 +536,7 @@ int check_disk_change(kdev_t dev) if (sb && invalidate_inodes(sb)) printk("VFS: busy inodes on changed media.\n"); - invalidate_buffers(dev); + destroy_buffers(dev); if (bdops->revalidate) bdops->revalidate(dev); diff --git a/fs/buffer.c b/fs/buffer.c index fbcb5364eedf..d9ab9c25db9a 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -94,6 +94,7 @@ static struct bh_free_head free_list[NR_SIZES]; kmem_cache_t *bh_cachep; static int grow_buffers(int size); +static void __refile_buffer(struct buffer_head *); /* This is used by some architectures to estimate available memory. */ atomic_t buffermem_pages = ATOMIC_INIT(0); @@ -277,11 +278,14 @@ repeat: void sync_dev(kdev_t dev) { - sync_buffers(dev, 0); sync_supers(dev); sync_inodes(dev); - sync_buffers(dev, 0); DQUOT_SYNC(dev); + /* sync all the dirty buffers out to disk only _after_ all the + high level layers finished generated buffer dirty data + (or we'll return with some buffer still dirty on the blockdevice + so breaking the semantics of this call) */ + sync_buffers(dev, 0); /* * FIXME(eric) we need to sync the physical devices here. 
* This is because some (scsi) controllers have huge amounts of @@ -412,40 +416,6 @@ out: return err; } -void invalidate_buffers(kdev_t dev) -{ - int nlist; - - spin_lock(&lru_list_lock); - for(nlist = 0; nlist < NR_LIST; nlist++) { - struct buffer_head * bh; - int i; - retry: - bh = lru_list[nlist]; - if (!bh) - continue; - for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; bh = bh->b_next_free) { - if (bh->b_dev != dev) - continue; - if (buffer_locked(bh)) { - atomic_inc(&bh->b_count); - spin_unlock(&lru_list_lock); - wait_on_buffer(bh); - spin_lock(&lru_list_lock); - atomic_dec(&bh->b_count); - goto retry; - } - if (atomic_read(&bh->b_count)) - continue; - clear_bit(BH_Protected, &bh->b_state); - clear_bit(BH_Uptodate, &bh->b_state); - clear_bit(BH_Dirty, &bh->b_state); - clear_bit(BH_Req, &bh->b_state); - } - } - spin_unlock(&lru_list_lock); -} - /* After several hours of tedious analysis, the following hash * function won. Do not mess with it... -DaveM */ @@ -464,10 +434,12 @@ static __inline__ void __hash_link(struct buffer_head *bh, struct buffer_head ** static __inline__ void __hash_unlink(struct buffer_head *bh) { - if (bh->b_next) - bh->b_next->b_pprev = bh->b_pprev; - *(bh->b_pprev) = bh->b_next; - bh->b_pprev = NULL; + if (bh->b_pprev) { + if (bh->b_next) + bh->b_next->b_pprev = bh->b_pprev; + *(bh->b_pprev) = bh->b_next; + bh->b_pprev = NULL; + } } static void __insert_into_lru_list(struct buffer_head * bh, int blist) @@ -514,17 +486,12 @@ static void __remove_from_free_list(struct buffer_head * bh, int index) bh->b_next_free = bh->b_prev_free = NULL; } -/* The following two functions must operate atomically - * because they control the visibility of a buffer head - * to the rest of the kernel. 
- */ -static __inline__ void __remove_from_queues(struct buffer_head *bh) +/* must be called with both the hash_table_lock and the lru_list_lock + held */ +static void __remove_from_queues(struct buffer_head *bh) { - write_lock(&hash_table_lock); - if (bh->b_pprev) - __hash_unlink(bh); + __hash_unlink(bh); __remove_from_lru_list(bh, bh->b_list); - write_unlock(&hash_table_lock); } static void insert_into_queues(struct buffer_head *bh) @@ -547,6 +514,8 @@ static void put_last_free(struct buffer_head * bh) struct bh_free_head *head = &free_list[BUFSIZE_INDEX(bh->b_size)]; struct buffer_head **bhp = &head->list; + bh->b_state = 0; + spin_lock(&head->lock); bh->b_dev = B_FREE; if(!*bhp) { @@ -604,11 +573,73 @@ unsigned int get_hardblocksize(kdev_t dev) return 0; } +/* If invalidate_buffers() will trash dirty buffers, it means some kind + of fs corruption is going on. Trashing dirty data always imply losing + information that was supposed to be just stored on the physical layer + by the user. + + Thus invalidate_buffers in general usage is not allwowed to trash dirty + buffers. For example ioctl(FLSBLKBUF) expects dirty data to be preserved. + + NOTE: In the case where the user removed a removable-media-disk even if + there's still dirty data not synced on disk (due a bug in the device driver + or due an error of the user), by not destroying the dirty buffers we could + generate corruption also on the next media inserted, thus a parameter is + necessary to handle this case in the most safe way possible (trying + to not corrupt also the new disk inserted with the data belonging to + the old now corrupted disk). Also for the ramdisk the natural thing + to do in order to release the ramdisk memory is to destroy dirty buffers. + + These are two special cases. Normal usage imply the device driver + to issue a sync on the device (without waiting I/O completation) and + then an invalidate_buffers call that doesn't trashes dirty buffers. 
*/ +void __invalidate_buffers(kdev_t dev, int destroy_dirty_buffers) +{ + int i, nlist, slept; + struct buffer_head * bh, * bh_next; + + retry: + slept = 0; + spin_lock(&lru_list_lock); + for(nlist = 0; nlist < NR_LIST; nlist++) { + bh = lru_list[nlist]; + if (!bh) + continue; + for (i = nr_buffers_type[nlist]; i > 0 ; bh = bh_next, i--) { + bh_next = bh->b_next_free; + if (bh->b_dev != dev) + continue; + if (buffer_locked(bh)) { + atomic_inc(&bh->b_count); + spin_unlock(&lru_list_lock); + wait_on_buffer(bh); + slept = 1; + spin_lock(&lru_list_lock); + atomic_dec(&bh->b_count); + } + + write_lock(&hash_table_lock); + if (!atomic_read(&bh->b_count) && + (destroy_dirty_buffers || !buffer_dirty(bh))) { + __remove_from_queues(bh); + put_last_free(bh); + } + write_unlock(&hash_table_lock); + if (slept) + goto out; + } + } +out: + spin_unlock(&lru_list_lock); + if (slept) + goto retry; +} + void set_blocksize(kdev_t dev, int size) { extern int *blksize_size[]; - int i, nlist; - struct buffer_head * bh, *bhnext; + int i, nlist, slept; + struct buffer_head * bh, * bh_next; if (!blksize_size[MAJOR(dev)]) return; @@ -626,41 +657,53 @@ void set_blocksize(kdev_t dev, int size) sync_buffers(dev, 2); blksize_size[MAJOR(dev)][MINOR(dev)] = size; - /* We need to be quite careful how we do this - we are moving entries - * around on the free list, and we can get in a loop if we are not careful. 
- */ + retry: + slept = 0; + spin_lock(&lru_list_lock); for(nlist = 0; nlist < NR_LIST; nlist++) { - repeat: - spin_lock(&lru_list_lock); bh = lru_list[nlist]; - for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; bh = bhnext) { - if(!bh) - break; - - bhnext = bh->b_next_free; - if (bh->b_dev != dev) - continue; - if (bh->b_size == size) - continue; + if (!bh) + continue; + for (i = nr_buffers_type[nlist]; i > 0 ; bh = bh_next, i--) { + bh_next = bh->b_next_free; + if (bh->b_dev != dev || bh->b_size == size) + continue; if (buffer_locked(bh)) { atomic_inc(&bh->b_count); spin_unlock(&lru_list_lock); wait_on_buffer(bh); + slept = 1; + spin_lock(&lru_list_lock); atomic_dec(&bh->b_count); - goto repeat; - } - if (bh->b_dev == dev && bh->b_size != size) { - clear_bit(BH_Dirty, &bh->b_state); - clear_bit(BH_Uptodate, &bh->b_state); - clear_bit(BH_Req, &bh->b_state); } - if (atomic_read(&bh->b_count) == 0) { + + write_lock(&hash_table_lock); + if (!atomic_read(&bh->b_count)) { + if (buffer_dirty(bh)) + printk(KERN_WARNING + "set_blocksize: dev %s buffer_dirty %lu size %hu\n", + kdevname(dev), bh->b_blocknr, bh->b_size); __remove_from_queues(bh); put_last_free(bh); + } else { + if (atomic_set_buffer_clean(bh)) + __refile_buffer(bh); + clear_bit(BH_Uptodate, &bh->b_state); + printk(KERN_WARNING + "set_blocksize: " + "b_count %d, dev %s, block %lu, from %p\n", + atomic_read(&bh->b_count), bdevname(bh->b_dev), + bh->b_blocknr, __builtin_return_address(0)); } + write_unlock(&hash_table_lock); + if (slept) + goto out; } - spin_unlock(&lru_list_lock); } + out: + spin_unlock(&lru_list_lock); + if (slept) + goto retry; } /* @@ -785,30 +828,31 @@ repeat: atomic_set(&bh->b_count, 1); } spin_unlock(&free_list[isize].lock); - if (!bh) - goto refill; - /* OK, FINALLY we know that this buffer is the only one of its kind, - * we hold a reference (b_count>0), it is unlocked, and it is clean. 
+ /* + * OK, FINALLY we know that this buffer is the only one of + * its kind, we hold a reference (b_count>0), it is unlocked, + * and it is clean. */ - init_buffer(bh, end_buffer_io_sync, NULL); - bh->b_dev = dev; - bh->b_blocknr = block; - bh->b_state = 1 << BH_Mapped; + if (bh) { + init_buffer(bh, end_buffer_io_sync, NULL); + bh->b_dev = dev; + bh->b_blocknr = block; + bh->b_state = 1 << BH_Mapped; - /* Insert the buffer into the regular lists */ - insert_into_queues(bh); - goto out; + /* Insert the buffer into the regular lists */ + insert_into_queues(bh); + out: + touch_buffer(bh); + return bh; + } /* * If we block while refilling the free list, somebody may * create the buffer first ... search the hashes again. */ -refill: refill_freelist(size); goto repeat; -out: - return bh; } /* -1 -> no need to flush @@ -820,11 +864,13 @@ static int balance_dirty_state(kdev_t dev) dirty = size_buffers_type[BUF_DIRTY] >> PAGE_SHIFT; tot = nr_free_buffer_pages(); - hard_dirty_limit = tot * bdf_prm.b_un.nfract / 100; - soft_dirty_limit = hard_dirty_limit >> 1; + tot -= size_buffers_type[BUF_PROTECTED] >> PAGE_SHIFT; + + dirty *= 200; + soft_dirty_limit = tot * bdf_prm.b_un.nfract; + hard_dirty_limit = soft_dirty_limit * 2; - if (dirty > soft_dirty_limit) - { + if (dirty > soft_dirty_limit) { if (dirty > hard_dirty_limit) return 1; return 0; @@ -848,29 +894,39 @@ void balance_dirty(kdev_t dev) wakeup_bdflush(state); } -static inline void __mark_dirty(struct buffer_head *bh, int flag) +static __inline__ void __mark_dirty(struct buffer_head *bh, int flag) { bh->b_flushtime = jiffies + (flag ? 
bdf_prm.b_un.age_super : bdf_prm.b_un.age_buffer); - clear_bit(BH_New, &bh->b_state); refile_buffer(bh); } +/* atomic version, the user must call balance_dirty() by hand + as soon as it become possible to block */ void __mark_buffer_dirty(struct buffer_head *bh, int flag) { - __mark_dirty(bh, flag); + if (!atomic_set_buffer_dirty(bh)) + __mark_dirty(bh, flag); +} + +void mark_buffer_dirty(struct buffer_head *bh, int flag) +{ + __mark_buffer_dirty(bh, flag); + balance_dirty(bh->b_dev); } /* * A buffer may need to be moved from one buffer list to another * (e.g. in case it is not shared any more). Handle this. */ -static __inline__ void __refile_buffer(struct buffer_head *bh) +static void __refile_buffer(struct buffer_head *bh) { int dispose = BUF_CLEAN; if (buffer_locked(bh)) dispose = BUF_LOCKED; if (buffer_dirty(bh)) dispose = BUF_DIRTY; + if (buffer_protected(bh)) + dispose = BUF_PROTECTED; if (dispose != bh->b_list) { __remove_from_lru_list(bh, bh->b_list); bh->b_list = dispose; @@ -890,8 +946,6 @@ void refile_buffer(struct buffer_head *bh) */ void __brelse(struct buffer_head * buf) { - touch_buffer(buf); - if (atomic_read(&buf->b_count)) { atomic_dec(&buf->b_count); return; @@ -912,12 +966,10 @@ void __bforget(struct buffer_head * buf) write_lock(&hash_table_lock); if (!atomic_dec_and_test(&buf->b_count) || buffer_locked(buf)) goto in_use; - if (buf->b_pprev) - __hash_unlink(buf); + __hash_unlink(buf); write_unlock(&hash_table_lock); __remove_from_lru_list(buf, buf->b_list); spin_unlock(&lru_list_lock); - buf->b_state = 0; put_last_free(buf); return; @@ -1218,13 +1270,13 @@ static int create_page_buffers(int rw, struct page *page, kdev_t dev, int b[], i static void unmap_buffer(struct buffer_head * bh) { - if (buffer_mapped(bh)) - { + if (buffer_mapped(bh)) { mark_buffer_clean(bh); wait_on_buffer(bh); clear_bit(BH_Uptodate, &bh->b_state); clear_bit(BH_Mapped, &bh->b_state); clear_bit(BH_Req, &bh->b_state); + clear_bit(BH_New, &bh->b_state); } } @@ -1303,20 
+1355,16 @@ static void create_empty_buffers(struct page *page, struct inode *inode, unsigne static void unmap_underlying_metadata(struct buffer_head * bh) { -#if 0 - if (buffer_new(bh)) { - struct buffer_head *old_bh; - - old_bh = get_hash_table(bh->b_dev, bh->b_blocknr, bh->b_size); - if (old_bh) { - unmap_buffer(old_bh); - /* Here we could run brelse or bforget. We use - bforget because it will try to put the buffer - in the freelist. */ - __bforget(old_bh); - } + struct buffer_head *old_bh; + + old_bh = get_hash_table(bh->b_dev, bh->b_blocknr, bh->b_size); + if (old_bh) { + unmap_buffer(old_bh); + /* Here we could run brelse or bforget. We use + bforget because it will try to put the buffer + in the freelist. */ + __bforget(old_bh); } -#endif } /* @@ -1326,7 +1374,7 @@ static void unmap_underlying_metadata(struct buffer_head * bh) int block_write_full_page(struct dentry *dentry, struct page *page) { struct inode *inode = dentry->d_inode; - int err, i; + int err, i, need_balance_dirty = 0; unsigned long block; struct buffer_head *bh, *head; @@ -1361,15 +1409,22 @@ int block_write_full_page(struct dentry *dentry, struct page *page) err = inode->i_op->get_block(inode, block, bh, 1); if (err) goto out; - unmap_underlying_metadata(bh); + if (buffer_new(bh)) + unmap_underlying_metadata(bh); } set_bit(BH_Uptodate, &bh->b_state); - mark_buffer_dirty(bh,0); + if (!atomic_set_buffer_dirty(bh)) { + __mark_dirty(bh, 0); + need_balance_dirty = 1; + } bh = bh->b_this_page; block++; } while (bh != head); + if (need_balance_dirty) + balance_dirty(bh->b_dev); + SetPageUptodate(page); return 0; out: @@ -1415,13 +1470,13 @@ int block_write_zero_range(struct inode *inode, struct page *page, err = inode->i_op->get_block(inode, block, bh, 1); if (err) goto out; - unmap_underlying_metadata(bh); - } - if (buffer_new(bh)) { - zeroto = block_end; - if (block_start < zerofrom) - zerofrom = block_start; - continue; + if (buffer_new(bh)) { + unmap_underlying_metadata(bh); + zeroto = 
block_end; + if (block_start < zerofrom) + zerofrom = block_start; + continue; + } } if (!buffer_uptodate(bh) && (block_start < zerofrom || block_end > to)) { @@ -1475,7 +1530,7 @@ int block_write_zero_range(struct inode *inode, struct page *page, partial = 1; } else { set_bit(BH_Uptodate, &bh->b_state); - if (!test_and_set_bit(BH_Dirty, &bh->b_state)) { + if (!atomic_set_buffer_dirty(bh)) { __mark_dirty(bh, 0); need_balance_dirty = 1; } @@ -2000,7 +2055,7 @@ out: */ int try_to_free_buffers(struct page * page) { - struct buffer_head * tmp, * bh = page->buffers; + struct buffer_head * tmp, * p, * bh = page->buffers; int index = BUFSIZE_INDEX(bh->b_size); int ret; @@ -2009,7 +2064,7 @@ int try_to_free_buffers(struct page * page) spin_lock(&free_list[index].lock); tmp = bh; do { - struct buffer_head * p = tmp; + p = tmp; tmp = tmp->b_this_page; if (buffer_busy(p)) @@ -2025,13 +2080,10 @@ int try_to_free_buffers(struct page * page) /* The buffer can be either on the regular * queues or on the free list.. 
*/ - if (p->b_dev == B_FREE) { + if (p->b_dev != B_FREE) + __remove_from_queues(p); + else __remove_from_free_list(p, index); - } else { - if (p->b_pprev) - __hash_unlink(p); - __remove_from_lru_list(p, p->b_list); - } __put_unused_buffer_head(p); } while (tmp != bh); spin_unlock(&unused_list_lock); @@ -2051,7 +2103,8 @@ out: busy_buffer_page: /* Uhhuh, start writeback so that we don't end up with all dirty pages */ - wakeup_bdflush(0); + if (buffer_dirty(p)) + wakeup_bdflush(0); ret = 0; goto out; } @@ -2065,7 +2118,7 @@ void show_buffers(void) int found = 0, locked = 0, dirty = 0, used = 0, lastused = 0; int protected = 0; int nlist; - static char *buf_types[NR_LIST] = { "CLEAN", "LOCKED", "DIRTY" }; + static char *buf_types[NR_LIST] = { "CLEAN", "LOCKED", "DIRTY", "PROTECTED", }; #endif printk("Buffer memory: %6dkB\n", @@ -2091,10 +2144,16 @@ void show_buffers(void) used++, lastused = found; bh = bh->b_next_free; } while (bh != lru_list[nlist]); - printk("%8s: %d buffers, %d used (last=%d), " + { + int tmp = nr_buffers_type[nlist]; + if (found != tmp) + printk("%9s: BUG -> found %d, reported %d\n", + buf_types[nlist], found, tmp); + } + printk("%9s: %d buffers, %lu kbyte, %d used (last=%d), " "%d locked, %d protected, %d dirty\n", - buf_types[nlist], found, used, lastused, - locked, protected, dirty); + buf_types[nlist], found, size_buffers_type[nlist]>>10, + used, lastused, locked, protected, dirty); } spin_unlock(&lru_list_lock); #endif @@ -2184,8 +2243,7 @@ void wakeup_bdflush(int block) if (current == bdflush_tsk) return; - if (!block) - { + if (!block) { wake_up_process(bdflush_tsk); return; } @@ -2210,7 +2268,7 @@ void wakeup_bdflush(int block) as all dirty buffers lives _only_ in the DIRTY lru list. As we never browse the LOCKED and CLEAN lru lists they are infact completly useless. 
*/ -static void flush_dirty_buffers(int check_flushtime) +static int flush_dirty_buffers(int check_flushtime) { struct buffer_head * bh, *next; int flushed = 0, i; @@ -2220,29 +2278,24 @@ static void flush_dirty_buffers(int check_flushtime) bh = lru_list[BUF_DIRTY]; if (!bh) goto out_unlock; - for (i = nr_buffers_type[BUF_DIRTY]; i-- > 0; bh = next) - { + for (i = nr_buffers_type[BUF_DIRTY]; i-- > 0; bh = next) { next = bh->b_next_free; - if (!buffer_dirty(bh)) - { + if (!buffer_dirty(bh)) { __refile_buffer(bh); continue; } if (buffer_locked(bh)) continue; - if (check_flushtime) - { + if (check_flushtime) { /* The dirty lru list is chronologically ordered so if the current bh is not yet timed out, then also all the following bhs will be too young. */ if (time_before(jiffies, bh->b_flushtime)) goto out_unlock; - } - else - { + } else { if (++flushed > bdf_prm.b_un.ndirty) goto out_unlock; } @@ -2259,6 +2312,8 @@ static void flush_dirty_buffers(int check_flushtime) } out_unlock: spin_unlock(&lru_list_lock); + + return flushed; } /* @@ -2342,6 +2397,7 @@ asmlinkage long sys_bdflush(int func, long data) */ int bdflush(void * unused) { + int flushed; /* * We have a bare-bones task_struct, and really should fill * in a few more things so "top" and /proc/2/{exe,root,cwd} @@ -2363,7 +2419,7 @@ int bdflush(void * unused) for (;;) { CHECK_EMERGENCY_SYNC - flush_dirty_buffers(0); + flushed = flush_dirty_buffers(0); /* If wakeup_bdflush will wakeup us after our bdflush_done wakeup, then @@ -2378,10 +2434,10 @@ int bdflush(void * unused) /* * If there are still a lot of dirty buffers around, * skip the sleep and flush some more. Otherwise, we - * sleep for a while. + * go to sleep waiting a wakeup. */ - if (balance_dirty_state(NODEV) < 0) - schedule_timeout(5*HZ); + if (!flushed || balance_dirty_state(NODEV) < 0) + schedule(); /* Remember to mark us as running otherwise the next schedule will block. 
*/ __set_current_state(TASK_RUNNING); @@ -2413,24 +2469,19 @@ int kupdate(void * unused) for (;;) { /* update interval */ interval = bdf_prm.b_un.interval; - if (interval) - { + if (interval) { tsk->state = TASK_INTERRUPTIBLE; schedule_timeout(interval); - } - else - { + } else { stop_kupdate: tsk->state = TASK_STOPPED; schedule(); /* wait for SIGCONT */ } /* check for sigstop */ - if (signal_pending(tsk)) - { + if (signal_pending(tsk)) { int stopped = 0; spin_lock_irq(&tsk->sigmask_lock); - if (sigismember(&tsk->signal, SIGSTOP)) - { + if (sigismember(&tsk->signal, SIGSTOP)) { sigdelset(&tsk->signal, SIGSTOP); stopped = 1; } diff --git a/fs/coda/file.c b/fs/coda/file.c index a19d0d93f34a..054afdf2dbee 100644 --- a/fs/coda/file.c +++ b/fs/coda/file.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include diff --git a/fs/coda/upcall.c b/fs/coda/upcall.c index 9aecd8a6e259..fe86dbac9419 100644 --- a/fs/coda/upcall.c +++ b/fs/coda/upcall.c @@ -392,7 +392,7 @@ int venus_readlink(struct super_block *sb, struct ViceFid *fid, if ( retlen > *length ) retlen = *length; *length = retlen; - result = (char *)outp + (int)outp->coda_readlink.data; + result = (char *)outp + (long)outp->coda_readlink.data; memcpy(buffer, result, retlen); *(buffer + retlen) = '\0'; } @@ -541,7 +541,7 @@ int venus_pioctl(struct super_block *sb, struct ViceFid *fid, inp->coda_ioctl.data = (char *)(INSIZE(ioctl)); /* get the data out of user space */ - if ( copy_from_user((char*)inp + (int)inp->coda_ioctl.data, + if ( copy_from_user((char*)inp + (long)inp->coda_ioctl.data, data->vi.in, data->vi.in_size) ) { error = EINVAL; goto exit; @@ -567,7 +567,7 @@ int venus_pioctl(struct super_block *sb, struct ViceFid *fid, if ( error ) goto exit; if (copy_to_user(data->vi.out, - (char *)outp + (int)outp->coda_ioctl.data, + (char *)outp + (long)outp->coda_ioctl.data, data->vi.out_size)) { error = EINVAL; goto exit; @@ -660,7 +660,8 @@ static inline unsigned long 
coda_waitfor_upcall(struct upc_req *vmp) } CDEBUG(D_SPECIAL, "begin: %ld.%06ld, elapsed: %ld.%06ld\n", - begin.tv_sec, begin.tv_usec, end.tv_sec, end.tv_usec); + begin.tv_sec, (unsigned long)begin.tv_usec, + end.tv_sec, (unsigned long)end.tv_usec); return ((end.tv_sec * 1000000) + end.tv_usec); } diff --git a/fs/cramfs/inflate/zconf.h b/fs/cramfs/inflate/zconf.h index adc70c276eb7..0b5ec883812b 100644 --- a/fs/cramfs/inflate/zconf.h +++ b/fs/cramfs/inflate/zconf.h @@ -83,8 +83,8 @@ typedef uLong FAR uLongf; typedef void FAR *voidpf; typedef void *voidp; -#include /* for off_t */ -#include /* for SEEK_* and off_t */ +#include /* for off_t */ +#include /* for SEEK_* and off_t */ #define z_off_t off_t #endif /* _ZCONF_H */ diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c index 36e665c322a7..cc0c4cce91bf 100644 --- a/fs/hpfs/dir.c +++ b/fs/hpfs/dir.c @@ -8,7 +8,7 @@ #include "hpfs_fn.h" -int hpfs_dir_read(struct file *filp, char *name, size_t len, loff_t *loff) +ssize_t hpfs_dir_read(struct file *filp, char *name, size_t len, loff_t *loff) { return -EISDIR; } diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h index d1e70e579159..38c47116c72e 100644 --- a/fs/hpfs/hpfs_fn.h +++ b/fs/hpfs/hpfs_fn.h @@ -224,7 +224,7 @@ void hpfs_set_dentry_operations(struct dentry *); /* dir.c */ -int hpfs_dir_read(struct file *, char *, size_t, loff_t *); +ssize_t hpfs_dir_read(struct file *, char *, size_t, loff_t *); int hpfs_dir_release(struct inode *, struct file *); loff_t hpfs_dir_lseek(struct file *, loff_t, int); int hpfs_readdir(struct file *, void *, filldir_t); diff --git a/fs/minix/fsync.c b/fs/minix/fsync.c index acb5e94b7298..30794d27ab0d 100644 --- a/fs/minix/fsync.c +++ b/fs/minix/fsync.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c index c1de11112727..c5d0c479706d 100644 --- a/fs/openpromfs/inode.c +++ b/fs/openpromfs/inode.c @@ -1,4 +1,4 @@ -/* $Id: inode.c,v 1.3 2000/01/04 10:02:29 jj Exp $ 
+/* $Id: inode.c,v 1.4 2000/02/09 22:35:50 davem Exp $ * openpromfs.c: /proc/openprom handling routines * * Copyright (C) 1996-1999 Jakub Jelinek (jakub@redhat.com) diff --git a/fs/partitions/mac.c b/fs/partitions/mac.c index 41364385d5b5..119de17bac50 100644 --- a/fs/partitions/mac.c +++ b/fs/partitions/mac.c @@ -28,6 +28,14 @@ extern void note_bootable_part(kdev_t dev, int part); * Code to understand MacOS partition tables. */ +static inline void mac_fix_string(char *stg, int len) +{ + int i; + + for (i = len - 1; i >= 0 && stg[i] == ' '; i--) + stg[i] = 0; +} + int mac_partition(struct gendisk *hd, kdev_t dev, unsigned long fsec, int first_part_minor) { struct buffer_head *bh; @@ -35,7 +43,8 @@ int mac_partition(struct gendisk *hd, kdev_t dev, unsigned long fsec, int first_ int dev_bsize, dev_pos, pos; unsigned secsize; #ifdef CONFIG_PPC - int first_bootable = 1; + int found_root = 0; + int found_root_goodness = 0; #endif struct mac_partition *part; struct mac_driver_desc *md; @@ -93,16 +102,49 @@ int mac_partition(struct gendisk *hd, kdev_t dev, unsigned long fsec, int first_ * If this is the first bootable partition, tell the * setup code, in case it wants to make this the root. 
*/ - if ( (_machine == _MACH_Pmac) && first_bootable - && (be32_to_cpu(part->status) & MAC_STATUS_BOOTABLE) - && strcasecmp(part->processor, "powerpc") == 0) { - note_bootable_part(dev, blk); - first_bootable = 0; + if (_machine == _MACH_Pmac) { + int goodness = 0; + + mac_fix_string(part->processor, 16); + mac_fix_string(part->name, 32); + mac_fix_string(part->type, 32); + + if ((be32_to_cpu(part->status) & MAC_STATUS_BOOTABLE) + && strcasecmp(part->processor, "powerpc") == 0) + goodness++; + + if (strcasecmp(part->type, "Apple_UNIX_SVR2") == 0 + || strcasecmp(part->type, "Linux_PPC") == 0) { + int i, l; + + goodness++; + l = strlen(part->name); + if (strcmp(part->name, "/") == 0) + goodness++; + for (i = 0; i <= l - 4; ++i) { + if (strnicmp(part->name + i, "root", + 4) == 0) { + goodness += 2; + break; + } + } + if (strnicmp(part->name, "swap", 4) == 0) + goodness--; + } + + if (goodness > found_root_goodness) { + found_root = blk; + found_root_goodness = goodness; + } } #endif /* CONFIG_PPC */ ++first_part_minor; } +#ifdef CONFIG_PPC + if (found_root_goodness) + note_bootable_part(dev, found_root); +#endif brelse(bh); printk("\n"); return 1; diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c index d891c9e34ef1..cd8d10bda641 100644 --- a/fs/proc/proc_misc.c +++ b/fs/proc/proc_misc.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include @@ -616,7 +617,7 @@ static struct proc_dir_entry proc_root_profile = { 0, &proc_profile_inode_operations }; -void proc_misc_init(void) +void __init proc_misc_init(void) { static struct { char *name; diff --git a/fs/proc/procfs_syms.c b/fs/proc/procfs_syms.c index 45d80dd5fd4e..cc1ae7d96228 100644 --- a/fs/proc/procfs_syms.c +++ b/fs/proc/procfs_syms.c @@ -2,6 +2,7 @@ #include #include #include +#include extern struct proc_dir_entry *proc_sys_root; @@ -26,7 +27,7 @@ static struct file_system_type proc_fs_type = { NULL }; -int init_proc_fs(void) +int __init init_proc_fs(void) { return 
register_filesystem(&proc_fs_type) == 0; } diff --git a/fs/qnx4/fsync.c b/fs/qnx4/fsync.c index 94871b48e7fb..e90291f032f1 100644 --- a/fs/qnx4/fsync.c +++ b/fs/qnx4/fsync.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include diff --git a/fs/super.c b/fs/super.c index 71b38fd46c5d..4e7146fcffe6 100644 --- a/fs/super.c +++ b/fs/super.c @@ -1378,7 +1378,10 @@ int __init change_root(kdev_t new_root_dev,const char *put_old) bdev = do_umount(old_root_dev,1, 0); if (!IS_ERR(bdev)) { printk("okay\n"); - invalidate_buffers(old_root_dev); + /* special: the old device driver is going to be + a ramdisk and the point of this call is to free its + protected memory (even if dirty). */ + destroy_buffers(old_root_dev); if (bdev) { blkdev_put(bdev, BDEV_FS); bdput(bdev); diff --git a/fs/sysv/fsync.c b/fs/sysv/fsync.c index ab0755cef1c9..c2ba82b0b3fa 100644 --- a/fs/sysv/fsync.c +++ b/fs/sysv/fsync.c @@ -16,6 +16,7 @@ #include #include +#include #include #include diff --git a/fs/udf/fsync.c b/fs/udf/fsync.c index 0eea4f1e231e..1b7a0a4591ba 100644 --- a/fs/udf/fsync.c +++ b/fs/udf/fsync.c @@ -27,6 +27,7 @@ #include #include +#include #include #include "udf_i.h" diff --git a/include/asm-alpha/bitops.h b/include/asm-alpha/bitops.h index c9e7e7aee9e8..c590d9e5129f 100644 --- a/include/asm-alpha/bitops.h +++ b/include/asm-alpha/bitops.h @@ -76,8 +76,8 @@ extern __inline__ void change_bit(unsigned long nr, volatile void * addr) :"Ir" (1UL << (nr & 31)), "m" (*m)); } -extern __inline__ unsigned long test_and_set_bit(unsigned long nr, - volatile void * addr) +extern __inline__ int test_and_set_bit(unsigned long nr, + volatile void * addr) { unsigned long oldbit; unsigned long temp; @@ -101,8 +101,8 @@ extern __inline__ unsigned long test_and_set_bit(unsigned long nr, return oldbit != 0; } -extern __inline__ unsigned long test_and_clear_bit(unsigned long nr, - volatile void * addr) +extern __inline__ int test_and_clear_bit(unsigned long nr, + volatile void * addr) { 
unsigned long oldbit; unsigned long temp; @@ -126,8 +126,8 @@ extern __inline__ unsigned long test_and_clear_bit(unsigned long nr, return oldbit != 0; } -extern __inline__ unsigned long test_and_change_bit(unsigned long nr, - volatile void * addr) +extern __inline__ int test_and_change_bit(unsigned long nr, + volatile void * addr) { unsigned long oldbit; unsigned long temp; @@ -149,9 +149,9 @@ extern __inline__ unsigned long test_and_change_bit(unsigned long nr, return oldbit != 0; } -extern __inline__ unsigned long test_bit(int nr, volatile void * addr) +extern __inline__ int test_bit(int nr, volatile void * addr) { - return 1UL & (((const int *) addr)[nr >> 5] >> (nr & 31)); + return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL; } /* diff --git a/include/asm-alpha/hardirq.h b/include/asm-alpha/hardirq.h index 67544e13d8b7..86e895b83e77 100644 --- a/include/asm-alpha/hardirq.h +++ b/include/asm-alpha/hardirq.h @@ -23,6 +23,8 @@ extern int __local_irq_count; (local_irq_count(__cpu) + local_bh_count(__cpu)) != 0; \ }) +#define in_irq() (local_irq_count(smp_processor_id()) != 0) + #ifndef __SMP__ #define hardirq_trylock(cpu) (local_irq_count(cpu) == 0) diff --git a/include/asm-alpha/softirq.h b/include/asm-alpha/softirq.h index d49064790a5f..8b2713ed6635 100644 --- a/include/asm-alpha/softirq.h +++ b/include/asm-alpha/softirq.h @@ -24,132 +24,9 @@ extern inline void cpu_bh_enable(int cpu) local_bh_count(cpu)--; } -extern inline int cpu_bh_trylock(int cpu) -{ - return local_bh_count(cpu) ? 
0 : (local_bh_count(cpu) = 1); -} - -extern inline void cpu_bh_endlock(int cpu) -{ - local_bh_count(cpu) = 0; -} - #define local_bh_enable() cpu_bh_enable(smp_processor_id()) #define local_bh_disable() cpu_bh_disable(smp_processor_id()) -#define get_active_bhs() (bh_mask & bh_active) - -static inline void clear_active_bhs(unsigned long x) -{ - unsigned long temp; - __asm__ __volatile__( - "1: ldq_l %0,%1\n" - " bic %0,%2,%0\n" - " stq_c %0,%1\n" - " beq %0,2f\n" - ".section .text2,\"ax\"\n" - "2: br 1b\n" - ".previous" - :"=&r" (temp), "=m" (bh_active) - :"Ir" (x), "m" (bh_active)); -} - -extern inline void init_bh(int nr, void (*routine)(void)) -{ - bh_base[nr] = routine; - atomic_set(&bh_mask_count[nr], 0); - bh_mask |= 1 << nr; -} - -extern inline void remove_bh(int nr) -{ - bh_mask &= ~(1 << nr); - wmb(); - bh_base[nr] = NULL; -} - -extern inline void mark_bh(int nr) -{ - set_bit(nr, &bh_active); -} - -#ifdef __SMP__ - -/* - * The locking mechanism for base handlers, to prevent re-entrancy, - * is entirely private to an implementation, it should not be - * referenced at all outside of this file. 
- */ -extern atomic_t global_bh_lock; -extern atomic_t global_bh_count; - -extern void synchronize_bh(void); - -static inline void start_bh_atomic(void) -{ - atomic_inc(&global_bh_lock); - synchronize_bh(); -} - -static inline void end_bh_atomic(void) -{ - atomic_dec(&global_bh_lock); -} - -/* These are for the irq's testing the lock */ -static inline int softirq_trylock(int cpu) -{ - if (cpu_bh_trylock(cpu)) { - if (!test_and_set_bit(0, &global_bh_count)) { - if (atomic_read(&global_bh_lock) == 0) - return 1; - clear_bit(0, &global_bh_count); - } - cpu_bh_endlock(cpu); - } - return 0; -} - -static inline void softirq_endlock(int cpu) -{ - cpu_bh_enable(cpu); - clear_bit(0, &global_bh_count); -} - -#else - -extern inline void start_bh_atomic(void) -{ - local_bh_disable(); -} - -extern inline void end_bh_atomic(void) -{ - local_bh_enable(); -} - -/* These are for the irq's testing the lock */ -#define softirq_trylock(cpu) cpu_bh_trylock(cpu) -#define softirq_endlock(cpu) cpu_bh_endlock(cpu) -#define synchronize_bh() barrier() - -#endif /* SMP */ - -/* - * These use a mask count to correctly handle - * nested disable/enable calls - */ -extern inline void disable_bh(int nr) -{ - bh_mask &= ~(1 << nr); - atomic_inc(&bh_mask_count[nr]); - synchronize_bh(); -} - -extern inline void enable_bh(int nr) -{ - if (atomic_dec_and_test(&bh_mask_count[nr])) - bh_mask |= 1 << nr; -} +#define in_softirq() (local_bh_count(smp_processor_id()) != 0) #endif /* _ALPHA_SOFTIRQ_H */ diff --git a/include/asm-i386/hardirq.h b/include/asm-i386/hardirq.h index fc8ff9016b3f..610db5633f82 100644 --- a/include/asm-i386/hardirq.h +++ b/include/asm-i386/hardirq.h @@ -12,6 +12,8 @@ extern unsigned int local_irq_count[NR_CPUS]; #define in_interrupt() ({ int __cpu = smp_processor_id(); \ (local_irq_count[__cpu] + local_bh_count[__cpu] != 0); }) +#define in_irq() (local_irq_count[smp_processor_id()] != 0) + #ifndef __SMP__ #define hardirq_trylock(cpu) (local_irq_count[cpu] == 0) diff --git 
a/include/asm-i386/softirq.h b/include/asm-i386/softirq.h index 6eb68524a82d..9964ba5bc664 100644 --- a/include/asm-i386/softirq.h +++ b/include/asm-i386/softirq.h @@ -9,133 +9,9 @@ extern unsigned int local_bh_count[NR_CPUS]; #define cpu_bh_disable(cpu) do { local_bh_count[(cpu)]++; barrier(); } while (0) #define cpu_bh_enable(cpu) do { barrier(); local_bh_count[(cpu)]--; } while (0) -#define cpu_bh_trylock(cpu) (local_bh_count[(cpu)] ? 0 : (local_bh_count[(cpu)] = 1)) -#define cpu_bh_endlock(cpu) (local_bh_count[(cpu)] = 0) - #define local_bh_disable() cpu_bh_disable(smp_processor_id()) #define local_bh_enable() cpu_bh_enable(smp_processor_id()) -#define get_active_bhs() (bh_mask & bh_active) -#define clear_active_bhs(x) atomic_clear_mask((x),&bh_active) - -extern spinlock_t i386_bh_lock; - -#ifdef __SMP__ - -/* - * The locking mechanism for base handlers, to prevent re-entrancy, - * is entirely private to an implementation, it should not be - * referenced at all outside of this file. 
- */ -extern atomic_t global_bh_lock; -extern atomic_t global_bh_count; - -extern void synchronize_bh(void); - -static inline void start_bh_atomic(void) -{ - atomic_inc(&global_bh_lock); - synchronize_bh(); -} - -static inline void end_bh_atomic(void) -{ - atomic_dec(&global_bh_lock); -} - -/* These are for the IRQs testing the lock */ -static inline int softirq_trylock(int cpu) -{ - if (cpu_bh_trylock(cpu)) { - if (!test_and_set_bit(0,&global_bh_count)) { - if (atomic_read(&global_bh_lock) == 0) - return 1; - clear_bit(0,&global_bh_count); - } - cpu_bh_endlock(cpu); - } - return 0; -} - -static inline void softirq_endlock(int cpu) -{ - cpu_bh_enable(cpu); - clear_bit(0,&global_bh_count); -} - -#else - -extern inline void start_bh_atomic(void) -{ - local_bh_disable(); - barrier(); -} - -extern inline void end_bh_atomic(void) -{ - barrier(); - local_bh_enable(); -} - -/* These are for the irq's testing the lock */ -#define softirq_trylock(cpu) (cpu_bh_trylock(cpu)) -#define softirq_endlock(cpu) (cpu_bh_endlock(cpu)) -#define synchronize_bh() barrier() - -#endif /* SMP */ - -extern inline void init_bh(int nr, void (*routine)(void)) -{ - unsigned long flags; - - bh_base[nr] = routine; - atomic_set(&bh_mask_count[nr], 0); - - spin_lock_irqsave(&i386_bh_lock, flags); - bh_mask |= 1 << nr; - spin_unlock_irqrestore(&i386_bh_lock, flags); -} - -extern inline void remove_bh(int nr) -{ - unsigned long flags; - - spin_lock_irqsave(&i386_bh_lock, flags); - bh_mask &= ~(1 << nr); - spin_unlock_irqrestore(&i386_bh_lock, flags); - - synchronize_bh(); - bh_base[nr] = NULL; -} - -extern inline void mark_bh(int nr) -{ - set_bit(nr, &bh_active); -} - -/* - * These use a mask count to correctly handle - * nested disable/enable calls - */ -extern inline void disable_bh(int nr) -{ - unsigned long flags; - - spin_lock_irqsave(&i386_bh_lock, flags); - bh_mask &= ~(1 << nr); - atomic_inc(&bh_mask_count[nr]); - spin_unlock_irqrestore(&i386_bh_lock, flags); - synchronize_bh(); -} - -extern 
inline void enable_bh(int nr) -{ - unsigned long flags; - - spin_lock_irqsave(&i386_bh_lock, flags); - if (atomic_dec_and_test(&bh_mask_count[nr])) - bh_mask |= 1 << nr; - spin_unlock_irqrestore(&i386_bh_lock, flags); -} +#define in_softirq() (local_bh_count[smp_processor_id()] != 0) #endif /* __ASM_SOFTIRQ_H */ diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h index 44a2e59e517e..4ce58066c806 100644 --- a/include/asm-i386/spinlock.h +++ b/include/asm-i386/spinlock.h @@ -44,6 +44,7 @@ typedef struct { */ #define spin_unlock_wait(x) do { barrier(); } while(((volatile spinlock_t *)(x))->lock) +#define spin_is_locked(x) ((x)->lock != 0) #define spin_lock_string \ "\n1:\t" \ diff --git a/include/asm-ia64/atomic.h b/include/asm-ia64/atomic.h index 183f3b26f9dd..105eea2c28aa 100644 --- a/include/asm-ia64/atomic.h +++ b/include/asm-ia64/atomic.h @@ -12,6 +12,7 @@ * Copyright (C) 1998, 1999 Hewlett-Packard Co * Copyright (C) 1998, 1999 David Mosberger-Tang */ +#include #include #include @@ -76,14 +77,14 @@ atomic_add_negative (int i, atomic_t *v) ((__builtin_constant_p(i) && \ ( (i == 1) || (i == 4) || (i == 8) || (i == 16) \ || (i == -1) || (i == -4) || (i == -8) || (i == -16))) \ - ? ia64_fetch_and_add(i, v) \ + ? ia64_fetch_and_add(i, &(v)->counter) \ : ia64_atomic_add(i, v)) #define atomic_sub_return(i,v) \ ((__builtin_constant_p(i) && \ ( (i == 1) || (i == 4) || (i == 8) || (i == 16) \ || (i == -1) || (i == -4) || (i == -8) || (i == -16))) \ - ? ia64_fetch_and_add(-i, v) \ + ? ia64_fetch_and_add(-(i), &(v)->counter) \ : ia64_atomic_sub(i, v)) #define atomic_dec_return(v) atomic_sub_return(1, (v)) diff --git a/include/asm-ia64/bitops.h b/include/asm-ia64/bitops.h index cd865920699b..c10d745e7dde 100644 --- a/include/asm-ia64/bitops.h +++ b/include/asm-ia64/bitops.h @@ -11,8 +11,11 @@ #include /* - * These operations need to be atomic. The address must be "long" - * aligned. + * These operations need to be atomic. 
The address must be (at least) + * 32-bit aligned. Note that there are driver (e.g., eepro100) which + * use these operations to operate on hw-defined data-structures, so + * we can't easily change these operations to force a bigger + * alignment. * * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). */ @@ -20,12 +23,12 @@ extern __inline__ void set_bit (int nr, volatile void *addr) { - __u64 bit, old, new; - volatile __u64 *m; + __u32 bit, old, new; + volatile __u32 *m; CMPXCHG_BUGCHECK_DECL - m = (volatile __u64 *) addr + (nr >> 6); - bit = 1UL << (nr & 63); + m = (volatile __u32 *) addr + (nr >> 5); + bit = 1 << (nr & 31); do { CMPXCHG_BUGCHECK(m); old = *m; @@ -36,12 +39,12 @@ set_bit (int nr, volatile void *addr) extern __inline__ void clear_bit (int nr, volatile void *addr) { - __u64 mask, old, new; - volatile __u64 *m; + __u32 mask, old, new; + volatile __u32 *m; CMPXCHG_BUGCHECK_DECL - m = (volatile __u64 *) addr + (nr >> 6); - mask = ~(1UL << (nr & 63)); + m = (volatile __u32 *) addr + (nr >> 5); + mask = ~(1 << (nr & 31)); do { CMPXCHG_BUGCHECK(m); old = *m; @@ -52,12 +55,12 @@ clear_bit (int nr, volatile void *addr) extern __inline__ void change_bit (int nr, volatile void *addr) { - __u64 bit, old, new; - volatile __u64 *m; + __u32 bit, old, new; + volatile __u32 *m; CMPXCHG_BUGCHECK_DECL - m = (volatile __u64 *) addr + (nr >> 6); - bit = (1UL << (nr & 63)); + m = (volatile __u32 *) addr + (nr >> 5); + bit = (1 << (nr & 31)); do { CMPXCHG_BUGCHECK(m); old = *m; @@ -68,12 +71,12 @@ change_bit (int nr, volatile void *addr) extern __inline__ int test_and_set_bit (int nr, volatile void *addr) { - __u64 bit, old, new; - volatile __u64 *m; + __u32 bit, old, new; + volatile __u32 *m; CMPXCHG_BUGCHECK_DECL - m = (volatile __u64 *) addr + (nr >> 6); - bit = 1UL << (nr & 63); + m = (volatile __u32 *) addr + (nr >> 5); + bit = 1 << (nr & 31); do { CMPXCHG_BUGCHECK(m); old = *m; @@ -85,12 +88,12 @@ test_and_set_bit (int nr, volatile void *addr) extern 
__inline__ int test_and_clear_bit (int nr, volatile void *addr) { - __u64 mask, old, new; - volatile __u64 *m; + __u32 mask, old, new; + volatile __u32 *m; CMPXCHG_BUGCHECK_DECL - m = (volatile __u64 *) addr + (nr >> 6); - mask = ~(1UL << (nr & 63)); + m = (volatile __u32 *) addr + (nr >> 5); + mask = ~(1 << (nr & 31)); do { CMPXCHG_BUGCHECK(m); old = *m; @@ -102,12 +105,12 @@ test_and_clear_bit (int nr, volatile void *addr) extern __inline__ int test_and_change_bit (int nr, volatile void *addr) { - __u64 bit, old, new; - volatile __u64 *m; + __u32 bit, old, new; + volatile __u32 *m; CMPXCHG_BUGCHECK_DECL - m = (volatile __u64 *) addr + (nr >> 6); - bit = (1UL << (nr & 63)); + m = (volatile __u32 *) addr + (nr >> 5); + bit = (1 << (nr & 31)); do { CMPXCHG_BUGCHECK(m); old = *m; @@ -119,7 +122,7 @@ test_and_change_bit (int nr, volatile void *addr) extern __inline__ int test_bit (int nr, volatile void *addr) { - return 1UL & (((const volatile __u64 *) addr)[nr >> 6] >> (nr & 63)); + return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31)); } /* diff --git a/include/asm-ia64/fpswa.h b/include/asm-ia64/fpswa.h index b5f7fc1b5618..a6facbd8cab3 100644 --- a/include/asm-ia64/fpswa.h +++ b/include/asm-ia64/fpswa.h @@ -1,5 +1,5 @@ -#ifndef _ASM_IA64_FPSWA_H_ -#define _ASM_IA64_FPSWA_H_ +#ifndef _ASM_IA64_FPSWA_H +#define _ASM_IA64_FPSWA_H /* * Floating-point Software Assist @@ -9,7 +9,9 @@ * Copyright (C) 1999 Goutham Rao */ +#if 0 #define FPSWA_BUG +#endif typedef struct { /* 4 * 128 bits */ @@ -70,4 +72,4 @@ typedef struct { efi_fpswa_t fpswa; } fpswa_interface_t; -#endif /* _ASM_IA64_FPSWA_H_ */ +#endif /* _ASM_IA64_FPSWA_H */ diff --git a/include/asm-ia64/irq.h b/include/asm-ia64/irq.h index 41469d73e356..137670219b50 100644 --- a/include/asm-ia64/irq.h +++ b/include/asm-ia64/irq.h @@ -35,9 +35,10 @@ #define IA64_MAX_VECTORED_IRQ 255 #define IA64_SPURIOUS_INT 0x0f +#define PERFMON_IRQ 0x28 /* performanc monitor interrupt vector */ #define TIMER_IRQ 0xef /* use 
highest-prio group 15 interrupt for timer */ #define IPI_IRQ 0xfe /* inter-processor interrupt vector */ -#define PERFMON_IRQ 0x28 /* performanc monitor interrupt vector */ +#define CMC_IRQ 0xff /* correctable machine-check interrupt vector */ #define IA64_MIN_VECTORED_IRQ 16 #define IA64_MAX_VECTORED_IRQ 255 @@ -103,6 +104,7 @@ irq_cannonicalize (int irq) extern int invoke_irq_handlers (unsigned int irq, struct pt_regs *regs, struct irqaction *action); extern void disable_irq (unsigned int); +extern void disable_irq_nosync (unsigned int); extern void enable_irq (unsigned int); extern void ipi_send (int cpu, int vector, int delivery_mode); diff --git a/include/asm-ia64/signal.h b/include/asm-ia64/signal.h index 6951aac61cca..f6a01d64034d 100644 --- a/include/asm-ia64/signal.h +++ b/include/asm-ia64/signal.h @@ -4,6 +4,9 @@ /* * Copyright (C) 1998-2000 Hewlett-Packard Co * Copyright (C) 1998-2000 David Mosberger-Tang + * + * Unfortunately, this file is being included by bits/signal.h in + * glibc-2.x. Hence the #ifdef __KERNEL__ ugliness. */ #define SIGHUP 1 @@ -86,6 +89,8 @@ #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 +#ifdef __KERNEL__ + #define _NSIG 64 #define _NSIG_BPW 64 #define _NSIG_WORDS (_NSIG / _NSIG_BPW) @@ -102,6 +107,8 @@ #define SA_SHIRQ 0x04000000 #define SA_LEGACY 0x02000000 /* installed via a legacy irq? */ +#endif /* __KERNEL__ */ + #define SIG_BLOCK 0 /* for blocking signals */ #define SIG_UNBLOCK 1 /* for unblocking signals */ #define SIG_SETMASK 2 /* for setting the signal mask */ @@ -117,6 +124,17 @@ /* Avoid too many header ordering problems. */ struct siginfo; +/* Type of a signal handler. */ +typedef void (*__sighandler_t)(int); + +typedef struct sigaltstack { + void *ss_sp; + int ss_flags; + size_t ss_size; +} stack_t; + +#ifdef __KERNEL__ + /* Most things should be clean enough to redefine this at will, if care is taken to make libc match. 
*/ @@ -126,9 +144,6 @@ typedef struct { unsigned long sig[_NSIG_WORDS]; } sigset_t; -/* Type of a signal handler. */ -typedef void (*__sighandler_t)(int); - struct sigaction { __sighandler_t sa_handler; unsigned long sa_flags; @@ -139,14 +154,9 @@ struct k_sigaction { struct sigaction sa; }; -typedef struct sigaltstack { - void *ss_sp; - int ss_flags; - size_t ss_size; -} stack_t; - - /* sigcontext.h needs stack_t... */ # include +#endif /* __KERNEL__ */ + # endif /* !__ASSEMBLY__ */ #endif /* _ASM_IA64_SIGNAL_H */ diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h index b4322e607663..53dc2d2e54bf 100644 --- a/include/asm-ia64/system.h +++ b/include/asm-ia64/system.h @@ -263,9 +263,7 @@ extern unsigned long __bad_increment_for_ia64_fetch_and_add (void); _tmp = __bad_increment_for_ia64_fetch_and_add(); \ break; \ } \ - if (sizeof(*(v)) == 4) \ - _tmp = (int) _tmp; \ - _tmp + (i); /* return new value */ \ + (__typeof__(*v)) (_tmp + (i)); /* return new value */ \ }) /* diff --git a/include/asm-ppc/bitops.h b/include/asm-ppc/bitops.h index 95b59cafc5df..c981d5fa01b1 100644 --- a/include/asm-ppc/bitops.h +++ b/include/asm-ppc/bitops.h @@ -1,5 +1,5 @@ /* - * $Id: bitops.h,v 1.11 1999/01/03 20:16:48 cort Exp $ + * $Id: bitops.h,v 1.12 2000/02/09 03:28:31 davem Exp $ * bitops.h: Bit string operations on the ppc */ @@ -88,11 +88,11 @@ extern __inline__ unsigned long change_bit(unsigned long nr, void *addr) } #endif -extern __inline__ unsigned long test_bit(int nr, __const__ volatile void *addr) +extern __inline__ int test_bit(int nr, __const__ volatile void *addr) { __const__ unsigned int *p = (__const__ unsigned int *) addr; - return (p[nr >> 5] >> (nr & 0x1f)) & 1UL; + return ((p[nr >> 5] >> (nr & 0x1f)) & 1) != 0; } extern __inline__ int ffz(unsigned int x) diff --git a/include/asm-ppc/bootinfo.h b/include/asm-ppc/bootinfo.h index cc32d941ff8e..af614d798015 100644 --- a/include/asm-ppc/bootinfo.h +++ b/include/asm-ppc/bootinfo.h @@ -9,6 +9,12 @@ #ifndef 
_PPC_BOOTINFO_H #define _PPC_BOOTINFO_H +#include + +#if defined(CONFIG_APUS) && !defined(__BOOTER__) +#include +#else + struct bi_record { unsigned long tag; /* tag ID */ unsigned long size; /* size of record (in bytes) */ @@ -23,5 +29,7 @@ struct bi_record { #define BI_SYSMAP 0x1015 #define BI_MACHTYPE 0x1016 +#endif /* CONFIG_APUS */ + #endif /* _PPC_BOOTINFO_H */ diff --git a/include/asm-ppc/feature.h b/include/asm-ppc/feature.h index 07b10e8bc66c..318150dab09b 100644 --- a/include/asm-ppc/feature.h +++ b/include/asm-ppc/feature.h @@ -8,12 +8,13 @@ * for more details. * * Copyright (C) 1998 Paul Mackerras. + * */ #ifndef __ASM_PPC_FEATURE_H #define __ASM_PPC_FEATURE_H /* - * The FCR bits for particular features vary somewhat between + * The FCR selector for particular features vary somewhat between * different machines. So we abstract a list of features here * and let the feature_* routines map them to the actual bits. */ @@ -25,19 +26,23 @@ enum system_feature { FEATURE_Serial_IO_B, FEATURE_SWIM3_enable, FEATURE_MESH_enable, - FEATURE_IDE_enable, - FEATURE_VIA_enable, - FEATURE_CD_power, + FEATURE_IDE0_enable, /* Internal IDE */ + FEATURE_IDE0_reset, /* Internal IDE */ + FEATURE_IOBUS_enable, /* Internal IDE */ FEATURE_Mediabay_reset, - FEATURE_Mediabay_enable, + FEATURE_Mediabay_power, FEATURE_Mediabay_PCI_enable, - FEATURE_Mediabay_IDE_enable, + FEATURE_Mediabay_IDE_enable, /* Also IDE 1 */ + FEATURE_Mediabay_IDE_reset, /* Also IDE 1 */ FEATURE_Mediabay_floppy_enable, FEATURE_BMac_reset, FEATURE_BMac_IO_enable, - FEATURE_Modem_Reset, - FEATURE_IDE_DiskPower, - FEATURE_IDE_Reset, + FEATURE_Modem_power, + FEATURE_Slow_SCC_PCLK, + FEATURE_Sound_power, + FEATURE_Sound_CLK_enable, + FEATURE_IDE2_enable, + FEATURE_IDE2_reset, FEATURE_last, }; diff --git a/include/asm-ppc/heathrow.h b/include/asm-ppc/heathrow.h new file mode 100644 index 000000000000..4081e123735e --- /dev/null +++ b/include/asm-ppc/heathrow.h @@ -0,0 +1,45 @@ +/* + * heathrow.h: definitions for 
using the "Heathrow" I/O controller chip. + * + * Grabbed from Open Firmware definitions on a PowerBook G3 Series + * + * Copyright (C) 1997 Paul Mackerras. + */ + +/* offset from ohare base for feature control register */ +#define HEATHROW_FEATURE_REG 0x38 + +/* + * Bits in feature control register. + * Bits postfixed with a _N are in inverse logic + */ +#define HRW_RESET_SCC 1 /* Named in_use_led in OF ??? */ +#define HRW_BAY_POWER_N 2 +#define HRW_BAY_PCI_ENABLE 4 +#define HRW_BAY_IDE_ENABLE 8 +#define HRW_BAY_FLOPPY_ENABLE 0x10 +#define HRW_IDE0_ENABLE 0x20 +#define HRW_IDE0_RESET_N 0x40 +#define HRW_BAY_RESET_N 0x80 +#define HRW_IOBUS_ENABLE 0x100 /* Internal IDE ? */ +#define HRW_SCC_ENABLE 0x200 +#define HRW_MESH_ENABLE 0x400 +#define HRW_SWIM_ENABLE 0x800 +#define HRW_SOUND_POWER_N 0x1000 +#define HRW_SOUND_CLK_ENABLE 0x2000 +#define HRW_SCCA_IO 0x4000 +#define HRW_SCCB_IO 0x8000 +#define HRW_PORT_OR_DESK_VIA_N 0x10000 /* This one is 0 on PowerBook */ +#define HRW_PWM_MON_ID_N 0x20000 /* ??? (0) */ +#define HRW_HOOK_MB_CNT_N 0x40000 /* ??? (0) */ +#define HRW_SWIM_CLONE_FLOPPY 0x80000 /* ??? (0) */ +#define HRW_AUD_RUN22 0x100000 /* ??? (1) */ +#define HRW_SCSI_LINK_MODE 0x200000 /* Read ??? (1) */ +#define HRW_ARB_BYPASS 0x400000 /* ??? (0 on main, 1 on gatwick) */ +#define HRW_IDE1_RESET_N 0x800000 /* Media bay */ +#define HRW_SLOW_SCC_PCLK 0x1000000 /* ??? (0) */ +#define HRW_MODEM_POWER_N 0x2000000 /* Used by internal modem on wallstreet */ +#define HRW_MFDC_CELL_ENABLE 0x4000000 /* ??? (0) */ +#define HRW_USE_MFDC 0x8000000 /* ??? 
(0) */ +#define HRW_BMAC_IO_ENABLE 0x60000000 /* two bits, not documented in OF */ +#define HRW_BMAC_RESET 0x80000000 /* not documented in OF */ diff --git a/include/asm-ppc/irq.h b/include/asm-ppc/irq.h index 11a272bba023..00922988207f 100644 --- a/include/asm-ppc/irq.h +++ b/include/asm-ppc/irq.h @@ -129,10 +129,9 @@ static __inline__ int irq_cannonicalize(int irq) /* * this is the # irq's for all ppc arch's (pmac/chrp/prep) - * so it is the max of them all - which happens to be powermac - * at present (G3 powermacs have 64). + * so it is the max of them all */ -#define NR_IRQS 128 +#define NR_IRQS 256 #endif /* CONFIG_APUS */ diff --git a/include/asm-ppc/machdep.h b/include/asm-ppc/machdep.h index eaf91b288a34..5e7b1b578620 100644 --- a/include/asm-ppc/machdep.h +++ b/include/asm-ppc/machdep.h @@ -21,7 +21,7 @@ struct machdep_calls { unsigned int (*irq_cannonicalize)(unsigned int irq); void (*init_IRQ)(void); int (*get_irq)(struct pt_regs *); - void (*post_irq)( int ); + void (*post_irq)( struct pt_regs *, int ); /* A general init function, called by ppc_init in init/main.c. May be NULL. */ diff --git a/include/asm-ppc/mediabay.h b/include/asm-ppc/mediabay.h index 04792d15ed50..a634d7f20f99 100644 --- a/include/asm-ppc/mediabay.h +++ b/include/asm-ppc/mediabay.h @@ -8,6 +8,7 @@ #define _PPC_MEDIABAY_H #define MB_FD 0 /* media bay contains floppy drive */ +#define MB_FD1 1 /* media bay contains floppy drive */ #define MB_CD 3 /* media bay contains ATA drive such as CD */ #define MB_NO 7 /* media bay contains nothing */ diff --git a/include/asm-ppc/mmu.h b/include/asm-ppc/mmu.h index 81dadd22a4e7..55f185d918f9 100644 --- a/include/asm-ppc/mmu.h +++ b/include/asm-ppc/mmu.h @@ -310,4 +310,61 @@ extern void _tlbia(void); /* invalidate all TLB entries */ * a processor working register during a tablewalk. */ #define M_TW 799 + +/* + * At present, all PowerPC 400-class processors share a similar TLB + * architecture. 
The instruction and data sides share a unified, + * 64-entry, fully-associative TLB which is maintained totally under + * software control. In addition, the instruction side has a + * hardware-managed, 4-entry, fully- associative TLB which serves as a + * first level to the shared TLB. These two TLBs are known as the UTLB + * and ITLB, respectively. + */ + +#define PPC4XX_TLB_SIZE 64 + +/* + * TLB entries are defined by a "high" tag portion and a "low" data + * portion. On all architectures, the data portion is 32-bits. + * + * TLB entries are managed entirely under software control by reading, + * writing, and searchoing using the 4xx-specific tlbre, tlbwr, and tlbsx + * instructions. + */ + +#define TLB_LO 1 +#define TLB_HI 0 + +#define TLB_DATA TLB_LO +#define TLB_TAG TLB_HI + +/* Tag portion */ + +#define TLB_EPN_MASK 0xFFFFFC00 /* Effective Page Number */ +#define TLB_PAGESZ_MASK 0x00000380 +#define TLB_PAGESZ(x) (((x) & 0x7) << 7) +#define PAGESZ_1K 0 +#define PAGESZ_4K 1 +#define PAGESZ_16K 2 +#define PAGESZ_64K 3 +#define PAGESZ_256K 4 +#define PAGESZ_1M 5 +#define PAGESZ_4M 6 +#define PAGESZ_16M 7 +#define TLB_VALID 0x00000040 /* Entry is valid */ + +/* Data portion */ + +#define TLB_RPN_MASK 0xFFFFFC00 /* Real Page Number */ +#define TLB_PERM_MASK 0x00000300 +#define TLB_EX 0x00000200 /* Instruction execution allowed */ +#define TLB_WR 0x00000100 /* Writes permitted */ +#define TLB_ZSEL_MASK 0x000000F0 +#define TLB_ZSEL(x) (((x) & 0xF) << 4) +#define TLB_ATTR_MASK 0x0000000F +#define TLB_W 0x00000008 /* Caching is write-through */ +#define TLB_I 0x00000004 /* Caching is inhibited */ +#define TLB_M 0x00000002 /* Memory is coherent */ +#define TLB_G 0x00000001 /* Memory is guarded from prefetch */ + #endif /* _PPC_MMU_H_ */ diff --git a/include/asm-ppc/ohare.h b/include/asm-ppc/ohare.h index ffc4ef10bf50..1303e5869be1 100644 --- a/include/asm-ppc/ohare.h +++ b/include/asm-ppc/ohare.h @@ -2,6 +2,9 @@ * ohare.h: definitions for using the "O'Hare" I/O 
controller chip. * * Copyright (C) 1997 Paul Mackerras. + * + * BenH: Changed to match those of heathrow (but not all of them). Please + * check if I didn't break anything (especially the media bay). */ /* offset from ohare base for feature control register */ @@ -13,21 +16,21 @@ * and may differ for other machines. */ #define OH_SCC_RESET 1 -#define OH_BAY_RESET 2 /* a guess */ +#define OH_BAY_POWER_N 2 /* a guess */ #define OH_BAY_PCI_ENABLE 4 /* a guess */ #define OH_BAY_IDE_ENABLE 8 #define OH_BAY_FLOPPY_ENABLE 0x10 -#define OH_IDE_ENABLE 0x20 -#define OH_IDE_POWER 0x40 /* a guess */ -#define OH_BAY_ENABLE 0x80 -#define OH_IDE_RESET 0x100 /* 0-based, a guess */ +#define OH_IDE0_ENABLE 0x20 +#define OH_IDE0_RESET_N 0x40 /* a guess */ +#define OH_BAY_RESET_N 0x80 +#define OH_IOBUS_ENABLE 0x100 /* IOBUS seems to be IDE */ #define OH_SCC_ENABLE 0x200 #define OH_MESH_ENABLE 0x400 #define OH_FLOPPY_ENABLE 0x800 #define OH_SCCA_IO 0x4000 #define OH_SCCB_IO 0x8000 -#define OH_VIA_ENABLE 0x10000 -#define OH_IDECD_POWER 0x800000 +#define OH_VIA_ENABLE 0x10000 /* Is apparently wrong, to be verified */ +#define OH_IDE1_RESET_N 0x800000 /* * Bits to set in the feature control register on PowerBooks. 
diff --git a/include/asm-ppc/pci.h b/include/asm-ppc/pci.h index 698ecc51864b..92347e40673a 100644 --- a/include/asm-ppc/pci.h +++ b/include/asm-ppc/pci.h @@ -10,4 +10,56 @@ #define PCIBIOS_MIN_IO 0x1000 #define PCIBIOS_MIN_MEM 0x10000000 +/* Dynamic DMA Mapping stuff + * ++ajoshi + */ + +#include +#include +#include +#include +#include + +struct pci_dev; + +extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, + dma_addr_t *dma_handle); +extern void pci_free_consistent(struct pci_dev *hwdev, size_t size, + void *vaddr, dma_addr_t dma_handle); +extern inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, + size_t size) +{ + return virt_to_bus(ptr); +} +extern inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, + size_t size) +{ + /* nothing to do */ +} +extern inline int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, + int nents) +{ + return nents; +} +extern inline void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, + int nents) +{ + /* nothing to do */ +} +extern inline void pci_dma_sync_single(struct pci_dev *hwdev, + dma_addr_t dma_handle, + size_t size) +{ + /* nothing to do */ +} +extern inline void pci_dma_syng_sg(struct pci_dev *hwdev, + struct scatterlist *sg, + int nelems) +{ + /* nothing to do */ +} + +#define sg_dma_address(sg) (virt_to_bus((sg)->address)) +#define sg_dma_len(sg) ((sg)->length) + #endif /* __PPC_PCI_H */ diff --git a/include/asm-ppc/pgtable.h b/include/asm-ppc/pgtable.h index 1bc86d7532f6..f1f0f578a6bc 100644 --- a/include/asm-ppc/pgtable.h +++ b/include/asm-ppc/pgtable.h @@ -15,7 +15,7 @@ extern void local_flush_tlb_all(void); extern void local_flush_tlb_mm(struct mm_struct *mm); extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); extern void local_flush_tlb_range(struct mm_struct *mm, unsigned long start, - unsigned long end); + unsigned long end); extern inline void flush_hash_page(unsigned context, unsigned long va) { } #elif 
defined(CONFIG_8xx) @@ -108,6 +108,16 @@ extern unsigned long ioremap_bot, ioremap_base; * copied to the MD_TWC before it gets loaded. */ +/* + * At present, all PowerPC 400-class processors share a similar TLB + * architecture. The instruction and data sides share a unified, + * 64-entry, fully-associative TLB which is maintained totally under + * software control. In addition, the instruction side has a + * hardware-managed, 4-entry, fully-associative TLB which serves as a + * first level to the shared TLB. These two TLBs are known as the UTLB + * and ITLB, respectively (see "mmu.h" for definitions). + */ + /* PMD_SHIFT determines the size of the area mapped by the second-level page tables */ #define PMD_SHIFT 22 #define PMD_SIZE (1UL << PMD_SHIFT) @@ -165,68 +175,19 @@ extern unsigned long ioremap_bot, ioremap_base; */ #if defined(CONFIG_4xx) -/* - * At present, all PowerPC 400-class processors share a similar TLB - * architecture. The instruction and data sides share a unified, 64-entry, - * fully-associative TLB which is maintained under software control. In - * addition, the instruction side has a hardware-managed, 4-entry, fully- - * associative TLB which serves as a first level to the shared TLB. These - * two TLBs are known as the UTLB and ITLB, respectively. - */ - -#define PPC4XX_TLB_SIZE 64 - -/* - * TLB entries are defined by a "high" tag portion and a "low" data portion. - * On all architectures, the data portion is 32-bits. 
- */ - -#define TLB_LO 1 -#define TLB_HI 0 - -#define TLB_DATA TLB_LO -#define TLB_TAG TLB_HI - -/* Tag portion */ - -#define TLB_EPN_MASK 0xFFFFFC00 /* Effective Page Number */ -#define TLB_PAGESZ_MASK 0x00000380 -#define TLB_PAGESZ(x) (((x) & 0x7) << 7) -#define PAGESZ_1K 0 -#define PAGESZ_4K 1 -#define PAGESZ_16K 2 -#define PAGESZ_64K 3 -#define PAGESZ_256K 4 -#define PAGESZ_1M 5 -#define PAGESZ_4M 6 -#define PAGESZ_16M 7 -#define TLB_VALID 0x00000040 /* Entry is valid */ - -/* Data portion */ - -#define TLB_RPN_MASK 0xFFFFFC00 /* Real Page Number */ -#define TLB_PERM_MASK 0x00000300 -#define TLB_EX 0x00000200 /* Instruction execution allowed */ -#define TLB_WR 0x00000100 /* Writes permitted */ -#define TLB_ZSEL_MASK 0x000000F0 -#define TLB_ZSEL(x) (((x) & 0xF) << 4) -#define TLB_ATTR_MASK 0x0000000F -#define TLB_W 0x00000008 /* Caching is write-through */ -#define TLB_I 0x00000004 /* Caching is inhibited */ -#define TLB_M 0x00000002 /* Memory is coherent */ -#define TLB_G 0x00000001 /* Memory is guarded from prefetch */ - -#define _PAGE_PRESENT 0x001 /* software: pte contains a translation */ -#define _PAGE_USER 0x002 /* matches one of the PP bits */ -#define _PAGE_RW 0x004 /* software: user write access allowed */ -#define _PAGE_GUARDED 0x008 -#define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */ -#define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */ -#define _PAGE_WRITETHRU 0x040 /* W: cache write-through */ -#define _PAGE_DIRTY 0x080 /* C: page changed */ -#define _PAGE_ACCESSED 0x100 /* R: page referenced */ -#define _PAGE_HWWRITE 0x200 /* software: _PAGE_RW & _PAGE_DIRTY */ +/* Definitions for 4xx embedded chips. 
*/ +#define _PAGE_GUARDED 0x001 /* G: page is guarded from prefetch */ +#define _PAGE_COHERENT 0x002 /* M: enforece memory coherence */ +#define _PAGE_NO_CACHE 0x004 /* I: caching is inhibited */ +#define _PAGE_WRITETHRU 0x008 /* W: caching is write-through */ +#define _PAGE_USER 0x010 /* matches one of the zone permission bits */ +#define _PAGE_PRESENT 0x040 /* software: PTE contains a translation */ +#define _PAGE_DIRTY 0x100 /* C: page changed */ +#define _PAGE_RW 0x200 /* Writes permitted */ +#define _PAGE_ACCESSED 0x400 /* R: page referenced */ +#define _PAGE_HWWRITE 0x800 /* software: _PAGE_RW & _PAGE_DIRTY */ #define _PAGE_SHARED 0 + #elif defined(CONFIG_8xx) /* Definitions for 8xx embedded chips. */ #define _PAGE_PRESENT 0x0001 /* Page is valid */ @@ -248,7 +209,8 @@ extern unsigned long ioremap_bot, ioremap_base; * protection. */ #define _PAGE_HWWRITE _PAGE_DIRTY -#else + +#else /* CONFIG_6xx */ /* Definitions for 60x, 740/750, etc. */ #define _PAGE_PRESENT 0x001 /* software: pte contains a translation */ #define _PAGE_USER 0x002 /* matches one of the PP bits */ diff --git a/include/asm-ppc/processor.h b/include/asm-ppc/processor.h index 9d32dd011260..20e337f34ec8 100644 --- a/include/asm-ppc/processor.h +++ b/include/asm-ppc/processor.h @@ -174,7 +174,11 @@ #define HID0_DLOCK (1<<12) /* Data Cache Lock */ #define HID0_ICFI (1<<11) /* Instr. Cache Flash Invalidate */ #define HID0_DCI (1<<10) /* Data Cache Invalidate */ +#define HID0_SPD (1<<9) /* Speculative disable */ +#define HID0_SGE (1<<7) /* Store Gathering Enable */ #define HID0_SIED (1<<7) /* Serial Instr. 
Execution [Disable] */ +#define HID0_BTIC (1<<5) /* Branch Target Instruction Cache Enable */ +#define HID0_ABE (1<<3) /* Address Broadcast Enable */ #define HID0_BHTE (1<<2) /* Branch History Table Enable */ #define HID0_BTCD (1<<1) /* Branch target cache disable */ #define SPRN_HID1 0x3F1 /* Hardware Implementation Register 1 */ @@ -339,6 +343,7 @@ #define TBRU SPRN_TBRU /* Time Base Read Upper Register */ #define TBWL SPRN_TBWL /* Time Base Write Lower Register */ #define TBWU SPRN_TBWU /* Time Base Write Upper Register */ +#define ICTC 1019 #define THRM1 SPRN_THRM1 /* Thermal Management Register 1 */ #define THRM2 SPRN_THRM2 /* Thermal Management Register 2 */ #define THRM3 SPRN_THRM3 /* Thermal Management Register 3 */ diff --git a/include/asm-ppc/prom.h b/include/asm-ppc/prom.h index 2acd7b45a4b7..40deb121b0fe 100644 --- a/include/asm-ppc/prom.h +++ b/include/asm-ppc/prom.h @@ -63,7 +63,7 @@ typedef void (*prom_entry)(struct prom_args *); /* Prototypes */ extern void abort(void); -extern void prom_init(int, int, prom_entry); +extern unsigned long prom_init(int, int, prom_entry); extern void prom_print(const char *msg); extern void relocate_nodes(void); extern void finish_device_tree(void); @@ -72,7 +72,10 @@ extern struct device_node *find_type_devices(const char *type); extern struct device_node *find_path_device(const char *path); extern struct device_node *find_compatible_devices(const char *type, const char *compat); +extern struct device_node *find_pci_device_OFnode(unsigned char bus, + unsigned char dev_fn); extern struct device_node *find_phandle(phandle); +extern struct device_node *find_all_nodes(void); extern int device_is_compatible(struct device_node *device, const char *); extern int machine_is_compatible(const char *compat); extern unsigned char *get_property(struct device_node *node, const char *name, diff --git a/include/asm-ppc/semaphore.h b/include/asm-ppc/semaphore.h index b73bd59613ec..461ac03605cb 100644 --- a/include/asm-ppc/semaphore.h 
+++ b/include/asm-ppc/semaphore.h @@ -4,6 +4,9 @@ /* * Swiped from asm-sparc/semaphore.h and modified * -- Cort (cort@cs.nmt.edu) + * + * Stole some rw spinlock-based semaphore stuff from asm-alpha/semaphore.h + * -- Ani Joshi (ajoshi@unixbox.com) */ #ifdef __KERNEL__ @@ -102,6 +105,99 @@ extern inline void up(struct semaphore * sem) __up(sem); } + +/* RW spinlock-based semaphores */ + +struct rw_semaphore +{ + spinlock_t lock; + int rd, wr; + wait_queue_head_t wait; +#if WAITQUEUE_DEBUG + long __magic; +#endif +}; + +#define __RWSEM_INITIALIZER(name, rd, wr) \ +{ \ + SPIN_LOCK_UNLOCKED, \ + (rd), (wr), \ + __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ + __SEM_DEBUG_INIT(name) \ +} + +#define __DECLARE_RWSEM_GENERIC(name, rd, wr) \ + struct rw_semaphore name = __RWSEM_INITIALIZER(name, rd, wr) + +#define DECLARE_RWSEM(name) __DECLARE_RWSEM_GENERIC(name, 0, 0) +#define DECLARE_RWSEM_READ_LOCKED(name) __DECLARE_RWSEM_GENERIC(name, 1, 0) +#define DECLAER_RWSEM_WRITE_LOCKED(name) __DECLARE_RWSEM_GENERIC(name, 0, 1) + +extern inline void init_rwsem(struct rw_semaphore *sem) +{ + spin_lock_init(&sem->lock); + sem->rd = sem->wr = 0; + init_waitqueue_head(&sem->wait); +#if WAITQUEUE_DEBUG + sem->__magic = (long)&sem->__magic; +#endif +} + +#ifndef CHECK_MAGIC +#define CHECK_MAGIC(x) +#endif + +extern void down_read_failed(struct rw_semaphore *); +extern void down_write_failed(struct rw_semaphore *); + +extern inline void down_read(struct rw_semaphore *sem) +{ + CHECK_MAGIC(sem->__magic); + + spin_lock_irq(&sem->lock); + if (sem->wr) + down_read_failed(sem); + sem->rd++; + spin_unlock_irq(&sem->lock); +} + +extern inline void down_write(struct rw_semaphore *sem) +{ + CHECK_MAGIC(sem->__magic); + + spin_lock(&sem->lock); + if(sem->rd || sem->wr) + down_write_failed(sem); + sem->wr = 1; + spin_unlock(&sem->lock); +} + +#define up_read(sem) \ + do { \ + unsigned long flags; \ + \ + CHECK_MAGIC((sem)->__magic); \ + \ + spin_lock_irqsave(&(sem)->lock, flags); \ + if (!--(sem)->rd 
&& waitqueue_active(&(sem)->wait)) \ + wake_up(&(sem)->wait); \ + spin_unlock_irqrestore(&(sem)->lock, flags); \ + } while (0) + +#define up_write(sem) \ + do { \ + unsigned long flags; \ + \ + CHECK_MAGIC((sem)->__magic); \ + \ + spin_lock_irqsave(&(sem)->lock, flags); \ + (sem)->wr = 0; \ + if (waitqueue_active(&(sem)->wait)) \ + wake_up(&(sem)->wait); \ + spin_unlock_irqrestore(&(sem)->lock, flags); \ + } while (0) + + #endif /* __KERNEL__ */ #endif /* !(_PPC_SEMAPHORE_H) */ diff --git a/include/asm-ppc/types.h b/include/asm-ppc/types.h index 86fa349d375f..4c5e9766e6ef 100644 --- a/include/asm-ppc/types.h +++ b/include/asm-ppc/types.h @@ -41,6 +41,10 @@ typedef unsigned long long u64; #define BITS_PER_LONG 32 +/* DMA addresses are 32-bits wide */ + +typedef u32 dma_addr_t; + #endif /* __KERNEL__ */ #endif diff --git a/include/asm-ppc/vga.h b/include/asm-ppc/vga.h index 85c9ec8a9483..e6a7f9a53ee1 100644 --- a/include/asm-ppc/vga.h +++ b/include/asm-ppc/vga.h @@ -8,43 +8,33 @@ #define _LINUX_ASM_VGA_H_ #include -#include #include -#include + +#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_MDA_CONSOLE) #define VT_BUF_HAVE_RW +/* + * These are only needed for supporting VGA or MDA text mode, which use little + * endian byte ordering. + * In other cases, we can optimize by using native byte ordering and + * has already done the right job for us. + */ extern inline void scr_writew(u16 val, u16 *addr) { - /* If using vgacon (not fbcon) byteswap the writes. - * If non-vgacon assume fbcon and don't byteswap - * just like include/linux/vt_buffer.h. - * XXX: this is a performance loss so get rid of it - * as soon as fbcon works on prep. 
- * -- Cort - */ -#ifdef CONFIG_FB - if ( conswitchp != &vga_con ) - (*(addr) = (val)); - else -#endif /* CONFIG_FB */ - st_le16(addr, val); + writew(val, (unsigned long)addr); } extern inline u16 scr_readw(const u16 *addr) { -#ifdef CONFIG_FB - if ( conswitchp != &vga_con ) - return (*(addr)); - else -#endif /* CONFIG_FB */ - return ld_le16((unsigned short *)addr); + return readw((unsigned long)addr); } -#define VT_BUF_HAVE_MEMCPYF -#define scr_memcpyw_from memcpy -#define scr_memcpyw_to memcpy +#define VT_BUF_HAVE_MEMCPYW +#define scr_memcpyw memcpy + +#endif /* !CONFIG_VGA_CONSOLE && !CONFIG_MDA_CONSOLE */ extern unsigned long vgacon_remap_base; #define VGA_MAP_MEM(x) (x + vgacon_remap_base) diff --git a/include/asm-sparc/bitops.h b/include/asm-sparc/bitops.h index fbaa0f005d86..1139f58f7d6a 100644 --- a/include/asm-sparc/bitops.h +++ b/include/asm-sparc/bitops.h @@ -1,4 +1,4 @@ -/* $Id: bitops.h,v 1.54 1998/09/21 05:07:34 jj Exp $ +/* $Id: bitops.h,v 1.55 2000/02/09 03:28:32 davem Exp $ * bitops.h: Bit string operations on the Sparc. * * Copyright 1995 David S. Miller (davem@caip.rutgers.edu) @@ -94,7 +94,7 @@ extern __inline__ void change_bit(unsigned long nr, void *addr) * all bit-ops return 0 if bit was previously clear and != 0 otherwise. 
*/ -extern __inline__ unsigned long test_and_set_bit(unsigned long nr, __SMPVOL void *addr) +extern __inline__ int test_and_set_bit(unsigned long nr, __SMPVOL void *addr) { register unsigned long mask asm("g2"); register unsigned long *ADDR asm("g1"); @@ -116,7 +116,7 @@ extern __inline__ void set_bit(unsigned long nr, __SMPVOL void *addr) (void) test_and_set_bit(nr, addr); } -extern __inline__ unsigned long test_and_clear_bit(unsigned long nr, __SMPVOL void *addr) +extern __inline__ int test_and_clear_bit(unsigned long nr, __SMPVOL void *addr) { register unsigned long mask asm("g2"); register unsigned long *ADDR asm("g1"); @@ -139,7 +139,7 @@ extern __inline__ void clear_bit(unsigned long nr, __SMPVOL void *addr) (void) test_and_clear_bit(nr, addr); } -extern __inline__ unsigned long test_and_change_bit(unsigned long nr, __SMPVOL void *addr) +extern __inline__ int test_and_change_bit(unsigned long nr, __SMPVOL void *addr) { register unsigned long mask asm("g2"); register unsigned long *ADDR asm("g1"); @@ -165,9 +165,9 @@ extern __inline__ void change_bit(unsigned long nr, __SMPVOL void *addr) #endif /* __KERNEL__ */ /* The following routine need not be atomic. */ -extern __inline__ unsigned long test_bit(int nr, __const__ __SMPVOL void *addr) +extern __inline__ int test_bit(int nr, __const__ __SMPVOL void *addr) { - return 1UL & (((__const__ unsigned int *) addr)[nr >> 5] >> (nr & 31)); + return (1 & (((__const__ unsigned int *) addr)[nr >> 5] >> (nr & 31))) != 0; } /* The easy/cheese version for now. 
*/ diff --git a/include/asm-sparc/hardirq.h b/include/asm-sparc/hardirq.h index ed47c7760d2b..56fe88bba327 100644 --- a/include/asm-sparc/hardirq.h +++ b/include/asm-sparc/hardirq.h @@ -26,6 +26,8 @@ extern unsigned int local_irq_count; #define synchronize_irq() barrier() +#define in_irq() (local_irq_count != 0) + #else #include @@ -45,6 +47,9 @@ extern atomic_t global_irq_count; #define in_interrupt() ({ int __cpu = smp_processor_id(); \ (local_irq_count[__cpu] + local_bh_count[__cpu] != 0); }) +#define in_irq() ({ int __cpu = smp_processor_id(); \ + (local_irq_count[__cpu] != 0); }) + static inline void release_irqlock(int cpu) { /* if we didn't own the irq lock, just ignore.. */ diff --git a/include/asm-sparc/softirq.h b/include/asm-sparc/softirq.h index c82a080adb72..d61b56554913 100644 --- a/include/asm-sparc/softirq.h +++ b/include/asm-sparc/softirq.h @@ -14,170 +14,21 @@ #include -#define get_active_bhs() (bh_mask & bh_active) - #ifdef __SMP__ extern unsigned int local_bh_count[NR_CPUS]; -/* - * The locking mechanism for base handlers, to prevent re-entrancy, - * is entirely private to an implementation, it should not be - * referenced at all outside of this file. 
- */ -extern atomic_t global_bh_lock; -extern spinlock_t global_bh_count; -extern spinlock_t sparc_bh_lock; - -extern void synchronize_bh(void); - -static inline void clear_active_bhs(unsigned int mask) -{ - unsigned long flags; - spin_lock_irqsave(&sparc_bh_lock, flags); - bh_active &= ~(mask); - spin_unlock_irqrestore(&sparc_bh_lock, flags); -} - -extern inline void init_bh(int nr, void (*routine)(void)) -{ - unsigned long flags; - spin_lock_irqsave(&sparc_bh_lock, flags); - bh_base[nr] = routine; - atomic_set(&bh_mask_count[nr], 0); - bh_mask |= 1 << nr; - spin_unlock_irqrestore(&sparc_bh_lock, flags); -} - -extern inline void remove_bh(int nr) -{ - unsigned long flags; - spin_lock_irqsave(&sparc_bh_lock, flags); - bh_mask &= ~(1 << nr); - bh_base[nr] = NULL; - spin_unlock_irqrestore(&sparc_bh_lock, flags); -} - -extern inline void mark_bh(int nr) -{ - unsigned long flags; - spin_lock_irqsave(&sparc_bh_lock, flags); - bh_active |= (1 << nr); - spin_unlock_irqrestore(&sparc_bh_lock, flags); -} - -/* - * These use a mask count to correctly handle - * nested disable/enable calls - */ -extern inline void disable_bh(int nr) -{ - unsigned long flags; - spin_lock_irqsave(&sparc_bh_lock, flags); - bh_mask &= ~(1 << nr); - atomic_inc(&bh_mask_count[nr]); - spin_unlock_irqrestore(&sparc_bh_lock, flags); - synchronize_bh(); -} - -extern inline void enable_bh(int nr) -{ - unsigned long flags; - spin_lock_irqsave(&sparc_bh_lock, flags); - if (atomic_dec_and_test(&bh_mask_count[nr])) - bh_mask |= 1 << nr; - spin_unlock_irqrestore(&sparc_bh_lock, flags); -} - -static inline void start_bh_atomic(void) -{ - atomic_inc(&global_bh_lock); - synchronize_bh(); -} - -static inline void end_bh_atomic(void) -{ - atomic_dec(&global_bh_lock); -} - -/* These are for the IRQs testing the lock */ -static inline int softirq_trylock(int cpu) -{ - if (spin_trylock(&global_bh_count)) { - if (atomic_read(&global_bh_lock) == 0 && - local_bh_count[cpu] == 0) { - ++local_bh_count[cpu]; - return 1; - 
} - spin_unlock(&global_bh_count); - } - return 0; -} - -static inline void softirq_endlock(int cpu) -{ - local_bh_count[cpu]--; - spin_unlock(&global_bh_count); -} - #define local_bh_disable() (local_bh_count[smp_processor_id()]++) #define local_bh_enable() (local_bh_count[smp_processor_id()]--) +#define in_softirq() (local_bh_count[smp_processor_id()] != 0) + #else extern unsigned int local_bh_count; -#define clear_active_bhs(x) (bh_active &= ~(x)) -#define mark_bh(nr) (bh_active |= (1 << (nr))) - -/* These are for the irq's testing the lock */ -#define softirq_trylock(cpu) (local_bh_count ? 0 : (local_bh_count=1)) -#define softirq_endlock(cpu) (local_bh_count = 0) -#define synchronize_bh() barrier() - #define local_bh_disable() (local_bh_count++) #define local_bh_enable() (local_bh_count--) -/* - * These use a mask count to correctly handle - * nested disable/enable calls - */ -extern inline void disable_bh(int nr) -{ - bh_mask &= ~(1 << nr); - atomic_inc(&bh_mask_count[nr]); - synchronize_bh(); -} - -extern inline void enable_bh(int nr) -{ - if (atomic_dec_and_test(&bh_mask_count[nr])) - bh_mask |= 1 << nr; -} - -extern inline void init_bh(int nr, void (*routine)(void)) -{ - bh_base[nr] = routine; - atomic_set(&bh_mask_count[nr], 0); - bh_mask |= 1 << nr; -} - -extern inline void remove_bh(int nr) -{ - bh_mask &= ~(1 << nr); - mb(); - bh_base[nr] = NULL; -} - -extern inline void start_bh_atomic(void) -{ - local_bh_count++; - barrier(); -} - -extern inline void end_bh_atomic(void) -{ - barrier(); - local_bh_count--; -} +#define in_softirq() (local_bh_count != 0) #endif /* SMP */ diff --git a/include/asm-sparc64/bitops.h b/include/asm-sparc64/bitops.h index 27820f26564b..6a6ec52b186d 100644 --- a/include/asm-sparc64/bitops.h +++ b/include/asm-sparc64/bitops.h @@ -1,4 +1,4 @@ -/* $Id: bitops.h,v 1.26 1999/01/07 14:14:15 jj Exp $ +/* $Id: bitops.h,v 1.27 2000/02/09 03:28:33 davem Exp $ * bitops.h: Bit string operations on the V9. * * Copyright 1996, 1997 David S. 
Miller (davem@caip.rutgers.edu) @@ -20,7 +20,7 @@ * all bit-ops return 0 if bit was previously clear and != 0 otherwise. */ -extern __inline__ unsigned long test_and_set_bit(unsigned long nr, void *addr) +extern __inline__ int test_and_set_bit(unsigned long nr, void *addr) { unsigned long * m = ((unsigned long *) addr) + (nr >> 6); unsigned long oldbit; @@ -60,7 +60,7 @@ extern __inline__ void set_bit(unsigned long nr, void *addr) : "g5", "g7", "cc", "memory"); } -extern __inline__ unsigned long test_and_clear_bit(unsigned long nr, void *addr) +extern __inline__ int test_and_clear_bit(unsigned long nr, void *addr) { unsigned long * m = ((unsigned long *) addr) + (nr >> 6); unsigned long oldbit; @@ -100,7 +100,7 @@ extern __inline__ void clear_bit(unsigned long nr, void *addr) : "g5", "g7", "cc", "memory"); } -extern __inline__ unsigned long test_and_change_bit(unsigned long nr, void *addr) +extern __inline__ int test_and_change_bit(unsigned long nr, void *addr) { unsigned long * m = ((unsigned long *) addr) + (nr >> 6); unsigned long oldbit; @@ -135,9 +135,9 @@ extern __inline__ void change_bit(unsigned long nr, void *addr) : "g5", "g7", "cc", "memory"); } -extern __inline__ unsigned long test_bit(int nr, __const__ void *addr) +extern __inline__ int test_bit(int nr, __const__ void *addr) { - return 1UL & (((__const__ long *) addr)[nr >> 6] >> (nr & 63)); + return (1UL & (((__const__ long *) addr)[nr >> 6] >> (nr & 63))) != 0UL; } /* The easy/cheese version for now. */ diff --git a/include/asm-sparc64/hardirq.h b/include/asm-sparc64/hardirq.h index 7df1d1346451..daff61ac4856 100644 --- a/include/asm-sparc64/hardirq.h +++ b/include/asm-sparc64/hardirq.h @@ -16,10 +16,13 @@ extern unsigned int local_irq_count; /* * Are we in an interrupt context? Either doing bottom half - * or hardware interrupt processing? + * or hardware interrupt processing? On any cpu? 
*/ #define in_interrupt() ((local_irq_count + local_bh_count) != 0) +/* This tests only the local processors hw IRQ context disposition. */ +#define in_irq() (local_irq_count != 0) + #ifndef __SMP__ #define hardirq_trylock(cpu) (local_irq_count == 0) diff --git a/include/asm-sparc64/posix_types.h b/include/asm-sparc64/posix_types.h index e2a024e3e7e9..e486344ad271 100644 --- a/include/asm-sparc64/posix_types.h +++ b/include/asm-sparc64/posix_types.h @@ -9,11 +9,12 @@ #if (__GNUC__ > 2) || (__GNUC_MINOR__ >= 8) typedef unsigned long int __kernel_size_t; +typedef long int __kernel_ssize_t; #else typedef unsigned long long __kernel_size_t; +typedef long long __kernel_ssize_t; #endif -typedef long long __kernel_ssize_t; typedef long __kernel_ptrdiff_t; typedef long __kernel_time_t; typedef long __kernel_clock_t; diff --git a/include/asm-sparc64/softirq.h b/include/asm-sparc64/softirq.h index b8e017d79098..460c96633448 100644 --- a/include/asm-sparc64/softirq.h +++ b/include/asm-sparc64/softirq.h @@ -19,117 +19,6 @@ extern unsigned int local_bh_count; #define local_bh_disable() (local_bh_count++) #define local_bh_enable() (local_bh_count--) -/* The locking mechanism for base handlers, to prevent re-entrancy, - * is entirely private to an implementation, it should not be - * referenced at all outside of this file. 
- */ - -#define get_active_bhs() (bh_mask & bh_active) -#define clear_active_bhs(mask) \ - __asm__ __volatile__( \ -"1: ldx [%1], %%g7\n" \ -" andn %%g7, %0, %%g5\n" \ -" casx [%1], %%g7, %%g5\n" \ -" cmp %%g7, %%g5\n" \ -" bne,pn %%xcc, 1b\n" \ -" nop" \ - : /* no outputs */ \ - : "HIr" (mask), "r" (&bh_active) \ - : "g5", "g7", "cc", "memory") - -extern inline void init_bh(int nr, void (*routine)(void)) -{ - bh_base[nr] = routine; - atomic_set(&bh_mask_count[nr], 0); - bh_mask |= 1 << nr; -} - -extern inline void remove_bh(int nr) -{ - bh_mask &= ~(1 << nr); - membar("#StoreStore"); - bh_base[nr] = NULL; -} - -extern inline void mark_bh(int nr) -{ - set_bit(nr, &bh_active); -} - -#ifndef __SMP__ - -extern inline void start_bh_atomic(void) -{ - local_bh_count++; - barrier(); -} - -extern inline void end_bh_atomic(void) -{ - barrier(); - local_bh_count--; -} - -/* These are for the irq's testing the lock */ -#define softirq_trylock(cpu) (local_bh_count ? 0 : (local_bh_count=1)) -#define softirq_endlock(cpu) (local_bh_count = 0) -#define synchronize_bh() barrier() - -#else /* (__SMP__) */ - -extern atomic_t global_bh_lock; -extern spinlock_t global_bh_count; - -extern void synchronize_bh(void); - -static inline void start_bh_atomic(void) -{ - atomic_inc(&global_bh_lock); - synchronize_bh(); -} - -static inline void end_bh_atomic(void) -{ - atomic_dec(&global_bh_lock); -} - -/* These are for the IRQs testing the lock */ -static inline int softirq_trylock(int cpu) -{ - if (spin_trylock(&global_bh_count)) { - if (atomic_read(&global_bh_lock) == 0 && - cpu_data[cpu].bh_count == 0) { - ++(cpu_data[cpu].bh_count); - return 1; - } - spin_unlock(&global_bh_count); - } - return 0; -} - -static inline void softirq_endlock(int cpu) -{ - (cpu_data[cpu].bh_count)--; - spin_unlock(&global_bh_count); -} - -#endif /* (__SMP__) */ - -/* - * These use a mask count to correctly handle - * nested disable/enable calls - */ -extern inline void disable_bh(int nr) -{ - bh_mask &= ~(1 << 
nr); - atomic_inc(&bh_mask_count[nr]); - synchronize_bh(); -} - -extern inline void enable_bh(int nr) -{ - if (atomic_dec_and_test(&bh_mask_count[nr])) - bh_mask |= 1 << nr; -} +#define in_softirq() (local_bh_count != 0) #endif /* !(__SPARC64_SOFTIRQ_H) */ diff --git a/include/linux/fs.h b/include/linux/fs.h index 35530b7779ef..2ac766b863ff 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -852,20 +852,17 @@ extern inline void mark_buffer_protected(struct buffer_head * bh) } extern void FASTCALL(__mark_buffer_dirty(struct buffer_head *bh, int flag)); +extern void FASTCALL(mark_buffer_dirty(struct buffer_head *bh, int flag)); #define atomic_set_buffer_dirty(bh) test_and_set_bit(BH_Dirty, &(bh)->b_state) -extern inline void mark_buffer_dirty(struct buffer_head * bh, int flag) -{ - if (!atomic_set_buffer_dirty(bh)) - __mark_buffer_dirty(bh, flag); -} - extern void balance_dirty(kdev_t); extern int check_disk_change(kdev_t); extern int invalidate_inodes(struct super_block *); extern void invalidate_inode_pages(struct inode *); -extern void invalidate_buffers(kdev_t); +#define invalidate_buffers(dev) __invalidate_buffers((dev), 0) +#define destroy_buffers(dev) __invalidate_buffers((dev), 1) +extern void __invalidate_buffers(kdev_t dev, int); extern int floppy_is_wp(int); extern void sync_inodes(kdev_t); extern void write_inode_now(struct inode *); diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h index d0d2ce2f9223..55e103dd583d 100644 --- a/include/linux/i2c-id.h +++ b/include/linux/i2c-id.h @@ -20,7 +20,7 @@ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ /* ------------------------------------------------------------------------- */ -/* $Id: i2c-id.h,v 1.6 1999/12/21 23:45:58 frodo Exp $ */ +/* $Id: i2c-id.h,v 1.10 2000/02/04 02:47:41 mds Exp $ */ #ifndef I2C_ID_H #define I2C_ID_H @@ -44,35 +44,36 @@ * never be used in official drivers */ -#define I2C_DRIVERID_MSP3400 1 -#define I2C_DRIVERID_TUNER 2 -#define I2C_DRIVERID_VIDEOTEXT 3 /* please rename */ +#define I2C_DRIVERID_MSP3400 1 +#define I2C_DRIVERID_TUNER 2 +#define I2C_DRIVERID_VIDEOTEX 3 /* please rename */ #define I2C_DRIVERID_TDA8425 4 /* stereo sound processor */ #define I2C_DRIVERID_TEA6420 5 /* audio matrix switch */ #define I2C_DRIVERID_TEA6415C 6 /* video matrix switch */ #define I2C_DRIVERID_TDA9840 7 /* stereo sound processor */ #define I2C_DRIVERID_SAA7111A 8 /* video input processor */ #define I2C_DRIVERID_SAA5281 9 /* videotext decoder */ -#define I2C_DRIVERID_SAA7112 10 /* video decoder, image scaler */ -#define I2C_DRIVERID_SAA7120 11 /* video encoder */ -#define I2C_DRIVERID_SAA7121 12 /* video encoder */ -#define I2C_DRIVERID_SAA7185B 13 /* video encoder */ -#define I2C_DRIVERID_CH7003 14 /* digital pc to tv encoder */ -#define I2C_DRIVERID_PCF8574A 15 /* i2c expander - 8 bit in/out */ -#define I2C_DRIVERID_PCF8582C 16 /* eeprom */ -#define I2C_DRIVERID_AT24Cxx 17 /* eeprom 1/2/4/8/16 K */ +#define I2C_DRIVERID_SAA7112 10 /* video decoder, image scaler */ +#define I2C_DRIVERID_SAA7120 11 /* video encoder */ +#define I2C_DRIVERID_SAA7121 12 /* video encoder */ +#define I2C_DRIVERID_SAA7185B 13 /* video encoder */ +#define I2C_DRIVERID_CH7003 14 /* digital pc to tv encoder */ +#define I2C_DRIVERID_PCF8574A 15 /* i2c expander - 8 bit in/out */ +#define I2C_DRIVERID_PCF8582C 16 /* eeprom */ +#define I2C_DRIVERID_AT24Cxx 17 /* eeprom 1/2/4/8/16 K */ #define I2C_DRIVERID_TEA6300 18 /* audio mixer */ -#define I2C_DRIVERID_BT829 19 /* pc to tv encoder */ +#define I2C_DRIVERID_BT829 19 /* pc to tv encoder */ #define I2C_DRIVERID_TDA9850 20 /* 
audio mixer */ #define I2C_DRIVERID_TDA9855 21 /* audio mixer */ +#define I2C_DRIVERID_SAA7110 22 /* */ +#define I2C_DRIVERID_MGATVO 23 /* Matrox TVOut */ +#define I2C_DRIVERID_SAA5249 24 /* SAA5249 and compatibles */ #define I2C_DRIVERID_EXP0 0xF0 /* experimental use id's */ #define I2C_DRIVERID_EXP1 0xF1 #define I2C_DRIVERID_EXP2 0xF2 #define I2C_DRIVERID_EXP3 0xF3 -#define I2C_DRIVERID_MGATVO 0x0101 /* Matrox TVOut */ - #define I2C_DRIVERID_I2CDEV 900 #define I2C_DRIVERID_I2CPROC 901 @@ -88,8 +89,8 @@ #define I2C_ALGO_PCF 0x020000 /* PCF 8584 style adapters */ #define I2C_ALGO_ATI 0x030000 /* ATI video card */ #define I2C_ALGO_SMBUS 0x040000 -#define I2C_ALGO_ISA 0x050000 /* lm_sensors ISA pseudo-adapter */ -#define I2C_ALGO_SAA7146 0x060000 /* SAA 7146 video decoder bus */ +#define I2C_ALGO_ISA 0x050000 /* lm_sensors ISA pseudo-adapter */ +#define I2C_ALGO_SAA714 0x060000 /* SAA 7146 video decoder bus */ #define I2C_ALGO_SAA7146A 0x060001 /* SAA 7146A - enhanced version */ @@ -113,9 +114,11 @@ #define I2C_HW_B_VELLE 0x04 /* Vellemann K8000 */ #define I2C_HW_B_BT848 0x05 /* BT848 video boards */ #define I2C_HW_B_WNV 0x06 /* Winnov Videums */ -#define I2C_HW_B_VIA 0x07 /* Via vt82c586b */ -#define I2C_HW_B_HYDRA 0x08 /* Apple Hydra Mac I/O */ +#define I2C_HW_B_VIA 0x07 /* Via vt82c586b */ +#define I2C_HW_B_HYDRA 0x08 /* Apple Hydra Mac I/O */ #define I2C_HW_B_G400 0x09 /* Matrox G400 */ +#define I2C_HW_B_I810 0x0a /* Intel I810 */ +#define I2C_HW_B_RIVA 0x10 /* Riva based graphics cards */ /* --- PCF 8584 based algorithms */ #define I2C_HW_P_LP 0x00 /* Parallel port interface */ @@ -127,11 +130,11 @@ #define I2C_HW_SMBUS_ALI15X3 0x01 #define I2C_HW_SMBUS_VIA2 0x02 #define I2C_HW_SMBUS_VOODOO3 0x03 -#define I2C_HW_SMBUS_I801 0x04 +#define I2C_HW_SMBUS_I801 0x04 #define I2C_HW_SMBUS_AMD756 0x05 #define I2C_HW_SMBUS_SIS5595 0x06 -/* --- ISA pseudo-adapter */ +/* --- ISA pseudo-adapter */ #define I2C_HW_ISA 0x00 #endif /* I2C_ID_H */ diff --git a/include/linux/i2c.h 
b/include/linux/i2c.h index 9dcc57d49500..3aa308bc24d9 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -3,7 +3,7 @@ /* i2c.h - definitions for the i2c-bus interface */ /* */ /* ------------------------------------------------------------------------- */ -/* Copyright (C) 1995-1999 Simon G. Vogl +/* Copyright (C) 1995-2000 Simon G. Vogl This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -23,7 +23,7 @@ /* With some changes from Kyösti Mälkki and Frodo Looijaard */ -/* $Id: i2c.h,v 1.32 1999/12/21 23:45:58 frodo Exp $ */ +/* $Id: i2c.h,v 1.36 2000/01/18 23:54:07 frodo Exp $ */ #ifndef I2C_H #define I2C_H @@ -94,6 +94,8 @@ struct i2c_msg { unsigned short flags; #define I2C_M_TEN 0x10 /* we have a ten bit chip address */ #define I2C_M_RD 0x01 +#define I2C_M_NOSTART 0x4000 +#define I2C_M_REV_DIR_ADDR 0x2000 #if 0 #define I2C_M_PROBE 0x20 #endif @@ -171,8 +173,8 @@ struct i2c_driver { * dec_use is the inverse operation. * NB: Make sure you have no circular dependencies, or else you get a * deadlock when trying to unload the modules. - * You should use the i2c_{inc,dec}_use_client functions instead of - * calling this function directly. + * You should use the i2c_{inc,dec}_use_client functions instead of + * calling this function directly. */ void (*inc_use)(struct i2c_client *client); void (*dec_use)(struct i2c_client *client); @@ -190,9 +192,9 @@ struct i2c_client { unsigned int addr; /* chip address - NOTE: 7bit */ /* addresses are stored in the */ /* _LOWER_ 7 bits of this char */ - /* addr: unsigned int to make lm_sensors i2c-isa adapter work - more cleanly. It does not take any more memory space, due to - alignment considerations */ + /* addr: unsigned int to make lm_sensors i2c-isa adapter work + more cleanly. 
It does not take any more memory space, due to + alignment considerations */ struct i2c_adapter *adapter; /* the adapter we sit on */ struct i2c_driver *driver; /* and our access routines */ void *data; /* for the clients */ @@ -207,17 +209,17 @@ struct i2c_client { */ struct i2c_algorithm { char name[32]; /* textual description */ - unsigned int id; + unsigned int id; /* If a adapter algorithm can't to I2C-level access, set master_xfer - to NULL. If an adapter algorithm can do SMBus access, set - smbus_xfer. If set to NULL, the SMBus protocol is simulated - using common I2C messages */ + to NULL. If an adapter algorithm can do SMBus access, set + smbus_xfer. If set to NULL, the SMBus protocol is simulated + using common I2C messages */ int (*master_xfer)(struct i2c_adapter *adap,struct i2c_msg msgs[], - int num); + int num); int (*smbus_xfer) (struct i2c_adapter *adap, u16 addr, - unsigned short flags, char read_write, - u8 command, int size, union i2c_smbus_data * data); + unsigned short flags, char read_write, + u8 command, int size, union i2c_smbus_data * data); /* --- these optional/future use for some adapter types.*/ int (*slave_send)(struct i2c_adapter *,char*,int); @@ -245,7 +247,7 @@ struct i2c_adapter { struct i2c_algorithm *algo;/* the algorithm to access the bus */ void *algo_data; - /* --- These may be NULL, but should increase the module use count */ + /* --- These may be NULL, but should increase the module use count */ void (*inc_use)(struct i2c_adapter *); void (*dec_use)(struct i2c_adapter *); @@ -279,20 +281,20 @@ struct i2c_adapter { /*flags for the driver struct: */ #define I2C_DF_NOTIFY 0x01 /* notify on bus (de/a)ttaches */ -#define I2C_DF_DUMMY 0x02 /* do not connect any clients */ +#define I2C_DF_DUMMY 0x02 /* do not connect any clients */ /* i2c_client_address_data is the struct for holding default client * addresses for a driver and for the parameters supplied on the * command line */ struct i2c_client_address_data { - unsigned short 
*normal_i2c; - unsigned short *normal_i2c_range; - unsigned short *probe; - unsigned short *probe_range; - unsigned short *ignore; - unsigned short *ignore_range; - unsigned short *force; + unsigned short *normal_i2c; + unsigned short *normal_i2c_range; + unsigned short *probe; + unsigned short *probe_range; + unsigned short *ignore; + unsigned short *ignore_range; + unsigned short *force; }; /* Internal numbers to terminate lists */ @@ -361,20 +363,21 @@ extern int i2c_check_functionality (struct i2c_adapter *adap, u32 func); /* To determine what functionality is present */ -#define I2C_FUNC_I2C 0x00000001 -#define I2C_FUNC_10BIT_ADDR 0x00000002 -#define I2C_FUNC_SMBUS_QUICK 0x00010000 -#define I2C_FUNC_SMBUS_READ_BYTE 0x00020000 -#define I2C_FUNC_SMBUS_WRITE_BYTE 0x00040000 -#define I2C_FUNC_SMBUS_READ_BYTE_DATA 0x00080000 -#define I2C_FUNC_SMBUS_WRITE_BYTE_DATA 0x00100000 -#define I2C_FUNC_SMBUS_READ_WORD_DATA 0x00200000 -#define I2C_FUNC_SMBUS_WRITE_WORD_DATA 0x00400000 -#define I2C_FUNC_SMBUS_PROC_CALL 0x00800000 -#define I2C_FUNC_SMBUS_READ_BLOCK_DATA 0x01000000 +#define I2C_FUNC_I2C 0x00000001 +#define I2C_FUNC_10BIT_ADDR 0x00000002 +#define I2C_FUNC_PROTOCOL_MANGLING 0x00000004 /* I2C_M_{REV_DIR_ADDR,NOSTART} */ +#define I2C_FUNC_SMBUS_QUICK 0x00010000 +#define I2C_FUNC_SMBUS_READ_BYTE 0x00020000 +#define I2C_FUNC_SMBUS_WRITE_BYTE 0x00040000 +#define I2C_FUNC_SMBUS_READ_BYTE_DATA 0x00080000 +#define I2C_FUNC_SMBUS_WRITE_BYTE_DATA 0x00100000 +#define I2C_FUNC_SMBUS_READ_WORD_DATA 0x00200000 +#define I2C_FUNC_SMBUS_WRITE_WORD_DATA 0x00400000 +#define I2C_FUNC_SMBUS_PROC_CALL 0x00800000 +#define I2C_FUNC_SMBUS_READ_BLOCK_DATA 0x01000000 #define I2C_FUNC_SMBUS_WRITE_BLOCK_DATA 0x02000000 -#define I2C_FUNC_SMBUS_READ_I2C_BLOCK 0x04000000 /* New I2C-like block */ -#define I2C_FUNC_SMBUS_WRITE_I2C_BLOCK 0x08000000 /* transfers */ +#define I2C_FUNC_SMBUS_READ_I2C_BLOCK 0x04000000 /* New I2C-like block */ +#define I2C_FUNC_SMBUS_WRITE_I2C_BLOCK 0x08000000 /* 
transfer */ #define I2C_FUNC_SMBUS_BYTE I2C_FUNC_SMBUS_READ_BYTE | \ I2C_FUNC_SMBUS_WRITE_BYTE @@ -398,23 +401,23 @@ extern int i2c_check_functionality (struct i2c_adapter *adap, u32 func); * Data for SMBus Messages */ union i2c_smbus_data { - __u8 byte; - __u16 word; - __u8 block[33]; /* block[0] is used for length */ + __u8 byte; + __u16 word; + __u8 block[33]; /* block[0] is used for length */ }; /* smbus_access read or write markers */ -#define I2C_SMBUS_READ 1 -#define I2C_SMBUS_WRITE 0 +#define I2C_SMBUS_READ 1 +#define I2C_SMBUS_WRITE 0 /* SMBus transaction types (size parameter in the above functions) Note: these no longer correspond to the (arbitrary) PIIX4 internal codes! */ -#define I2C_SMBUS_QUICK 0 -#define I2C_SMBUS_BYTE 1 -#define I2C_SMBUS_BYTE_DATA 2 -#define I2C_SMBUS_WORD_DATA 3 -#define I2C_SMBUS_PROC_CALL 4 -#define I2C_SMBUS_BLOCK_DATA 5 +#define I2C_SMBUS_QUICK 0 +#define I2C_SMBUS_BYTE 1 +#define I2C_SMBUS_BYTE_DATA 2 +#define I2C_SMBUS_WORD_DATA 3 +#define I2C_SMBUS_PROC_CALL 4 +#define I2C_SMBUS_BLOCK_DATA 5 /* ----- commands for the ioctl like i2c_command call: @@ -423,7 +426,7 @@ union i2c_smbus_data { * corresponding header files. */ /* -> bit-adapter specific ioctls */ -#define I2C_RETRIES 0x0701 /* number times a device adress should */ +#define I2C_RETRIES 0x0701 /* number times a device adress should */ /* be polled when not acknowledging */ #define I2C_TIMEOUT 0x0702 /* set timeout - call with int */ @@ -433,11 +436,11 @@ union i2c_smbus_data { /* Attn.: Slave address is 7 or 10 bits */ #define I2C_SLAVE_FORCE 0x0706 /* Change slave address */ /* Attn.: Slave address is 7 or 10 bits */ - /* This changes the address, even if it */ - /* is already taken! */ -#define I2C_TENBIT 0x0704 /* 0 for 7 bit addrs, != 0 for 10 bit */ + /* This changes the address, even if it */ + /* is already taken! 
*/ +#define I2C_TENBIT 0x0704 /* 0 for 7 bit addrs, != 0 for 10 bit */ -#define I2C_FUNCS 0x0705 /* Get the adapter functionality */ +#define I2C_FUNCS 0x0705 /* Get the adapter functionality */ #if 0 #define I2C_ACK_TEST 0x0710 /* See if a slave is at a specific adress */ #endif @@ -445,7 +448,7 @@ union i2c_smbus_data { #define I2C_SMBUS 0x0720 /* SMBus-level access */ /* ... algo-bit.c recognizes */ -#define I2C_UDELAY 0x0705 /* set delay in microsecs between each */ +#define I2C_UDELAY 0x0705 /* set delay in microsecs between each */ /* written byte (except address) */ #define I2C_MDELAY 0x0706 /* millisec delay between written bytes */ diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 8bc1f9ee6d97..29a42aef8fe9 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -3,6 +3,7 @@ #define _LINUX_INTERRUPT_H #include +#include #include #include @@ -15,17 +16,9 @@ struct irqaction { struct irqaction *next; }; -extern volatile unsigned char bh_running; - -extern atomic_t bh_mask_count[32]; -extern unsigned long bh_active; -extern unsigned long bh_mask; -extern void (*bh_base[32])(void); - -asmlinkage void do_bottom_half(void); /* Who gets which entry in bh_base. Things which will occur most often - should come first - in which case NET should be up the top with SERIAL/TQUEUE! */ + should come first */ enum { TIMER_BH = 0, @@ -37,10 +30,8 @@ enum { SPECIALIX_BH, AURORA_BH, ESP_BH, - NET_BH, SCSI_BH, IMMEDIATE_BH, - KEYBOARD_BH, CYCLADES_BH, CM206_BH, JS_BH, @@ -51,6 +42,228 @@ enum { #include #include + + +/* PLEASE, avoid to allocate new softirqs, if you need not _really_ high + frequency threaded job scheduling. For almost all the purposes + tasklets are more than enough. F.e. KEYBOARD_BH, CONSOLE_BH, all serial + device BHs et al. are converted to tasklets, not to softirqs. 
+ */ + +enum +{ + HI_SOFTIRQ=0, + NET_TX_SOFTIRQ, + NET_RX_SOFTIRQ, + TASKLET_SOFTIRQ +}; + +#if SMP_CACHE_BYTES <= 32 +/* It is trick to make assembly easier. */ +#define SOFTIRQ_STATE_PAD 32 +#else +#define SOFTIRQ_STATE_PAD SMP_CACHE_BYTES +#endif + +struct softirq_state +{ + __u32 active; + __u32 mask; +} __attribute__ ((__aligned__(SOFTIRQ_STATE_PAD))); + +extern struct softirq_state softirq_state[NR_CPUS]; + +struct softirq_action +{ + void (*action)(struct softirq_action *); + void *data; +}; + +asmlinkage void do_softirq(void); +extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data); + +extern __inline__ void __cpu_raise_softirq(int cpu, int nr) +{ + softirq_state[cpu].active |= (1<state)) +#define tasklet_unlock_wait(t) while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { /* NOTHING */ } +#define tasklet_unlock(t) clear_bit(TASKLET_STATE_RUN, &(t)->state) +#else +#define tasklet_trylock(t) 1 +#define tasklet_unlock_wait(t) do { } while (0) +#define tasklet_unlock(t) do { } while (0) +#endif + +extern __inline__ void tasklet_schedule(struct tasklet_struct *t) +{ + if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) { + int cpu = smp_processor_id(); + unsigned long flags; + + local_irq_save(flags); + t->next = tasklet_vec[cpu].list; + tasklet_vec[cpu].list = t; + __cpu_raise_softirq(cpu, TASKLET_SOFTIRQ); + local_irq_restore(flags); + } +} + +extern __inline__ void tasklet_hi_schedule(struct tasklet_struct *t) +{ + if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) { + int cpu = smp_processor_id(); + unsigned long flags; + + local_irq_save(flags); + t->next = tasklet_hi_vec[cpu].list; + tasklet_hi_vec[cpu].list = t; + __cpu_raise_softirq(cpu, HI_SOFTIRQ); + local_irq_restore(flags); + } +} + + +extern __inline__ void tasklet_disable_nosync(struct tasklet_struct *t) +{ + atomic_inc(&t->count); +} + +extern __inline__ void tasklet_disable(struct tasklet_struct *t) +{ + tasklet_disable_nosync(t); + tasklet_unlock_wait(t); +} + 
+extern __inline__ void tasklet_enable(struct tasklet_struct *t) +{ + atomic_dec(&t->count); +} + +extern void tasklet_kill(struct tasklet_struct *t); +extern void tasklet_init(struct tasklet_struct *t, + void (*func)(unsigned long), unsigned long data); + +#ifdef __SMP__ + +#define SMP_TIMER_NAME(name) name##__thr + +#define SMP_TIMER_DEFINE(name, task) \ +DECLARE_TASKLET(task, name##__thr, 0); \ +static void name (unsigned long dummy) \ +{ \ + tasklet_schedule(&(task)); \ +} + +#else /* __SMP__ */ + +#define SMP_TIMER_NAME(name) name +#define SMP_TIMER_DEFINE(name, task) + +#endif /* __SMP__ */ + + +/* Old BH definitions */ + +extern struct tasklet_struct bh_task_vec[]; + +/* It is exported _ONLY_ for wait_on_irq(). */ +extern spinlock_t global_bh_lock; + +extern __inline__ void mark_bh(int nr) +{ + tasklet_hi_schedule(bh_task_vec+nr); +} + +extern __inline__ void disable_bh_nosync(int nr) +{ + tasklet_disable_nosync(bh_task_vec+nr); +} + +extern __inline__ void disable_bh(int nr) +{ + tasklet_disable_nosync(bh_task_vec+nr); + if (!in_interrupt()) + tasklet_unlock_wait(bh_task_vec+nr); +} + +extern __inline__ void enable_bh(int nr) +{ + tasklet_enable(bh_task_vec+nr); +} + + +extern void init_bh(int nr, void (*routine)(void)); +extern void remove_bh(int nr); + + /* * Autoprobing for irqs: * diff --git a/include/linux/kbd_kern.h b/include/linux/kbd_kern.h index 9b215fb148bc..56dd41faccfa 100644 --- a/include/linux/kbd_kern.h +++ b/include/linux/kbd_kern.h @@ -4,6 +4,8 @@ #include #include +extern struct tasklet_struct keyboard_tasklet; + extern int shift_state; extern char *func_table[MAX_NR_FUNC]; @@ -85,7 +87,7 @@ extern inline void set_console(int nr) extern inline void set_leds(void) { - mark_bh(KEYBOARD_BH); + tasklet_schedule(&keyboard_tasklet); } extern inline int vc_kbd_mode(struct kbd_struct * kbd, int flag) diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h index edf0f0768367..9e2a1e4ffe22 100644 --- a/include/linux/miscdevice.h +++ 
b/include/linux/miscdevice.h @@ -14,6 +14,7 @@ #define WATCHDOG_MINOR 130 /* Watchdog timer */ #define TEMP_MINOR 131 /* Temperature Sensor */ #define RTC_MINOR 135 +#define EFI_RTC_MINOR 136 /* EFI Time services */ #define SUN_OPENPROM_MINOR 139 #define NVRAM_MINOR 144 #define I2O_MINOR 166 diff --git a/include/linux/mm.h b/include/linux/mm.h index 74fe4654f24a..6c4dac14cd3a 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -462,7 +462,7 @@ extern struct page *filemap_nopage(struct vm_area_struct * area, #define __GFP_DMA 0x20 -#define GFP_BUFFER (__GFP_WAIT) +#define GFP_BUFFER (__GFP_HIGH | __GFP_WAIT) #define GFP_ATOMIC (__GFP_HIGH) #define GFP_USER (__GFP_WAIT | __GFP_IO) #define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM) diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index e3d688acffc1..d2b1d5a6a783 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -29,7 +29,7 @@ typedef struct zone_struct { unsigned long offset; unsigned long free_pages; int low_on_memory; - unsigned long pages_low, pages_high; + unsigned long pages_min, pages_low, pages_high; struct pglist_data *zone_pgdat; /* diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 323458ca4b1a..3905728e4ea2 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -29,6 +29,8 @@ #include #include +#include +#include #ifdef __KERNEL__ #include @@ -107,15 +109,6 @@ struct net_device_stats unsigned long tx_compressed; }; -#ifdef CONFIG_NET_FASTROUTE -struct net_fastroute_stats -{ - int hits; - int succeed; - int deferred; - int latency_reduction; -}; -#endif /* Media selection options. 
*/ enum { @@ -138,6 +131,23 @@ struct neighbour; struct neigh_parms; struct sk_buff; +struct netif_rx_stats +{ + unsigned total; + unsigned dropped; + unsigned time_squeeze; + unsigned throttled; + unsigned fastroute_hit; + unsigned fastroute_success; + unsigned fastroute_defer; + unsigned fastroute_deferred_out; + unsigned fastroute_latency_reduction; + unsigned cpu_collision; +} __attribute__ ((__aligned__(SMP_CACHE_BYTES))); + +extern struct netif_rx_stats netdev_rx_stat[]; + + /* * We tag multicasts with these structures. */ @@ -163,6 +173,16 @@ struct hh_cache unsigned long hh_data[16/sizeof(unsigned long)]; }; +enum netdev_state_t +{ + LINK_STATE_XOFF=0, + LINK_STATE_DOWN, + LINK_STATE_START, + LINK_STATE_RXSEM, + LINK_STATE_TXSEM, + LINK_STATE_SCHED +}; + /* * The DEVICE structure. @@ -194,34 +214,30 @@ struct net_device unsigned long mem_start; /* shared mem start */ unsigned long base_addr; /* device I/O address */ unsigned int irq; /* device IRQ number */ - - /* Low-level status flags. */ - volatile unsigned char start; /* start an operation */ + /* - * These two are just single-bit flags, but due to atomicity - * reasons they have to be inside a "unsigned long". However, - * they should be inside the SAME unsigned long instead of - * this wasteful use of memory.. + * Some hardware also needs these fields, but they are not + * part of the usual set specified in Space.c. */ - unsigned long interrupt; /* bitops.. */ - unsigned long tbusy; /* transmitter busy */ - - struct net_device *next; + + unsigned char if_port; /* Selectable AUI, TP,..*/ + unsigned char dma; /* DMA channel */ + + unsigned long state; + + struct net_device *next; /* The device initialization function. Called only once. */ int (*init)(struct net_device *dev); + /* ------- Fields preinitialized in Space.c finish here ------- */ + + struct net_device *next_sched; + /* Interface index. 
Unique device identifier */ int ifindex; int iflink; - /* - * Some hardware also needs these fields, but they are not - * part of the usual set specified in Space.c. - */ - - unsigned char if_port; /* Selectable AUI, TP,..*/ - unsigned char dma; /* DMA channel */ struct net_device_stats* (*get_stats)(struct net_device *dev); struct iw_statistics* (*get_wireless_stats)(struct net_device *dev); @@ -235,14 +251,18 @@ struct net_device /* These may be needed for future network-power-down code. */ unsigned long trans_start; /* Time (in jiffies) of last Tx */ unsigned long last_rx; /* Time of last Rx */ - + unsigned short flags; /* interface flags (a la BSD) */ unsigned short gflags; unsigned mtu; /* interface MTU value */ unsigned short type; /* interface hardware type */ unsigned short hard_header_len; /* hardware hdr length */ void *priv; /* pointer to private data */ - + + struct net_device *master; /* Pointer to master device of a group, + * which this device is member of. + */ + /* Interface address info. 
*/ unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */ unsigned char pad; /* make dev_addr aligned to 8 bytes */ @@ -253,11 +273,9 @@ struct net_device int mc_count; /* Number of installed mcasts */ int promiscuity; int allmulti; - - /* For load balancing driver pair support */ - - unsigned long pkt_queue; /* Packets queued */ - struct net_device *slave; /* Slave device */ + + int watchdog_timeo; + struct timer_list watchdog_timer; /* Protocol specific pointers */ @@ -329,13 +347,15 @@ struct net_device #define HAVE_CHANGE_MTU int (*change_mtu)(struct net_device *dev, int new_mtu); +#define HAVE_TX_TIMOUT + void (*tx_timeout) (struct net_device *dev); + int (*hard_header_parse)(struct sk_buff *skb, unsigned char *haddr); int (*neigh_setup)(struct net_device *dev, struct neigh_parms *); int (*accept_fastpath)(struct net_device *, struct dst_entry*); #ifdef CONFIG_NET_FASTROUTE - unsigned long tx_semaphore; #define NETDEV_FASTROUTE_HMASK 0xF /* Semi-private data. Keep it at the end of device struct. */ rwlock_t fastpath_lock; @@ -361,8 +381,6 @@ struct packet_type extern struct net_device loopback_dev; /* The loopback */ extern struct net_device *dev_base; /* All devices */ extern rwlock_t dev_base_lock; /* Device list lock */ -extern int netdev_dropping; -extern int net_cpu_congestion; extern struct net_device *dev_getbyhwaddr(unsigned short type, char *hwaddr); extern void dev_add_pack(struct packet_type *pt); @@ -392,9 +410,77 @@ extern __inline__ int unregister_gifconf(unsigned int family) return register_gifconf(family, 0); } +/* + * Incoming packets are placed on per-cpu queues so that + * no locking is needed. 
+ */ + +struct softnet_data +{ + int throttle; + struct sk_buff_head input_pkt_queue; + struct net_device *output_queue; + struct sk_buff *completion_queue; +} __attribute__((__aligned__(SMP_CACHE_BYTES))); + + +extern struct softnet_data softnet_data[NR_CPUS]; + +#define HAS_NETIF_QUEUE + +extern __inline__ void __netif_schedule(struct net_device *dev) +{ + if (!test_and_set_bit(LINK_STATE_SCHED, &dev->state)) { + unsigned long flags; + int cpu = smp_processor_id(); + + local_irq_save(flags); + dev->next_sched = softnet_data[cpu].output_queue; + softnet_data[cpu].output_queue = dev; + __cpu_raise_softirq(cpu, NET_TX_SOFTIRQ); + local_irq_restore(flags); + } +} + +extern __inline__ void netif_schedule(struct net_device *dev) +{ + if (!test_bit(LINK_STATE_XOFF, &dev->state)) + __netif_schedule(dev); +} + +extern __inline__ void netif_start_queue(struct net_device *dev) +{ + clear_bit(LINK_STATE_XOFF, &dev->state); +} + +extern __inline__ void netif_wake_queue(struct net_device *dev) +{ + if (test_and_clear_bit(LINK_STATE_XOFF, &dev->state)) + __netif_schedule(dev); +} + +extern __inline__ void netif_stop_queue(struct net_device *dev) +{ + set_bit(LINK_STATE_XOFF, &dev->state); +} + +extern __inline__ void dev_kfree_skb_irq(struct sk_buff *skb) +{ + if (atomic_dec_and_test(&skb->users)) { + int cpu =smp_processor_id(); + unsigned long flags; + + local_irq_save(flags); + skb->next = softnet_data[cpu].completion_queue; + softnet_data[cpu].completion_queue = skb; + __cpu_raise_softirq(cpu, NET_TX_SOFTIRQ); + local_irq_restore(flags); + } +} + + #define HAVE_NETIF_RX 1 extern void netif_rx(struct sk_buff *skb); -extern void net_bh(void); extern int dev_ioctl(unsigned int cmd, void *); extern int dev_change_flags(struct net_device *, unsigned); extern void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev); @@ -448,15 +534,13 @@ extern void dev_load(const char *name); extern void dev_mcast_init(void); extern int netdev_register_fc(struct net_device *dev, void 
(*stimul)(struct net_device *dev)); extern void netdev_unregister_fc(int bit); -extern int netdev_dropping; extern int netdev_max_backlog; -extern atomic_t netdev_rx_dropped; extern unsigned long netdev_fc_xoff; +extern int netdev_set_master(struct net_device *dev, struct net_device *master); #ifdef CONFIG_NET_FASTROUTE extern int netdev_fastroute; extern int netdev_fastroute_obstacles; extern void dev_clear_fastroute(struct net_device *dev); -extern struct net_fastroute_stats dev_fastroute_stat; #endif diff --git a/include/linux/openpic.h b/include/linux/openpic.h index ce7ffdf899e8..b4a9ecab1ced 100644 --- a/include/linux/openpic.h +++ b/include/linux/openpic.h @@ -263,8 +263,12 @@ extern u_char *OpenPIC_InitSenses; * Interrupt Source Registers */ -#define OPENPIC_SENSE_POLARITY 0x00800000 /* Undoc'd */ +#define OPENPIC_POLARITY_POSITIVE 0x00800000 +#define OPENPIC_POLARITY_NEGATIVE 0x00000000 +#define OPENPIC_POLARITY_MASK 0x00800000 #define OPENPIC_SENSE_LEVEL 0x00400000 +#define OPENPIC_SENSE_EDGE 0x00000000 +#define OPENPIC_SENSE_MASK 0x00400000 /* diff --git a/include/linux/pmu.h b/include/linux/pmu.h index 907b58c2d742..ad4d12b67731 100644 --- a/include/linux/pmu.h +++ b/include/linux/pmu.h @@ -51,6 +51,7 @@ enum { PMU_OHARE_BASED, /* 2400, 3400, 3500 (old G3 powerbook) */ PMU_HEATHROW_BASED, /* PowerBook G3 series */ PMU_PADDINGTON_BASED, /* 1999 PowerBook G3 */ + PMU_KEYLARGO_BASED, /* Core99 motherboard (PMU99) */ }; /* @@ -66,6 +67,8 @@ enum { #define PMU_IOC_SET_BACKLIGHT _IOW('B', 2, sizeof(__u32)) /* out param: u32* backlight value: 0 to 31 */ #define PMU_IOC_GET_MODEL _IOR('B', 3, sizeof(__u32*)) +/* out param: u32* has_adb: 0 or 1 */ +#define PMU_IOC_HAS_ADB _IOR('B', 4, sizeof(__u32*)) #ifdef __KERNEL__ diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index e8f6c7328fe4..f27ad591f6c0 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -434,11 +434,15 @@ enum IFLA_QDISC, IFLA_STATS, IFLA_COST, - IFLA_PRIORITY 
+#define IFLA_COST IFLA_COST + IFLA_PRIORITY, +#define IFLA_PRIORITY IFLA_PRIORITY + IFLA_MASTER +#define IFLA_MASTER IFLA_MASTER }; -#define IFLA_MAX IFLA_STATS +#define IFLA_MAX IFLA_MASTER #define IFLA_RTA(r) ((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct ifinfomsg)))) #define IFLA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct ifinfomsg)) @@ -464,7 +468,7 @@ enum IFF_BROADCAST devices are able to use multicasts too. */ -/* ifi_link. +/* IFLA_LINK. For usual devices it is equal ifi_index. If it is a "virtual interface" (f.e. tunnel), ifi_link can point to real physical interface (f.e. for bandwidth calculations), @@ -558,6 +562,13 @@ extern void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const voi #define RTA_PUT(skb, attrtype, attrlen, data) \ ({ if (skb_tailroom(skb) < (int)RTA_SPACE(attrlen)) goto rtattr_failure; \ __rta_fill(skb, attrtype, attrlen, data); }) + +extern void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change); + +#else + +#define rtmsg_ifinfo(a,b,c) do { } while (0) + #endif extern struct semaphore rtnl_sem; diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 6c1d060d067a..a84ae422c8ad 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -53,6 +53,7 @@ #define spin_lock_init(lock) do { } while(0) #define spin_lock(lock) (void)(lock) /* Not "unused variable". 
*/ +#define spin_is_locked(lock) (0) #define spin_trylock(lock) ({1; }) #define spin_unlock_wait(lock) do { } while(0) #define spin_unlock(lock) do { } while(0) @@ -65,6 +66,7 @@ typedef struct { #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } #define spin_lock_init(x) do { (x)->lock = 0; } while (0) +#define spin_is_locked(lock) (test_bit(0,(lock))) #define spin_trylock(lock) (!test_and_set_bit(0,(lock))) #define spin_lock(x) do { (x)->lock = 1; } while (0) @@ -83,6 +85,7 @@ typedef struct { #include #define spin_lock_init(x) do { (x)->lock = 0; } while (0) +#define spin_is_locked(lock) (test_bit(0,(lock))) #define spin_trylock(lock) (!test_and_set_bit(0,(lock))) #define spin_lock(x) do {unsigned long __spinflags; save_flags(__spinflags); cli(); if ((x)->lock&&(x)->babble) {printk("%s:%d: spin_lock(%s:%p) already locked\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} (x)->lock = 1; restore_flags(__spinflags);} while (0) diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index db1cb7c415aa..1e17e52bba13 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -29,6 +29,7 @@ struct svc_serv { struct svc_sock * sv_sockets; /* pending sockets */ struct svc_program * sv_program; /* RPC program */ struct svc_stat * sv_stats; /* RPC statistics */ + spinlock_t sv_lock; unsigned int sv_nrthreads; /* # of server threads */ unsigned int sv_bufsz; /* datagram buffer size */ unsigned int sv_xdrsize; /* XDR buffer size */ diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h index 5f8dc87689f1..82d9678d4905 100644 --- a/include/linux/sunrpc/svcsock.h +++ b/include/linux/sunrpc/svcsock.h @@ -21,6 +21,7 @@ struct svc_sock { struct svc_sock * sk_list; /* list of all sockets */ struct socket * sk_sock; /* berkeley socket layer */ struct sock * sk_sk; /* INET layer */ + spinlock_t sk_lock; struct svc_serv * sk_server; /* service for this socket */ unsigned char sk_inuse; /* use count */ diff --git 
a/include/linux/sysctl.h b/include/linux/sysctl.h index 03148253d94a..e7c710646179 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -183,7 +183,8 @@ enum NET_CORE_FASTROUTE=7, NET_CORE_MSG_COST=8, NET_CORE_MSG_BURST=9, - NET_CORE_OPTMEM_MAX=10 + NET_CORE_OPTMEM_MAX=10, + NET_CORE_HOT_LIST_LENGTH=11 }; /* /proc/sys/net/ethernet */ diff --git a/include/linux/timer.h b/include/linux/timer.h index 796749fdba0f..d159222b7706 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -51,6 +51,7 @@ struct timer_list { unsigned long expires; unsigned long data; void (*function)(unsigned long); + volatile int running; }; extern void add_timer(struct timer_list * timer); @@ -61,7 +62,7 @@ extern int del_timer(struct timer_list * timer); * active timer (if the timer is inactive it will be activated) * mod_timer(a,b) is equivalent to del_timer(a); a->expires = b; add_timer(a) */ -void mod_timer(struct timer_list *timer, unsigned long expires); +int mod_timer(struct timer_list *timer, unsigned long expires); extern void it_real_fn(unsigned long); @@ -69,6 +70,9 @@ extern inline void init_timer(struct timer_list * timer) { timer->next = NULL; timer->prev = NULL; +#ifdef __SMP__ + timer->running = 0; +#endif } extern inline int timer_pending(const struct timer_list * timer) @@ -76,6 +80,20 @@ extern inline int timer_pending(const struct timer_list * timer) return timer->prev != NULL; } +#ifdef __SMP__ +#define timer_exit(t) do { (t)->running = 0; mb(); } while (0) +#define timer_set_running(t) do { (t)->running = 1; mb(); } while (0) +#define timer_is_running(t) ((t)->running != 0) +#define timer_synchronize(t) while (timer_is_running(t)) barrier() +extern int del_timer_sync(struct timer_list * timer); +#else +#define timer_exit(t) do { } while (0) +#define timer_set_running(t) do { } while (0) +#define timer_is_running(t) (0) +#define timer_synchronize(t) barrier() +#define del_timer_sync(t) del_timer(t) +#endif + /* * These inlines deal with timer 
wrapping correctly. You are * strongly encouraged to use them diff --git a/include/linux/udf_167.h b/include/linux/udf_167.h index ee09bd9adb15..19b3aa8e2b07 100644 --- a/include/linux/udf_167.h +++ b/include/linux/udf_167.h @@ -390,10 +390,13 @@ struct LogicalVolIntegrityDesc { #define INTEGRITY_TYPE_CLOSE 1 /* Recorded Address (ECMA 167 4/7.1) */ +#ifndef _LINUX_UDF_FS_I_H +/* Declared in udf_fs_i.h */ typedef struct { Uint32 logicalBlockNum; Uint16 partitionReferenceNum; } lb_addr; +#endif /* Extent interpretation (ECMA 167 4/14.14.1.1) */ #define EXTENT_RECORDED_ALLOCATED 0x00 diff --git a/include/linux/vt_buffer.h b/include/linux/vt_buffer.h index ca1ec519db7d..626b2524d7c2 100644 --- a/include/linux/vt_buffer.h +++ b/include/linux/vt_buffer.h @@ -15,7 +15,7 @@ #include -#ifdef CONFIG_VGA_CONSOLE +#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_MDA_CONSOLE) #include #endif diff --git a/include/net/neighbour.h b/include/net/neighbour.h index 1a6f1dad0cdb..b63398881b65 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -163,6 +163,7 @@ struct neigh_table unsigned long last_rand; struct neigh_parms *parms_list; kmem_cache_t *kmem_cachep; + struct tasklet_struct gc_task; struct neigh_statistics stats; struct neighbour *hash_buckets[NEIGH_HASHMASK+1]; struct pneigh_entry *phash_buckets[PNEIGH_HASHMASK+1]; diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index b866777a6dcb..2c4b4cff9b44 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -63,19 +63,10 @@ struct Qdisc_ops int (*dump)(struct Qdisc *, struct sk_buff *); }; -struct Qdisc_head -{ - struct Qdisc_head *forw; - struct Qdisc_head *back; -}; - -extern struct Qdisc_head qdisc_head; -extern spinlock_t qdisc_runqueue_lock; extern rwlock_t qdisc_tree_lock; struct Qdisc { - struct Qdisc_head h; int (*enqueue)(struct sk_buff *skb, struct Qdisc *dev); struct sk_buff * (*dequeue)(struct Qdisc *dev); unsigned flags; @@ -87,11 +78,9 @@ struct Qdisc u32 handle; 
atomic_t refcnt; struct sk_buff_head q; - struct net_device *dev; + struct net_device *dev; struct tc_stats stats; - unsigned long tx_timeo; - unsigned long tx_last; int (*reshape_fail)(struct sk_buff *skb, struct Qdisc *q); /* This field is deprecated, but it is still used by CBQ @@ -437,60 +426,13 @@ int teql_init(void); int tc_filter_init(void); int pktsched_init(void); -extern void qdisc_run_queues(void); extern int qdisc_restart(struct net_device *dev); -extern spinlock_t qdisc_runqueue_lock; - -/* Is it on run list? Reliable only under qdisc_runqueue_lock. */ - -extern __inline__ int qdisc_on_runqueue(struct Qdisc *q) -{ - return q->h.forw != NULL; -} - -/* Is run list not empty? Reliable only under qdisc_runqueue_lock. */ - -extern __inline__ int qdisc_pending(void) -{ - return qdisc_head.forw != &qdisc_head; -} - -/* Add qdisc to tail of run list. Called with BH, disabled on this CPU */ - -extern __inline__ void qdisc_run(struct Qdisc *q) -{ - spin_lock(&qdisc_runqueue_lock); - if (!qdisc_on_runqueue(q) && q->dev) { - q->h.forw = &qdisc_head; - q->h.back = qdisc_head.back; - qdisc_head.back->forw = &q->h; - qdisc_head.back = &q->h; - } - spin_unlock(&qdisc_runqueue_lock); -} - -extern __inline__ int __qdisc_wakeup(struct net_device *dev) +extern __inline__ void qdisc_run(struct net_device *dev) { - int res; - - while ((res = qdisc_restart(dev))<0 && !dev->tbusy) + while (!test_bit(LINK_STATE_XOFF, &dev->state) && + qdisc_restart(dev)<0) /* NOTHING */; - - return res; -} - - -/* If the device is not throttled, restart it and add to run list. - * BH must be disabled on this CPU. Usually, it is called by timers. 
- */ - -extern __inline__ void qdisc_wakeup(struct net_device *dev) -{ - spin_lock(&dev->queue_lock); - if (dev->tbusy || __qdisc_wakeup(dev)) - qdisc_run(dev->qdisc); - spin_unlock(&dev->queue_lock); } /* Calculate maximal size of packet seen by hard_start_xmit diff --git a/include/net/snmp.h b/include/net/snmp.h index 5105fd2209d4..8bcb17085ef0 100644 --- a/include/net/snmp.h +++ b/include/net/snmp.h @@ -202,7 +202,7 @@ struct linux_mib unsigned long __pad[32-26]; }; -#define SNMP_INC_STATS(mib, field) ((mib)[2*smp_processor_id()+!in_interrupt()].field++) +#define SNMP_INC_STATS(mib, field) ((mib)[2*smp_processor_id()+!in_softirq()].field++) #define SNMP_INC_STATS_BH(mib, field) ((mib)[2*smp_processor_id()].field++) #define SNMP_INC_STATS_USER(mib, field) ((mib)[2*smp_processor_id()+1].field++) diff --git a/include/net/sock.h b/include/net/sock.h index 45b6c700cc3e..92519ee88603 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -934,20 +934,20 @@ extern __inline__ void sock_put(struct sock *sk) */ extern __inline__ void sock_orphan(struct sock *sk) { - write_lock_irq(&sk->callback_lock); + write_lock_bh(&sk->callback_lock); sk->dead = 1; sk->socket = NULL; sk->sleep = NULL; - write_unlock_irq(&sk->callback_lock); + write_unlock_bh(&sk->callback_lock); } extern __inline__ void sock_graft(struct sock *sk, struct socket *parent) { - write_lock_irq(&sk->callback_lock); + write_lock_bh(&sk->callback_lock); sk->sleep = &parent->wait; parent->sk = sk; sk->socket = parent; - write_unlock_irq(&sk->callback_lock); + write_unlock_bh(&sk->callback_lock); } @@ -1150,7 +1150,7 @@ extern __inline__ int sock_writeable(struct sock *sk) extern __inline__ int gfp_any(void) { - return in_interrupt() ? GFP_ATOMIC : GFP_KERNEL; + return in_softirq() ? 
GFP_ATOMIC : GFP_KERNEL; } extern __inline__ long sock_rcvtimeo(struct sock *sk, int noblock) diff --git a/include/net/tcp.h b/include/net/tcp.h index 8cefdb4b140d..a4d671ede4a9 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1188,7 +1188,7 @@ static __inline__ void tcp_set_state(struct sock *sk, int state) /* fall through */ default: if (oldstate==TCP_ESTABLISHED) - tcp_statistics[smp_processor_id()*2+!in_interrupt()].TcpCurrEstab--; + tcp_statistics[smp_processor_id()*2+!in_softirq()].TcpCurrEstab--; } /* Change state AFTER socket is unhashed to avoid closed diff --git a/include/scsi/sg.h b/include/scsi/sg.h index 6756f8d2f208..ddd942d1453a 100644 --- a/include/scsi/sg.h +++ b/include/scsi/sg.h @@ -3,25 +3,25 @@ /* History: - Started: Aug 9 by Lawrence Foard (entropy@world.std.com), to allow user - process control of SCSI devices. + Started: Aug 9 by Lawrence Foard (entropy@world.std.com), to allow user + process control of SCSI devices. Development Sponsored by Killy Corp. NY NY Original driver (sg.h): * Copyright (C) 1992 Lawrence Foard -2.x extensions to driver: +Version 2 and 3 extensions to driver: * Copyright (C) 1998, 1999 Douglas Gilbert + Version: 3.1.10 (20000123) + This version is for 2.3/2.4 series kernels. - Version: 2.3.35 (990708) - This version for 2.3 series kernels. It only differs from sg version - 2.1.35 used in the 2.2 series kernels by changes to wait_queue. This - in an internal kernel interface and should not effect users. - D. P. 
Gilbert (dgilbert@interlog.com, dougg@triode.net.au) - - Changes since 2.1.34 (990603) + Changes since 2.1.34 (990603) and 2.3.35 (990708) + - add new interface structure: sg_io_hdr_t + - supports larger sense buffer, DMA residual count + direct IO + - add SG_IO ioctl (combines function of write() + read() ) + - remove SG_SET_MERGE_FD, UNDERRUN_FLAG + _GET_ ioctls + logic + - add proc_fs support in /proc/scsi/sg/ directory - add queuing info into struct sg_scsi_id - - block negative timeout values - - add back write() wait on previous read() when no cmd queuing + - def_reserved_size can be given at driver or module load time Changes since 2.1.33 (990521) - implement SG_SET_RESERVED_SIZE and associated memory re-org. - add SG_NEXT_CMD_LEN to override SCSI command lengths @@ -34,117 +34,135 @@ Original driver (sg.h): - clean up logging of pointers to use %p (for 64 bit architectures) - rework usage of get_user/copy_to_user family of kernel calls - "disown" scsi_command blocks before releasing them - Changes since 2.1.30 (990320) - - memory tweaks: change flags on kmalloc (GFP_KERNEL to GFP_ATOMIC) - - increase max allowable mid-level pool usage - - - New features and changes: - - per file descriptor (fd) write-read sequencing and command queues. - - command queuing supported (SG_MAX_QUEUE is maximum per fd). - - scatter-gather supported (allowing potentially megabyte transfers). - - the SCSI target, host and driver status are returned - in unused fields of sg_header (maintaining its original size). - - asynchronous notification support added (SIGPOLL, SIGIO) for - read()s (write()s should never block). - - pack_id logic added so read() can wait for a specific pack_id. - - uses memory > ISA_DMA_THRESHOLD if adapter allows it (e.g. a - pci scsi adapter). - - this driver no longer uses a single SG_BIG_BUFF sized buffer - obtained at driver/module init time. 
Rather it tries to obtain a - SG_DEF_RESERVED_SIZE buffer when a fd is open()ed and frees it - at the corresponding release() (ie per fd). Actually the "buffer" - may be a collection of buffers if scatter-gather is being used. - - add SG_SET_RESERVED_SIZE ioctl allowing the user to request a - large buffer for duration of current file descriptor's lifetime. - - SG_GET_RESERVED_SIZE ioctl can be used to find out how much - actually has been reserved. - - add SG_NEXT_CMD_LEN ioctl to override SCSI command length on - the next write() to this file descriptor. - - SG_GET_RESERVED_SIZE's presence as a symbol can be used for - compile time identification of the version 2 sg driver. - However, it is recommended that run time identification based on - calling the ioctl of the same name is a more flexible and - safer approach. - - adds several ioctl calls, see ioctl section below. - - Good documentation on the original "sg" device interface and usage can be - found in the Linux HOWTO document: "SCSI Programming HOWTO" (version 0.5) - by Heiko Eissfeldt; last updated 7 May 1996. Here is a quick summary of - sg basics: - An SG device is accessed by writing SCSI commands plus any associated - outgoing data to it; the resulting status codes and any incoming data - are then obtained by a read call. The device can be opened O_NONBLOCK - (non-blocking) and poll() used to monitor its progress. The device may be - opened O_EXCL which excludes other "sg" users from this device (but not - "sd", "st" or "sr" users). 
The buffer given to the write() call is made - up as follows: - - struct sg_header image (see below) - - scsi command (6, 10 or 12 bytes long) - - data to be written to the device (if any) - - The buffer received from the corresponding read() call contains: - - struct sg_header image (check results + sense_buffer) - - data read back from device (if any) - - The given SCSI command has its LUN field overwritten internally by the - value associated with the device that has been opened. - - This device currently uses "indirect IO" in the sense that data is - DMAed into kernel buffers from the hardware and afterwards is - transferred into the user space (or vice versa if you are writing). - Transfer speeds or up to 20 to 30MBytes/sec have been measured using - indirect IO. For faster throughputs "direct IO" which cuts out the - double handling of data is required. This will also need a new interface. - - Grabbing memory for those kernel buffers used in this driver for DMA may - cause the dreaded ENOMEM error. This error seems to be more prevalent - under early 2.2.x kernels than under the 2.0.x kernel series. For a given - (large) transfer the memory obtained by this driver must be contiguous or - scatter-gather must be used (if supported by the adapter). [Furthermore, - ISA SCSI adapters can only use memory below the 16MB level on a i386.] - - When a "sg" device is open()ed O_RDWR then this driver will attempt to - reserve a buffer of SG_DEF_RESERVED_SIZE that will be used by subsequent - write()s on this file descriptor as long as: - - it is not already in use (eg when command queuing is in use) - - the write() does not call for a buffer size larger than the - reserved size. - In these cases the write() will attempt to find the memory it needs for - DMA buffers dynamically and in the worst case will fail with ENOMEM. - The amount of memory actually reserved depends on various dynamic factors - and can be checked with the SG_GET_RESERVED_SIZE ioctl(). 
[In a very - tight memory situation it may yield 0!] The size of the reserved buffer - can be changed with the SG_SET_RESERVED_SIZE ioctl(). It should be - followed with a call to the SG_GET_RESERVED_SIZE ioctl() to find out how - much was actually reserved. - - More documentation plus test and utility programs can be found at - http://www.torque.net/sg + +Map of SG verions to the Linux kernels in which they appear: + ---------- ---------------------------------- + original all kernels < 2.2.6 + 2.1.31 2.2.6 and 2.2.7 + 2.1.32 2.2.8 and 2.2.9 + 2.1.34 2.2.10 to 2.2.13 + 2.1.36 2.2.14 + 2.3.35 2.3.x development series kernels (starting 2.3.20) + 3.0.x optional version 3 sg driver for 2.2 series + 3.1.x candidate version 3 sg driver for 2.3 series + +Major new features in SG 3.x driver (cf SG 2.x drivers) + - SG_IO ioctl() combines function if write() and read() + - new interface (sg_io_hdr_t) but still supports old interface + - scatter/gather in user space and direct IO supported + +Major features in SG 2.x driver (cf original SG driver) + - per file descriptor (fd) write-read sequencing + - command queuing supported + - scatter-gather supported at kernel level allowing potentially + large transfers + - more SCSI status information returned + - asynchronous notification support added (SIGPOLL, SIGIO) + - read() can fetch by given pack_id + - uses kernel memory as appropriate for SCSI adapter being used + - single SG_BIG_BUFF replaced by per file descriptor "reserve + buffer" whose size can be manipulated by ioctls() + + The term "indirect IO" refers a method by which data is DMAed into kernel + buffers from the hardware and afterwards is transferred into the user + space (or vice versa if you are writing). Transfer speeds of up to 20 to + 30MBytes/sec have been measured using indirect IO. For faster throughputs + "direct IO" which cuts out the double handling of data is required. 
+ Direct IO is supported by the SG 3.x drivers on 2.3 series Linux kernels + (or later) and requires the use of the new interface. + + Requests for direct IO with the new interface will automatically fall back + to indirect IO mode if they cannot be fulfilled. An example of such a case + is an ISA SCSI adapter which is only capable of DMAing to the lower 16MB of + memory due to the architecture of ISA. The 'info' field in the new + interface indicates whether a direct or indirect data transfer took place. + + Obtaining memory for the kernel buffers used in indirect IO is done by + first checking if the "reserved buffer" for the current file descriptor + is available and large enough. If these conditions are _not_ met then + kernel memory is obtained on a per SCSI command basis. This corresponds + to a write(), read() sequence or a SG_IO ioctl() call. Further, the + kernel memory that is suitable for DMA may be constrained by the + architecture of the SCSI adapter (e.g. ISA adapters). + + Documentation + ============= + A web site for SG device drivers can be found at: + http://www.torque.net/sg [alternatively check the MAINTAINERS file] + The main documents are still based on 2.x versions: + http://www.torque.net/sg/p/scsi-generic.txt + http://www.torque.net/sg/p/scsi-generic_long.txt + The first document can also be found in the kernel source tree, probably at: + /usr/src/linux/Documentation/scsi-generic.txt . + Documentation on the changes and additions in 3.x version of the sg driver + can be found at: http://www.torque.net/sg/p/scsi-generic_v3.txt + Utility and test programs are also available at that web site. */ -#define SG_MAX_SENSE 16 /* too little, unlikely to change in 2.2.x */ +/* New interface introduced in the 3.x SG drivers follows */ -struct sg_header -{ - int pack_len; /* [o] reply_len (ie useless), ignored as input */ - int reply_len; /* [i] max length of expected reply (inc. 
sg_header) */ - int pack_id; /* [io] id number of packet (use ints >= 0) */ - int result; /* [o] 0==ok, else (+ve) Unix errno (best ignored) */ - unsigned int twelve_byte:1; - /* [i] Force 12 byte command length for group 6 & 7 commands */ - unsigned int target_status:5; /* [o] scsi status from target */ - unsigned int host_status:8; /* [o] host status (see "DID" codes) */ - unsigned int driver_status:8; /* [o] driver status+suggestion */ - unsigned int other_flags:10; /* unused */ - unsigned char sense_buffer[SG_MAX_SENSE]; /* [o] Output in 3 cases: - when target_status is CHECK_CONDITION or - when target_status is COMMAND_TERMINATED or - when (driver_status & DRIVER_SENSE) is true. */ -}; /* This structure is 36 bytes long on i386 */ +typedef struct sg_iovec /* same structure as used by readv() Linux system */ +{ /* call. It defines one scatter-gather element. */ + void * iov_base; /* Starting address */ + size_t iov_len; /* Length in bytes */ +} sg_iovec_t; -typedef struct sg_scsi_id { +typedef struct sg_io_hdr +{ + char interface_id; /* [i] 'S' for SCSI generic (required) */ + unsigned char cmd_len; /* [i] SCSI command length ( <= 16 bytes) */ + unsigned char iovec_count; /* [i] 0 implies no scatter gather */ + unsigned char mx_sb_len; /* [i] max length to write to sbp */ + int dxfer_direction; /* [i] data transfer direction */ + unsigned int dxfer_len; /* [i] byte count of data transfer */ + void * dxferp; /* [i], [*io] points to data transfer memory + or scatter gather list */ + unsigned char * cmdp; /* [i], [*i] points to command to perform */ + unsigned char * sbp; /* [i], [*o] points to sense_buffer memory */ + unsigned int timeout; /* [i] MAX_UINT->no timeout (unit: millisec) */ + unsigned int flags; /* [i] 0 -> default, see SG_FLAG... 
*/ + int pack_id; /* [i->o] unused internally (normally) */ + void * usr_ptr; /* [i->o] unused internally */ + unsigned char status; /* [o] scsi status */ + unsigned char masked_status;/* [o] shifted, masked scsi status */ + unsigned char msg_status; /* [o] messaging level data (optional) */ + unsigned char sb_len_wr; /* [o] byte count actually written to sbp */ + unsigned short host_status; /* [o] errors from host adapter */ + unsigned short driver_status;/* [o] errors from software driver */ + int resid; /* [o] dxfer_len - actual_transferred */ + unsigned int duration; /* [o] time taken by cmd (unit: millisec) */ + unsigned int info; /* [o] auxiliary information */ +} sg_io_hdr_t; /* 60 bytes long (on i386) */ + +/* Use negative values to flag difference from original sg_header structure */ +#define SG_DXFER_NONE -1 /* e.g. a SCSI Test Unit Ready command */ +#define SG_DXFER_TO_DEV -2 /* e.g. a SCSI WRITE command */ +#define SG_DXFER_FROM_DEV -3 /* e.g. a SCSI READ command */ +#define SG_DXFER_TO_FROM_DEV -4 /* treated like SG_DXFER_FROM_DEV with the + additional property than during indirect + IO the user buffer is copied into the + kernel buffers before the transfer */ + +/* following flag values can be "or"-ed together */ +#define SG_FLAG_DIRECT_IO 1 /* default is indirect IO */ +#define SG_FLAG_LUN_INHIBIT 2 /* default is to put device's lun into */ + /* the 2nd byte of SCSI command */ +#define SG_FLAG_NO_DXFER 0x10000 /* no transfer of kernel buffers to/from */ + /* user space (debug indirect IO) */ + +/* following 'info' values are "or"-ed together */ +#define SG_INFO_OK_MASK 0x1 +#define SG_INFO_OK 0x0 /* no sense, host nor driver "noise" */ +#define SG_INFO_CHECK 0x1 /* something abnormal happened */ + +#define SG_INFO_DIRECT_IO_MASK 0x6 +#define SG_INFO_INDIRECT_IO 0x0 /* data xfer via kernel buffers (or no xfer) */ +#define SG_INFO_DIRECT_IO 0x2 /* direct IO requested and performed */ +#define SG_INFO_MIXED_IO 0x4 /* part direct, part indirect IO */ + + 
+typedef struct sg_scsi_id { /* used by SG_GET_SCSI_ID ioctl() */ int host_no; /* as in "scsi" where 'n' is one of 0, 1, 2 etc */ int channel; int scsi_id; /* scsi id of target device */ @@ -152,25 +170,38 @@ typedef struct sg_scsi_id { int scsi_type; /* TYPE_... defined in scsi/scsi.h */ short h_cmd_per_lun;/* host (adapter) maximum commands per lun */ short d_queue_depth;/* device (or adapter) maximum queue length */ - int unused1; /* probably find a good use, set 0 for now */ - int unused2; /* ditto */ -} Sg_scsi_id; - -/* IOCTLs: ( _GET_s yield result via 'int *' 3rd argument unless - otherwise indicated) */ -#define SG_SET_TIMEOUT 0x2201 /* unit: jiffies (10ms on i386) */ -#define SG_GET_TIMEOUT 0x2202 /* yield timeout as _return_ value */ + int unused[2]; /* probably find a good use, set 0 for now */ +} sg_scsi_id_t; /* 32 bytes long on i386 */ + +typedef struct sg_req_info { /* used by SG_GET_REQUEST_TABLE ioctl() */ + char req_state; /* 0 -> not used, 1 -> written, 2 -> ready to read */ + char orphan; /* 0 -> normal request, 1 -> from interruped SG_IO */ + char sg_io_owned; /* 0 -> complete with read(), 1 -> owned by SG_IO */ + char problem; /* 0 -> no problem detected, 1 -> error to report */ + int pack_id; /* pack_id associated with request */ + void * usr_ptr; /* user provided pointer (in new interface) */ + unsigned int duration; /* millisecs elapsed since written (req_state==1) + or request duration (req_state==2) */ + int unused; +} sg_req_info_t; /* 20 bytes long on i386 */ + + +/* IOCTLs: Those ioctls that are relevant to the SG 3.x drivers follow. + [Those that only apply to the SG 2.x drivers are at the end of the file.] 
+ (_GET_s yield result via 'int *' 3rd argument unless otherwise indicated) */ #define SG_EMULATED_HOST 0x2203 /* true for emulated host adapter (ATAPI) */ /* Used to configure SCSI command transformation layer for ATAPI devices */ -#define SG_SET_TRANSFORM 0x2204 +/* Only supported by the ide-scsi driver */ +#define SG_SET_TRANSFORM 0x2204 /* N.B. 3rd arg is not pointer but value: */ + /* 3rd arg = 0 to disable transform, 1 to enable it */ #define SG_GET_TRANSFORM 0x2205 #define SG_SET_RESERVED_SIZE 0x2275 /* request a new reserved buffer size */ #define SG_GET_RESERVED_SIZE 0x2272 /* actual size of reserved buffer */ -/* The following ioctl takes a 'Sg_scsi_id *' object as its 3rd argument. */ +/* The following ioctl has a 'sg_scsi_id_t *' object as its 3rd argument. */ #define SG_GET_SCSI_ID 0x2276 /* Yields fd's bus, chan, dev, lun + type */ /* SCSI id information can also be obtained from SCSI_IOCTL_GET_IDLUN */ @@ -179,66 +210,111 @@ typedef struct sg_scsi_id { #define SG_GET_LOW_DMA 0x227a /* 0-> use all ram for dma; 1-> low dma ram */ /* When SG_SET_FORCE_PACK_ID set to 1, pack_id is input to read() which - will attempt to read that pack_id or block (or return EAGAIN). If - pack_id is -1 then read oldest waiting. When ...FORCE_PACK_ID set to 0 - then pack_id ignored by read() and oldest readable fetched. */ + tries to fetch a packet with a matching pack_id, waits, or returns EAGAIN. + If pack_id is -1 then read oldest waiting. When ...FORCE_PACK_ID set to 0 + then pack_id ignored by read() and oldest readable fetched. 
*/ #define SG_SET_FORCE_PACK_ID 0x227b #define SG_GET_PACK_ID 0x227c /* Yields oldest readable pack_id (or -1) */ #define SG_GET_NUM_WAITING 0x227d /* Number of commands awaiting read() */ -/* Turn on error sense trace (1..8), dump this device to log/console (9) - or dump all sg device states ( >9 ) to log/console */ -#define SG_SET_DEBUG 0x227e /* 0 -> turn off debug */ - /* Yields max scatter gather tablesize allowed by current host adapter */ #define SG_GET_SG_TABLESIZE 0x227F /* 0 implies can't do scatter gather */ -/* Control whether sequencing per file descriptor or per device */ -#define SG_GET_MERGE_FD 0x2274 /* 0-> per fd, 1-> per device */ -#define SG_SET_MERGE_FD 0x2273 /* Attempt to change sequencing state, - if more than current fd open on device, will fail with EBUSY */ - -/* Get/set command queuing state per fd (default is SG_DEF_COMMAND_Q) */ -#define SG_GET_COMMAND_Q 0x2270 /* Yields 0 (queuing off) or 1 (on) */ -#define SG_SET_COMMAND_Q 0x2271 /* Change queuing state with 0 or 1 */ - -/* Get/set whether DMA underrun will cause an error (DID_ERROR). This only - currently applies to the [much-used] aic7xxx driver. 
*/ -#define SG_GET_UNDERRUN_FLAG 0x2280 /* Yields 0 (don't flag) or 1 (flag) */ -#define SG_SET_UNDERRUN_FLAG 0x2281 /* Change flag underrun state */ - #define SG_GET_VERSION_NUM 0x2282 /* Example: version 2.1.34 yields 20134 */ -#define SG_NEXT_CMD_LEN 0x2283 /* override SCSI command length with given - number on the next write() on this file descriptor */ /* Returns -EBUSY if occupied else takes as input: 0 -> do nothing, 1 -> device reset or 2 -> bus reset (may not be activated yet) */ #define SG_SCSI_RESET 0x2284 +/* synchronous SCSI command ioctl, (only in version 3 interface) */ +#define SG_IO 0x2285 /* similar effect as write() followed by read() */ + +#define SG_GET_REQUEST_TABLE 0x2286 /* yields table of active requests */ + +/* How to treat EINTR during SG_IO ioctl(), only in SG 3.x series */ +#define SG_SET_KEEP_ORPHAN 0x2287 /* 1 -> hold for read(), 0 -> drop (def) */ +#define SG_GET_KEEP_ORPHAN 0x2288 + #define SG_SCATTER_SZ (8 * 4096) /* PAGE_SIZE not available to user */ /* Largest size (in bytes) a single scatter-gather list element can have. - The value must be a power of 2 and <= (PAGE_SIZE * 32) [131072 bytes on + The value must be a power of 2 and <= (PAGE_SIZE * 32) [131072 bytes on i386]. The minimum value is PAGE_SIZE. If scatter-gather not supported by adapter then this value is the largest data block that can be read/written by a single scsi command. The user can find the value of PAGE_SIZE by calling getpagesize() defined in unistd.h . 
*/ -#define SG_DEFAULT_TIMEOUT (60*HZ) /* HZ == 'jiffies in 1 second' */ #define SG_DEFAULT_RETRIES 1 /* Defaults, commented if they differ from original sg driver */ -#define SG_DEF_COMMAND_Q 0 -#define SG_DEF_MERGE_FD 0 /* was 1 -> per device sequencing */ #define SG_DEF_FORCE_LOW_DMA 0 /* was 1 -> memory below 16MB on i386 */ #define SG_DEF_FORCE_PACK_ID 0 -#define SG_DEF_UNDERRUN_FLAG 0 -#define SG_DEF_RESERVED_SIZE SG_SCATTER_SZ +#define SG_DEF_KEEP_ORPHAN 0 +#define SG_DEF_RESERVED_SIZE SG_SCATTER_SZ /* load time option */ /* maximum outstanding requests, write() yields EDOM if exceeded */ #define SG_MAX_QUEUE 16 #define SG_BIG_BUFF SG_DEF_RESERVED_SIZE /* for backward compatibility */ +/* Alternate style type names, "..._t" variants preferred */ +typedef struct sg_io_hdr Sg_io_hdr; +typedef struct sg_io_vec Sg_io_vec; +typedef struct sg_scsi_id Sg_scsi_id; +typedef struct sg_req_info Sg_req_info; + + +/* vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv */ +/* The older SG interface based on the 'sg_header' structure follows. */ +/* ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ */ + +#define SG_MAX_SENSE 16 /* this only applies to the sg_header interface */ + +struct sg_header +{ + int pack_len; /* [o] reply_len (ie useless), ignored as input */ + int reply_len; /* [i] max length of expected reply (inc. 
sg_header) */ + int pack_id; /* [io] id number of packet (use ints >= 0) */ + int result; /* [o] 0==ok, else (+ve) Unix errno (best ignored) */ + unsigned int twelve_byte:1; + /* [i] Force 12 byte command length for group 6 & 7 commands */ + unsigned int target_status:5; /* [o] scsi status from target */ + unsigned int host_status:8; /* [o] host status (see "DID" codes) */ + unsigned int driver_status:8; /* [o] driver status+suggestion */ + unsigned int other_flags:10; /* unused */ + unsigned char sense_buffer[SG_MAX_SENSE]; /* [o] Output in 3 cases: + when target_status is CHECK_CONDITION or + when target_status is COMMAND_TERMINATED or + when (driver_status & DRIVER_SENSE) is true. */ +}; /* This structure is 36 bytes long on i386 */ + + +/* IOCTLs: The following are not required (or ignored) when the sg_io_hdr_t + interface is used. That are kept for backward compatibility with + the original and version 2 drivers. */ + +#define SG_SET_TIMEOUT 0x2201 /* unit: jiffies (10ms on i386) */ +#define SG_GET_TIMEOUT 0x2202 /* yield timeout as _return_ value */ + +/* Get/set command queuing state per fd (default is SG_DEF_COMMAND_Q. + Each time a sg_io_hdr_t object is seen on this file descriptor, this + command queuing flag is set on (overriding the previous setting). */ +#define SG_GET_COMMAND_Q 0x2270 /* Yields 0 (queuing off) or 1 (on) */ +#define SG_SET_COMMAND_Q 0x2271 /* Change queuing state with 0 or 1 */ + +/* Turn on/off error sense trace (1 and 0 respectively, default is off). 
+ Try using: "# cat /proc/scsi/sg/debug" instead in the v3 driver */ +#define SG_SET_DEBUG 0x227e /* 0 -> turn off debug */ + +#define SG_NEXT_CMD_LEN 0x2283 /* override SCSI command length with given + number on the next write() on this file descriptor */ + + +/* Defaults, commented if they differ from original sg driver */ +#define SG_DEFAULT_TIMEOUT (60*HZ) /* HZ == 'jiffies in 1 second' */ +#define SG_DEF_COMMAND_Q 0 /* command queuing is always on when + the new interface is used */ +#define SG_DEF_UNDERRUN_FLAG 0 + #endif diff --git a/include/video/macmodes.h b/include/video/macmodes.h index 1bdfa815b851..054bd984534f 100644 --- a/include/video/macmodes.h +++ b/include/video/macmodes.h @@ -42,6 +42,7 @@ #define VMODE_CHOOSE 99 #define CMODE_NVRAM -1 +#define CMODE_CHOOSE -2 #define CMODE_8 0 /* 8 bits/pixel */ #define CMODE_16 1 /* 16 (actually 15) bits/pixel */ #define CMODE_32 2 /* 32 (actually 24) bits/pixel */ diff --git a/init/main.c b/init/main.c index 0ed25367c425..d22fa6fa4728 100644 --- a/init/main.c +++ b/init/main.c @@ -111,6 +111,7 @@ extern void dquot_init_hash(void); #define MAX_INIT_ENVS 8 extern void time_init(void); +extern void softirq_init(void); int rows, cols; @@ -469,6 +470,7 @@ asmlinkage void __init start_kernel(void) init_IRQ(); sched_init(); time_init(); + softirq_init(); parse_options(command_line); /* diff --git a/kernel/exit.c b/kernel/exit.c index c99ba220ca70..874561e73a88 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -395,9 +395,7 @@ NORET_TYPE void do_exit(long code) if (!tsk->pid) panic("Attempted to kill the idle task!"); tsk->flags |= PF_EXITING; - start_bh_atomic(); - del_timer(&tsk->real_timer); - end_bh_atomic(); + del_timer_sync(&tsk->real_timer); lock_kernel(); fake_volatile: diff --git a/kernel/itimer.c b/kernel/itimer.c index 7d38ac1acd44..6c38477be9ff 100644 --- a/kernel/itimer.c +++ b/kernel/itimer.c @@ -48,7 +48,9 @@ int do_getitimer(int which, struct itimerval *value) case ITIMER_REAL: interval = 
current->it_real_incr; val = 0; - start_bh_atomic(); + /* + * FIXME! This needs to be atomic, in case the kernel timer happens! + */ if (timer_pending(¤t->real_timer)) { val = current->real_timer.expires - jiffies; @@ -56,7 +58,6 @@ int do_getitimer(int which, struct itimerval *value) if ((long) val <= 0) val = 1; } - end_bh_atomic(); break; case ITIMER_VIRTUAL: val = current->it_virt_value; @@ -102,6 +103,7 @@ void it_real_fn(unsigned long __data) p->real_timer.expires = jiffies + interval; add_timer(&p->real_timer); } + timer_exit(&p->real_timer); } int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue) @@ -115,9 +117,7 @@ int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue) return k; switch (which) { case ITIMER_REAL: - start_bh_atomic(); - del_timer(¤t->real_timer); - end_bh_atomic(); + del_timer_sync(¤t->real_timer); current->it_real_value = j; current->it_real_incr = i; if (!j) diff --git a/kernel/ksyms.c b/kernel/ksyms.c index bc5592f8ffcf..e34d941759f7 100644 --- a/kernel/ksyms.c +++ b/kernel/ksyms.c @@ -151,6 +151,7 @@ EXPORT_SYMBOL(d_instantiate); EXPORT_SYMBOL(d_alloc); EXPORT_SYMBOL(d_lookup); EXPORT_SYMBOL(d_path); +EXPORT_SYMBOL(mark_buffer_dirty); EXPORT_SYMBOL(__mark_buffer_dirty); EXPORT_SYMBOL(__mark_inode_dirty); EXPORT_SYMBOL(free_kiovec); @@ -163,7 +164,7 @@ EXPORT_SYMBOL(filp_close); EXPORT_SYMBOL(put_filp); EXPORT_SYMBOL(files_lock); EXPORT_SYMBOL(check_disk_change); -EXPORT_SYMBOL(invalidate_buffers); +EXPORT_SYMBOL(__invalidate_buffers); EXPORT_SYMBOL(invalidate_inodes); EXPORT_SYMBOL(invalidate_inode_pages); EXPORT_SYMBOL(truncate_inode_pages); @@ -315,12 +316,11 @@ EXPORT_SYMBOL(request_irq); EXPORT_SYMBOL(free_irq); EXPORT_SYMBOL(probe_irq_on); EXPORT_SYMBOL(probe_irq_off); -EXPORT_SYMBOL(bh_active); -EXPORT_SYMBOL(bh_mask); -EXPORT_SYMBOL(bh_mask_count); -EXPORT_SYMBOL(bh_base); EXPORT_SYMBOL(add_timer); EXPORT_SYMBOL(del_timer); +#ifdef __SMP__ +EXPORT_SYMBOL(del_timer_sync); +#endif 
EXPORT_SYMBOL(mod_timer); EXPORT_SYMBOL(tq_timer); EXPORT_SYMBOL(tq_immediate); @@ -457,5 +457,12 @@ EXPORT_SYMBOL(get_fast_time); /* library functions */ EXPORT_SYMBOL(strnicmp); +/* software interrupts */ +EXPORT_SYMBOL(tasklet_hi_vec); +EXPORT_SYMBOL(bh_task_vec); +EXPORT_SYMBOL(init_bh); +EXPORT_SYMBOL(remove_bh); + /* init task, for moving kthread roots - ought to export a function ?? */ + EXPORT_SYMBOL(init_task_union); diff --git a/kernel/sched.c b/kernel/sched.c index 6377be965c4d..61fc91a6143b 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -198,31 +198,18 @@ static inline void reschedule_idle(struct task_struct * p, unsigned long flags) if (cpu_curr(best_cpu) == tsk) goto send_now; - /* - * The only heuristics - we use the tsk->avg_slice value - * to detect 'frequent reschedulers'. - * - * If both the woken-up process and the preferred CPU is - * is a frequent rescheduler, then skip the asynchronous - * wakeup, the frequent rescheduler will likely chose this - * task during it's next schedule(): - */ - if (p->policy == SCHED_OTHER) { - tsk = cpu_curr(best_cpu); - if (p->avg_slice + tsk->avg_slice < cacheflush_time) - goto out_no_target; - } - /* * We know that the preferred CPU has a cache-affine current * process, lets try to find a new idle CPU for the woken-up * process: */ - for (i = 0; i < smp_num_cpus; i++) { + for (i = smp_num_cpus - 1; i >= 0; i--) { cpu = cpu_logical_map(i); + if (cpu == best_cpu) + continue; tsk = cpu_curr(cpu); /* - * We use the first available idle CPU. This creates + * We use the last available idle CPU. This creates * a priority list between idle CPUs, but this is not * a problem. */ @@ -232,26 +219,32 @@ static inline void reschedule_idle(struct task_struct * p, unsigned long flags) /* * No CPU is idle, but maybe this process has enough priority - * to preempt it's preferred CPU. (this is a shortcut): + * to preempt it's preferred CPU. 
*/ tsk = cpu_curr(best_cpu); if (preemption_goodness(tsk, p, best_cpu) > 0) goto send_now; /* - * We should get here rarely - or in the high CPU contention + * We will get here often - or in the high CPU contention * case. No CPU is idle and this process is either lowprio or - * the preferred CPU is highprio. Maybe some other CPU can/must - * be preempted: + * the preferred CPU is highprio. Try to preemt some other CPU + * only if it's RT or if it's iteractive and the preferred + * cpu won't reschedule shortly. */ - for (i = 0; i < smp_num_cpus; i++) { - cpu = cpu_logical_map(i); - tsk = cpu_curr(cpu); - if (preemption_goodness(tsk, p, cpu) > 0) - goto send_now; + if ((p->avg_slice < cacheflush_time && cpu_curr(best_cpu)->avg_slice > cacheflush_time) || + p->policy != SCHED_OTHER) + { + for (i = smp_num_cpus - 1; i >= 0; i--) { + cpu = cpu_logical_map(i); + if (cpu == best_cpu) + continue; + tsk = cpu_curr(cpu); + if (preemption_goodness(tsk, p, cpu) > 0) + goto send_now; + } } -out_no_target: spin_unlock_irqrestore(&runqueue_lock, flags); return; @@ -397,6 +390,9 @@ signed long schedule_timeout(signed long timeout) add_timer(&timer); schedule(); del_timer(&timer); + /* RED-PEN. Timer may be running now on another cpu. + * Pray that process will not exit enough fastly. 
+ */ timeout = expire - jiffies; @@ -460,9 +456,9 @@ tq_scheduler_back: release_kernel_lock(prev, this_cpu); /* Do "administrative" work here while we don't hold any locks */ - if (bh_mask & bh_active) - goto handle_bh; -handle_bh_back: + if (softirq_state[this_cpu].active & softirq_state[this_cpu].mask) + goto handle_softirq; +handle_softirq_back: /* * 'sched_data' is protected by the fact that we can run @@ -621,9 +617,9 @@ still_running: next = prev; goto still_running_back; -handle_bh: - do_bottom_half(); - goto handle_bh_back; +handle_softirq: + do_softirq(); + goto handle_softirq_back; handle_tq_scheduler: run_task_queue(&tq_scheduler); @@ -1187,4 +1183,3 @@ void __init sched_init(void) atomic_inc(&init_mm.mm_count); enter_lazy_tlb(&init_mm, current, cpu); } - diff --git a/kernel/softirq.c b/kernel/softirq.c index d184c944e843..0f3c23ee9a9a 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -3,68 +3,271 @@ * * Copyright (C) 1992 Linus Torvalds * - * do_bottom_half() runs at normal kernel priority: all interrupts - * enabled. do_bottom_half() is atomic with respect to itself: a - * bottom_half handler need not be re-entrant. - * * Fixed a disable_bh()/enable_bh() race (was causing a console lockup) * due bh_mask_count not atomic handling. Copyright (C) 1998 Andrea Arcangeli + * + * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903) */ #include #include #include #include +#include -#include +/* + - No shared variables, all the data are CPU local. + - If a softirq needs serialization, let it serialize itself + by its own spinlocks. + - Even if softirq is serialized, only local cpu is marked for + execution. Hence, we get something sort of weak cpu binding. + Though it is still not clear, will it result in better locality + or will not. + - These softirqs are not masked by global cli() and start_bh_atomic() + (by clear reasons). 
Hence, old parts of code still using global locks + MUST NOT use softirqs, but insert interfacing routines acquiring + global locks. F.e. look at BHs implementation. -/* intr_count died a painless death... -DaveM */ + Examples: + - NET RX softirq. It is multithreaded and does not require + any global serialization. + - NET TX softirq. It kicks software netdevice queues, hence + it is logically serialized per device, but this serialization + is invisible to common code. + - Tasklets: serialized wrt itself. + - Bottom halves: globally serialized, grr... + */ -atomic_t bh_mask_count[32]; -unsigned long bh_active = 0; -unsigned long bh_mask = 0; -void (*bh_base[32])(void); -/* - * This needs to make sure that only one bottom half handler - * is ever active at a time. We do this without locking by - * doing an atomic increment on the intr_count, and checking - * (nonatomically) against 1. Only if it's 1 do we schedule - * the bottom half. - * - * Note that the non-atomicity of the test (as opposed to the - * actual update) means that the test may fail, and _nobody_ - * runs the handlers if there is a race that makes multiple - * CPU's get here at the same time. That's ok, we'll run them - * next time around. 
- */ -static inline void run_bottom_halves(void) +struct softirq_state softirq_state[NR_CPUS]; +static struct softirq_action softirq_vec[32]; + +asmlinkage void do_softirq() { - unsigned long active; - void (**bh)(void); - - active = get_active_bhs(); - clear_active_bhs(active); - bh = bh_base; - do { - if (active & 1) - (*bh)(); - bh++; - active >>= 1; - } while (active); + int cpu = smp_processor_id(); + __u32 active, mask; + + if (in_interrupt()) + return; + + local_bh_disable(); + + local_irq_disable(); + mask = softirq_state[cpu].mask; + active = softirq_state[cpu].active & mask; + + if (active) { + struct softirq_action *h; + +restart: + /* Reset active bitmask before enabling irqs */ + softirq_state[cpu].active &= ~active; + + local_irq_enable(); + + h = softirq_vec; + mask &= ~active; + + do { + if (active & 1) + h->action(h); + h++; + active >>= 1; + } while (active); + + local_irq_disable(); + + active = softirq_state[cpu].active; + if ((active &= mask) != 0) + goto retry; + } + + local_bh_enable(); + + /* Leave with locally disabled hard irqs. It is critical to close + * window for infinite recursion, while we help local bh count, + * it protected us. Now we are defenceless. 
+ */ + return; + +retry: + goto restart; } -asmlinkage void do_bottom_half(void) + +static spinlock_t softirq_mask_lock = SPIN_LOCK_UNLOCKED; + +void open_softirq(int nr, void (*action)(struct softirq_action*), void *data) +{ + unsigned long flags; + int i; + + spin_lock_irqsave(&softirq_mask_lock, flags); + softirq_vec[nr].data = data; + softirq_vec[nr].action = action; + + for (i=0; inext; + + if (tasklet_trylock(t)) { + if (atomic_read(&t->count) == 0) { + clear_bit(TASKLET_STATE_SCHED, &t->state); + + t->func(t->data); + tasklet_unlock(t); + continue; + } + tasklet_unlock(t); } - softirq_endlock(cpu); + local_irq_disable(); + t->next = tasklet_vec[cpu].list; + tasklet_vec[cpu].list = t; + __cpu_raise_softirq(cpu, TASKLET_SOFTIRQ); + local_irq_enable(); } } + + + +struct tasklet_head tasklet_hi_vec[NR_CPUS] __cacheline_aligned; + +static void tasklet_hi_action(struct softirq_action *a) +{ + int cpu = smp_processor_id(); + struct tasklet_struct *list; + + local_irq_disable(); + list = tasklet_hi_vec[cpu].list; + tasklet_hi_vec[cpu].list = NULL; + local_irq_enable(); + + while (list != NULL) { + struct tasklet_struct *t = list; + + list = list->next; + + if (tasklet_trylock(t)) { + if (atomic_read(&t->count) == 0) { + clear_bit(TASKLET_STATE_SCHED, &t->state); + + t->func(t->data); + tasklet_unlock(t); + continue; + } + tasklet_unlock(t); + } + local_irq_disable(); + t->next = tasklet_hi_vec[cpu].list; + tasklet_hi_vec[cpu].list = t; + __cpu_raise_softirq(cpu, HI_SOFTIRQ); + local_irq_enable(); + } +} + + +void tasklet_init(struct tasklet_struct *t, + void (*func)(unsigned long), unsigned long data) +{ + t->func = func; + t->data = data; + t->state = 0; + atomic_set(&t->count, 0); +} + +void tasklet_kill(struct tasklet_struct *t) +{ + while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) { + if (in_interrupt()) + panic("Attempt to kill tasklet from interrupt\n"); + schedule(); + } + tasklet_unlock_wait(t); +} + + + +/* Old style BHs */ + +static void 
(*bh_base[32])(void); +struct tasklet_struct bh_task_vec[32]; + +/* BHs are serialized by spinlock global_bh_lock. + + It is still possible to make synchronize_bh() as + spin_unlock_wait(&global_bh_lock). This operation is not used + by kernel now, so that this lock is not made private only + due to wait_on_irq(). + + It can be removed only after auditing all the BHs. + */ +spinlock_t global_bh_lock = SPIN_LOCK_UNLOCKED; + +static void bh_action(unsigned long nr) +{ + int cpu = smp_processor_id(); + + if (!spin_trylock(&global_bh_lock)) + goto resched; + + if (!hardirq_trylock(cpu)) + goto resched_unlock; + + if (bh_base[nr]) + bh_base[nr](); + + hardirq_endlock(cpu); + spin_unlock(&global_bh_lock); + return; + +resched_unlock: + spin_unlock(&global_bh_lock); +resched: + mark_bh(nr); +} + +void init_bh(int nr, void (*routine)(void)) +{ + bh_base[nr] = routine; + mb(); +} + +void remove_bh(int nr) +{ + tasklet_kill(bh_task_vec+nr); + bh_base[nr] = NULL; +} + +void __init softirq_init() +{ + int i; + + for (i=0; i<32; i++) + tasklet_init(bh_task_vec+i, bh_action, i); + + open_softirq(TASKLET_SOFTIRQ, tasklet_action, NULL); + open_softirq(HI_SOFTIRQ, tasklet_hi_action, NULL); +} + + diff --git a/kernel/timer.c b/kernel/timer.c index fccf7faa7422..fbe98e48b93a 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -105,13 +105,15 @@ static struct timer_vec * const tvecs[] = { static unsigned long timer_jiffies = 0; -static inline void insert_timer(struct timer_list *timer, - struct timer_list **vec, int idx) +static inline void insert_timer(struct timer_list *timer, struct timer_list **vec) { - if ((timer->next = vec[idx])) - vec[idx]->prev = timer; - vec[idx] = timer; - timer->prev = (struct timer_list *)&vec[idx]; + struct timer_list *next = *vec; + + timer->next = next; + if (next) + next->prev = timer; + *vec = timer; + timer->prev = (struct timer_list *)vec; } static inline void internal_add_timer(struct timer_list *timer) @@ -121,31 +123,34 @@ static inline void 
internal_add_timer(struct timer_list *timer) */ unsigned long expires = timer->expires; unsigned long idx = expires - timer_jiffies; + struct timer_list ** vec; if (idx < TVR_SIZE) { int i = expires & TVR_MASK; - insert_timer(timer, tv1.vec, i); + vec = tv1.vec + i; } else if (idx < 1 << (TVR_BITS + TVN_BITS)) { int i = (expires >> TVR_BITS) & TVN_MASK; - insert_timer(timer, tv2.vec, i); + vec = tv2.vec + i; } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) { int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK; - insert_timer(timer, tv3.vec, i); + vec = tv3.vec + i; } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) { int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK; - insert_timer(timer, tv4.vec, i); + vec = tv4.vec + i; } else if ((signed long) idx < 0) { /* can happen if you add a timer with expires == jiffies, * or you set a timer to go off in the past */ - insert_timer(timer, tv1.vec, tv1.index); + vec = tv1.vec + tv1.index; } else if (idx <= 0xffffffffUL) { int i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK; - insert_timer(timer, tv5.vec, i); + vec = tv5.vec + i; } else { /* Can only get here on architectures with 64-bit jiffies */ timer->next = timer->prev = timer; + return; } + insert_timer(timer, vec); } spinlock_t timerlist_lock = SPIN_LOCK_UNLOCKED; @@ -181,15 +186,17 @@ static inline int detach_timer(struct timer_list *timer) return 0; } -void mod_timer(struct timer_list *timer, unsigned long expires) +int mod_timer(struct timer_list *timer, unsigned long expires) { + int ret; unsigned long flags; spin_lock_irqsave(&timerlist_lock, flags); timer->expires = expires; - detach_timer(timer); + ret = detach_timer(timer); internal_add_timer(timer); spin_unlock_irqrestore(&timerlist_lock, flags); + return ret; } int del_timer(struct timer_list * timer) @@ -204,6 +211,39 @@ int del_timer(struct timer_list * timer) return ret; } +#ifdef __SMP__ +/* + * SMP specific function to delete periodic timer. 
+ * Caller must disable by some means restarting the timer + * for new. Upon exit the timer is not queued and handler is not running + * on any CPU. It returns number of times, which timer was deleted + * (for reference counting). + */ + +int del_timer_sync(struct timer_list * timer) +{ + int ret = 0; + + for (;;) { + unsigned long flags; + int running; + + spin_lock_irqsave(&timerlist_lock, flags); + ret += detach_timer(timer); + timer->next = timer->prev = 0; + running = timer->running; + spin_unlock_irqrestore(&timerlist_lock, flags); + + if (!running) + return ret; + timer_synchronize(timer); + } + + return ret; +} +#endif + + static inline void cascade_timers(struct timer_vec *tv) { /* cascade all the timers from tv up one level */ @@ -238,6 +278,7 @@ static inline void run_timer_list(void) unsigned long data = timer->data; detach_timer(timer); timer->next = timer->prev = NULL; + timer_set_running(timer); spin_unlock_irq(&timerlist_lock); fn(data); spin_lock_irq(&timerlist_lock); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index fd270642b9be..b212a6252971 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -29,7 +29,9 @@ int nr_lru_pages; LIST_HEAD(lru_cache); static char *zone_names[MAX_NR_ZONES] = { "DMA", "Normal", "HighMem" }; -static int zone_balance_ratio[MAX_NR_ZONES] = { 128, 128, 128 }; +static int zone_balance_ratio[MAX_NR_ZONES] = { 128, 128, 128, }; +static int zone_balance_min[MAX_NR_ZONES] = { 10 , 10, 10, }; +static int zone_balance_max[MAX_NR_ZONES] = { 255 , 255, 255, }; /* * Free_page() adds the page to the free lists. 
This is optimized for @@ -196,9 +198,6 @@ static inline struct page * rmqueue (zone_t *zone, unsigned long order) return NULL; } -#define ZONE_BALANCED(zone) \ - (((zone)->free_pages > (zone)->pages_low) && (!(zone)->low_on_memory)) - static inline unsigned long classfree(zone_t *zone) { unsigned long free = 0; @@ -215,21 +214,6 @@ static inline unsigned long classfree(zone_t *zone) static inline int zone_balance_memory (zone_t *zone, int gfp_mask) { int freed; - unsigned long free = classfree(zone); - - if (free >= zone->pages_low) { - if (!zone->low_on_memory) - return 1; - /* - * Simple hysteresis: exit 'low memory mode' if - * the upper limit has been reached: - */ - if (free >= zone->pages_high) { - zone->low_on_memory = 0; - return 1; - } - } else - zone->low_on_memory = 1; /* * In the atomic allocation case we only 'kick' the @@ -243,43 +227,6 @@ static inline int zone_balance_memory (zone_t *zone, int gfp_mask) return 1; } -#if 0 -/* - * We are still balancing memory in a global way: - */ -static inline int balance_memory (zone_t *zone, int gfp_mask) -{ - unsigned long free = nr_free_pages(); - static int low_on_memory = 0; - int freed; - - if (free >= freepages.low) { - if (!low_on_memory) - return 1; - /* - * Simple hysteresis: exit 'low memory mode' if - * the upper limit has been reached: - */ - if (free >= freepages.high) { - low_on_memory = 0; - return 1; - } - } else - low_on_memory = 1; - - /* - * In the atomic allocation case we only 'kick' the - * state machine, but do not try to free pages - * ourselves. - */ - freed = try_to_free_pages(gfp_mask, zone); - - if (!freed && !(gfp_mask & __GFP_HIGH)) - return 0; - return 1; -} -#endif - /* * This is the 'heart' of the zoned buddy allocator: */ @@ -310,11 +257,31 @@ struct page * __alloc_pages (zonelist_t *zonelist, unsigned long order) * further thought. 
*/ if (!(current->flags & PF_MEMALLOC)) - /* - * fastpath - */ - if (!ZONE_BALANCED(z)) - goto balance; + { + if (classfree(z) > z->pages_high) + { + if (z->low_on_memory) + z->low_on_memory = 0; + } + else + { + extern wait_queue_head_t kswapd_wait; + + if (z->low_on_memory) + goto balance; + + if (classfree(z) <= z->pages_low) + { + wake_up_interruptible(&kswapd_wait); + + if (classfree(z) <= z->pages_min) + { + z->low_on_memory = 1; + goto balance; + } + } + } + } /* * This is an optimization for the 'higher order zone * is empty' case - it can happen even in well-behaved @@ -378,7 +345,7 @@ unsigned int nr_free_buffer_pages (void) zone_t *zone; int i; - sum = nr_lru_pages; + sum = nr_lru_pages - atomic_read(&page_cache_size); for (i = 0; i < NUMNODES; i++) for (zone = NODE_DATA(i)->node_zones; zone <= NODE_DATA(i)->node_zones+ZONE_NORMAL; zone++) sum += zone->free_pages; @@ -590,7 +557,11 @@ void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap, zone->offset = offset; cumulative += size; mask = (cumulative / zone_balance_ratio[j]); - if (mask < 1) mask = 1; + if (mask < zone_balance_min[j]) + mask = zone_balance_min[j]; + else if (mask > zone_balance_max[j]) + mask = zone_balance_max[j]; + zone->pages_min = mask; zone->pages_low = mask*2; zone->pages_high = mask*3; zone->low_on_memory = 0; diff --git a/mm/slab.c b/mm/slab.c index 760f5bd8d52b..dc8294f2d705 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1864,11 +1864,10 @@ next: } while (--scan && searchp != clock_searchp); clock_searchp = searchp; - up(&cache_chain_sem); if (!best_cachep) { /* couldn't find anything to reap */ - return; + goto out; } spin_lock_irq(&best_cachep->c_spinlock); @@ -1902,6 +1901,8 @@ good_dma: } dma_fail: spin_unlock_irq(&best_cachep->c_spinlock); +out: + up(&cache_chain_sem); return; } @@ -1990,14 +1991,14 @@ get_slabinfo(char *buf) unsigned long allocs = cachep->c_num_allocations; errors = (unsigned long) atomic_read(&cachep->c_errors); 
spin_unlock_irqrestore(&cachep->c_spinlock, save_flags); - len += sprintf(buf+len, "%-16s %6lu %6lu %4lu %4lu %4lu %6lu %7lu %5lu %4lu %4lu\n", - cachep->c_name, active_objs, num_objs, active_slabs, num_slabs, + len += sprintf(buf+len, "%-16s %6lu %6lu %6lu %4lu %4lu %4lu %6lu %7lu %5lu %4lu %4lu\n", + cachep->c_name, active_objs, num_objs, cachep->c_offset, active_slabs, num_slabs, (1<c_gfporder)*num_slabs, high, allocs, grown, reaped, errors); } #else spin_unlock_irqrestore(&cachep->c_spinlock, save_flags); - len += sprintf(buf+len, "%-17s %6lu %6lu\n", cachep->c_name, active_objs, num_objs); + len += sprintf(buf+len, "%-17s %6lu %6lu %6lu\n", cachep->c_name, active_objs, num_objs, cachep->c_offset); #endif /* SLAB_STATS */ } while ((cachep = cachep->c_nextp) != &cache_cache); up(&cache_chain_sem); diff --git a/mm/vmscan.c b/mm/vmscan.c index 231cbf8f7194..e6cb394d9251 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -451,7 +451,7 @@ done: return priority >= 0; } -static struct task_struct *kswapd_process; +DECLARE_WAIT_QUEUE_HEAD(kswapd_wait); /* * The background pageout daemon, started as a kernel thread @@ -471,7 +471,6 @@ int kswapd(void *unused) { struct task_struct *tsk = current; - kswapd_process = tsk; tsk->session = 1; tsk->pgrp = 1; strcpy(tsk->comm, "kswapd"); @@ -510,7 +509,7 @@ int kswapd(void *unused) run_task_queue(&tq_disk); } while (!tsk->need_resched); tsk->state = TASK_INTERRUPTIBLE; - schedule_timeout(HZ); + interruptible_sleep_on(&kswapd_wait); } } @@ -533,7 +532,6 @@ int try_to_free_pages(unsigned int gfp_mask, zone_t *zone) { int retval = 1; - wake_up_process(kswapd_process); if (gfp_mask & __GFP_WAIT) { current->flags |= PF_MEMALLOC; retval = do_try_to_free_pages(gfp_mask, zone); diff --git a/net/core/dev.c b/net/core/dev.c index 698a59cfc25a..00d5caa2a37b 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -95,9 +95,7 @@ extern int plip_init(void); #endif NET_PROFILE_DEFINE(dev_queue_xmit) -NET_PROFILE_DEFINE(net_bh) 
-NET_PROFILE_DEFINE(net_bh_skb) - +NET_PROFILE_DEFINE(softnet_process) const char *if_port_text[] = { "unknown", @@ -141,19 +139,15 @@ static struct notifier_block *netdev_chain=NULL; /* * Device drivers call our routines to queue packets here. We empty the - * queue in the bottom half handler. + * queue in the local softnet handler. */ - -static struct sk_buff_head backlog; +struct softnet_data softnet_data[NR_CPUS] __cacheline_aligned; #ifdef CONFIG_NET_FASTROUTE int netdev_fastroute; int netdev_fastroute_obstacles; -struct net_fastroute_stats dev_fastroute_stat; #endif -static void dev_clear_backlog(struct net_device *dev); - /****************************************************************************************** @@ -186,6 +180,9 @@ int netdev_nit=0; void dev_add_pack(struct packet_type *pt) { int hash; + + write_lock_bh(&ptype_lock); + #ifdef CONFIG_NET_FASTROUTE /* Hack to detect packet socket */ if (pt->data) { @@ -193,7 +190,6 @@ void dev_add_pack(struct packet_type *pt) dev_clear_fastroute(pt->dev); } #endif - write_lock_bh(&ptype_lock); if(pt->type==htons(ETH_P_ALL)) { netdev_nit++; @@ -217,6 +213,9 @@ void dev_add_pack(struct packet_type *pt) void dev_remove_pack(struct packet_type *pt) { struct packet_type **pt1; + + write_lock_bh(&ptype_lock); + if(pt->type==htons(ETH_P_ALL)) { netdev_nit--; @@ -224,7 +223,7 @@ void dev_remove_pack(struct packet_type *pt) } else pt1=&ptype_base[ntohs(pt->type)&15]; - write_lock_bh(&ptype_lock); + for(; (*pt1)!=NULL; pt1=&((*pt1)->next)) { if(pt==(*pt1)) @@ -284,6 +283,9 @@ struct net_device *dev_get_by_name(const char *name) /* Return value is changed to int to prevent illegal usage in future. It is still legal to use to check for device existance. + + User should understand, that the result returned by this function + is meaningless, if it was not issued under rtnl semaphore. 
*/ int dev_get(const char *name) @@ -391,8 +393,10 @@ struct net_device *dev_alloc(const char *name, int *err) void netdev_state_change(struct net_device *dev) { - if (dev->flags&IFF_UP) + if (dev->flags&IFF_UP) { notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev); + rtmsg_ifinfo(RTM_NEWLINK, dev, 0); + } } @@ -449,18 +453,12 @@ int dev_open(struct net_device *dev) if (ret == 0) { - /* - * nil rebuild_header routine, - * that should be never called and used as just bug trap. - */ - - if (dev->rebuild_header == NULL) - dev->rebuild_header = default_rebuild_header; - /* * Set the flags. */ - dev->flags |= (IFF_UP | IFF_RUNNING); + dev->flags |= IFF_UP; + + set_bit(LINK_STATE_START, &dev->state); /* * Initialize multicasting status @@ -476,7 +474,6 @@ int dev_open(struct net_device *dev) * ... and announce new interface. */ notifier_call_chain(&netdev_chain, NETDEV_UP, dev); - } return(ret); } @@ -523,8 +520,16 @@ int dev_close(struct net_device *dev) if (!(dev->flags&IFF_UP)) return 0; + /* + * Tell people we are going down, so that they can + * prepare to death, when device is still operating. + */ + notifier_call_chain(&netdev_chain, NETDEV_GOING_DOWN, dev); + dev_deactivate(dev); + clear_bit(LINK_STATE_START, &dev->state); + /* * Call the device specific close. This cannot fail. * Only if device is UP @@ -533,21 +538,17 @@ int dev_close(struct net_device *dev) if (dev->stop) dev->stop(dev); - if (dev->start) - printk("dev_close: bug %s still running\n", dev->name); - /* * Device is now down. */ - dev_clear_backlog(dev); - dev->flags&=~(IFF_UP|IFF_RUNNING); + dev->flags &= ~IFF_UP; #ifdef CONFIG_NET_FASTROUTE dev_clear_fastroute(dev); #endif /* - * Tell people we are going down + * Tell people we are down */ notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev); @@ -647,12 +648,7 @@ int dev_queue_xmit(struct sk_buff *skb) if (q->enqueue) { int ret = q->enqueue(skb, q); - /* If the device is not busy, kick it. 
- * Otherwise or if queue is not empty after kick, - * add it to run list. - */ - if (dev->tbusy || __qdisc_wakeup(dev)) - qdisc_run(q); + qdisc_run(dev); spin_unlock_bh(&dev->queue_lock); return ret; @@ -670,17 +666,22 @@ int dev_queue_xmit(struct sk_buff *skb) Either shot noqueue qdisc, it is even simpler 8) */ if (dev->flags&IFF_UP) { - if (dev->xmit_lock_owner != smp_processor_id()) { + int cpu = smp_processor_id(); + + if (dev->xmit_lock_owner != cpu) { spin_unlock(&dev->queue_lock); spin_lock(&dev->xmit_lock); - dev->xmit_lock_owner = smp_processor_id(); + dev->xmit_lock_owner = cpu; - if (netdev_nit) - dev_queue_xmit_nit(skb,dev); - if (dev->hard_start_xmit(skb, dev) == 0) { - dev->xmit_lock_owner = -1; - spin_unlock_bh(&dev->xmit_lock); - return 0; + if (!test_bit(LINK_STATE_XOFF, &dev->state)) { + if (netdev_nit) + dev_queue_xmit_nit(skb,dev); + + if (dev->hard_start_xmit(skb, dev) == 0) { + dev->xmit_lock_owner = -1; + spin_unlock_bh(&dev->xmit_lock); + return 0; + } } dev->xmit_lock_owner = -1; spin_unlock_bh(&dev->xmit_lock); @@ -705,12 +706,13 @@ int dev_queue_xmit(struct sk_buff *skb) Receiver rotutines =======================================================================*/ -int netdev_dropping = 0; int netdev_max_backlog = 300; -atomic_t netdev_rx_dropped; + +struct netif_rx_stats netdev_rx_stat[NR_CPUS]; + #ifdef CONFIG_NET_HW_FLOWCONTROL -int netdev_throttle_events; +static atomic_t netdev_dropping = ATOMIC_INIT(0); static unsigned long netdev_fc_mask = 1; unsigned long netdev_fc_xoff = 0; spinlock_t netdev_fc_lock = SPIN_LOCK_UNLOCKED; @@ -756,59 +758,18 @@ static void netdev_wakeup(void) { unsigned long xoff; - spin_lock_irq(&netdev_fc_lock); + spin_lock(&netdev_fc_lock); xoff = netdev_fc_xoff; netdev_fc_xoff = 0; - netdev_dropping = 0; - netdev_throttle_events++; while (xoff) { int i = ffz(~xoff); xoff &= ~(1<next; - if (curr->prev->dev == dev) { - prev = curr->prev; - __skb_unlink(prev, &backlog); - __skb_queue_tail(&garbage, prev); - } - } - 
} - spin_unlock_irq(&backlog.lock); - - if (garbage.qlen) { -#ifdef CONFIG_NET_HW_FLOWCONTROL - if (netdev_dropping) - netdev_wakeup(); -#else - netdev_dropping = 0; -#endif - skb_queue_purge(&garbage); - } -} - /* * Receive a packet from a device driver and queue it for the upper * (protocol) levels. It always succeeds. @@ -816,44 +777,59 @@ static void dev_clear_backlog(struct net_device *dev) void netif_rx(struct sk_buff *skb) { + int this_cpu = smp_processor_id(); + struct softnet_data *queue; + unsigned long flags; + if(skb->stamp.tv_sec==0) get_fast_time(&skb->stamp); /* The code is rearranged so that the path is the most short when CPU is congested, but is still operating. */ - - if (backlog.qlen <= netdev_max_backlog) { - if (backlog.qlen) { - if (netdev_dropping == 0) { - if (skb->rx_dev) - dev_put(skb->rx_dev); - skb->rx_dev = skb->dev; - dev_hold(skb->rx_dev); - skb_queue_tail(&backlog,skb); - mark_bh(NET_BH); - return; - } - atomic_inc(&netdev_rx_dropped); - kfree_skb(skb); + queue = &softnet_data[this_cpu]; + + local_irq_save(flags); + + netdev_rx_stat[this_cpu].total++; + if (queue->input_pkt_queue.qlen <= netdev_max_backlog) { + if (queue->input_pkt_queue.qlen) { + if (queue->throttle) + goto drop; + +enqueue: + if (skb->rx_dev) + dev_put(skb->rx_dev); + skb->rx_dev = skb->dev; + dev_hold(skb->rx_dev); + __skb_queue_tail(&queue->input_pkt_queue,skb); + __cpu_raise_softirq(this_cpu, NET_RX_SOFTIRQ); + local_irq_restore(flags); return; } + + if (queue->throttle) { + queue->throttle = 0; #ifdef CONFIG_NET_HW_FLOWCONTROL - if (netdev_dropping) - netdev_wakeup(); -#else - netdev_dropping = 0; + if (atomic_dec_and_test(&netdev_dropping)) + netdev_wakeup(); #endif - if (skb->rx_dev) - dev_put(skb->rx_dev); - skb->rx_dev = skb->dev; - dev_hold(skb->rx_dev); - skb_queue_tail(&backlog,skb); - mark_bh(NET_BH); - return; + } + goto enqueue; } - netdev_dropping = 1; - atomic_inc(&netdev_rx_dropped); + + if (queue->throttle == 0) { + queue->throttle = 1; + 
netdev_rx_stat[this_cpu].throttled++; +#ifdef CONFIG_NET_HW_FLOWCONTROL + atomic_inc(&netdev_dropping); +#endif + } + +drop: + netdev_rx_stat[this_cpu].dropped++; + local_irq_restore(flags); + kfree_skb(skb); } @@ -888,195 +864,199 @@ static inline void handle_bridge(struct sk_buff *skb, unsigned short type) } #endif -/* - * When we are called the queue is ready to grab, the interrupts are - * on and hardware can interrupt and queue to the receive queue as we - * run with no problems. - * This is run as a bottom half after an interrupt handler that does - * mark_bh(NET_BH); +/* Deliver skb to an old protocol, which is not threaded well + or which do not understand shared skbs. */ - -void net_bh(void) +static void deliver_to_old_ones(struct packet_type *pt, struct sk_buff *skb, int last) { - struct packet_type *ptype; - struct packet_type *pt_prev; - unsigned short type; - unsigned long start_time = jiffies; + static spinlock_t net_bh_lock = SPIN_LOCK_UNLOCKED; - NET_PROFILE_ENTER(net_bh); - /* - * Can we send anything now? We want to clear the - * decks for any more sends that get done as we - * process the input. This also minimises the - * latency on a transmit interrupt bh. + if (!last) { + skb = skb_clone(skb, GFP_ATOMIC); + if (skb == NULL) + return; + } + + /* The assumption (correct one) is that old protocols + did not depened on BHs different of NET_BH and TIMER_BH. */ - if (qdisc_pending()) - qdisc_run_queues(); + /* Emulate NET_BH with special spinlock */ + spin_lock(&net_bh_lock); - /* - * Any data left to process. This may occur because a - * mark_bh() is done after we empty the queue including - * that from the device which does a mark_bh() just after - */ + /* Disable timers and wait for all timers completion */ + tasklet_disable(bh_task_vec+TIMER_BH); - /* - * While the queue is not empty.. - * - * Note that the queue never shrinks due to - * an interrupt, so we can do this test without - * disabling interrupts. 
- */ + pt->func(skb, skb->dev, pt); - while (!skb_queue_empty(&backlog)) - { - struct sk_buff * skb; + tasklet_enable(bh_task_vec+TIMER_BH); + spin_unlock(&net_bh_lock); +} - /* Give chance to other bottom halves to run */ - if (jiffies - start_time > 1) - goto net_bh_break; +/* Reparent skb to master device. This function is called + * only from net_rx_action under ptype_lock. It is misuse + * of ptype_lock, but it is OK for now. + */ +static __inline__ void skb_bond(struct sk_buff *skb) +{ + struct net_device *dev = skb->rx_dev; + + if (dev->master) { + dev_hold(dev->master); + skb->dev = skb->rx_dev = dev->master; + dev_put(dev); + } +} - /* - * We have a packet. Therefore the queue has shrunk - */ - skb = skb_dequeue(&backlog); +static void net_tx_action(struct softirq_action *h) +{ + int cpu = smp_processor_id(); + unsigned long flags; -#ifdef CONFIG_NET_FASTROUTE - if (skb->pkt_type == PACKET_FASTROUTE) { - dev_queue_xmit(skb); - continue; + if (softnet_data[cpu].completion_queue) { + struct sk_buff *clist; + + local_irq_save(flags); + clist = softnet_data[cpu].completion_queue; + softnet_data[cpu].completion_queue = NULL; + local_irq_restore(flags); + + while (clist != NULL) { + struct sk_buff *skb = clist; + clist = clist->next; + + BUG_TRAP(atomic_read(&skb->users) == 0); + __kfree_skb(skb); } -#endif + } - /* - * Bump the pointer to the next structure. - * - * On entry to the protocol layer. skb->data and - * skb->nh.raw point to the MAC and encapsulated data - */ + if (softnet_data[cpu].output_queue) { + struct net_device *head; - /* XXX until we figure out every place to modify.. 
*/ - skb->h.raw = skb->nh.raw = skb->data; + local_irq_save(flags); + head = softnet_data[cpu].output_queue; + softnet_data[cpu].output_queue = NULL; + local_irq_restore(flags); - if (skb->mac.raw < skb->head || skb->mac.raw > skb->data) { - printk(KERN_CRIT "%s: wrong mac.raw ptr, proto=%04x\n", skb->dev->name, skb->protocol); - kfree_skb(skb); - continue; + while (head != NULL) { + struct net_device *dev = head; + head = head->next_sched; + + clear_bit(LINK_STATE_SCHED, &dev->state); + + if (spin_trylock(&dev->queue_lock)) { + qdisc_run(dev); + spin_unlock(&dev->queue_lock); + } else { + netif_schedule(dev); + } } + } +} - /* - * Fetch the packet protocol ID. - */ +static void net_rx_action(struct softirq_action *h) +{ + int this_cpu = smp_processor_id(); + struct softnet_data *queue = &softnet_data[this_cpu]; + unsigned long start_time = jiffies; + int bugdet = netdev_max_backlog; - type = skb->protocol; + read_lock(&ptype_lock); -#ifdef CONFIG_BRIDGE - /* - * If we are bridging then pass the frame up to the - * bridging code (if this protocol is to be bridged). - * If it is bridged then move on - */ - handle_bridge(skb, type); -#endif + for (;;) { + struct sk_buff *skb; - /* - * We got a packet ID. Now loop over the "known protocols" - * list. There are two lists. The ptype_all list of taps (normally empty) - * and the main protocol list which is hashed perfectly for normal protocols. 
- */ + local_irq_disable(); + skb = __skb_dequeue(&queue->input_pkt_queue); + local_irq_enable(); - pt_prev = NULL; - read_lock(&ptype_lock); - for (ptype = ptype_all; ptype!=NULL; ptype=ptype->next) + if (skb == NULL) + break; + + skb_bond(skb); + +#ifdef CONFIG_NET_FASTROUTE + if (skb->pkt_type == PACKET_FASTROUTE) { + netdev_rx_stat[this_cpu].fastroute_deferred_out++; + dev_queue_xmit(skb); + continue; + } +#endif + skb->h.raw = skb->nh.raw = skb->data; { - if (!ptype->dev || ptype->dev == skb->dev) { - if(pt_prev) - { - struct sk_buff *skb2; - if (pt_prev->data == NULL) - skb2 = skb_clone(skb, GFP_ATOMIC); - else { - skb2 = skb; - atomic_inc(&skb2->users); + struct packet_type *ptype, *pt_prev; + unsigned short type = skb->protocol; +#ifdef CONFIG_BRIDGE + handle_bridge(skb, type); +#endif + pt_prev = NULL; + for (ptype = ptype_all; ptype; ptype = ptype->next) { + if (!ptype->dev || ptype->dev == skb->dev) { + if (pt_prev) { + if (!pt_prev->data) { + deliver_to_old_ones(pt_prev, skb, 0); + } else { + atomic_inc(&skb->users); + pt_prev->func(skb, + skb->dev, + pt_prev); + } } - if(skb2) - pt_prev->func(skb2, skb->dev, pt_prev); + pt_prev = ptype; } - pt_prev=ptype; } - } - - for (ptype = ptype_base[ntohs(type)&15]; ptype != NULL; ptype = ptype->next) - { - if (ptype->type == type && (!ptype->dev || ptype->dev==skb->dev)) - { - /* - * We already have a match queued. Deliver - * to it and then remember the new match - */ - if(pt_prev) - { - struct sk_buff *skb2; - - if (pt_prev->data == NULL) - skb2 = skb_clone(skb, GFP_ATOMIC); - else { - skb2 = skb; - atomic_inc(&skb2->users); + for (ptype=ptype_base[ntohs(type)&15];ptype;ptype=ptype->next) { + if (ptype->type == type && + (!ptype->dev || ptype->dev == skb->dev)) { + if (pt_prev) { + if (!pt_prev->data) + deliver_to_old_ones(pt_prev, skb, 0); + else { + atomic_inc(&skb->users); + pt_prev->func(skb, + skb->dev, + pt_prev); + } } - - /* - * Kick the protocol handler. This should be fast - * and efficient code. 
- */ - - if(skb2) - pt_prev->func(skb2, skb->dev, pt_prev); + pt_prev = ptype; } - /* Remember the current last to do */ - pt_prev=ptype; } - } /* End of protocol list loop */ - - /* - * Is there a last item to send to ? - */ - - if(pt_prev) - pt_prev->func(skb, skb->dev, pt_prev); - /* - * Has an unknown packet has been received ? - */ - - else { - kfree_skb(skb); + if (pt_prev) { + if (!pt_prev->data) + deliver_to_old_ones(pt_prev, skb, 1); + else + pt_prev->func(skb, skb->dev, pt_prev); + } else + kfree_skb(skb); } - read_unlock(&ptype_lock); - } /* End of queue loop */ - /* - * We have emptied the queue - */ - - /* - * One last output flush. - */ - - if (qdisc_pending()) - qdisc_run_queues(); + if (bugdet-- < 0 || jiffies - start_time > 1) + goto softnet_break; + } + read_unlock(&ptype_lock); + local_irq_disable(); + if (queue->throttle) { + queue->throttle = 0; #ifdef CONFIG_NET_HW_FLOWCONTROL - if (netdev_dropping) - netdev_wakeup(); -#else - netdev_dropping = 0; + if (atomic_dec_and_test(&netdev_dropping)) + netdev_wakeup(); #endif - NET_PROFILE_LEAVE(net_bh); + } + local_irq_enable(); + + NET_PROFILE_LEAVE(softnet_process); return; -net_bh_break: - mark_bh(NET_BH); - NET_PROFILE_LEAVE(net_bh); +softnet_break: + read_unlock(&ptype_lock); + + local_irq_disable(); + netdev_rx_stat[this_cpu].time_squeeze++; + __cpu_raise_softirq(this_cpu, NET_RX_SOFTIRQ); + local_irq_enable(); + + NET_PROFILE_LEAVE(softnet_process); return; } @@ -1276,23 +1256,26 @@ static int dev_get_info(char *buffer, char **start, off_t offset, int length) static int dev_proc_stats(char *buffer, char **start, off_t offset, int length, int *eof, void *data) { - int len; + int i; + int len=0; - len = sprintf(buffer, "%08x %08x %08x %08x %08x\n", - atomic_read(&netdev_rx_dropped), -#ifdef CONFIG_NET_HW_FLOWCONTROL - netdev_throttle_events, -#else - 0, -#endif -#ifdef CONFIG_NET_FASTROUTE - dev_fastroute_stat.hits, - dev_fastroute_stat.succeed, - dev_fastroute_stat.deferred + for (i=0; imaster; 
+ + ASSERT_RTNL(); + + if (master) { + if (old) + return -EBUSY; + dev_hold(master); + } + + write_lock_bh(&ptype_lock); + slave->master = master; + write_unlock_bh(&ptype_lock); + + if (old) + dev_put(old); + + if (master) + slave->flags |= IFF_SLAVE; + else + slave->flags &= ~IFF_SLAVE; + + rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE); + return 0; +} + void dev_set_promiscuity(struct net_device *dev, int inc) { unsigned short old_flags = dev->flags; @@ -1438,8 +1449,7 @@ int dev_change_flags(struct net_device *dev, unsigned flags) * Set the flags on our device. */ - dev->flags = (flags & (IFF_DEBUG|IFF_NOTRAILERS|IFF_RUNNING|IFF_NOARP| - IFF_SLAVE|IFF_MASTER|IFF_DYNAMIC| + dev->flags = (flags & (IFF_DEBUG|IFF_NOTRAILERS|IFF_NOARP|IFF_DYNAMIC| IFF_MULTICAST|IFF_PORTSEL|IFF_AUTOMEDIA)) | (dev->flags & (IFF_UP|IFF_VOLATILE|IFF_PROMISC|IFF_ALLMULTI)); @@ -1465,7 +1475,7 @@ int dev_change_flags(struct net_device *dev, unsigned flags) } if (dev->flags&IFF_UP && - ((old_flags^dev->flags)&~(IFF_UP|IFF_RUNNING|IFF_PROMISC|IFF_ALLMULTI|IFF_VOLATILE))) + ((old_flags^dev->flags)&~(IFF_UP|IFF_PROMISC|IFF_ALLMULTI|IFF_VOLATILE))) notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev); if ((flags^dev->gflags)&IFF_PROMISC) { @@ -1484,6 +1494,9 @@ int dev_change_flags(struct net_device *dev, unsigned flags) dev_set_allmulti(dev, inc); } + if (old_flags^dev->flags) + rtmsg_ifinfo(RTM_NEWLINK, dev, old_flags^dev->flags); + return ret; } @@ -1502,8 +1515,10 @@ static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd) switch(cmd) { case SIOCGIFFLAGS: /* Get interface flags */ - ifr->ifr_flags = (dev->flags&~(IFF_PROMISC|IFF_ALLMULTI)) + ifr->ifr_flags = (dev->flags&~(IFF_PROMISC|IFF_ALLMULTI|IFF_RUNNING)) |(dev->gflags&(IFF_PROMISC|IFF_ALLMULTI)); + if (!test_bit(LINK_STATE_DOWN, &dev->state)) + ifr->ifr_flags |= IFF_RUNNING; return 0; case SIOCSIFFLAGS: /* Set interface flags */ @@ -1936,6 +1951,9 @@ int unregister_netdevice(struct net_device *dev) if (dev->uninit) dev->uninit(dev); 
+ /* Notifier chain MUST detach us from master device. */ + BUG_TRAP(dev->master==NULL); + if (dev->new_style) { #ifdef NET_REFCNT_DEBUG if (atomic_read(&dev->refcnt) != 1) @@ -2012,16 +2030,24 @@ extern void ip_auto_config(void); int __init net_dev_init(void) { struct net_device *dev, **dp; + int i; #ifdef CONFIG_NET_SCHED pktsched_init(); #endif /* - * Initialise the packet receive queue. + * Initialise the packet receive queues. */ - - skb_queue_head_init(&backlog); + + for (i = 0; i < NR_CPUS; i++) { + struct softnet_data *queue; + + queue = &softnet_data[i]; + skb_queue_head_init(&queue->input_pkt_queue); + queue->throttle = 0; + queue->completion_queue = NULL; + } /* * The bridge has to be up before the devices @@ -2035,10 +2061,7 @@ int __init net_dev_init(void) #ifdef CONFIG_NET_PROFILE net_profile_init(); NET_PROFILE_REGISTER(dev_queue_xmit); - NET_PROFILE_REGISTER(net_bh); -#if 0 - NET_PROFILE_REGISTER(net_bh_skb); -#endif + NET_PROFILE_REGISTER(softnet_process); #endif /* * Add the devices. 
@@ -2054,6 +2077,9 @@ int __init net_dev_init(void) while ((dev = *dp) != NULL) { spin_lock_init(&dev->queue_lock); spin_lock_init(&dev->xmit_lock); +#ifdef CONFIG_NET_FASTROUTE + dev->fastpath_lock = RW_LOCK_UNLOCKED; +#endif dev->xmit_lock_owner = -1; dev->iflink = -1; dev_hold(dev); @@ -2085,16 +2111,17 @@ int __init net_dev_init(void) #ifdef CONFIG_PROC_FS proc_net_create("dev", 0, dev_get_info); - create_proc_read_entry("net/dev_stat", 0, 0, dev_proc_stats, NULL); + create_proc_read_entry("net/softnet_stat", 0, 0, dev_proc_stats, NULL); #ifdef WIRELESS_EXT proc_net_create("wireless", 0, dev_get_wireless_info); #endif /* WIRELESS_EXT */ #endif /* CONFIG_PROC_FS */ - init_bh(NET_BH, net_bh); - dev_boot_phase = 0; + open_softirq(NET_TX_SOFTIRQ, net_tx_action, NULL); + open_softirq(NET_RX_SOFTIRQ, net_rx_action, NULL); + dst_init(); dev_mcast_init(); diff --git a/net/core/neighbour.c b/net/core/neighbour.c index d0bf8d13dcff..d97bdc5f2eb7 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -209,10 +209,11 @@ int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev) } } - del_timer(&tbl->proxy_timer); skb_queue_purge(&tbl->proxy_queue); pneigh_ifdown(tbl, dev); write_unlock_bh(&tbl->lock); + + del_timer_sync(&tbl->proxy_timer); return 0; } @@ -533,7 +534,7 @@ static void neigh_sync(struct neighbour *n) } } -static void neigh_periodic_timer(unsigned long arg) +static void SMP_TIMER_NAME(neigh_periodic_timer)(unsigned long arg) { struct neigh_table *tbl = (struct neigh_table*)arg; unsigned long now = jiffies; @@ -592,11 +593,21 @@ next_elt: } } - tbl->gc_timer.expires = now + tbl->gc_interval; - add_timer(&tbl->gc_timer); + mod_timer(&tbl->gc_timer, now + tbl->gc_interval); write_unlock(&tbl->lock); } +#ifdef __SMP__ +static void neigh_periodic_timer(unsigned long arg) +{ + struct neigh_table *tbl = (struct neigh_table*)arg; + + tasklet_schedule(&tbl->gc_task); + + timer_exit(&tbl->gc_timer); +} +#endif + static __inline__ int 
neigh_max_probes(struct neighbour *n) { struct neigh_parms *p = n->parms; @@ -665,6 +676,7 @@ static void neigh_timer_handler(unsigned long arg) neigh->ops->solicit(neigh, skb_peek(&neigh->arp_queue)); atomic_inc(&neigh->probes); + timer_exit(&neigh->timer); return; out: @@ -673,6 +685,7 @@ out: if (notify && neigh->parms->app_probes) neigh_app_notify(neigh); #endif + timer_exit(&neigh->timer); neigh_release(neigh); } @@ -1008,6 +1021,7 @@ static void neigh_proxy_process(unsigned long arg) tbl->proxy_timer.expires = jiffies + sched_next; add_timer(&tbl->proxy_timer); } + timer_exit(&tbl->proxy_timer); } void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p, @@ -1092,6 +1106,9 @@ void neigh_table_init(struct neigh_table *tbl) 0, SLAB_HWCACHE_ALIGN, NULL, NULL); +#ifdef __SMP__ + tasklet_init(&tbl->gc_task, SMP_TIMER_NAME(neigh_periodic_timer), (unsigned long)tbl); +#endif init_timer(&tbl->gc_timer); tbl->lock = RW_LOCK_UNLOCKED; tbl->gc_timer.data = (unsigned long)tbl; @@ -1116,8 +1133,10 @@ int neigh_table_clear(struct neigh_table *tbl) { struct neigh_table **tp; - del_timer(&tbl->gc_timer); - del_timer(&tbl->proxy_timer); + /* It is not clean... 
Fix it to unload IPv6 module safely */ + del_timer_sync(&tbl->gc_timer); + tasklet_kill(&tbl->gc_task); + del_timer_sync(&tbl->proxy_timer); skb_queue_purge(&tbl->proxy_queue); neigh_ifdown(tbl, NULL); if (tbl->entries) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index b4d8582102e9..9cdc290bf1a8 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -171,6 +171,11 @@ static int rtnetlink_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, r->ifi_flags = dev->flags; r->ifi_change = change; + if (test_bit(LINK_STATE_DOWN, &dev->state)) + r->ifi_flags &= ~IFF_RUNNING; + else + r->ifi_flags |= IFF_RUNNING; + RTA_PUT(skb, IFLA_IFNAME, strlen(dev->name)+1, dev->name); if (dev->addr_len) { RTA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr); @@ -186,6 +191,8 @@ static int rtnetlink_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, RTA_PUT(skb, IFLA_QDISC, strlen(dev->qdisc_sleeping->ops->id) + 1, dev->qdisc_sleeping->ops->id); + if (dev->master) + RTA_PUT(skb, IFLA_MASTER, sizeof(int), &dev->master->ifindex); if (dev->get_stats) { struct net_device_stats *stats = dev->get_stats(dev); if (stats) @@ -243,7 +250,7 @@ int rtnetlink_dump_all(struct sk_buff *skb, struct netlink_callback *cb) return skb->len; } -void rtmsg_ifinfo(int type, struct net_device *dev) +void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change) { struct sk_buff *skb; int size = NLMSG_GOODSIZE; @@ -252,7 +259,7 @@ void rtmsg_ifinfo(int type, struct net_device *dev) if (!skb) return; - if (rtnetlink_fill_ifinfo(skb, dev, type, 0, 0, ~0U) < 0) { + if (rtnetlink_fill_ifinfo(skb, dev, type, 0, 0, change) < 0) { kfree_skb(skb); return; } @@ -488,10 +495,20 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi struct net_device *dev = ptr; switch (event) { case NETDEV_UNREGISTER: - rtmsg_ifinfo(RTM_DELLINK, dev); + rtmsg_ifinfo(RTM_DELLINK, dev, ~0U); + break; + case NETDEV_REGISTER: + rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U); + break; 
+ case NETDEV_UP: + case NETDEV_DOWN: + rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING); + break; + case NETDEV_CHANGE: + case NETDEV_GOING_DOWN: break; default: - rtmsg_ifinfo(RTM_NEWLINK, dev); + rtmsg_ifinfo(RTM_NEWLINK, dev, 0); break; } return NOTIFY_DONE; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 3528c7510e3d..95e4d8e17a6e 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -4,7 +4,7 @@ * Authors: Alan Cox * Florian La Roche * - * Version: $Id: skbuff.c,v 1.64 2000/01/16 05:11:03 davem Exp $ + * Version: $Id: skbuff.c,v 1.66 2000/02/09 21:11:30 davem Exp $ * * Fixes: * Alan Cox : Fixed the worst of the load balancer bugs. @@ -61,18 +61,15 @@ #include #include -/* - * Resource tracking variables - */ - -static atomic_t net_skbcount = ATOMIC_INIT(0); -static atomic_t net_allocs = ATOMIC_INIT(0); -static atomic_t net_fails = ATOMIC_INIT(0); - -extern atomic_t ip_frag_mem; +int sysctl_hot_list_len = 128; static kmem_cache_t *skbuff_head_cache; +static union { + struct sk_buff_head list; + char pad[SMP_CACHE_BYTES]; +} skb_head_pool[NR_CPUS]; + /* * Keep out-of-line to prevent kernel bloat. 
* __builtin_return_address is not used because it is not always @@ -93,20 +90,39 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here) *(int*)0 = 0; } -void show_net_buffers(void) +static __inline__ struct sk_buff *skb_head_from_pool(void) { - printk("Networking buffers in use : %u\n", - atomic_read(&net_skbcount)); - printk("Total network buffer allocations : %u\n", - atomic_read(&net_allocs)); - printk("Total failed network buffer allocs : %u\n", - atomic_read(&net_fails)); -#ifdef CONFIG_INET - printk("IP fragment buffer size : %u\n", - atomic_read(&ip_frag_mem)); -#endif + struct sk_buff_head *list = &skb_head_pool[smp_processor_id()].list; + + if (skb_queue_len(list)) { + struct sk_buff *skb; + unsigned long flags; + + local_irq_save(flags); + skb = __skb_dequeue(list); + local_irq_restore(flags); + return skb; + } + return NULL; } +static __inline__ void skb_head_to_pool(struct sk_buff *skb) +{ + struct sk_buff_head *list = &skb_head_pool[smp_processor_id()].list; + + if (skb_queue_len(list) < sysctl_hot_list_len) { + unsigned long flags; + + local_irq_save(flags); + __skb_queue_head(list, skb); + local_irq_restore(flags); + + return; + } + kmem_cache_free(skbuff_head_cache, skb); +} + + /* Allocate a new skbuff. We do this ourselves so we can fill in a few * 'private' fields and also do memory statistics to find all the * [BEEP] leaks. @@ -129,9 +145,12 @@ struct sk_buff *alloc_skb(unsigned int size,int gfp_mask) } /* Get the HEAD */ - skb = kmem_cache_alloc(skbuff_head_cache, gfp_mask); - if (skb == NULL) - goto nohead; + skb = skb_head_from_pool(); + if (skb == NULL) { + skb = kmem_cache_alloc(skbuff_head_cache, gfp_mask); + if (skb == NULL) + goto nohead; + } /* Get the DATA. Size must match skb_add_mtu(). 
*/ size = ((size + 15) & ~15); @@ -139,17 +158,9 @@ struct sk_buff *alloc_skb(unsigned int size,int gfp_mask) if (data == NULL) goto nodata; - /* Note that this counter is useless now - you can just look in the - * skbuff_head entry in /proc/slabinfo. We keep it only for emergency - * cases. - */ - atomic_inc(&net_allocs); - /* XXX: does not include slab overhead */ skb->truesize = size + sizeof(struct sk_buff); - atomic_inc(&net_skbcount); - /* Load the data pointers. */ skb->head = data; skb->data = data; @@ -166,9 +177,8 @@ struct sk_buff *alloc_skb(unsigned int size,int gfp_mask) return skb; nodata: - kmem_cache_free(skbuff_head_cache, skb); + skb_head_to_pool(skb); nohead: - atomic_inc(&net_fails); return NULL; } @@ -213,8 +223,7 @@ void kfree_skbmem(struct sk_buff *skb) if (!skb->cloned || atomic_dec_and_test(skb_datarefp(skb))) kfree(skb->head); - kmem_cache_free(skbuff_head_cache, skb); - atomic_dec(&net_skbcount); + skb_head_to_pool(skb); } /* @@ -230,8 +239,13 @@ void __kfree_skb(struct sk_buff *skb) } dst_release(skb->dst); - if(skb->destructor) + if(skb->destructor) { + if (in_irq()) { + printk(KERN_WARNING "Warning: kfree_skb on hard IRQ %p\n", + NET_CALLER(skb)); + } skb->destructor(skb); + } #ifdef CONFIG_NET if(skb->rx_dev) dev_put(skb->rx_dev); @@ -247,17 +261,18 @@ void __kfree_skb(struct sk_buff *skb) struct sk_buff *skb_clone(struct sk_buff *skb, int gfp_mask) { struct sk_buff *n; - - n = kmem_cache_alloc(skbuff_head_cache, gfp_mask); - if (!n) - return NULL; + + n = skb_head_from_pool(); + if (!n) { + n = kmem_cache_alloc(skbuff_head_cache, gfp_mask); + if (!n) + return NULL; + } memcpy(n, skb, sizeof(*n)); atomic_inc(skb_datarefp(skb)); skb->cloned = 1; - atomic_inc(&net_allocs); - atomic_inc(&net_skbcount); dst_clone(n->dst); n->rx_dev = NULL; n->cloned = 1; @@ -379,6 +394,8 @@ void skb_add_mtu(int mtu) void __init skb_init(void) { + int i; + skbuff_head_cache = kmem_cache_create("skbuff_head_cache", sizeof(struct sk_buff), 0, @@ -386,4 
+403,7 @@ void __init skb_init(void) skb_headerinit, NULL); if (!skbuff_head_cache) panic("cannot create skbuff cache"); + + for (i=0; itimer); } static void dn_dev_set_timer(struct net_device *dev) @@ -1010,8 +1011,7 @@ static void dn_dev_delete(struct net_device *dev) if (dn_db == NULL) return; - del_timer(&dn_db->timer); - synchronize_bh(); + del_timer_sync(&dn_db->timer); dn_dev_sysctl_unregister(&dn_db->parms); diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c index 703541225458..9cb0c6394cf5 100644 --- a/net/decnet/dn_nsp_in.c +++ b/net/decnet/dn_nsp_in.c @@ -411,7 +411,6 @@ static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig #ifdef CONFIG_FILTER struct sk_filter *filter; #endif - unsigned long flags; /* Cast skb->rcvbuf to unsigned... It's pointless, but reduces number of warnings when compiling with -W --ANK @@ -433,7 +432,10 @@ static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig skb_set_owner_r(skb, sk); skb_queue_tail(queue, skb); - read_lock_irqsave(&sk->callback_lock, flags); + /* This code only runs from BH or BH protected context. + * Therefore the plain read_lock is ok here. -DaveM + */ + read_lock(&sk->callback_lock); if (!sk->dead) { struct socket *sock = sk->socket; wake_up_interruptible(sk->sleep); @@ -441,7 +443,7 @@ static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig kill_fasync(sock->fasync_list, sig, (sig == SIGURG) ? 
POLL_PRI : POLL_IN); } - read_unlock_irqrestore(&sk->callback_lock, flags); + read_unlock(&sk->callback_lock); return 0; } diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 2b61c67af397..c01d447b1474 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -3,7 +3,7 @@ * * Alan Cox, * - * Version: $Id: icmp.c,v 1.63 2000/01/09 02:19:45 davem Exp $ + * Version: $Id: icmp.c,v 1.64 2000/02/09 11:16:40 davem Exp $ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -468,7 +468,7 @@ static void icmp_out_count(int type) { if (type>NR_ICMP_TYPES) return; - (icmp_pointers[type].output)[(smp_processor_id()*2+!in_interrupt())*sizeof(struct icmp_mib)/sizeof(unsigned long)]++; + (icmp_pointers[type].output)[(smp_processor_id()*2+!in_softirq())*sizeof(struct icmp_mib)/sizeof(unsigned long)]++; ICMP_INC_STATS(IcmpOutMsgs); } diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 93dd763912ed..3aad90680d97 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -8,7 +8,7 @@ * the older version didn't come out right using gcc 2.5.8, the newer one * seems to fall out with gcc 2.6.2. * - * Version: $Id: igmp.c,v 1.36 2000/01/06 00:41:54 davem Exp $ + * Version: $Id: igmp.c,v 1.37 2000/02/09 11:16:40 davem Exp $ * * Authors: * Alan Cox @@ -154,11 +154,9 @@ static __inline__ void igmp_start_timer(struct ip_mc_list *im, int max_delay) int tv=net_random() % max_delay; spin_lock_bh(&im->lock); - if (!del_timer(&im->timer)) - atomic_inc(&im->refcnt); - im->timer.expires=jiffies+tv+2; im->tm_running=1; - add_timer(&im->timer); + if (!mod_timer(&im->timer, jiffies+tv+2)) + atomic_inc(&im->refcnt); spin_unlock_bh(&im->lock); } diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 1c0b9dae7c7a..852a4fb2cbb4 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -5,7 +5,7 @@ * * The IP fragmentation functionality. 
* - * Version: $Id: ip_fragment.c,v 1.46 2000/01/09 02:19:36 davem Exp $ + * Version: $Id: ip_fragment.c,v 1.47 2000/02/09 21:11:33 davem Exp $ * * Authors: Fred N. van Kempen * Alan Cox @@ -77,7 +77,7 @@ static spinlock_t ipfrag_lock = SPIN_LOCK_UNLOCKED; #define ipqhashfn(id, saddr, daddr, prot) \ ((((id) >> 1) ^ (saddr) ^ (daddr) ^ (prot)) & (IPQ_HASHSZ - 1)) -atomic_t ip_frag_mem = ATOMIC_INIT(0); /* Memory used for fragments */ +static atomic_t ip_frag_mem = ATOMIC_INIT(0); /* Memory used for fragments */ /* Memory Tracking Functions. */ extern __inline__ void frag_kfree_skb(struct sk_buff *skb) diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 730fc4790edc..e06825e2e6cd 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -5,7 +5,7 @@ * * The Internet Protocol (IP) output module. * - * Version: $Id: ip_output.c,v 1.79 2000/02/08 21:27:11 davem Exp $ + * Version: $Id: ip_output.c,v 1.80 2000/02/09 11:16:41 davem Exp $ * * Authors: Ross Biro, * Fred N. van Kempen, @@ -645,14 +645,14 @@ static int ip_build_xmit_slow(struct sock *sk, } while (offset >= 0); if (nfrags>1) - ip_statistics[smp_processor_id()*2 + !in_interrupt()].IpFragCreates += nfrags; + ip_statistics[smp_processor_id()*2 + !in_softirq()].IpFragCreates += nfrags; out: return 0; error: IP_INC_STATS(IpOutDiscards); if (nfrags>1) - ip_statistics[smp_processor_id()*2 + !in_interrupt()].IpFragCreates += nfrags; + ip_statistics[smp_processor_id()*2 + !in_softirq()].IpFragCreates += nfrags; return err; } diff --git a/net/ipv4/route.c b/net/ipv4/route.c index bbc6ec111fad..4e649eded39f 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -5,7 +5,7 @@ * * ROUTE - implementation of the IP router. * - * Version: $Id: route.c,v 1.80 2000/01/21 06:37:27 davem Exp $ + * Version: $Id: route.c,v 1.81 2000/02/09 11:16:42 davem Exp $ * * Authors: Ross Biro, * Fred N. 
van Kempen, @@ -313,7 +313,7 @@ static __inline__ int rt_may_expire(struct rtable *rth, int tmo1, int tmo2) } /* This runs via a timer and thus is always in BH context. */ -static void rt_check_expire(unsigned long dummy) +static void SMP_TIMER_NAME(rt_check_expire)(unsigned long dummy) { int i, t; static int rover; @@ -359,10 +359,12 @@ static void rt_check_expire(unsigned long dummy) mod_timer(&rt_periodic_timer, now + ip_rt_gc_interval); } +SMP_TIMER_DEFINE(rt_check_expire, rt_gc_task); + /* This can run from both BH and non-BH contexts, the latter * in the case of a forced flush event. */ -static void rt_run_flush(unsigned long dummy) +static void SMP_TIMER_NAME(rt_run_flush)(unsigned long dummy) { int i; struct rtable * rth, * next; @@ -382,13 +384,15 @@ static void rt_run_flush(unsigned long dummy) } } } + +SMP_TIMER_DEFINE(rt_run_flush, rt_cache_flush_task); static spinlock_t rt_flush_lock = SPIN_LOCK_UNLOCKED; void rt_cache_flush(int delay) { unsigned long now = jiffies; - int user_mode = !in_interrupt(); + int user_mode = !in_softirq(); if (delay < 0) delay = ip_rt_min_delay; @@ -414,7 +418,7 @@ void rt_cache_flush(int delay) if (delay <= 0) { spin_unlock_bh(&rt_flush_lock); - rt_run_flush(0); + SMP_TIMER_NAME(rt_run_flush)(0); return; } @@ -529,7 +533,7 @@ static int rt_garbage_collect(void) if (atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size) return 0; - } while (!in_interrupt() && jiffies - now < 1); + } while (!in_softirq() && jiffies - now < 1); if (atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size) return 0; @@ -552,7 +556,7 @@ static int rt_intern_hash(unsigned hash, struct rtable * rt, struct rtable ** rp { struct rtable *rth, **rthp; unsigned long now = jiffies; - int attempts = !in_interrupt(); + int attempts = !in_softirq(); restart: rthp = &rt_hash_table[hash].chain; diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 9623857e203a..33eea733d496 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -5,7 +5,7 @@ * * 
Implementation of the Transmission Control Protocol(TCP). * - * Version: $Id: tcp_timer.c,v 1.72 2000/02/08 21:27:20 davem Exp $ + * Version: $Id: tcp_timer.c,v 1.73 2000/02/09 11:16:42 davem Exp $ * * Authors: Ross Biro, * Fred N. van Kempen, @@ -216,6 +216,7 @@ static void tcp_delack_timer(unsigned long data) TCP_CHECK_TIMER(sk); out_unlock: + timer_exit(&tp->delack_timer); bh_unlock_sock(sk); sock_put(sk); } @@ -266,6 +267,7 @@ static void tcp_probe_timer(unsigned long data) TCP_CHECK_TIMER(sk); } out_unlock: + timer_exit(&tp->probe_timer); bh_unlock_sock(sk); sock_put(sk); } @@ -279,7 +281,7 @@ static struct tcp_tw_bucket *tcp_tw_death_row[TCP_TWKILL_SLOTS]; static spinlock_t tw_death_lock = SPIN_LOCK_UNLOCKED; static struct timer_list tcp_tw_timer = { function: tcp_twkill }; -static void tcp_twkill(unsigned long data) +static void SMP_TIMER_NAME(tcp_twkill)(unsigned long dummy) { struct tcp_tw_bucket *tw; int killed = 0; @@ -317,6 +319,8 @@ out: spin_unlock(&tw_death_lock); } +SMP_TIMER_DEFINE(tcp_twkill, tcp_twkill_task); + /* These are always called from BH context. See callers in * tcp_input.c to verify this. */ @@ -426,7 +430,7 @@ void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo) spin_unlock(&tw_death_lock); } -void tcp_twcal_tick(unsigned long dummy) +void SMP_TIMER_NAME(tcp_twcal_tick)(unsigned long dummy) { int n, slot; unsigned long j; @@ -477,6 +481,7 @@ out: spin_unlock(&tw_death_lock); } +SMP_TIMER_DEFINE(tcp_twcal_tick, tcp_twcal_tasklet); /* * The TCP retransmit timer. 
@@ -572,6 +577,7 @@ static void tcp_retransmit_timer(unsigned long data) TCP_CHECK_TIMER(sk); out_unlock: + timer_exit(&tp->retransmit_timer); bh_unlock_sock(sk); sock_put(sk); } @@ -770,6 +776,7 @@ death: tcp_done(sk); out: + timer_exit(&sk->timer); bh_unlock_sock(sk); sock_put(sk); } diff --git a/net/khttpd/structure.h b/net/khttpd/structure.h index 70a604aba921..5f6f2a619eba 100644 --- a/net/khttpd/structure.h +++ b/net/khttpd/structure.h @@ -42,7 +42,7 @@ struct http_request char LengthS[14]; /* File length, string representation */ char *MimeType; /* Pointer to a string with the mime-type based on the filename */ - int MimeLength; /* The length of this string */ + __kernel_size_t MimeLength; /* The length of this string */ }; diff --git a/net/khttpd/userspace.c b/net/khttpd/userspace.c index 948d770feb74..9c05d4788308 100644 --- a/net/khttpd/userspace.c +++ b/net/khttpd/userspace.c @@ -216,10 +216,10 @@ static int AddSocketToAcceptQueue(struct socket *sock,const int Port) sock->state = SS_UNCONNECTED; req->class = &Dummy; - write_lock_irq(&nsk->callback_lock); + write_lock_bh(&nsk->callback_lock); nsk->socket = NULL; nsk->sleep = NULL; - write_unlock_irq(&nsk->callback_lock); + write_unlock_bh(&nsk->callback_lock); tcp_acceptq_queue(sk, req, nsk); diff --git a/net/netsyms.c b/net/netsyms.c index 993f728f84e6..b6f367df23ce 100644 --- a/net/netsyms.c +++ b/net/netsyms.c @@ -487,6 +487,7 @@ EXPORT_SYMBOL(__dev_get_by_index); EXPORT_SYMBOL(dev_get_by_name); EXPORT_SYMBOL(__dev_get_by_name); EXPORT_SYMBOL(netdev_finish_unregister); +EXPORT_SYMBOL(netdev_set_master); EXPORT_SYMBOL(eth_type_trans); #ifdef CONFIG_FDDI EXPORT_SYMBOL(fddi_type_trans); @@ -510,7 +511,6 @@ EXPORT_SYMBOL(dev_load); #endif EXPORT_SYMBOL(dev_ioctl); EXPORT_SYMBOL(dev_queue_xmit); -EXPORT_SYMBOL(netdev_dropping); #ifdef CONFIG_NET_FASTROUTE EXPORT_SYMBOL(dev_fastroute_stat); #endif @@ -552,11 +552,9 @@ EXPORT_SYMBOL(ltalk_setup); EXPORT_SYMBOL(qdisc_destroy); EXPORT_SYMBOL(qdisc_reset); 
EXPORT_SYMBOL(qdisc_restart); -EXPORT_SYMBOL(qdisc_head); EXPORT_SYMBOL(qdisc_create_dflt); EXPORT_SYMBOL(noop_qdisc); EXPORT_SYMBOL(qdisc_tree_lock); -EXPORT_SYMBOL(qdisc_runqueue_lock); #ifdef CONFIG_NET_SCHED PSCHED_EXPORTLIST; EXPORT_SYMBOL(pfifo_qdisc_ops); @@ -598,4 +596,7 @@ EXPORT_SYMBOL(nf_hooks); EXPORT_SYMBOL(register_gifconf); +EXPORT_SYMBOL(softirq_state); +EXPORT_SYMBOL(softnet_data); + #endif /* CONFIG_NET */ diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 0308a02f1bad..d3c32be203bc 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -597,8 +597,9 @@ static void cbq_ovl_drop(struct cbq_class *cl) static void cbq_watchdog(unsigned long arg) { struct Qdisc *sch = (struct Qdisc*)arg; + sch->flags &= ~TCQ_F_THROTTLED; - qdisc_wakeup(sch->dev); + netif_schedule(sch->dev); } static unsigned long cbq_undelay_prio(struct cbq_sched_data *q, int prio) @@ -666,7 +667,7 @@ static void cbq_undelay(unsigned long arg) } sch->flags &= ~TCQ_F_THROTTLED; - qdisc_wakeup(sch->dev); + netif_schedule(sch->dev); } @@ -1052,7 +1053,7 @@ cbq_dequeue(struct Qdisc *sch) if (sch->q.qlen) { sch->stats.overlimits++; - if (q->wd_expires && !sch->dev->tbusy) { + if (q->wd_expires && !test_bit(LINK_STATE_XOFF, &sch->dev->state)) { long delay = PSCHED_US2JIFFIE(q->wd_expires); del_timer(&q->wd_timer); if (delay <= 0) diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 65e4c3e36148..2a9f9e69e032 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -34,9 +34,6 @@ /* Main transmission queue. */ -struct Qdisc_head qdisc_head = { &qdisc_head, &qdisc_head }; -spinlock_t qdisc_runqueue_lock = SPIN_LOCK_UNLOCKED; - /* Main qdisc structure lock. However, modifications @@ -55,11 +52,7 @@ spinlock_t qdisc_runqueue_lock = SPIN_LOCK_UNLOCKED; */ rwlock_t qdisc_tree_lock = RW_LOCK_UNLOCKED; -/* Anti deadlock rules: - - qdisc_runqueue_lock protects main transmission list qdisc_head. - Run list is accessed only under this spinlock. 
- +/* dev->queue_lock serializes queue accesses for this device AND dev->qdisc pointer itself. @@ -67,10 +60,6 @@ rwlock_t qdisc_tree_lock = RW_LOCK_UNLOCKED; dev->queue_lock and dev->xmit_lock are mutually exclusive, if one is grabbed, another must be free. - - qdisc_runqueue_lock may be requested under dev->queue_lock, - but neither dev->queue_lock nor dev->xmit_lock may be requested - under qdisc_runqueue_lock. */ @@ -99,17 +88,19 @@ int qdisc_restart(struct net_device *dev) /* And release queue */ spin_unlock(&dev->queue_lock); - if (netdev_nit) - dev_queue_xmit_nit(skb, dev); + if (!test_bit(LINK_STATE_XOFF, &dev->state)) { + if (netdev_nit) + dev_queue_xmit_nit(skb, dev); - if (dev->hard_start_xmit(skb, dev) == 0) { - dev->xmit_lock_owner = -1; - spin_unlock(&dev->xmit_lock); + if (dev->hard_start_xmit(skb, dev) == 0) { + dev->xmit_lock_owner = -1; + spin_unlock(&dev->xmit_lock); - spin_lock(&dev->queue_lock); - dev->qdisc->tx_last = jiffies; - return -1; + spin_lock(&dev->queue_lock); + return -1; + } } + /* Release the driver */ dev->xmit_lock_owner = -1; spin_unlock(&dev->xmit_lock); @@ -126,14 +117,10 @@ int qdisc_restart(struct net_device *dev) if (dev->xmit_lock_owner == smp_processor_id()) { kfree_skb(skb); if (net_ratelimit()) - printk(KERN_DEBUG "Dead loop on virtual %s, fix it urgently!\n", dev->name); + printk(KERN_DEBUG "Dead loop on netdevice %s, fix it urgently!\n", dev->name); return -1; } - - /* Otherwise, packet is requeued - and will be sent by the next net_bh run. 
- */ - mark_bh(NET_BH); + netdev_rx_stat[smp_processor_id()].cpu_collision++; } /* Device kicked us out :( @@ -147,139 +134,68 @@ int qdisc_restart(struct net_device *dev) */ q->ops->requeue(skb, q); - return -1; + netif_schedule(dev); + return 1; } return q->q.qlen; } -static __inline__ void -qdisc_stop_run(struct Qdisc *q) +static void dev_watchdog(unsigned long arg) { - q->h.forw->back = q->h.back; - q->h.back->forw = q->h.forw; - q->h.forw = NULL; -} + struct net_device *dev = (struct net_device *)arg; + + spin_lock(&dev->xmit_lock); + if (dev->qdisc != &noop_qdisc) { + if (test_bit(LINK_STATE_XOFF, &dev->state) && + (jiffies - dev->trans_start) > dev->watchdog_timeo) { + printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n", dev->name); + dev->tx_timeout(dev); + } + if (!del_timer(&dev->watchdog_timer)) + dev_hold(dev); -extern __inline__ void -qdisc_continue_run(struct Qdisc *q) -{ - if (!qdisc_on_runqueue(q) && q->dev) { - q->h.forw = &qdisc_head; - q->h.back = qdisc_head.back; - qdisc_head.back->forw = &q->h; - qdisc_head.back = &q->h; + dev->watchdog_timer.expires = jiffies + dev->watchdog_timeo; + add_timer(&dev->watchdog_timer); } + spin_unlock(&dev->xmit_lock); + + dev_put(dev); } -static __inline__ int -qdisc_init_run(struct Qdisc_head *lh) +static void dev_watchdog_init(struct net_device *dev) { - if (qdisc_head.forw != &qdisc_head) { - *lh = qdisc_head; - lh->forw->back = lh; - lh->back->forw = lh; - qdisc_head.forw = &qdisc_head; - qdisc_head.back = &qdisc_head; - return 1; - } - return 0; + init_timer(&dev->watchdog_timer); + dev->watchdog_timer.data = (unsigned long)dev; + dev->watchdog_timer.function = dev_watchdog; } -/* Scan transmission queue and kick devices. - - Deficiency: slow devices (ppp) and fast ones (100Mb ethernet) - share one queue. This means that if we have a lot of loaded ppp channels, - we will scan a long list on every 100Mb EOI. - I have no idea how to solve it using only "anonymous" Linux mark_bh(). 
- - To change queue from device interrupt? Ough... only not this... - - This function is called only from net_bh. - */ - -void qdisc_run_queues(void) +static void dev_watchdog_up(struct net_device *dev) { - struct Qdisc_head lh, *h; - - spin_lock(&qdisc_runqueue_lock); - if (!qdisc_init_run(&lh)) - goto out; - - while ((h = lh.forw) != &lh) { - int res; - struct net_device *dev; - struct Qdisc *q = (struct Qdisc*)h; - - qdisc_stop_run(q); - - dev = q->dev; - - res = -1; - if (spin_trylock(&dev->queue_lock)) { - spin_unlock(&qdisc_runqueue_lock); - while (!dev->tbusy && (res = qdisc_restart(dev)) < 0) - /* NOTHING */; - spin_lock(&qdisc_runqueue_lock); - spin_unlock(&dev->queue_lock); - } - - /* If qdisc is not empty add it to the tail of list */ - if (res) - qdisc_continue_run(dev->qdisc); + spin_lock_bh(&dev->xmit_lock); + + if (dev->tx_timeout) { + if (dev->watchdog_timeo <= 0) + dev->watchdog_timeo = 5*HZ; + if (!del_timer(&dev->watchdog_timer)) + dev_hold(dev); + dev->watchdog_timer.expires = jiffies + dev->watchdog_timeo; + add_timer(&dev->watchdog_timer); } -out: - spin_unlock(&qdisc_runqueue_lock); + spin_unlock_bh(&dev->xmit_lock); } -/* Periodic watchdog timer to recover from hard/soft device bugs. */ - -static void dev_do_watchdog(unsigned long dummy); - -static struct timer_list dev_watchdog = - { NULL, NULL, 0L, 0L, &dev_do_watchdog }; - -/* This function is called only from timer */ - -static void dev_do_watchdog(unsigned long dummy) +static void dev_watchdog_down(struct net_device *dev) { - struct Qdisc_head lh, *h; + spin_lock_bh(&dev->xmit_lock); - if (!spin_trylock(&qdisc_runqueue_lock)) { - /* No hurry with watchdog. 
*/ - mod_timer(&dev_watchdog, jiffies + HZ/10); - return; + if (dev->tx_timeout) { + if (del_timer(&dev->watchdog_timer)) + __dev_put(dev); } - - if (!qdisc_init_run(&lh)) - goto out; - - while ((h = lh.forw) != &lh) { - struct net_device *dev; - struct Qdisc *q = (struct Qdisc*)h; - - qdisc_stop_run(q); - - dev = q->dev; - - if (spin_trylock(&dev->queue_lock)) { - spin_unlock(&qdisc_runqueue_lock); - q = dev->qdisc; - if (dev->tbusy && jiffies - q->tx_last > q->tx_timeo) - qdisc_restart(dev); - spin_lock(&qdisc_runqueue_lock); - spin_unlock(&dev->queue_lock); - } - - qdisc_continue_run(dev->qdisc); - } - -out: - mod_timer(&dev_watchdog, jiffies + 5*HZ); - spin_unlock(&qdisc_runqueue_lock); + spin_unlock_bh(&dev->xmit_lock); } - /* "NOOP" scheduler: the best scheduler, recommended for all interfaces under all circumstances. It is difficult to invent anything faster or cheaper. @@ -321,7 +237,6 @@ struct Qdisc_ops noop_qdisc_ops = struct Qdisc noop_qdisc = { - { NULL }, noop_enqueue, noop_dequeue, TCQ_F_BUILTIN, @@ -344,7 +259,6 @@ struct Qdisc_ops noqueue_qdisc_ops = struct Qdisc noqueue_qdisc = { - { NULL }, NULL, noop_dequeue, TCQ_F_BUILTIN, @@ -476,6 +390,7 @@ struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops) void qdisc_reset(struct Qdisc *qdisc) { struct Qdisc_ops *ops = qdisc->ops; + if (ops->reset) ops->reset(qdisc); } @@ -540,15 +455,10 @@ void dev_activate(struct net_device *dev) } spin_lock_bh(&dev->queue_lock); - spin_lock(&qdisc_runqueue_lock); if ((dev->qdisc = dev->qdisc_sleeping) != &noqueue_qdisc) { - dev->qdisc->tx_timeo = 5*HZ; - dev->qdisc->tx_last = jiffies - dev->qdisc->tx_timeo; - if (!del_timer(&dev_watchdog)) - dev_watchdog.expires = jiffies + 5*HZ; - add_timer(&dev_watchdog); + dev->trans_start = jiffies; + dev_watchdog_up(dev); } - spin_unlock(&qdisc_runqueue_lock); spin_unlock_bh(&dev->queue_lock); } @@ -557,17 +467,20 @@ void dev_deactivate(struct net_device *dev) struct Qdisc *qdisc; 
spin_lock_bh(&dev->queue_lock); - spin_lock(&qdisc_runqueue_lock); qdisc = dev->qdisc; dev->qdisc = &noop_qdisc; qdisc_reset(qdisc); - if (qdisc_on_runqueue(qdisc)) - qdisc_stop_run(qdisc); - spin_unlock(&qdisc_runqueue_lock); spin_unlock_bh(&dev->queue_lock); + dev_watchdog_down(dev); + + if (test_bit(LINK_STATE_SCHED, &dev->state)) { + current->policy |= SCHED_YIELD; + schedule(); + } + spin_unlock_wait(&dev->xmit_lock); } @@ -580,6 +493,8 @@ void dev_init_scheduler(struct net_device *dev) dev->qdisc_sleeping = &noop_qdisc; dev->qdisc_list = NULL; write_unlock(&qdisc_tree_lock); + + dev_watchdog_init(dev); } void dev_shutdown(struct net_device *dev) @@ -599,6 +514,7 @@ void dev_shutdown(struct net_device *dev) } #endif BUG_TRAP(dev->qdisc_list == NULL); + BUG_TRAP(dev->watchdog_timer.prev == NULL); dev->qdisc_list = NULL; spin_unlock_bh(&dev->queue_lock); write_unlock(&qdisc_tree_lock); diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index 3a44f6dd776b..2681d71298e5 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c @@ -186,7 +186,7 @@ static void tbf_watchdog(unsigned long arg) struct Qdisc *sch = (struct Qdisc*)arg; sch->flags &= ~TCQ_F_THROTTLED; - qdisc_wakeup(sch->dev); + netif_schedule(sch->dev); } static struct sk_buff * @@ -226,7 +226,7 @@ tbf_dequeue(struct Qdisc* sch) return skb; } - if (!sch->dev->tbusy) { + if (!test_bit(LINK_STATE_XOFF, &sch->dev->state)) { long delay = PSCHED_US2JIFFIE(max(-toks, -ptoks)); if (delay == 0) diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index ede1e96cd631..e576dbb114ac 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c @@ -126,10 +126,7 @@ teql_dequeue(struct Qdisc* sch) struct net_device *m = dat->m->dev.qdisc->dev; if (m) { dat->m->slaves = sch; - spin_lock(&m->queue_lock); - m->tbusy = 0; - qdisc_restart(m); - spin_unlock(&m->queue_lock); + netif_wake_queue(m); } } sch->q.qlen = dat->q.qlen + dat->m->dev.qdisc->q.qlen; @@ -285,8 +282,6 @@ static int teql_master_xmit(struct sk_buff 
*skb, struct net_device *dev) int len = skb->len; struct sk_buff *skb_res = NULL; - dev->tbusy = 1; - start = master->slaves; restart: @@ -301,23 +296,22 @@ restart: if (slave->qdisc_sleeping != q) continue; - if (slave->tbusy) { + if (test_bit(LINK_STATE_XOFF, &slave->state) || + test_bit(LINK_STATE_DOWN, &slave->state)) { busy = 1; continue; } - if (!qdisc_on_runqueue(q)) - qdisc_run(q); - switch (teql_resolve(skb, skb_res, slave)) { case 0: if (spin_trylock(&slave->xmit_lock)) { slave->xmit_lock_owner = smp_processor_id(); - if (slave->hard_start_xmit(skb, slave) == 0) { + if (!test_bit(LINK_STATE_XOFF, &slave->state) && + slave->hard_start_xmit(skb, slave) == 0) { slave->xmit_lock_owner = -1; spin_unlock(&slave->xmit_lock); master->slaves = NEXT_SLAVE(q); - dev->tbusy = 0; + netif_wake_queue(dev); master->stats.tx_packets++; master->stats.tx_bytes += len; return 0; @@ -325,12 +319,11 @@ restart: slave->xmit_lock_owner = -1; spin_unlock(&slave->xmit_lock); } - if (dev->tbusy) + if (test_bit(LINK_STATE_XOFF, &dev->state)) busy = 1; break; case 1: master->slaves = NEXT_SLAVE(q); - dev->tbusy = 0; return 0; default: nores = 1; @@ -344,9 +337,10 @@ restart: goto restart; } - dev->tbusy = busy; - if (busy) + if (busy) { + netif_stop_queue(dev); return 1; + } master->stats.tx_errors++; drop: @@ -393,13 +387,14 @@ static int teql_master_open(struct net_device *dev) m->dev.mtu = mtu; m->dev.flags = (m->dev.flags&~FMASK) | flags; - m->dev.tbusy = 0; + netif_start_queue(&m->dev); MOD_INC_USE_COUNT; return 0; } static int teql_master_close(struct net_device *dev) { + netif_stop_queue(dev); MOD_DEC_USE_COUNT; return 0; } diff --git a/net/socket.c b/net/socket.c index 0d1601cddeee..153fe83f3dfd 100644 --- a/net/socket.c +++ b/net/socket.c @@ -585,9 +585,9 @@ int sock_close(struct inode *inode, struct file *filp) * i.e. under semaphore. * 2. fasync_list is used under read_lock(&sk->callback_lock) * or under socket lock. - * 3. 
fasync_list is used from any context including IRQ, so that + * 3. fasync_list can be used from softirq context, so that * modification under socket lock have to be enhanced with - * write_lock_irq(&sk->callback_lock). + * write_lock_bh(&sk->callback_lock). * --ANK (990710) */ @@ -622,9 +622,9 @@ static int sock_fasync(int fd, struct file *filp, int on) { if(fa!=NULL) { - write_lock_irq(&sk->callback_lock); + write_lock_bh(&sk->callback_lock); fa->fa_fd=fd; - write_unlock_irq(&sk->callback_lock); + write_unlock_bh(&sk->callback_lock); kfree_s(fna,sizeof(struct fasync_struct)); goto out; @@ -633,17 +633,17 @@ static int sock_fasync(int fd, struct file *filp, int on) fna->fa_fd=fd; fna->magic=FASYNC_MAGIC; fna->fa_next=sock->fasync_list; - write_lock_irq(&sk->callback_lock); + write_lock_bh(&sk->callback_lock); sock->fasync_list=fna; - write_unlock_irq(&sk->callback_lock); + write_unlock_bh(&sk->callback_lock); } else { if (fa!=NULL) { - write_lock_irq(&sk->callback_lock); + write_lock_bh(&sk->callback_lock); *prev=fa->fa_next; - write_unlock_irq(&sk->callback_lock); + write_unlock_bh(&sk->callback_lock); kfree_s(fa,sizeof(struct fasync_struct)); } } diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 76c28d7ccb02..ffd4c18ad5a2 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -130,12 +130,11 @@ __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task) int rpc_add_wait_queue(struct rpc_wait_queue *q, struct rpc_task *task) { - unsigned long oldflags; int result; - spin_lock_irqsave(&rpc_queue_lock, oldflags); + spin_lock_bh(&rpc_queue_lock); result = __rpc_add_wait_queue(q, task); - spin_unlock_irqrestore(&rpc_queue_lock, oldflags); + spin_unlock_bh(&rpc_queue_lock); return result; } @@ -160,11 +159,9 @@ __rpc_remove_wait_queue(struct rpc_task *task) void rpc_remove_wait_queue(struct rpc_task *task) { - unsigned long oldflags; - - spin_lock_irqsave(&rpc_queue_lock, oldflags); + spin_lock_bh(&rpc_queue_lock); 
__rpc_remove_wait_queue(task); - spin_unlock_irqrestore(&rpc_queue_lock, oldflags); + spin_unlock_bh(&rpc_queue_lock); } /* @@ -286,13 +283,12 @@ void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task, rpc_action action, rpc_action timer) { - unsigned long oldflags; /* * Protect the queue operations. */ - spin_lock_irqsave(&rpc_queue_lock, oldflags); + spin_lock_bh(&rpc_queue_lock); __rpc_sleep_on(q, task, action, timer); - spin_unlock_irqrestore(&rpc_queue_lock, oldflags); + spin_unlock_bh(&rpc_queue_lock); } /* @@ -342,11 +338,9 @@ __rpc_default_timer(struct rpc_task *task) void rpc_wake_up_task(struct rpc_task *task) { - unsigned long oldflags; - - spin_lock_irqsave(&rpc_queue_lock, oldflags); + spin_lock_bh(&rpc_queue_lock); __rpc_wake_up(task); - spin_unlock_irqrestore(&rpc_queue_lock, oldflags); + spin_unlock_bh(&rpc_queue_lock); } /* @@ -355,14 +349,13 @@ rpc_wake_up_task(struct rpc_task *task) struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue) { - unsigned long oldflags; struct rpc_task *task; dprintk("RPC: wake_up_next(%p \"%s\")\n", queue, rpc_qname(queue)); - spin_lock_irqsave(&rpc_queue_lock, oldflags); + spin_lock_bh(&rpc_queue_lock); if ((task = queue->task) != 0) __rpc_wake_up(task); - spin_unlock_irqrestore(&rpc_queue_lock, oldflags); + spin_unlock_bh(&rpc_queue_lock); return task; } @@ -373,12 +366,10 @@ rpc_wake_up_next(struct rpc_wait_queue *queue) void rpc_wake_up(struct rpc_wait_queue *queue) { - unsigned long oldflags; - - spin_lock_irqsave(&rpc_queue_lock, oldflags); + spin_lock_bh(&rpc_queue_lock); while (queue->task) __rpc_wake_up(queue->task); - spin_unlock_irqrestore(&rpc_queue_lock, oldflags); + spin_unlock_bh(&rpc_queue_lock); } /* @@ -388,14 +379,13 @@ void rpc_wake_up_status(struct rpc_wait_queue *queue, int status) { struct rpc_task *task; - unsigned long oldflags; - spin_lock_irqsave(&rpc_queue_lock, oldflags); + spin_lock_bh(&rpc_queue_lock); while ((task = queue->task) != NULL) { task->tk_status = status; 
__rpc_wake_up(task); } - spin_unlock_irqrestore(&rpc_queue_lock, oldflags); + spin_unlock_bh(&rpc_queue_lock); } /* @@ -422,7 +412,6 @@ __rpc_atrun(struct rpc_task *task) static int __rpc_execute(struct rpc_task *task) { - unsigned long oldflags; int status = 0; dprintk("RPC: %4d rpc_execute flgs %x\n", @@ -476,13 +465,13 @@ __rpc_execute(struct rpc_task *task) * and the RPC reply arrives before we get here, it will * have state RUNNING, but will still be on schedq. */ - spin_lock_irqsave(&rpc_queue_lock, oldflags); + spin_lock_bh(&rpc_queue_lock); if (RPC_IS_RUNNING(task)) { if (task->tk_rpcwait == &schedq) __rpc_remove_wait_queue(task); } else while (!RPC_IS_RUNNING(task)) { if (RPC_IS_ASYNC(task)) { - spin_unlock_irqrestore(&rpc_queue_lock, oldflags); + spin_unlock_bh(&rpc_queue_lock); return 0; } @@ -492,9 +481,9 @@ __rpc_execute(struct rpc_task *task) if (current->pid == rpciod_pid) printk(KERN_ERR "RPC: rpciod waiting on sync task!\n"); - spin_unlock_irq(&rpc_queue_lock); + spin_unlock_bh(&rpc_queue_lock); __wait_event(task->tk_wait, RPC_IS_RUNNING(task)); - spin_lock_irq(&rpc_queue_lock); + spin_lock_bh(&rpc_queue_lock); /* * When the task received a signal, remove from @@ -506,7 +495,7 @@ __rpc_execute(struct rpc_task *task) dprintk("RPC: %4d sync task resuming\n", task->tk_pid); } - spin_unlock_irqrestore(&rpc_queue_lock, oldflags); + spin_unlock_bh(&rpc_queue_lock); /* * When a sync task receives a signal, it exits with @@ -562,20 +551,19 @@ __rpc_schedule(void) { struct rpc_task *task; int count = 0; - unsigned long oldflags; int need_resched = current->need_resched; dprintk("RPC: rpc_schedule enter\n"); while (1) { - spin_lock_irqsave(&rpc_queue_lock, oldflags); + spin_lock_bh(&rpc_queue_lock); if (!(task = schedq.task)) { - spin_unlock_irqrestore(&rpc_queue_lock, oldflags); + spin_unlock_bh(&rpc_queue_lock); break; } rpc_del_timer(task); __rpc_remove_wait_queue(task); task->tk_flags |= RPC_TASK_RUNNING; - spin_unlock_irqrestore(&rpc_queue_lock, 
oldflags); + spin_unlock_bh(&rpc_queue_lock); __rpc_execute(task); @@ -726,7 +714,6 @@ void rpc_release_task(struct rpc_task *task) { struct rpc_task *next, *prev; - unsigned long oldflags; dprintk("RPC: %4d release task\n", task->tk_pid); @@ -744,7 +731,7 @@ rpc_release_task(struct rpc_task *task) spin_unlock(&rpc_sched_lock); /* Protect the execution below. */ - spin_lock_irqsave(&rpc_queue_lock, oldflags); + spin_lock_bh(&rpc_queue_lock); /* Delete any running timer */ rpc_del_timer(task); @@ -752,7 +739,7 @@ rpc_release_task(struct rpc_task *task) /* Remove from any wait queue we're still on */ __rpc_remove_wait_queue(task); - spin_unlock_irqrestore(&rpc_queue_lock, oldflags); + spin_unlock_bh(&rpc_queue_lock); /* Release resources */ if (task->tk_rqstp) @@ -800,15 +787,14 @@ rpc_find_parent(struct rpc_task *child) static void rpc_child_exit(struct rpc_task *child) { - unsigned long oldflags; struct rpc_task *parent; - spin_lock_irqsave(&rpc_queue_lock, oldflags); + spin_lock_bh(&rpc_queue_lock); if ((parent = rpc_find_parent(child)) != NULL) { parent->tk_status = child->tk_status; __rpc_wake_up(parent); } - spin_unlock_irqrestore(&rpc_queue_lock, oldflags); + spin_unlock_bh(&rpc_queue_lock); rpc_release_task(child); } @@ -835,13 +821,11 @@ fail: void rpc_run_child(struct rpc_task *task, struct rpc_task *child, rpc_action func) { - unsigned long oldflags; - - spin_lock_irqsave(&rpc_queue_lock, oldflags); + spin_lock_bh(&rpc_queue_lock); /* N.B. Is it possible for the child to have already finished? */ __rpc_sleep_on(&childq, task, func, NULL); rpc_make_runnable(child); - spin_unlock_irqrestore(&rpc_queue_lock, oldflags); + spin_unlock_bh(&rpc_queue_lock); } /* diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 8f5218082b40..385c0f30b677 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -45,6 +45,7 @@ svc_create(struct svc_program *prog, unsigned int bufsize, unsigned int xdrsize) serv->sv_stats = prog->pg_stats; serv->sv_bufsz = bufsize? 
bufsize : 4096; serv->sv_xdrsize = xdrsize; + spin_lock_init(&serv->sv_lock); serv->sv_name = prog->pg_name; diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index c669bae786df..34ea71e122c1 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -99,6 +99,8 @@ svc_sock_enqueue(struct svc_sock *svsk) struct svc_serv *serv = svsk->sk_server; struct svc_rqst *rqstp; + BUG_TRAP(spin_is_locked(&svsk->sk_lock)); + if (serv->sv_threads && serv->sv_sockets) printk(KERN_ERR "svc_sock_enqueue: threads and sockets both waiting??\n"); @@ -141,10 +143,10 @@ svc_sock_dequeue(struct svc_serv *serv) { struct svc_sock *svsk; - start_bh_atomic(); + spin_lock_bh(&serv->sv_lock); if ((svsk = serv->sv_sockets) != NULL) rpc_remove_list(&serv->sv_sockets, svsk); - end_bh_atomic(); + spin_unlock_bh(&serv->sv_lock); if (svsk) { dprintk("svc: socket %p dequeued, inuse=%d\n", @@ -162,7 +164,7 @@ svc_sock_dequeue(struct svc_serv *serv) static inline void svc_sock_received(struct svc_sock *svsk, int count) { - start_bh_atomic(); + spin_lock_bh(&svsk->sk_lock); if ((svsk->sk_data -= count) < 0) { printk(KERN_NOTICE "svc: sk_data negative!\n"); svsk->sk_data = 0; @@ -174,7 +176,7 @@ svc_sock_received(struct svc_sock *svsk, int count) svsk->sk_sk); svc_sock_enqueue(svsk); } - end_bh_atomic(); + spin_unlock_bh(&svsk->sk_lock); } /* @@ -183,7 +185,7 @@ svc_sock_received(struct svc_sock *svsk, int count) static inline void svc_sock_accepted(struct svc_sock *svsk) { - start_bh_atomic(); + spin_lock_bh(&svsk->sk_lock); svsk->sk_busy = 0; svsk->sk_conn--; if (svsk->sk_conn || svsk->sk_data || svsk->sk_close) { @@ -191,7 +193,7 @@ svc_sock_accepted(struct svc_sock *svsk) svsk->sk_sk); svc_sock_enqueue(svsk); } - end_bh_atomic(); + spin_unlock_bh(&svsk->sk_lock); } /* @@ -342,8 +344,10 @@ svc_udp_data_ready(struct sock *sk, int count) return; dprintk("svc: socket %p(inet %p), count=%d, busy=%d\n", svsk, sk, count, svsk->sk_busy); + spin_lock_bh(&svsk->sk_lock); svsk->sk_data = 1; 
svc_sock_enqueue(svsk); + spin_unlock_bh(&svsk->sk_lock); } /* @@ -454,8 +458,10 @@ svc_tcp_state_change1(struct sock *sk) printk("svc: socket %p: no user data\n", sk); return; } + spin_lock_bh(&svsk->sk_lock); svsk->sk_conn++; svc_sock_enqueue(svsk); + spin_unlock_bh(&svsk->sk_lock); } /* @@ -473,8 +479,10 @@ svc_tcp_state_change2(struct sock *sk) printk("svc: socket %p: no user data\n", sk); return; } + spin_lock_bh(&svsk->sk_lock); svsk->sk_close = 1; svc_sock_enqueue(svsk); + spin_unlock_bh(&svsk->sk_lock); } static void @@ -492,8 +500,10 @@ svc_tcp_data_ready(struct sock *sk, int count) sk, sk->user_data); if (!(svsk = (struct svc_sock *)(sk->user_data))) return; + spin_lock_bh(&svsk->sk_lock); svsk->sk_data++; svc_sock_enqueue(svsk); + spin_unlock_bh(&svsk->sk_lock); } /* @@ -560,9 +570,11 @@ svc_tcp_accept(struct svc_sock *svsk) /* Precharge. Data may have arrived on the socket before we * installed the data_ready callback. */ + spin_lock_bh(&newsvsk->sk_lock); newsvsk->sk_data = 1; newsvsk->sk_temp = 1; svc_sock_enqueue(newsvsk); + spin_unlock_bh(&newsvsk->sk_lock); if (serv->sv_stats) serv->sv_stats->nettcpconn++; @@ -756,7 +768,7 @@ again: if (signalled()) return -EINTR; - start_bh_atomic(); + spin_lock_bh(&serv->sv_lock); if ((svsk = svc_sock_dequeue(serv)) != NULL) { rqstp->rq_sock = svsk; svsk->sk_inuse++; @@ -770,20 +782,21 @@ again: */ current->state = TASK_INTERRUPTIBLE; add_wait_queue(&rqstp->rq_wait, &wait); - end_bh_atomic(); + spin_unlock_bh(&serv->sv_lock); + schedule_timeout(timeout); + spin_lock_bh(&serv->sv_lock); remove_wait_queue(&rqstp->rq_wait, &wait); - start_bh_atomic(); if (!(svsk = rqstp->rq_sock)) { svc_serv_dequeue(serv, rqstp); - end_bh_atomic(); + spin_unlock_bh(&serv->sv_lock); dprintk("svc: server %p, no data yet\n", rqstp); return signalled()? 
-EINTR : -EAGAIN; } } - end_bh_atomic(); + spin_unlock_bh(&serv->sv_lock); dprintk("svc: server %p, socket %p, inuse=%d\n", rqstp, svsk, svsk->sk_inuse); @@ -876,6 +889,7 @@ svc_setup_socket(struct svc_serv *serv, struct socket *sock, svsk->sk_ostate = inet->state_change; svsk->sk_odata = inet->data_ready; svsk->sk_server = serv; + spin_lock_init(&svsk->sk_lock); /* Initialize the socket */ if (sock->type == SOCK_DGRAM) diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 459de5e7fde6..9a22a63c3e4d 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -409,19 +409,19 @@ xprt_reconnect(struct rpc_task *task) if (!xprt->stream) return; - start_bh_atomic(); + spin_lock_bh(&xprt_lock); if (xprt->connected) { - end_bh_atomic(); + spin_unlock_bh(&xprt_lock); return; } if (xprt->connecting) { task->tk_timeout = xprt->timeout.to_maxval; rpc_sleep_on(&xprt->reconn, task, NULL, NULL); - end_bh_atomic(); + spin_unlock_bh(&xprt_lock); return; } xprt->connecting = 1; - end_bh_atomic(); + spin_unlock_bh(&xprt_lock); /* Create an unconnected socket */ if (!(sock = xprt_create_socket(xprt->prot, NULL, &xprt->timeout))) { @@ -460,22 +460,22 @@ xprt_reconnect(struct rpc_task *task) task->tk_pid, status, xprt->connected); task->tk_timeout = 60 * HZ; - start_bh_atomic(); + spin_lock_bh(&xprt_lock); if (!xprt->connected) { rpc_sleep_on(&xprt->reconn, task, NULL, xprt_reconn_timeout); - end_bh_atomic(); + spin_unlock_bh(&xprt_lock); return; } - end_bh_atomic(); + spin_unlock_bh(&xprt_lock); } defer: - start_bh_atomic(); + spin_lock_bh(&xprt_lock); if (!xprt->connected) rpc_wake_up_next(&xprt->reconn); - end_bh_atomic(); + spin_unlock_bh(&xprt_lock); } /* @@ -485,34 +485,36 @@ defer: static void xprt_reconn_timeout(struct rpc_task *task) { + spin_lock_bh(&xprt_lock); dprintk("RPC: %4d xprt_reconn_timeout %d\n", task->tk_pid, task->tk_status); task->tk_status = -ENOTCONN; - start_bh_atomic(); if (task->tk_xprt->connecting) task->tk_xprt->connecting = 0; if 
(!task->tk_xprt->connected) task->tk_status = -ENOTCONN; else task->tk_status = -ETIMEDOUT; - end_bh_atomic(); task->tk_timeout = 0; rpc_wake_up_task(task); + spin_unlock_bh(&xprt_lock); } extern spinlock_t rpc_queue_lock; /* * Look up the RPC request corresponding to a reply. + * + * RED-PEN: Niiice... Guys, when will we learn finally that locking + * in this manner is NOOP? --ANK */ static inline struct rpc_rqst * xprt_lookup_rqst(struct rpc_xprt *xprt, u32 xid) { struct rpc_task *head, *task; struct rpc_rqst *req; - unsigned long oldflags; int safe = 0; - spin_lock_irqsave(&rpc_queue_lock, oldflags); + spin_lock_bh(&rpc_queue_lock); if ((head = xprt->pending.task) != NULL) { task = head; do { @@ -529,7 +531,7 @@ xprt_lookup_rqst(struct rpc_xprt *xprt, u32 xid) out_bad: req = NULL; out: - spin_unlock_irqrestore(&rpc_queue_lock, oldflags); + spin_unlock_bh(&rpc_queue_lock); return req; } @@ -858,9 +860,10 @@ do_rpciod_tcp_dispatcher(void) void rpciod_tcp_dispatcher(void) { - start_bh_atomic(); + /* mama... start_bh_atomic was here... + Calls to sock->ops _are_ _impossible_ with disabled bh. Period. 
--ANK + */ do_rpciod_tcp_dispatcher(); - end_bh_atomic(); } int xprt_tcp_pending(void) @@ -1027,8 +1030,7 @@ xprt_down_transmit(struct rpc_task *task) struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt; struct rpc_rqst *req = task->tk_rqstp; - start_bh_atomic(); - spin_lock(&xprt_lock); + spin_lock_bh(&xprt_lock); if (xprt->snd_task && xprt->snd_task != task) { dprintk("RPC: %4d TCP write queue full (task %d)\n", task->tk_pid, xprt->snd_task->tk_pid); @@ -1041,8 +1043,7 @@ xprt_down_transmit(struct rpc_task *task) #endif req->rq_bytes_sent = 0; } - spin_unlock(&xprt_lock); - end_bh_atomic(); + spin_unlock_bh(&xprt_lock); return xprt->snd_task == task; } @@ -1055,10 +1056,10 @@ xprt_up_transmit(struct rpc_task *task) struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt; if (xprt->snd_task && xprt->snd_task == task) { - start_bh_atomic(); + spin_lock_bh(&xprt_lock); xprt->snd_task = NULL; rpc_wake_up_next(&xprt->sending); - end_bh_atomic(); + spin_unlock_bh(&xprt_lock); } } @@ -1175,16 +1176,16 @@ do_xprt_transmit(struct rpc_task *task) rpc_remove_wait_queue(task); /* Protect against (udp|tcp)_write_space */ - start_bh_atomic(); + spin_lock_bh(&xprt_lock); if (status == -ENOMEM || status == -EAGAIN) { task->tk_timeout = req->rq_timeout.to_current; if (!xprt->write_space) rpc_sleep_on(&xprt->sending, task, xprt_transmit_status, xprt_transmit_timeout); - end_bh_atomic(); + spin_unlock_bh(&xprt_lock); return; } - end_bh_atomic(); + spin_unlock_bh(&xprt_lock); out_release: xprt_up_transmit(task); @@ -1238,22 +1239,22 @@ xprt_receive(struct rpc_task *task) */ task->tk_timeout = req->rq_timeout.to_current; - start_bh_atomic(); + spin_lock_bh(&xprt_lock); if (task->tk_rpcwait) rpc_remove_wait_queue(task); if (task->tk_status < 0 || xprt->shutdown) { - end_bh_atomic(); + spin_unlock_bh(&xprt_lock); goto out; } if (!req->rq_gotit) { rpc_sleep_on(&xprt->pending, task, xprt_receive_status, xprt_timer); - end_bh_atomic(); + spin_unlock_bh(&xprt_lock); return; } - end_bh_atomic(); + 
spin_unlock_bh(&xprt_lock); dprintk("RPC: %4d xprt_receive returns %d\n", task->tk_pid, task->tk_status); @@ -1385,13 +1386,13 @@ xprt_release(struct rpc_task *task) spin_unlock(&xprt_lock); /* remove slot from queue of pending */ - start_bh_atomic(); + spin_lock_bh(&xprt_lock); if (task->tk_rpcwait) { printk("RPC: task of released request still queued!\n"); rpc_del_timer(task); rpc_remove_wait_queue(task); } - end_bh_atomic(); + spin_unlock_bh(&xprt_lock); /* Decrease congestion value. */ xprt->cong -= RPC_CWNDSCALE; -- 2.39.5